diff --git "a/4432.jsonl" "b/4432.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4432.jsonl"
@@ -0,0 +1,762 @@
+{"seq_id":"626674960","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 7 20:26:53 2020\r\n\r\n@author: Carlos\r\n\"\"\"\r\n\r\n#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\nimport tweepy #https://github.com/tweepy/tweepy\r\nimport csv\r\nimport sys\r\nfrom datetime import datetime\r\n\r\n#Twitter API credentials\r\nconsumer_key = \"\"\r\nconsumer_secret = \"\"\r\naccess_key = \"\"\r\naccess_secret = \"\"\r\n\r\n\r\ndef get_all_tweets(screen_name):\r\n #Twitter only allows access to a users most recent 3240 tweets with this method\r\n\r\n #authorize twitter, initialize tweepy\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_key, access_secret)\r\n api = tweepy.API(auth)\r\n\r\n #initialize a list to hold all the tweepy Tweets\r\n alltweets = []\r\n\r\n #make initial request for most recent tweets (200 is the maximum allowed count)\r\n new_tweets = api.user_timeline(screen_name = screen_name,count=1)\r\n\r\n #save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n #save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n #keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(\"getting tweets before %s\" % (oldest))\r\n\r\n #all subsequent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)\r\n\r\n #save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n #update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(\"...%s tweets downloaded so far\" % (len(alltweets)))\r\n\r\n #go through all found tweets and remove the ones with no images \r\n outtweets = [] #initialize master list to hold our ready tweets\r\n i=0;\r\n for tweet in alltweets:\r\n #not all tweets will have media url, so lets skip them\r\n try:\r\n print(tweet.entities['media'][0]['media_url'])\r\n print(i)\r\n except (NameError, KeyError):\r\n #we dont want to have any entries without the media_url so lets do nothing\r\n pass\r\n else:\r\n #got media_url - means add it to the output\r\n outtweets.append([tweet.entities['media'][0]['media_url']+\"||\"+(tweet.created_at).strftime(\"%Y-%m-%d,%H:%M:%S\")+\"||\"+str((tweet.text).encode('ascii','ignore').decode('ascii')).rstrip('\\n\\n').replace(\"\\n\\n\",\"\").replace(\"\\n\",\"\").replace(\"\\r\",\"\")])\r\n \r\n i+=1\r\n print(outtweets)\r\n #write the csv \r\n with open('tweets.csv', 'w') as f:\r\n writer = csv.writer(f)\r\n # #writer.writerow([\"id\",\"created_at\",\"text\",\"media_url\"])\r\n writer.writerows(outtweets)\r\n\r\n pass\r\n with open('tweets.csv') as infile, open('tweets_clean.csv', 'w') as outfile:\r\n for line in infile:\r\n if not line.strip(): continue # skip the empty line\r\n outfile.write(line) # \r\n\r\n\r\nif __name__ == '__main__':\r\n #pass in the username of the account you want to download\r\n get_all_tweets(sys.argv[1])","sub_path":"Tweets.py","file_name":"Tweets.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"380966334","text":"# coding:utf-8\n\nimport numpy as np\nimport three_d_pde as pde\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport pickle\nfrom matplotlib import cm\n\nwith open('einzel_lens_vector.binaryfile', 'rb') as lens:\n V 
= pickle.load(lens)\n\nmesh = pde.CartesianGrid()\nV_pre = V[:, int(mesh.ny/2), :]\nV = V_pre.transpose()\n\nm = (40e-3)/(6.0e+23)\nq = 1.6e-19\nV_extract = 2000\nH = 3e-10\nz0 = mesh.zmin\ny0 = input(\"初期位置r:\")\ny0 = int(y0)\nvz0 = np.sqrt(2*q*V_extract/m)\nvy0 = 0\nt = 0\n\n\nEz = np.empty((mesh.nz-1, mesh.nx-1))\nEy = np.empty((mesh.nz-1, mesh.nx-1))\n\ndelta_y = (mesh.xmax - mesh.xmin)/(mesh.nx*1000)\ndelta_z = (mesh.zmax - mesh.zmin)/(mesh.nz*1000)\n\nfor i in range(mesh.nx -1):\n for j in range(mesh.nz -1):\n Ey[i, j] = (V[i+1, j] - V[i, j])/delta_y\n Ez[i, j] = -(V[i, j+1] - V[i, j])/delta_z\n\nAz = Ez*(q/m)\nAy = Ey*(q/m)\n\ndef Runge_Kutta(x0, a, v, h):\n\n k1 = v\n k2 = v+a*h/2\n k3 = v+a*h/2\n k4 = v+a*h\n\n x = x0 + 1000*(k1+2*k2+2*k3+k4)*h/6\n return x\n\nfig = plt.figure()\n\nims = []\n\nwhile mesh.zmin<=z0<=mesh.zmax and mesh.ymin<=y0<=mesh.ymax:\n t += H\n\n az = Az[int((mesh.ny-1)*(mesh.ymax-y0)/(mesh.ymax-mesh.ymin)), int((mesh.nz-1)*(z0+mesh.zmax)/(mesh.zmax - mesh.zmin))]\n ay = Ay[int((mesh.ny-1)*(mesh.ymax-y0)/(mesh.ymax-mesh.ymin)), int((mesh.nz-1)*(z0+mesh.zmax)/(mesh.zmax - mesh.zmin))]\n\n vz0 += az*H\n vy0 += ay*H\n\n z0 = Runge_Kutta(z0, az, vz0, H)\n y0 = Runge_Kutta(y0, ay, vy0, H)\n\n im = plt.plot(z0, y0, \"o\", color=\"red\")\n ims.append(im)\n\nprint(z0, y0, vz0, vy0, t)\nplt.xlim([mesh.zmin, mesh.zmax])\nplt.ylim([mesh.xmin, mesh.xmax])\n\nani = animation.ArtistAnimation(fig, ims, interval=1)\n\nplt.show()","sub_path":"orbit_of_chargedparticle.py","file_name":"orbit_of_chargedparticle.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209769927","text":"import datetime\nimport json\nimport re\nimport typing\nfrom pathlib import Path\n\nfrom ts3bot import events\nfrom ts3bot.bot import Bot\nfrom ts3bot.config import Config\n\nMESSAGE_REGEX = \"!sheet\\\\s* (\\\\w+)(.*)\"\nUSAGE = \"!sheet [note]\"\nSTATE_FILE = Path(\"sheet.json\")\n\nCOMMAND_MAPPING = {\n \"ebg\": \"EBG\",\n \"red\": \"Red\",\n \"green\": \"Green\",\n \"blue\": \"Blue\",\n \"r\": \"Red\",\n \"g\": \"Green\",\n \"b\": \"Blue\",\n}\n\n\ndef handle(bot: Bot, event: events.TextMessage, match: typing.Match):\n sheet_channel_id = Config.get(\"teamspeak\", \"sheet_channel_id\")\n if sheet_channel_id == 0:\n return\n\n current_state = {\"EBG\": [], \"Red\": [], \"Green\": [], \"Blue\": []}\n\n if match.group(1) == \"help\" and event.uid in Config.whitelist_admin:\n bot.send_message(\n event.id,\n \"!sheet \\n!sheet set \",\n is_translation=False,\n )\n return\n\n if match.group(1) == \"reset\" and event.uid in Config.whitelist_admin:\n pass # Don't load the current file, just use the defaults\n elif match.group(1) == \"set\" and event.uid in Config.whitelist_admin:\n # Force-set an entry\n match = re.match(\n \"!sheet set (ebg|red|green|blue|r|g|b|remove) (.*)\",\n event.message.strip(),\n )\n if not match:\n bot.send_message(event.id, \"invalid_input\")\n return\n\n if STATE_FILE.exists():\n current_state = json.loads(STATE_FILE.read_text())\n\n if match.group(1) == \"remove\":\n current_state = _remove_lead(current_state, name_field=match.group(2))\n else:\n # Add new entry\n current_state = _add_lead(\n current_state,\n wvw_map=match.group(1),\n note=\"\",\n name=match.group(2),\n )\n if not current_state:\n bot.send_message(event.id, \"sheet_map_full\")\n return\n\n elif match.group(1) in [\"ebg\", \"red\", \"green\", \"blue\", \"r\", \"g\", \"b\", \"remove\"]:\n if STATE_FILE.exists():\n 
current_state = json.loads(STATE_FILE.read_text())\n\n if match.group(1) == \"remove\":\n current_state = _remove_lead(current_state, uid=event.uid)\n else:\n current_state = _add_lead(\n current_state,\n wvw_map=match.group(1),\n note=match.group(2),\n uid=event.uid,\n name=event.name,\n )\n if not current_state:\n bot.send_message(event.id, \"sheet_map_full\")\n return\n else:\n bot.send_message(event.id, \"invalid_input\")\n return\n\n # Build new table\n desc = \"[table][tr][td] | Map | [/td][td] | Lead | [/td][td] | Note | [/td][td] | Date | [/td][/tr]\"\n for _map, leads in current_state.items():\n if len(leads) == 0:\n desc += f\"[tr][td]{_map}[/td][td]-[/td][td]-[/td][td]-[/td][/tr]\"\n continue\n\n for lead in leads:\n desc += (\n f\"[tr][td]{_map}[/td][td]{lead['lead']}[/td][td]{_encode(lead['note'])}[/td]\"\n f\"[td]{lead['date']}[/td][/tr]\"\n )\n\n desc += (\n f\"[/table]\\n[hr]Last change: {_tidy_date()}\\n\\n\"\n f\"Link to bot: [URL=client://0/{bot.own_uid}]{Config.get('bot_login', 'nickname')}[/URL]\\n\" # Add link to self\n \"Usage:\\n\"\n \"- !sheet red/green/blue (note)\\t—\\tRegister your lead with an optional note (20 characters).\\n\"\n \"- !sheet remove\\t—\\tRemove the lead\"\n )\n bot.exec_(\"channeledit\", cid=sheet_channel_id, channel_description=desc)\n bot.send_message(event.id, \"sheet_changed\")\n\n STATE_FILE.write_text(json.dumps(current_state))\n\n\ndef _tidy_date(date: datetime.datetime = None):\n if not date:\n date = datetime.datetime.now()\n return date.strftime(\"%d.%m. %H:%M\")\n\n\ndef _add_lead(\n maps: typing.Dict,\n wvw_map: str,\n note: str,\n name: str,\n uid: typing.Optional[str] = None,\n) -> typing.Optional[typing.Dict]:\n mapping = COMMAND_MAPPING[wvw_map]\n\n lead = f\"[URL=client://0/{uid}]{name}[/URL]\" if uid else name\n\n # Remove leads with the same name\n maps = _remove_lead(maps, name_field=lead)\n\n # Only allow two leads per map\n if len(maps[mapping]) >= 2:\n return None\n\n maps[mapping].append(\n {\"lead\": lead, \"note\": note.strip()[0:20], \"date\": _tidy_date()}\n )\n return maps\n\n\ndef _remove_lead(\n maps: typing.Dict,\n name_field: typing.Optional[str] = None,\n uid: typing.Optional[str] = None,\n):\n def compare(_lead):\n if uid:\n return uid not in _lead[\"lead\"]\n\n return name_field != _lead[\"lead\"]\n\n new_leads: typing.Dict[str, typing.List[typing.Dict[str, str]]] = {}\n for _map, leads in maps.items():\n new_leads[_map] = []\n for lead in leads:\n if compare(lead):\n new_leads[_map].append(lead)\n return new_leads\n\n\ndef _encode(s: str):\n return s.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n","sub_path":"ts3bot/commands/sheet.py","file_name":"sheet.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38599327","text":"import numpy as np\nimport pandas as pd\nimport re\nimport pytest\n\nfrom dptools import print_missings\nfrom dptools import fill_missings\nfrom dptools import print_factor_levels\nfrom dptools import split_nested_features\nfrom dptools import correct_colnames\n\ndef test_split_nested_features_4():\n data = {'age': [27, np.nan, 30, 25, np.nan], \n 'height': [170, 168, 173, 177, 165], \n 'income': ['high,100', 'medium,50', 'low,25', 'low,28', 'no income,0']}\n df = pd.DataFrame(data)\n df = split_nested_features(df, split_vars = 'income', sep = ',', drop = True)\n assert df.shape[1] == 4\n\ndef test_split_nested_features_5():\n data = {'age': [27, np.nan, 30, 25, np.nan], \n 'height': 
[170, 168, 173, 177, 165], \n 'income': ['high 100', 'medium 50', 'low 25', 'low 28', 'no_income 0']}\n df = pd.DataFrame(data)\n df = split_nested_features(df, split_vars = 'income', sep = ' ', drop = False)\n assert df.shape[1] == 5\n\ndef test_fill_missings_0():\n data = {'age': [27, np.nan, 30, 25, np.nan], \n 'height': [170, 168, 173, 177, 165], \n 'income': ['high', 'medium', 'low', 'low', 'no_income']}\n df = pd.DataFrame(data)\n df = fill_missings(df, to_0_cols = 'age')\n assert df['age'][4] == 0\n\ndef test_fill_missings_unknown():\n data = {'age': [27, np.nan, 30, 25, np.nan], \n 'height': [170, 168, 173, 177, 165], \n 'income': ['high', np.nan, 'low', 'low', 'no_income']}\n df = pd.DataFrame(data)\n df = fill_missings(df, to_0_cols = 'age', to_unknown_cols = 'income')\n assert df['income'][1] == 'unknown'\n\ndef correct_colnames():\n data = {'age': [27, np.nan, 30, 25, np.nan], \n 'height': [170, 168, 173, 177, 165], \n 'height': ['female', 'male', np.nan, 'male', 'female'],\n 'income': ['high', 'medium', 'low', 'low', 'no income']}\n df = pd.DataFrame(data)\n df.columns = ['age', 'height', 'height', 'incöme']\n df = correct_colnames(df)\n assert all(df.columns == ['age', 'height', 'height_2', 'incme'])","sub_path":"dptools/tests/test_data_processing.py","file_name":"test_data_processing.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317346664","text":"import re\nfrom incorporations.exceptions import NotCompanyIncorporationException\nfrom incorporations.exceptions import NoDataException\n\n\nclass IncorporatedCompany:\n \"\"\"\n An Object created from this class represents a single Incorporated Company\n\n Attributes:\n Name: str\n Name of the company\n Date: str\n Date of incorporation\n Registration Number: str\n Registration Number of Incorporated company\n Capital (optional): str\n Initial Invested Capital\n Address (optional): str\n Location of the incorporated company\n\n To Do:\n Capital Regex\n Address Regex\n Create an Object for each incorporated company\n Do more Tests\n\n Alternate Code:\n company_name and reg_No = re.findall(r'(\\w+.*)(?:\\s|\\n)(?:limited|ltd.)\\s\\(((?:reg.|no.|reg. no) .*?)\\)', text, flags=re.I | re.M)\n\n \"\"\"\n\n def __init__(self, company_names=None, date=None, reg_no=None):\n self.company_names = company_names\n self.date = date\n self.reg_no = reg_no\n\n def __str__(self):\n return str(self.__dict__)\n\n\ndef is_incorporation(notice):\n \"\"\"\n\n :param notice: Notice class\n :return: True if the notice has incorporated companies\n \"\"\"\n has_company_act = re.findall(r'(the companies act)\\n(\\Scap\\W+\\d+\\S)(?:\\nincorporations)?\\n(it is notified for general information that)', notice, re.I)\n\n if has_company_act:\n return True\n return False\n\n\ndef get_incorporated_company(notice):\n \"\"\"\n\n :param notice: Notice Class\n :return: Incorporated Company given a notice\n \"\"\"\n\n # notice = notice.body\n\n if not is_incorporation(notice):\n raise NotCompanyIncorporationException\n\n # names of companies (list)\n try:\n company_names = re.findall(r'(\\w+.*)\\s(?:limited|ltd.)', notice, flags=re.I | re.M)\n except NoDataException:\n company_names = []\n\n # date of incorporation\n try:\n date = re.findall(r'incorporated in Kenya during the period\\s(:of\\s)?(\\w+.*\\n\\w+.*)', notice, flags=re.I | re.M)\n except NoDataException:\n date = None\n\n # Registration Number (list)\n try:\n reg_no = re.findall(r'\\(((?:reg.|no.|reg. 
no) .*?)\\)', notice, flags=re.I)\n if not reg_no:\n reg_no = []\n for c in company_names:\n reg_no.append('null')\n except NoDataException:\n reg_no = []\n\n \"\"\"\n # Notice Number\n try:\n notice_number = re.findall(r'gazette notice no\\.?\\s\\d+', notice, flags=re.I)\n except NoDataException:\n notice_number = None\n \"\"\"\n\n # Gazette Date\n try:\n notice_date = re.findall(r'\\d{0,2}\\w+\\s\\w+,?\\s\\d{4}', notice)[0]\n except NoDataException:\n notice_date = None\n\n \"\"\"\n Get the length of Company_names list.\n Assume the length of company_names list is same length as reg_no list\n Iterate through both lists creating company objects\n Company Object Attributes:\n Name of Company\n Reg No\n Date of Incorporation\n Notice Number\n Gazette Date\n \"\"\"\n\n try:\n list_size = len(company_names)\n except IndexError:\n print('List is empty')\n\n i = 0\n incorporations = []\n try:\n while i < list_size:\n incorporations.append(IncorporatedCompany(company_names=company_names[i], date=date, reg_no=reg_no[i]))\n # print(incorporations[i])\n i += 1\n except IndexError:\n print('Index Error')\n\n return incorporations\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"incorporation.py","file_name":"incorporation.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"87808839","text":"import os\nimport uuid\nimport base64\n\nimport fontforge\nimport tornado.gen as gen\nimport tempfile\n\nfrom collections import deque\nfrom tornado.web import HTTPError\n\n\n@gen.coroutine\ndef get_font(application, font_url):\n font_response = yield application.client.fetch(font_url)\n raise gen.Return(font_response)\n\n\n@gen.coroutine\ndef compress(application, text, font_url):\n\n try:\n font_response = yield get_font(application, font_url)\n except HTTPError as e:\n raise e\n extension = font_url.split('.')[-1]\n\n with tempfile.NamedTemporaryFile() as input_font:\n input_font.write(font_response.body)\n input_font.flush()\n font = fontforge.open(input_font.name)\n\n for i in set([i for i in deque(text.decode(\"UTF-8\"))]):\n font.selection[ord(i)] = True\n\n font.selection.invert()\n\n for i in font.selection.byGlyphs:\n\n font.removeGlyph(i)\n\n temp_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + '.' 
+ extension)\n\n font.generate(temp_filename)\n with open(temp_filename, 'r') as read_font:\n b64_font = base64.b64encode(read_font.read())\n os.remove(temp_filename)\n\n raise gen.Return(b64_font)\n","sub_path":"fontcut/forge.py","file_name":"forge.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"397297759","text":"def shuffle_data(audio_paths, durations, texts):\n \"\"\" Shuffle the data (called after making a complete pass through \n training or validation data during the training process)\n Params:\n audio_paths (list): Paths to audio clips\n durations (list): Durations of utterances for each audio clip\n texts (list): Sentences uttered in each audio clip\n \"\"\"\n p = np.random.permutation(len(audio_paths))\n audio_paths = [audio_paths[i] for i in p] \n durations = [durations[i] for i in p] \n texts = [texts[i] for i in p]\n return audio_paths, durations, texts\n\ndef sort_data(audio_paths, durations, texts):\n \"\"\" Sort the data by duration \n Params:\n audio_paths (list): Paths to audio clips\n durations (list): Durations of utterances for each audio clip\n texts (list): Sentences uttered in each audio clip\n \"\"\"\n p = np.argsort(durations).tolist()\n audio_paths = [audio_paths[i] for i in p]\n durations = [durations[i] for i in p] \n texts = [texts[i] for i in p]\n return audio_paths, durations, texts\n\ndef vis_train_features(audio_gen,index=0):\n \"\"\" Visualizing the data point in the training set at the supplied index\n \"\"\"\n # obtain spectrogram\n audio_gen.load_train_data()\n vis_audio_path = audio_gen.train_audio_paths[index]\n vis_spectrogram_feature = audio_gen.normalize(audio_gen.featurize(vis_audio_path))\n # obtain mfcc\n #audio_gen = AudioGenerator(spectrogram=False)\n audio_gen.load_train_data()\n vis_mfcc_feature = audio_gen.normalize(audio_gen.featurize(vis_audio_path))\n # obtain text label\n vis_text = audio_gen.train_texts[index]\n # obtain raw audio\n vis_raw_audio, _ = librosa.load(vis_audio_path)\n # print total number of training examples\n print('There are %d total training examples.' 
% len(audio_gen.train_audio_paths))\n # return labels for plotting\n return vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path\n\n\ndef plot_raw_audio(vis_raw_audio):\n # plot the raw audio signal\n fig = plt.figure(figsize=(12,3))\n ax = fig.add_subplot(111)\n steps = len(vis_raw_audio)\n ax.plot(np.linspace(1, steps, steps), vis_raw_audio)\n plt.title('Audio Signal')\n plt.xlabel('Time')\n plt.ylabel('Amplitude')\n plt.show()\n\ndef plot_mfcc_feature(vis_mfcc_feature):\n # plot the MFCC feature\n fig = plt.figure(figsize=(12,5))\n ax = fig.add_subplot(111)\n im = ax.imshow(vis_mfcc_feature, cmap=plt.cm.jet, aspect='auto')\n plt.title('Normalized MFCC')\n plt.ylabel('Time')\n plt.xlabel('MFCC Coefficient')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n ax.set_xticks(np.arange(0, 13, 2), minor=False);\n plt.show()\n\ndef plot_spectrogram_feature(vis_spectrogram_feature):\n # plot the normalized spectrogram\n fig = plt.figure(figsize=(12,5))\n ax = fig.add_subplot(111)\n im = ax.imshow(vis_spectrogram_feature, cmap=plt.cm.jet, aspect='auto')\n plt.title('Normalized Spectrogram')\n plt.ylabel('Time')\n plt.xlabel('Frequency')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(im, cax=cax)\n plt.show()","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"557249605","text":"import re\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n# mpl.rcParams['figure.figsize'] = (20,20)\n\ndef plot_learning_curves(x_axis_label, y_axis_label, x_axis, y_axis_1, y_axis_2, y_axis_3, image_name):\n print (y_axis_1)\n print (x_axis)\n plt.bar(x_axis, y_axis_1, color='blue', align='center', label='SVM')\n plt.bar(x_axis, y_axis_2, color='green', align='center',label='DecisionTree')\n plt.bar(x_axis, y_axis_3, color='pink', align='center', label='Deep Neural Network')\n plt.legend(['SVM', 'DecisionTree', 'Deep Neural Network'], loc='best')\n plt.xlabel(x_axis_label)\n plt.ylabel(y_axis_label)\n # plt.xticks(x_axis)\n plt.title(y_axis_label + ' v/s ' + x_axis_label)\n plt.savefig(image_name,dpi=300)\n plt.show()\n\ndef main():\n lists = []\n with open(\"allResults.txt\",\"rb\") as fp:\n results = fp.readlines()\n for i in range(10):\n axis = []\n lists.append(axis)\n \n for result in results[1:]:\n result = str(result)\n # print (result.split(','))\n \n result = re.sub('[^0-9.,]*', '', result)\n result = result.split(',')\n # print(result)\n for i, score in enumerate(result):\n if i == 0:\n lists[i].append(int(score))\n else:\n \n score = (float(score)*100)\n lists[i].append(score)\n\n print (len(lists[0]))\n # print (lists[1])\n # print (lists[4])\n # print (lists[7])\n plot_learning_curves('Dataset Size', 'F1 Score', lists[0], lists[1], lists[4], lists[7], 'F1Scores.png')\n # plot_learning_curves('Dataset Size', 'Accuracy', lists[0], lists[2], lists[5], lists[8], 'Accuracy.png')\n # plot_learning_curves('Dataset Size', 'Time', lists[0], lists[3], lists[6], lists[9], 'Time.png')\n\n\n\n # plot_learning_curves('Dataset Size', 'F1 Score', lists[0], lists[3], lists[4], lists[7], '')\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"plot_graph.py","file_name":"plot_graph.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"280599112","text":"from django.urls import path, include\nfrom .views import clientes_consulta, cliente_cadastro, cliente_update, cliente_delete\n#from .views import telefone_cadastro, telefone_update, telefone_delete\nfrom cadastros.enderecos import urls as enderecos_urls\n\nurlpatterns = [ \n path('clientes_consulta/', clientes_consulta, name='clientes_consulta'), \n path('cliente_cadastro/', cliente_cadastro, name='cliente_cadastro'),\n path('cliente_update/', cliente_update, name='cliente_update'),\n path('cliente_delete/', cliente_delete, name='cliente_delete'), \n path('enderecos/', include(enderecos_urls)),\n]\n\n'''\n path('telefone_cadastro/', telefone_cadastro, name='telefone_cadastro'),\n path('telefone_update/', telefone_update, name='telefone_update'),\n path('telefone_delete/', telefone_delete, name='telefone_delete'),\n'''","sub_path":"cadastros/clientes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"590755198","text":"#!python\n# -*- coding: utf-8 -*-\n\"\"\"\n\t@package:\tbx.BLOCK\n\t@author:\tKRZYSZTOF \"@K0FF.EU\" K0FF\n\t@version:\t2.17.12\n\"\"\"\nimport bx\nimport re\n_REXP_NAME = r'^([A-z\\#\\@\\$\\&][\\w\\#\\@\\$\\&\\.\\-]*)$'\n_BLOCKS = {}\n\n#\ndef _name( name ):\n\tif re.search( _REXP_NAME, name ):\n\t\treturn name.lower()\n\telse:\n\t\tbx.error('Block name \"%s\" syntax error'%(name))\t\n\n#\ndef set( name, SOURCE ):\n\tname = _name( name )\n\tif name:\n\t\tif name in _BLOCKS:\n\t\t\t_BLOCKS[name]._name = None\n\t\t\tbx.debug('Redefined block \"%s\"'%(name))\n\t\telse:\n\t\t\tbx.debug('Define block \"%s\"'%(name))\n\t\tSOURCE._name = name\n\t\t_BLOCKS[name] = SOURCE\n\t\treturn _BLOCKS[name]\n\treturn bx.code()\n\ndef get( name ):\n\tname = _name( name )\n\tif name and name in _BLOCKS:\n\t\treturn _BLOCKS[name]\n\treturn bx.code()\n\n#\nbx.set = set\nbx.get = get","sub_path":"bx/BLOCK.py","file_name":"BLOCK.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"8300709","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\",\"r\") as fh:\n long_description = fh.read()\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(\n name='emerge',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n version='v2.0.0-alpha',\n description='Emerging technologies Management and Risk evaluation on distributions Grid Evolution',\n author='Kapil Duwadi',\n author_email='kapil.duwadi@nrel.gov',\n packages=find_packages(\"src\"),\n # package_data={\".//dssdashboard//assets\":[\"*.css\",\"*.png\"]},\n url=\"https://github.com/NREL/EMeRGE\",\n keywords=\"Distribution System DER technologies management risk impact analysis\",\n install_requires=requirements,\n package_dir={\"emerge\": \"emerge\"}, \n entry_points={\n \"console_scripts\": [\n \"emerge=emerge.cli.cli:cli\"\n ],\n },\n python_requires=\">=3.8\", \n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\"\n 
]\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"195089025","text":"from metaflow import step, FlowSpec\nimport pandas as pd\nfrom os import path\n\nfrom transformations import add_date,\\\n count_column_groups,\\\n merge_detasets,\\\n extract_cities_from_airports,\\\n merge_flights_with_cities,\\\n add_connection_id,\\\n aggregate_connections_between_cities,\\\n save_json_object_as_file\n\nclass AirlinesDataPreparationFlow(FlowSpec):\n data_folder = path.join(path.dirname(__file__), 'data')\n data_output_folder = path.join(path.dirname(__file__), 'data', 'output')\n\n @step\n def start(self):\n print('Flow started')\n self.next(self.load_flights, self.load_planes, self.load_airports)\n\n @step\n def load_planes(self):\n self.planes_df = pd.read_csv(path.join(self.data_folder, 'planes.csv'))\n self.next(self.wait_all_data_is_loaded)\n\n @step\n def load_flights(self):\n self.flights_df = pd.read_csv(path.join(self.data_folder, 'flights.csv'))\n self.next(self.wait_all_data_is_loaded)\n \n @step\n def load_airports(self):\n self.airports_df = pd.read_csv(path.join(self.data_folder, 'airports.csv'))\n self.next(self.wait_all_data_is_loaded)\n\n @step\n def wait_all_data_is_loaded(self, inputs):\n # Metaflow special method to propagate values\n # that were calculated in parallel steps\n self.merge_artifacts(inputs)\n self.next(self.start_transformations)\n\n @step\n def start_transformations(self):\n self.next(\n self.get_covered_days_count,\n self.get_departure_cities_count,\n self.find_biggest_delays,\n self.calculate_cities_connectivity)\n\n @step\n def get_covered_days_count(self):\n df = add_date(self.flights_df)\n self.covered_days = count_column_groups(df, df['date'].dt.date)\n self.next(self.wait_calculations_to_complete)\n\n @step\n def get_departure_cities_count(self):\n cities = extract_cities_from_airports(self.airports_df)\n flights_with_cities = merge_flights_with_cities(self.flights_df, cities)\n\n flight_origins_count = count_column_groups(flights_with_cities, flights_with_cities['origin_city'])\n self.departure_cities_count = flight_origins_count\n self.next(self.wait_calculations_to_complete)\n\n @step\n def find_biggest_delays(self):\n flights_and_planes_df = merge_detasets(self.planes_df, self.flights_df, 'tailnum')\n\n flights_by_planes = flights_and_planes_df.groupby('manufacturer')\n\n max_departure_delays = flights_by_planes['dep_delay'].max().reset_index()\n max_departure_delays = max_departure_delays.sort_values(by='dep_delay', ascending=False)\n self.max_departure_delays = max_departure_delays\n \n max_arrival_delays = flights_by_planes['arr_delay'].max().reset_index()\n max_arrival_delays = max_arrival_delays.sort_values(by='arr_delay', ascending=False)\n self.max_arrival_delays = max_arrival_delays\n\n self.next(self.wait_calculations_to_complete)\n\n @step\n def calculate_cities_connectivity(self):\n cities = extract_cities_from_airports(self.airports_df)\n flights_with_cities = merge_flights_with_cities(self.flights_df, cities)\n flights_with_cities = add_connection_id(flights_with_cities)\n\n self.connections_between_cities = aggregate_connections_between_cities(flights_with_cities)\n\n self.next(self.wait_calculations_to_complete)\n\n @step\n def wait_calculations_to_complete(self, inputs):\n self.merge_artifacts(inputs)\n self.next(self.start_saving_output_results)\n\n @step\n def 
start_saving_output_results(self):\n self.next(\n self.save_general_statistics,\n self.save_delays_data,\n self.save_cities_connectivity)\n\n @step\n def save_general_statistics(self):\n statistics = {\n 'covered_days': self.covered_days,\n 'departure_cities_count': self.departure_cities_count\n }\n\n file_path = path.join(self.data_output_folder, 'general_statistics.json')\n save_json_object_as_file(statistics, file_path)\n\n self.next(self.wait_all_results_saved)\n\n @step\n def save_delays_data(self):\n departure_delays_file = path.join(self.data_output_folder, 'departure_delays.csv')\n self.max_departure_delays.to_csv(departure_delays_file, index=False)\n\n arrival_delays_file = path.join(self.data_output_folder, 'arrival_delays.csv')\n self.max_arrival_delays.to_csv(arrival_delays_file, index=False)\n\n self.next(self.wait_all_results_saved)\n\n @step\n def save_cities_connectivity(self):\n cities_connectivity_file = path.join(self.data_output_folder, 'most_connected_cities.csv')\n\n self.connections_between_cities.to_csv(cities_connectivity_file, index=False)\n\n self.next(self.wait_all_results_saved)\n\n @step\n def wait_all_results_saved(self, inputs):\n self.next(self.end)\n\n @step\n def end(self):\n print('Flow completed.')\n\n# Run the pipline by executing:\n# python pipeline.py run\n# to generate drawing of the pipeline\n# For vertical graph layout:\n# python pipeline.py output-dot | dot -Grankdir=TB -Tpng -o graph.png\nif __name__ == '__main__':\n AirlinesDataPreparationFlow()\n ","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281674602","text":"# TO-DO: Complete the selection_sort() function below\ndef selection_sort(arr):\n # loop through n-1 elements\n for i in range(0, len(arr) - 1):\n cur_index = i\n smallest_index = cur_index\n # TO-DO: find next smallest element\n # (hint, can do in 3 loc)\n # loop through list to the right of the first element\n for j in range(i + 1, len(arr)):\n if arr[j] < arr[smallest_index]:\n # TO-DO: swap\n smallest_index = j\n if smallest_index != cur_index:\n arr[i], arr[smallest_index] = arr[smallest_index], arr[i]\n return arr\n\n\n# TO-DO: implement the Bubble Sort function below\ndef bubble_sort(arr):\n # loop through array until no swaps are performed\n was_swapped = True\n while was_swapped:\n was_swapped = False\n for i in range(0, len(arr) - 1):\n current_item = arr[i]\n j = i + 1\n next_item = arr[j]\n # compare current item to next item\n # if current item is greater than next item, swap\n if current_item > next_item:\n was_swapped = True\n arr[i], arr[j] = arr[j], arr[i]\n return arr\n\n\n# list_a = [8, 5, 2, 4, 1, 3]\n# print(selection_sort(list_a))\n\n'''\nSTRETCH: implement the Counting Sort function below\n\nCounting sort is a sorting algorithm that works on a set of data where\nwe specifically know the maximum value that can exist in that set of\ndata. The idea behind this algorithm then is that we can create \"buckets\"\nfrom 0 up to the max value. This is most easily done by initializing an\narray of 0s whose length is the max value + 1 (why do we need this \"+ 1\"?\nBECAUSE ZERO INDEXING). 
\n\nEach buckets[i] then is responsible for keeping track of how many times \nwe've seen `i` in the input set of data as we iterate through it.\nOnce we know exactly how many times each piece of data in the input set\nshowed up, we can construct a sorted set of the input data from the \nbuckets. \n\nWhat is the time and space complexity of the counting sort algorithm?\n'''\n\n\ndef counting_sort(arr, maximum=None):\n # create an array of zeros with length of maximum + 1\n count_arr = [0] * (maximum + 1)\n # loop through array, storing the count of each number in the count array\n for i in range(0, len(arr)):\n # for each value in the original array, increment the counting array at that index\n count_arr[arr[i]] += 1\n # loop through the count array and modify so each value is the sum of previous values\n for j in range(0, maximum + 1):\n # modify the count array so each index stores the sum of previous counts\n count_arr[j] += count_arr[j-1]\n # for each index, add the correct value the requisite number of times to the original array\n # initiate the count at the last index in the original array\n count = len(arr) - 1\n # create an array of zeros the length of the original array\n empty_arr = [0] * len(arr)\n while count >= 0:\n # fill the empty array by finding the index of each original array item in the modified count array and placing in that index\n empty_arr[count_arr[arr[count]] - 1] = arr[count]\n count_arr[arr[count]] -= 1\n count -= 1\n # copy the sorted elements into original array\n for k in range(0, len(arr)):\n arr[k] = empty_arr[k]\n\n return arr\n\n\nlist_b = [1, 4, 1, 2, 7, 5, 2]\nprint(counting_sort(list_b, 9))\n","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"314983122","text":"from django import forms\nfrom moves.models import Game, Move\n\nclass MoveForm(forms.Form): \n value = forms.CharField(max_length=140)\n game = forms.CharField(max_length=140)\n\n def save(self): \n value = self.cleaned_data['value']\n game = self.cleaned_data['game']\n m = Move(value=value, game=game)\n m.save()\n return m\n\n def clean_game(self):\n game, created = Game.objects.get_or_create(title=self.cleaned_data['game'])\n return game\n","sub_path":"move_hard/moves/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"278362526","text":"# Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n def cloneGraph(self, node):\n def doClone(node):\n if not node:\n return None\n if node.label in cloned:\n return cloned[node.label]\n\n new = UndirectedGraphNode(node.label)\n cloned[node.label] = new\n for n in node.neighbors:\n new.neighbors.append(doClone(n))\n return new\n\n cloned = {}\n return doClone(node)","sub_path":"133-clone-graph.py","file_name":"133-clone-graph.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"276765185","text":"import tensorflow as tf\n\nclass RecurrentActivityModel:\n\n def __init__(self, x, y, x_mean, x_set_dim, y_set_dim, FLAGS):\n def squared(tensor):\n return tf.pow(tensor,2)\n\n def 
sse(error):\n return tf.reduce_sum(squared(error))\n\n\n # Config Variables\n self.x = x # [1,num_steps,n_use=state_size]\n self.y = y\n self.x_mean = x_mean\n self.num_steps = FLAGS.seq_len # number of truncated backprop steps ('n')\n self.learning_rate = FLAGS.lr\n self.y_mean = tf.reduce_mean(self.y,axis=1,keep_dims=True)\n global_step = tf.Variable(0, name='global_step', trainable=False)\n batch_size = FLAGS.batch\n self.n_use = FLAGS.n_use\n self.vocab_size = x_set_dim\n self.resp_size = y_set_dim\n next_n = FLAGS.guess\n\n learning_rate = tf.train.exponential_decay(\n self.learning_rate,\n global_step,\n 10000,\n 0.80,\n staircase=False\n )\n\n embedding = tf.get_variable(\n \"embedding\", [self.vocab_size,FLAGS.rnn_size],dtype=tf.float32)\n\n y_OH = tf.one_hot(self.y,self.resp_size)\n\n rnn_inputs = tf.nn.embedding_lookup(embedding,self.x)\n\n with tf.variable_scope('out_weights'):\n W = tf.get_variable(\n 'W',\n [FLAGS.rnn_size, self.n_use],\n #initializer=tf.constant_initializer(1.0)\n initializer=tf.contrib.layers.xavier_initializer()\n )\n bias = tf.get_variable(\n 'bias',\n [self.n_use],\n initializer=tf.constant_initializer(0.1)\n )\n\n with tf.variable_scope('weight2'):\n W2 = tf.get_variable(\n 'W2',\n [FLAGS.batch*self.resp_size,FLAGS.batch*FLAGS.seq_len]\n )\n b2 = tf.get_variable(\n 'b2',\n [self.n_use],\n initializer=tf.constant_initializer(0.1)\n )\n self._weight_matrix = W\n\n # Define RNN architecture\n cell = tf.nn.rnn_cell.LSTMCell(FLAGS.rnn_size, state_is_tuple=True)\n cell = tf.nn.rnn_cell.MultiRNNCell([cell] * FLAGS.layers,state_is_tuple=True)\n self.init_state = cell.zero_state(FLAGS.batch, tf.float32)\n\n\n # Connect rnn_inputs to architecture defined above\n # rnn_outputs shape = (batch_size, num_steps, state_size)\n # final_state = rnn_outputs[:,-1,:] = (batch_size, state_size)\n rnn_outputs, final_state = tf.nn.dynamic_rnn(\n cell,\n rnn_inputs,\n initial_state=self.init_state,\n dtype=tf.float32\n )\n\n # Grab last n values\n #last_n_out = rnn_outputs[:,-next_n:,:]\n # Flatten rnn_outputs down to shape = (batch_size*num_steps, state_size)\n out_mod = tf.reshape(rnn_outputs, [-1, FLAGS.rnn_size])\n\n logits = tf.matmul(out_mod, W) + bias\n\n _response_vec = tf.matmul(W2,logits) + b2\n response_vec = tf.reshape(_response_vec, [FLAGS.batch,FLAGS.n_use,self.resp_size])\n\n # Flatten y; (num_steps,n_use) -> (num_steps*n_use)\n #_y_mean = tf.reshape(self.y_mean, [-1,FLAGS.rnn_size])\n #_x_mean = tf.reshape(self.x_mean, [-1,FLAGS.n_use])\n\n # (1500,25) x (25,2) = (1500,2)\n #seqw = tf.ones((batch_size, num_steps))\n\n ###### Stopping point\n self._prediction = tf.argmax(response_vec,axis=2)\n #self._prediction = tf.reshape(self._flat_prediction, [-1, self.num_steps])\n #for i in range(self.n_use-1):\n #tf.summary.histogram('prediction_n%d'%(i),self._prediction[i,:])\n\n import pdb; pdb.set_trace()\n with tf.name_scope('metrics'):\n # Error Metrics\n with tf.name_scope('error'):\n self._error = logits-_y\n\n with tf.name_scope('2d'):\n self._error_2d = self._prediction-self.y\n\n with tf.name_scope('null_error'):\n null_error = tf.zeros_like(logits)-_y\n with tf.name_scope('mean_error'):\n mean_error = self.y - self.x_mean\n with tf.name_scope('squared_error'):\n self.se = squared(self._error)\n with tf.name_scope('2d'):\n self.se_2d = squared(self._error)\n\n # 1D Metrics\n with tf.name_scope('total_loss'):\n self._total_loss = sse(self._error)\n self.FEV_2d = tf.reduce_sum(squared(self._error_2d),[0])\n\n with tf.name_scope('null_loss'):\n null_loss = 
sse(null_error)\n\n with tf.name_scope('variance'):\n var = sse(mean_error)\n self._var_2d = tf.reduce_sum(squared(mean_error),[0])\n\n with tf.name_scope('FEV'):\n self.FEV = 1-(self._total_loss/var)\n #self.FEV_2d = 1-(self._sse_2d/self._var_2d)\n\n self._optimize = tf.train.AdamOptimizer(learning_rate).minimize(self._total_loss, global_step=global_step)\n\n # Log outputs to summary writer\n tf.summary.scalar('total_loss', self._total_loss)\n tf.summary.scalar('null_loss', null_loss)\n tf.summary.scalar('var', var)\n tf.summary.scalar('FEV', self.FEV)\n #tf.summary.scalar('avg_perplexity', self._avg_perplexity)\n self._merge_summaries = tf.summary.merge_all()\n self._global_step = global_step\n\n #logits_1 = tf.reshape(logits, [-1, num_steps, num_classes])\n #seq_loss = tf.nn.seq2seq.sequence_loss_by_example(\n # tf.unpack(logits_1, axis=1),\n # tf.unpack(self.y, axis=1),\n # tf.unpack(seqw, axis=1),\n # average_across_timesteps=True\n # #softmax_loss_function=\n #)\n #perplexity = tf.exp(seq_loss)\n #self._avg_perplexity = tf.reduce_mean(perplexity)\n\n\n\n def do(self, session, fetches, feed_dict):\n vals = session.run(fetches, feed_dict)\n\n return vals\n\n def step(self,session):\n return tf.train.global_step(session,self._global_step)\n\n @property\n def prediction(self):\n return self._prediction\n\n @property\n def optimize(self):\n return self._optimize\n\n @property\n def error(self):\n return self._error\n\n @property\n def avg_perplexity(self):\n return self._avg_perplexity\n\n @property\n def status(self):\n status = dict(\n prediction=self.prediction,\n lr=self._learning_rate\n )\n return status\n\n @property\n def total_loss(self):\n return self._total_loss\n @property\n def merge_summaries(self):\n return self._merge_summaries\n\n","sub_path":"rnn/embedded_rnn_model.py","file_name":"embedded_rnn_model.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364935345","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport copy\nimport os.path\nimport pandas as pd\nfrom types import SimpleNamespace as Namespace\nfrom feature.FeatureExtractor import FeatureExtractor\nfrom util.TimeUtil import TimeUtil\nclass SimpleFeatureExtractor (FeatureExtractor):\n\n def getAccelerationFromFile(self,fileName):\n # print('Reading acceleration data from file: ',fileName)\n file_directory = fileName\n json_data=open(file_directory).read()\n # print(json_data[:100])\n entity = json.loads(json_data, object_hook=lambda d: Namespace(**d))\n # print('Lines of data: ',len(entity.data))\n # print(x.data[100].event.content)\n result = list()\n variable = 'acceleration' #quaternion acceleration\n for index in range(len(entity.data)):\n # print(x.data[index].event.variable == 'acceleration')\n if(entity.data[index].event.variable == 'acceleration'):#quaternion\n result.append(entity.data[index].event.content)\n # print('File reading done. 
Total number of acceleration data: ',len(result))\n return result\n\n def getLabeledData(self, data, label):\n \"\"\"For example: 1 2 3 12345 normal; 1 3 5 12346 tip; 1 4 8 12347 scallop\"\"\"\n print('Adding labels to data:' ,label)\n arr = copy.deepcopy(data)\n for ele in arr:\n ele.append(label)\n return arr\n\n def saveSimpleFeaturedData(self, path, dataFileNames, labels, resultFileName):\n if not os.path.isfile(path + resultFileName):\n allData = list()\n for i in range(len(dataFileNames)):\n fileArray = self.getAccelerationFromFile(path + dataFileNames[i])\n labeledData = self.getLabeledData(fileArray, labels[i])\n allData.extend(labeledData)\n np.savez(path + resultFileName, data=allData)\n else:\n print('Feature file:\\'',path + resultFileName,'\\' already exists.')\n\n def getSimpleFeaturedData(self, dataFilePath, label, returnDataFrame = True , startMin = None, endMin = None):\n allData = list()\n fileArray = self.getAccelerationFromFile(dataFilePath)\n labeledData = self.getLabeledData(fileArray, label)\n allData.extend(labeledData)\n data = np.array(allData)\n df = pd.DataFrame({'timeStamp': data[:,3], 'x': data[:,0], 'y': data[:,1], 'z': data[:,2], 'label': data[:,4]})\n df = df[['timeStamp','x', 'y', 'z', 'label']]\n\n if not (startMin is None and endMin is None):\n fileStartTimeStamp = df.iloc[0]['timeStamp']\n fileEndTimeStamp = df.iloc[len(df)-1]['timeStamp']\n\n startTimeStamp = fileStartTimeStamp\n if not startMin is None:\n startTimeStamp = fileStartTimeStamp + TimeUtil.getMillisecondFromMinute(\n minute= startMin )\n\n if not endMin is None:\n endTimeStamp = fileStartTimeStamp + TimeUtil.getMillisecondFromMinute(\n minute= endMin )\n df = df[(df['timeStamp'] >= startTimeStamp) & (df['timeStamp'] <= endTimeStamp)]\n df.sort_values('timeStamp')\n\n if returnDataFrame:\n return df\n\n return allData\n\n def getQuaternionFromFile(self,fileName):\n file_directory = fileName\n json_data=open(file_directory).read()\n entity = json.loads(json_data, object_hook=lambda d: Namespace(**d))\n result = list()\n variable = 'quaternion' #quaternion acceleration\n for index in range(len(entity.data)):\n if(entity.data[index].event.variable == 'quaternion'):#quaternion\n result.append(entity.data[index].event.content)\n return result\n\n def getQuaternionData(self, dataFilePath, label, returnDataFrame = True):\n allData = list()\n fileArray = self.getQuaternionFromFile(dataFilePath)\n labeledData = self.getLabeledData(fileArray, label)\n allData.extend(labeledData)\n\n if returnDataFrame:\n data = np.array(allData)\n df = pd.DataFrame({'w': data[:,0], 'x': data[:,1], 'y': data[:,2], 'z': data[:,3], 'timeStamp': data[:,4], 'label': data[:,len(data[0])-1]})\n df = df[['w','x','y','z','timeStamp','label']]\n return df\n\n return allData\n","sub_path":"script/feature/SimpleFeatureExtractor.py","file_name":"SimpleFeatureExtractor.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"575738937","text":"getName = True\r\nwhile getName:\r\n name = input(\"What's your name?\")\r\n if name != \"\":\r\n surname = input(\"What's your surname?\")\r\n if surname != \"\":\r\n getName = False\r\n else:\r\n print(\"Dude, that name combination's not valid...\")\r\n else:\r\n print(\"Dude, you didn't enter a name...\")\r\nprint(\"Hello, \"+name+\" \"+surname+\"!\")\r\n","sub_path":"Week 1 and 2 
stuff/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309159823","text":"from itertools import repeat\n\nfrom rite.richtext import String, Tag, TagType, Text, Protected\n\n\ndef test_string():\n x = String('hello')\n assert list(x.map_iter(repeat(str.capitalize))) == ['Hello']\n assert x.functor_map_iter(repeat(str.capitalize)) == String('Hello')\n\n\ndef test_text():\n x = Text([String('hello'), String(' '), String('world')])\n assert list(x.map_iter(repeat(str.capitalize))) \\\n == ['Hello', ' ', 'World']\n assert x.functor_map_iter(repeat(str.capitalize)) \\\n == Text([String('Hello'), String(' '), String('World')])\n\n\ndef test_tag():\n x = Tag(TagType.EMPHASIS, String('hello'))\n assert list(x.map_iter(repeat(str.capitalize))) \\\n == ['Hello']\n assert x.functor_map_iter(repeat(str.capitalize)) \\\n == Tag(TagType.EMPHASIS, String('Hello'))\n\n\ndef test_protected():\n x = Protected(String('hello'))\n assert list(x.map_iter(repeat(str.capitalize))) == ['Hello']\n assert x.functor_map_iter(repeat(str.capitalize)) == x\n\n\n# verify Text can contain String, Tag, and Text\ndef test_text_combined():\n x1 = Text([String('hello '),\n Tag(TagType.STRONG, String('brave')),\n Text([String(' world')])])\n x2 = Text([String('HELLO '),\n Tag(TagType.STRONG, String('BRAVE')),\n Text([String(' WORLD')])])\n assert x1.functor_map_iter(repeat(str.upper)) == x2\n\n\n# verify Tag can contain String, Tag, and Text\ndef test_tag_combined():\n s1 = String('hello')\n s2 = String('HELLO')\n x11 = Tag(TagType.EMPHASIS, s1)\n x21 = Tag(TagType.EMPHASIS, Tag(TagType.STRONG, s1))\n x12 = Tag(TagType.EMPHASIS, s2)\n x22 = Tag(TagType.EMPHASIS, Tag(TagType.STRONG, s2))\n assert x11.functor_map_iter(repeat(str.upper)) == x12\n assert x21.functor_map_iter(repeat(str.upper)) == x22\n","sub_path":"test/test_richtext.py","file_name":"test_richtext.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"464578989","text":"import logging\nfrom time import time\n\nimport pandas as pd\nimport numpy as np\n\nfrom cph_fusion.src.main.donors_recips import donors_recips_spark, donors_recips_pandas\n\nfrom library.src.main.pyspark_test_framework import PySparkTest\n\n\nclass CPHDonorRecipTest(PySparkTest):\n\n def test_donor_recip_spark(self):\n\n logger = logging.getLogger(__name__)\n\n test_top_200_nol_site_df = pd.DataFrame(columns=['rn_id'])\n\n test_top_200_nol_site_df['rn_id'] = [1, 2, 3, 4, 5]\n test_top_200_nol_site_df['dummy_2'] = [0, 1, 1, 1, 2]\n test_top_200_nol_site_df['dummy_1'] = [0, 2, 2, 2, 0]\n test_top_200_nol_site_df['dummy_3'] = [2, 0, 0, 0, 0]\n\n test_top_200_nol_site_df['dummy_yn002'] = [0, 1, 1, 1, 1]\n test_top_200_nol_site_df['dummy_yn001'] = [0, 1, 1, 1, 0]\n test_top_200_nol_site_df['dummy_yn003'] = [1, 0, 0, 0, 0]\n\n test_npm_tv_usage_df = pd.DataFrame(columns=['respondentid', 'a1', 'b1', 'c1', 'd1'])\n test_npm_tv_usage_df['respondentid'] = np.array([1001, 1002, 2001, 2002])\n test_npm_tv_usage_df['a1'] = [0, 1, 1, 0]\n test_npm_tv_usage_df['b1'] = [1, 0, 0, 0]\n test_npm_tv_usage_df['c1'] = [0, 1, 0, 2]\n test_npm_tv_usage_df['d1'] = [0, 0, 0, 0]\n\n npm_test_data_values = np.array(\n [[1001, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6, 1, 1, 1, 2, 5, 2, 1, 2, 2],\n [1002, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 6, 1, 1, 1, 2, 5, 2, 1, 2, 3],\n 
[2001, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 6, 1, 1, 2, 2, 4, 2, 1, 3, 4],\n [2002, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 6, 1, 1, 2, 2, 4, 2, 1, 3, 4],\n [3001, 1, 3, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 3, 3, 2, 6, 1, 2, 2, 5, 5, 2, 1, 3, 4],\n [3002, 2, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 2, 6, 1, 2, 2, 5, 5, 2, 1, 4, 5],\n [3003, 3, 3, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 6, 1, 2, 2, 5, 5, 2, 2, 1, 1],\n [3004, 4, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 2, 6, 1, 2, 2, 5, 5, 2, 2, 2, 2],\n [3005, 5, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 2, 4, 1, 2, 2, 5, 5, 2, 2, 1, 1],\n [3006, 6, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 2, 3, 1, 2, 2, 5, 5, 1, 4, 1, 1]])\n\n test_demos = ['hispanic', 'gender', 'race_back', 'dvr', 'cableplus', 'video_game', 'internet', 'paycable',\n 'hdtv', 'satellite', 'race_asian', 'occupation1', 'employment1', 'county_size',\n 'spanish_language1', 'kids_0to5', 'kids_6to11', 'kids_12to17', 'hh_size1', 'number_of_tvs1',\n 'education2', 'education7', 'income1', 'income9']\n\n test_npm_recode_df = pd.DataFrame(data=npm_test_data_values,\n columns=['respondentid', 'household_id', 'person_id'] + test_demos)\n\n test_npm_recode_df['weight'] = np.ones((test_npm_recode_df.shape[0],))\n test_npm_recode_df['age'] = 3\n\n test_nol_demo_df = pd.DataFrame(columns=['rn_id', 'surf_location_id', 'weight', 'age', 'gender_id',\n 'race_id', 'web_access_locations', 'education_id', 'income_id',\n 'occupation_id', 'members_2_11_count', 'members_12_17_count',\n 'zip_code',\n 'county_size_id', 'hispanic_origin_id', 'working_status_id',\n 'web_conn_speed_id'])\n\n test_nol_demo_df['rn_id'] = [1, 2, 3, 4, 5]\n test_nol_demo_df['surf_location_id'] = [1, 1, 1, 2, 1]\n test_nol_demo_df['weight'] = [0, 5, 2, 4, 6]\n test_nol_demo_df['age'] = [2, 10, 16, 25, 50]\n test_nol_demo_df['gender_id'] = [1, 1, 1, 2, 1]\n test_nol_demo_df['race_id'] = [3, 2, 1, 1, 1]\n test_nol_demo_df['income_id'] = [-1, 2, 3, -1, 1]\n test_nol_demo_df['web_access_locations'] = [2, 3, 6, -1, 11]\n test_nol_demo_df['working_status_id'] = [-2, 1, 8, 1, 11]\n test_nol_demo_df['members_2_11_count'] = [0, 4, 8, 0, 11]\n test_nol_demo_df['members_12_17_count'] = [1, 0, 0, 0, 2]\n test_nol_demo_df['education_id'] = [5, 8, 7, 0, 2]\n test_nol_demo_df['hispanic_origin_id'] = [-1, 1, 1, 2, 2]\n test_nol_demo_df['occupation_id'] = [3, 4, 1, 8, 2]\n test_nol_demo_df['web_conn_speed_id'] = [1, 6, 1, 4, 1]\n\n test_hhp_nol_df = pd.DataFrame(data=[[1001, 1],\n [1002, 2],\n [3001, 3],\n [3002, 4],\n [3003, 5]], columns=['respondentid', 'rn_id'])\n\n test_donors_df, test_recips_df = donors_recips_pandas(test_npm_recode_df, test_nol_demo_df, test_hhp_nol_df,\n test_npm_tv_usage_df, test_top_200_nol_site_df)\n\n# test_donors_df = pd.DataFrame(columns=['respondentid', 'rn_id'])\n# test_recips_df = pd.DataFrame(columns=['respondentid'])\n\n# test_donors_df['respondentid'] = []\n\n logger.info('Converting all necessary datasets into Spark Dataframes')\n\n npm_sdf = self.spark.createDataFrame(test_npm_recode_df)\n top_500_tv_sdf = self.spark.createDataFrame(test_npm_tv_usage_df)\n nol_sdf = self.spark.createDataFrame(test_nol_demo_df)\n top_200_nol_site_sdf = self.spark.createDataFrame(test_top_200_nol_site_df)\n hhp_nol_sdf = self.spark.createDataFrame(test_hhp_nol_df)\n\n start = time()\n\n res_donors_sdf, res_recips_sdf = donors_recips_spark(npm_sdf, nol_sdf, hhp_nol_sdf, top_500_tv_sdf,\n top_200_nol_site_sdf)\n\n res_donors_df = res_donors_sdf.toPandas()\n res_recips_df = res_recips_sdf.toPandas()\n\n end 
= time() - start\n\n logger.info('It took {} seconds'.format(end))\n\n donors_cols_to_test = test_donors_df.columns\n recips_cols_to_test = test_recips_df.columns\n\n logger.info('Changing numeric types to float')\n\n for col in donors_cols_to_test:\n res_donors_df[col] = res_donors_df[col].astype(float)\n test_donors_df[col] = test_donors_df[col].astype(float)\n\n for col in recips_cols_to_test:\n res_recips_df[col] = res_recips_df[col].astype(float)\n test_recips_df[col] = test_recips_df[col].astype(float)\n\n self.assert_frame_equal_with_sort(res_donors_df[donors_cols_to_test], test_donors_df[donors_cols_to_test],\n 'respondentid')\n self.assert_frame_equal_with_sort(res_recips_df[recips_cols_to_test], test_recips_df[recips_cols_to_test],\n 'respondentid')","sub_path":"cph_fusion/src/test/donor_recip_test.py","file_name":"donor_recip_test.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"414319909","text":"# 8-2_Robot_in_a_Grid.py\n\n\n# This doesn't quite answer the question - they are asking for any path\n# not all the paths & shortest.\n\n\nfrom typing import List\n\ndef robot_paths(grid:List[List[int]]):\n \"\"\"All possible paths for a robot that can go down and right.\"\"\"\n\n def helper(\n grid:List[List[int]],\n all_paths:List[List[str]],\n row: int,\n col: int,\n curr_path: List[str],\n step: int\n ):\n\n # if we are at the end, return a good path\n if grid[row][col] == -1:\n curr_path = [i for i in curr_path if i is not None]\n all_paths.append(curr_path.copy())\n\n # otherwise go right and down\n\n else:\n # go down\n if row < (len(grid) - 1) and grid[row + 1][col] < 1:\n curr_path[step] = \"D\"\n helper(grid, all_paths, row + 1, col, curr_path, step + 1)\n # go right\n if col < (len(grid[0]) - 1) and grid[row][col + 1] < 1:\n curr_path[step] = \"R\"\n helper(grid, all_paths, row, col + 1, curr_path, step + 1)\n\n\n row = col = 0\n all_paths = []\n\n step = 0\n curr_path = [None] * ((len(grid) - 1) * (len(grid[0]) - 1))\n\n helper(grid, all_paths, row, col, curr_path, step)\n shortest_path = min([len(a) for a in all_paths])\n num_paths = len(all_paths)\n\n return {\n \"all_paths\": all_paths,\n \"shortest_path\": shortest_path,\n \"num_paths\": num_paths\n }\n\n\n\n\nif __name__ == '__main__':\n\n grid = [\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, -1]\n ]\n\n print(robot_paths(grid))\n\n grid = [\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0, -1],\n ]\n\n print(robot_paths(grid))\n\n\n\n","sub_path":"cracking_the_coding_interview/8-2_Robot_in_a_Grid.py","file_name":"8-2_Robot_in_a_Grid.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"32331888","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\nclass Base():\n def __init__(self,driver):\n self.driver = driver\n self.timeout = 10\n self.t = 0.5\n def findElement(self,locator):\n #WebDriverWait(driver, 30).until(lambda x: x.find_element(locator[0],locator[1]))\n ele = WebDriverWait(self.driver,self.timeout,self.t).until(lambda x: x.find_element(*locator))\n return ele\n\n def click(self,locator):\n ele = self.findElement(locator)\n ele.click()\n\n def sendKeys(self,locator,text):\n ele = self.findElement(locator)\n 
ele.send_keys(text)\n\n def getText1(self,locator):\n ele = self.findElement(locator)\n s = ele.text\n return s\n\nif __name__ == '__main__':\n driver = webdriver.Chrome()\n driver.get(\"http://hsj-test.winsea.com/#/login?redirect=%2Fhome\")\n denglu = Base(driver)\n locator1 = (\"name\", \"username\")\n locator2 = (\"name\", \"password\")\n locator3 = (\"class name\", \"el-button\")\n denglu.sendKeys(locator1,\"123123\")\n denglu.sendKeys(locator2,\"y123456\")\n denglu.click(locator3)\n","sub_path":"web_auto/common/base+.py","file_name":"base+.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594559858","text":"\"\"\"\nAuthor : 106753027 Jung, Liang@NCCUCS\nEnvironment:\n\tOS : Ubuntu 16.04 LTS\n\tPython : 2.7.12\n\tNumpy : 1.13.3\n\tScipy : 1.0.0\n\tSklearn : 0.19.1\n\tPillow : 4.3.0\n\"\"\"\nimport os\nfrom copy import deepcopy\nimport csv\n\nimport numpy as np\nfrom scipy.fftpack import dct\nfrom sklearn.cluster import KMeans\nimport sift\n\nfrom Tkinter import *\nimport tkFileDialog \nimport tkMessageBox\nfrom ttk import Frame, Button, Label, Style\n\nfrom PIL import ImageTk, Image\n\n\nclass customizedImage(object):\n\tdef __init__(self, fileName, img, clothType):\n\t\tself._img = img.resize((224, 256))\n\t\tself._fileName = fileName\n\t\tself.clothType = clothType\n\n\t\thistogram = self._img.histogram()\n\t\tif 256 == len(histogram):\n\t\t\tnewImg = Image.new(\"RGB\", self._img.size)\n\t\t\tnewImg.paste(self._img)\n\t\t\tself._img = newImg\n\t\t\tnewImg.save(\"./dataset/\"+fileName)\n\n\t\tSIFTFilename = \"./SIFT/\"+fileName.split(\".\")[0]+\".sift\"\n\t\tif not os.path.isfile(SIFTFilename):\n\t\t\tsift.process_image(\"./dataset/\"+fileName, SIFTFilename)\n\n\t\tself._colorHistogram = np.array(self._img.histogram())\n\t\tself._colorLayout = getColorLayout(self._img)\n\t\tself.MetricDic = {\"Q1-ColorHistogram\":[], \"Q2-ColorLayout\":[], \"Q3-SIFT Visual Words\":[], \"Q4-Visual Words using stop words\":[]}\n\t\tpos, descriptors = sift.read_features_from_file(SIFTFilename)\n\t\tself.SIFTDescriptors = descriptors\n\t\tself.SIFTEnc = {}#Encoded visual words\n\t\tself.SIFTVisualWords = None\n\t\tself.SIFTWithoutStopWords = None\n\t\n\tdef show(self):\n\t\tself._img.show()\n\n\tdef close(self):\n\t\tself._img.close()\n\t\n\tdef getFileName(self):\n\t\treturn self._fileName\n\t\n\tdef getClothType(self):\n\t\treturn self.clothType\n\n\tdef getColorHistogram(self):\n\t\treturn self._colorHistogram\n\n\tdef getColorLayout(self):\n\t\treturn self._colorLayout\n\t\n\tdef getSIFTDescriptors(self):\n\t\treturn self.SIFTDescriptors\n\n\tdef getSIFTVisualWords(self):\n\t\treturn self.SIFTVisualWords\n\n\tdef getSIFTEncoding(self):\n\t\treturn self.SIFTEnc\n\t\n\tdef getSIFTWithoutStopWords(self):\n\t\treturn self.SIFTWithoutStopWords\n\t\n\tdef getMetricResult(self, metric=\"\"):\n\t\treturn self.MetricDic[metric]\t\n\t\n\tdef setMetricResult(self, metricResult, metric=\"\" ):#Top 10 only\n\t\tself.MetricDic[metric] = metricResult\n\t\n\tdef setSIFTEncoding(self, enc):\n\t\tself.SIFTEnc = enc\n\t\n\tdef setSIFTVisualWords(self, visualWords):\n\t\tself.SIFTVisualWords = visualWords\n\t\n\tdef setSIFTWithoutStopWords(self, visualWordsWithoutStopWords):\n\t\tself.SIFTWithoutStopWords = visualWordsWithoutStopWords\n\ndef zigZag(array, row, col):\n\twPos = 0\n\thPos = 0\n\tdirection = 1\n\tret = []\n\twhile wPos != col-1 or hPos != row-1:\n\t\tret.append(array[row*hPos+wPos])\n\n\t\tif (hPos == 0 or 
hPos == col-1) and wPos%2 == 0:\n\t\t\twPos += 1\n\t\t\tdirection *= -1\n\n\t\telif (wPos == 0 or wPos == row-1) and hPos%2 == 1:\n\t\t\tdirection *= -1\n\t\t\thPos += 1\n\n\t\telse:\n\t\t\twPos += direction\n\t\t\thPos -= direction\n\tret.append(array[-1])\n\treturn ret\n\t\t\t\n\ndef getColorLayout(img):\n\twidth, height = img.size\n\tblockWidth = width/8\n\tblockHeight = height/8\n\tpartitions = []\n\tfor row in xrange(0, height, blockHeight):\n\t\tfor col in xrange(0, width, blockWidth):\n\t\t\timgSlice = img.crop((row, col, row+blockHeight, col+blockWidth))\n\t\t\tpartition = np.array(imgSlice)\n\t\t\trepresentativeIcon = partition.mean(axis=(0, 1))\n\t\t\timgSlice.paste((int(representativeIcon[0]), int(representativeIcon[1]), int(representativeIcon[2])), (0, 0, imgSlice.size[0], imgSlice.size[1]))\n\t\t\timgSlice = np.array(imgSlice.convert(\"YCbCr\"))\n\t\t\tdctY = dct(imgSlice[0])\n\t\t\tdctCb = dct(imgSlice[1])\n\t\t\tdctCr = dct(imgSlice[2])\n\t\t\tpartitions.append((dctY, dctCb, dctCr))\n\tret = (np.array(zigZag([x[0] for x in partitions], 8, 8)), np.array(zigZag([x[1] for x in partitions], 8, 8)), np.array(zigZag([x[2] for x in partitions], 8, 8)))\n\treturn ret\n\ndef readMetaData(filePath):\n\tmeta = {}\n\twith open(filePath, \"rb\") as metaFile:\n\t\tmetaReader = csv.reader(metaFile, delimiter=\",\")\n\t\tisHeader = True#for skipping header\n\t\tfor row in metaReader:\n\t\t\tif isHeader:\n\t\t\t\tisHeader = False\n\t\t\t\tcontinue\n\t\t\tmeta[row[0]] = row[1]\n\treturn meta\n\ndef openFile (app):\n\tfileName = tkFileDialog.askopenfilename(initialdir = \"./dataset\")\n\tapp.fileName.set(os.path.split(fileName)[1])\n\n\ndef l2Norm(vec1, vec2):\n\treturn np.linalg.norm(vec2-vec1)\n","sub_path":"MM/MM_HW3/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"521100591","text":"import os, subprocess, shutil, sys\n\n# Import user provided configuration \nfrom config import PROGRAM_NAME, PROGRAM_OUTPUT_NAME, INPUT_PARAMETERS, LLVM_PATH, GLOBAL_STORE_LIST, EXEC_MODE, CF_STAGE_1_NUM, CF_STAGE_2_NUM\nfrom config_gen import SHARED_MEM_USE, start_index, X_threads, Y_threads, Invoc_count, end_index\nfrom string import Template\n\nSRC_NAME = \" \"+ PROGRAM_NAME + \".cu \"\nOBJ_NAME = PROGRAM_NAME + \".o \"\nOUT_NAME = PROGRAM_NAME + \".out \"\n\n\n#############################################################################\nflagHeader = \"CICC_MODIFY_OPT_MODULE=1 LD_PRELOAD=./libnvcc.so nvcc -arch=sm_30 -rdc=true -dc -g -G -Xptxas -O0 -D BAMBOO_PROFILING -I .\"\nktraceFlag = \" -D KERNELTRACE\"\nlinkFlags = \"\"\noptFlags = \"\"\n#############################################################################\nmakeCommand1 = flagHeader + SRC_NAME + \"-o \" + OBJ_NAME + ktraceFlag\n\ndef collectData(dir_name, result_name, keep_after_ll):\n\n if result_name != \"\":\n if (os.path.exists(\"results/\" + result_name)):\n os.remove(\"results/\" + result_name)\n \n file_list = os.listdir(\"libs/\" + dir_name +\"/lib\")\n\n os.system(\"cp libs/\" + dir_name + \"/lib/* .\")\n\n os.system(makeCommand1)\n os.system(\"nvcc -arch=sm_30 \" + dir_name + \".cu -c -dc -O0\")\n os.system(\"nvcc -arch=sm_30 \" + dir_name + \".o \" + OBJ_NAME + \" -o \" + OUT_NAME + \" -O0\")\n\n goldenOutput = subprocess.check_output(\"./\" + OUT_NAME + \" \" + INPUT_PARAMETERS, shell=True)\n #print(goldenOutput)\n\n # Clean the copied files\n for file in file_list:\n os.remove(file)\n\n # Clean the 
produced files\n os.remove(PROGRAM_NAME + \".out\")\n os.remove(PROGRAM_NAME + \".o\")\n os.remove(dir_name + \".o\")\n os.remove(\"opt_bamboo_before.ll\")\n \n if (keep_after_ll == False):\n os.remove(\"opt_bamboo_after.ll\")\n else:\n os.rename('opt_bamboo_after.ll', 'indexed.ll')\n\n if result_name != \"\":\n # Move the results to another directory \n os.system(\"mv \" + result_name + \" results/\")\n \n if PROGRAM_OUTPUT_NAME != \"\":\n os.remove(PROGRAM_OUTPUT_NAME)\n\ndef populate_file():\n\n file1 = open('local_param.h', 'w')\n\n #Read the value from config_gen and put in the local_param.h\n file1.write('#define X_MAX '+ str(X_threads))\n file1.write('\\n#define Y_MAX ' + str(Y_threads))\n file1.write('\\n\\n#define START_LOOP ' + str(start_index))\n file1.write('\\n#define END_LOOP ' + str(end_index))\n\n file1.write('\\n\\n#define CF_1_NUM ' + str(CF_STAGE_1_NUM))\n file1.write('\\n#define CF_2_NUM ' + str(CF_STAGE_2_NUM))\n\n file1.close()\n\ndef prune_threads():\n\n #Populate the local_param.h file\n populate_file()\n\n first_stage = True\n stores = []\n loads = []\n\n param_file = open(\"local_param.h\")\n\n if start_index == 0:\n first_stage = False\n \n if first_stage == True:\n print(\"Profiling 1st stage of memory profiling\")\n\n # Control flow inside loop steps\n collectData(\"controlFlow-1\", \"control_flow_group-1.txt\", False)\n\n print(\"Profiling 2nd stage of memory profiling\")\n collectData(\"controlFlow-2\", \"control_flow_group-2.txt\", False)\n\n xIDs = []\n yIDs = []\n invo_count = []\n representative_threads = []\n \n arg = \"True\" if (first_stage == True) else \"False\"\n\n # Extract representative threads\n output = subprocess.check_output(\"python parse.py \" + arg, shell=True)\n output = output.decode(\"utf-8\")\n\n output = output.replace(\" \", \"\")\n \n representative_threads = output.splitlines()\n\n for thread in representative_threads:\n indices = thread[1:-1]\n indices = indices.split(',')\n xIDs.append(int(indices[1]))\n yIDs.append(int(indices[2]))\n invo_count.append(int(indices[0]))\n\n shared_ls = open(\"shared_mem.txt\")\n\n for line in shared_ls:\n line = line.strip()\n line = line.split(\" \")\n\n if line[1] == 'L':\n loads.append(line[0])\n else:\n stores.append(line[0])\n\n # Construct conditional for memory profiling\n cond_str = \"if(\"\n\n for iterator in range(len(xIDs)):\n cond = \"(idx == \" + str(xIDs[iterator]) + \" \\&\\& \" \"call_count == \" + str(invo_count[iterator]) + \" \\&\\& \" \"idy == \" + str(yIDs[iterator])+ \")\"\n if iterator != (len(xIDs) - 1):\n cond += ' || '\n\n cond_str += cond\n\n cond_str += \")\"\n\n if (os.path.exists(\"libs/memPro\")):\n os.system(\"rm -rf libs/memPro\")\n \n os.system(\"cp -r libs/memPro_std libs/memPro\")\n\n command = \"sed -i 's/if (COND)/\" + cond_str + \"/' libs/memPro/lib/memPro.cu\"\n os.system(command)\n\n # Put filter for shared loads\n cond_str = \"if(!(\"\n\n for iterator in range(len(loads)):\n cond = \"index==\" + loads[iterator]\n if iterator != (len(loads) - 1):\n cond += ' || '\n\n cond_str += cond\n\n cond_str += \"))\"\n\n if (len(loads) == 0):\n cond_str = \"if(1)\"\n\n command = \"sed -i 's/if (LOAD)/\" + cond_str + \"/' libs/memPro/lib/memPro.cu\"\n\n os.system(command)\n\n # Put filter for shared stores\n cond_str = \"if(!(\"\n\n for iterator in range(len(stores)):\n cond = \"index==\" + stores[iterator]\n if iterator != (len(stores) - 1):\n cond += ' || '\n\n cond_str += cond\n\n cond_str += \"))\"\n\n if (len(stores) == 0):\n cond_str = \"if(1)\"\n\n command = 
\"sed -i 's/if (STORE)/\" + cond_str + \"/' libs/memPro/lib/memPro.cu\"\n\n os.system(command)\n\n # Profile the load and store addreses\n collectData(\"memPro\", \"profile_mem_result.txt\", False)\n\n # If benchmark uses shared memory\n if SHARED_MEM_USE == True:\n\n # Rename the previous memory trace\n os.rename(\"results/profile_mem_result.txt\", \"results/profile_mem_result.txt_1\")\n\n # Construct conditional for memory profiling\n cond_str = \"if(\"\n\n for iterator in range(len(xIDs)):\n cond = \"(BX==((TX-\" + str(xIDs[iterator]) + \")\\/DX))\" + \" \\&\\& \" \"call_count == \" + str(invo_count[iterator]) + \" \\&\\& \" \"(BY==((TY-\" + str(yIDs[iterator]) + \")\\/DY))\"\n if iterator != (len(xIDs) - 1):\n cond += ' || '\n\n cond_str += cond\n\n cond_str += \")\"\n\n if (os.path.exists(\"libs/memPro\")):\n os.system(\"rm -rf libs/memPro\")\n \n os.system(\"cp -r libs/memPro2_std libs/memPro\")\n\n command = \"sed -i 's/if (COND)/\" + cond_str + \"/' libs/memPro/lib/memPro.cu\"\n\n os.system(command)\n\n # Filter for shared loads\n cond_str = \"if(\"\n\n for iterator in range(len(loads)):\n cond = \"index==\" + loads[iterator]\n if iterator != (len(loads) - 1):\n cond += ' || '\n\n cond_str += cond\n\n cond_str += \")\"\n\n command = \"sed -i 's/if (LOAD)/\" + cond_str + \"/' libs/memPro/lib/memPro.cu\"\n\n os.system(command)\n\n # Filter for shared stores\n cond_str = \"if(\"\n\n for iterator in range(len(stores)):\n cond = \"index==\" + stores[iterator]\n if iterator != (len(stores) - 1):\n cond += ' || '\n\n cond_str += cond\n\n cond_str += \")\"\n\n command = \"sed -i 's/if (STORE)/\" + cond_str + \"/' libs/memPro/lib/memPro.cu\"\n\n os.system(command)\n\n # Profile the load and store addreses\n collectData(\"memPro\", \"profile_mem_result.txt\", False)\n\n os.rename(\"results/profile_mem_result.txt\", \"results/profile_mem_result.txt_2\")\n\n # Concatenate the two memory traces\n os.system(\"cat results/profile_mem_result.txt_1 results/profile_mem_result.txt_2 > results/profile_mem_result.txt\" )\n\n # Removing the files\n os.remove(\"results/profile_mem_result.txt_1\")\n os.remove(\"results/profile_mem_result.txt_2\")\n\n\ndef pofile_lucky_stores():\n\n cond_str = \"if(\"\n\n for store_index in GLOBAL_STORE_LIST:\n cond = \"index == \" + str(store_index)\n cond += ' || '\n\n cond_str += cond\n\n cond_str = cond_str[:-4]\n cond_str += \")\"\n\n if (os.path.exists(\"libs/memValPro\")):\n os.system(\"rm -rf libs/memValPro\")\n \n os.system(\"cp -r libs/memValPro_std libs/memValPro\")\n\n command = \"sed -i 's/if (INDEX)/\" + cond_str + \"/' libs/memValPro/lib/memValPro.cu\"\n \n os.system(command)\n\n collectData(\"memValPro\", \"profile_mem_val_result.txt\", False)\n\n file1 = open(\"results/profile_mem_val_result.txt\")\n\n zero_count_dic = {}\n total_count_dic = {}\n\n for line in file1:\n line = line.strip()\n line = line.split(\" \")\n val = float(line[1])\n index = int(line[0])\n\n if index not in zero_count_dic:\n zero_count_dic[index] = 0\n total_count_dic[index] = 0\n\n if val == 0:\n zero_count_dic[index]+=1\n\n total_count_dic[index]+=1\n \n file1.close()\n\n file1 = open(\"results/lucky_store_details.txt\", 'w')\n\n for index in zero_count_dic:\n\n file1.write(str(index) + \" \" + str(float(zero_count_dic[index])/total_count_dic[index]) + \"\\n\")\n\n file1.close()\n\ndef profile():\n\n # Profile the nummber of times each instruction is called\n collectData(\"instCount\", \"instCountResult.txt\", False)\n\n # Profile the average value of arguments of compare 
instructions\n    collectData(\"cmpVal\", \"profile_cmp_value_result.txt\", False)\n\n    # Profile the average value of multiply operands\n    collectData(\"mulPro\", \"profile_mul_value_result.txt\", False)\n\n    # Profile the number of times compare instructions resolve to 1 or 0\n    #collectData(\"callCount\", \"profile_call_prob_result.txt\",False)\n    call_file = open(\"results/profile_call_prob_result.txt\", \"w\")\n    call_file.close()\n    \n    # Record load and store instruction and crash rate \"Temp here\"\n    os.system(\"python find_load_store.py > results/crash_rate.txt\")\n    \n    # Produce fi_breakdown.txt, if it is already present delete it\n    if (os.path.exists(\"results/fi_breakdown.txt\")):\n        os.remove(\"results/fi_breakdown.txt\")\n    \n    with open(\"results/instCountResult.txt\", 'r') as rf:\n        lines = rf.readlines()\n        for line in lines:\n            if \":\" not in line:\n                continue\n            with open(\"results/fi_breakdown.txt\", 'a') as wf:\n                index = line.split(\": \")[0]\n                count = line.split(\": \")[1].replace(\"\\n\", \"\")\n                wf.write(\"-- FI Index: \" + index + \", : , : , : , Total FI: \" + count + \"\\n\")\n    \n    # Profile the number of times compare instructions resolve to 1 or 0\n    collectData(\"cmpProb\", \"profile_cmp_prob_result.txt\",False)\n    \n    # Profile the average value of arguments of shift instructions\n    collectData(\"shftVal\", \"profile_shift_value_result.txt\", False)\n    \n    # Run resolveCmpProb.py script\n    os.system(\"python resolveCmpProb.py readable_indexed.ll\")\n    \n    # Find the tuples for instructions\n    os.system(\"python getInstTuples.py readable_indexed.ll\")\n    \n    # Simplify instruction Tuples\n    os.system(\"python simplifyInstTuples.py\")\n\n    count_file = open(\"results/instCountResult.txt\")\n    count_list = []\n    DO_REDUCTION = False\n    \n    for line in count_file:\n        count_list.append(int(line.split(\": \")[1]))\n    count_file.close()\n\n    if max(count_list) > 65536:\n        DO_REDUCTION = True\n\n    if DO_REDUCTION == True:\n        prune_threads()\n    else:\n        collectData(\"memPro\", \"profile_mem_result.txt\", False)\n\n    \n    os.rename(\"results/profile_mem_result.txt\", \"results/profile_mem_result_1.txt\")\n    \n    memFile = open(\"results/profile_mem_result_1.txt\")\n    newFile = \"\"\n    \n    for line in memFile:\n        if \"(nil)\" in line:\n            newFile += line.replace('(nil)', '0x0')\n        else:\n            newFile += line\n    \n    mem_f = open(\"results/profile_mem_result.txt\", 'w')\n    mem_f.write(newFile)\n    mem_f.close()\n    \n    os.remove(\"results/profile_mem_result_1.txt\")\n\n    profile_lucky_stores()\n\n    \ndef execute_trident():\n    \n    print(\"\\n\\nRunning Memory sub-model ...\\n\\n\")\n    os.system(\"python getStoreMaskingRate.py \" + PROGRAM_NAME + \".cu\")\n    \n    # Validating model at 3 levels\n    print(\"\\n\\nRunning GPU-Trident, Results will be in prediction.results\\n\\n\")\n    if EXEC_MODE == 0:\n        os.system(\"python validateModel.py \" + PROGRAM_NAME + \".cu\" + \" > results/prediction.results \")\n    else:\n        os.system(\"python validateModel_m.py \" + PROGRAM_NAME + \".cu\" + \" > results/prediction.results \")\n\n# Main function\nif __name__ == \"__main__\":\n\n    if sys.argv[1] == 'index':\n\n        # Index the instructions and get the IR file\n        collectData(\"instIndexer\", \"\", True)\n        \n        # Convert opt_bamboo_after.ll into readable format\n        os.system(LLVM_PATH + \"/bin/llvm-dis indexed.ll -o readable_indexed.ll\")\n        os.remove(\"indexed.ll\")\n        \n    elif sys.argv[1] == 'profile':\n\n        if not os.path.exists('results'):\n 
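# first profiling run: create the output directory\n            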
os.makedirs('results')\n call_file = open(\"results/loop_terminating_cmp_list.txt\", \"w\")\n call_file.close()\n\n profile()\n \n elif sys.argv[1] == 'execute': \n execute_trident()\n \n else:\n print(\"\\n\\nWrong input argument\\n\")\n","sub_path":"benchmarks/NW K1/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":13255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60924483","text":"PLUGIN_PREFIX = \"/video/pakitvtv\"\nNAME = L('Title')\n\n# make sure to replace artwork with what you want\n# these filenames reference the example files in\n# the Contents/Resources/ folder in the bundle\nART = 'art-default.jpg'\nICON = 'icon.png'\n####################################################################################################\nARYICON = 'arylogo.jpg'\nSAMAICON = \"SamaLogo.jpg\"\nDUNIYAICON = 'dunyalogo.png'\nARYDIGITALICON = 'ARYDIGITAL.jpg'\nGEOICON='geologo.gif'\n####################################################################################################\n\ndef Start():\n Plugin.AddPrefixHandler(PLUGIN_PREFIX, MainMenu, NAME, ICON, ART)\n Plugin.AddViewGroup(\"InfoList\", viewMode=\"InfoList\", mediaType=\"items\")\n Plugin.AddViewGroup(\"List\", viewMode=\"List\", mediaType=\"items\")\n MediaContainer.title1 = NAME\n MediaContainer.viewGroup = \"Grid\"\n MediaContainer.art = R(ART)\n DirectoryItem.thumb = R(ICON)\n VideoItem.thumb = R(ICON)\n PrefsItem.thumb = R(ICON)\n\n InputDirectoryItem.thumb = R(ICON)\n HTTP.CacheTime = CACHE_1MONTH\n # Set header for all HTTP requests\n# HTTP.Headers[\"User-agent\"] = USER_AGENT\n# HTTP.Headers[\"Referer\"] = REFERER\n\n\n#### the rest of these are user created functions and\n#### are not reserved by the plugin framework.\n#### see: http://dev.plexapp.com/docs/Functions.html for\ndef MainMenu():\n dir = MediaContainer(viewGroup=\"InfoList\")\n\t\n title = \"Abb Takk\"\n summary = \"Abb Takk Live\"\n provider=\"http://dstreamone.com:1935/veritas/smil:veritas1.smil/manifest.m3u8\"\n image=\"http://dstreamone.com/veritas/jp/AbbTakk.jpg\"\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n\t\t\t\n title = \"ARY News\"\n summary = \"ARY News Live\"\n provider=\"http://109.163.232.58:1935/live/myStream/playlist.m3u8\"\n image=R(ARYICON)\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n \n \n title = \"ARY Digital\"\n summary = \"ARY Digital Live\"\n provider=\"http://93.115.85.17:1935/ARYDIGITAL/myStream/playlist.m3u8\"\n image=R(ARYDIGITALICON)\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image, clip=\"myStream\"))\n \n \n title = \"SAMA News\"\n summary = \"SAMA News Live\"\n provider=\"http://samaatr.videocdn.scaleengine.net/samaatr-iphone/play/samaatr_264k.stream/playlist.m3u8\"\n image=R(SAMAICON)\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n \n \n \n title = \"HUM TV\"\n summary = \"HUM TV Live\"\n provider=\"rtmp://cdn.ebound.tv/tv?wmsAuthSign=/c2VydmVyX3RpbWU9MS8xNy8yMDE1IDEwOjM3OjEzIFBNJm\"#humtv\n 
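# rtmp:// sources like this one are wrapped with RTMPVideoURL in CreateMediaObject below\n    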
image=\"http://www.hum.tv/images/hum_logo.png\"\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image,clip=\"Live\"))\n \n \n title = \"Duniya News\"\n summary = \"Duniya News Live\"\n provider=\"http://imob.dunyanews.tv:1935/live/smil:stream.smil/playlist.m3u8\"\n image=R(DUNIYAICON)\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n \n \n title = \"Geo News\"\n summary = \"Geo News Live\"\n provider=\"rtmp://198.199.107.119/live?dig=4a38ebb81706e5cde9aea26c\"\n image=R(GEOICON)\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n \n \n title = \"Channel92\"\n summary = \"Channel92 Live\"\n provider=\"http://92hd.styleofglobal.com:1935/live/92news_360p/playlist.m3u8\"\n image=\"http://92newshd.tv/wp-content/themes/channel-92c/images/logo-newp.png\"\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=\"http://92newshd.tv/live-tv/player.png\"))\n\t\t\t\n title = \"Jaag TV\"\n summary = \"JAAG TV Live\"\n provider=\"http://38.96.148.99:1935/live2/jaagtv/playlist.m3u8\"\n image=\"https://lh6.ggpht.com/2ILquUUWCnnwuCBglGpx2-yvz4sb75Xop1EbI3OQgU4lY3bIJKyjUUUu-IPtUX-BEHk=w300\"\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n\n title = \"PTV World\"\n summary = \"PTV World Live\"\n provider=\"http://app.pakistanvision.tv:1935/live/8090/player.m3u8\"\n image=\"http://dnews.pk/wp-content/uploads/2014/10/ptv-news-live.jpg\"\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=image))\n\t\t\t\n title = \"PTV News\"\n summary = \"PTV News Live\"\n provider=\"http://app.pakistanvision.tv:1935/live/PTVnews/player.m3u8\"\n image=\"https://lh6.ggpht.com/2ILquUUWCnnwuCBglGpx2-yvz4sb75Xop1EbI3OQgU4lY3bIJKyjUUUu-IPtUX-BEHk=w300\"\n dir.Append(\n Function(\n DirectoryItem(\n key=StreamMenu,\n title=title,\n subtitle=\"\",\n summary=summary,\n thumb=image,\n art=image\n ),title=title,provider=provider, image=\"https://lh6.ggpht.com/2ILquUUWCnnwuCBglGpx2-yvz4sb75Xop1EbI3OQgU4lY3bIJKyjUUUu-IPtUX-BEHk=w300\"))\n\t\n return dir\n \ndef StreamMenu(sender,title, provider,image,clip=\"\"):\n dir = ObjectContainer(title2=title)\n vco = VideoClipObject(key= Callback(StreamMenu, sender=sender, title=title, provider=provider, image=image,clip=clip),\n\t\t\trating_key = title\n\t\t\t,title=title,\n\t\t\tthumb=image,\n\t\t\tart=image,\n\t\t\titems=[CreateMediaObject(provider,clip)])\n dir.add(vco)\n return dir\n\ndef CreateMediaObject(provider,clip):\n mediakey=HTTPLiveStreamURL(provider)\n protocol = None\n container = None\n mediaobject = None\n \n if provider.startswith('rtmp'):\n protocol='rtmp'\n mediakey=RTMPVideoURL(provider, clip=clip, live=True)\n mediaobject= MediaObject(\n\t\t\t parts = [\n\t\t\t PartObject(key=mediakey)\n\t\t\t ],\n\t\t\t video_codec = VideoCodec.H264,\n\t\t\t audio_codec = AudioCodec.AAC,\n\t\t\t 
audio_channels = 2,\n\t\t\t optimized_for_streaming = True\n\t\t\t ,protocol=protocol\n\t\t\t ,container = container\n\t\t\t )\n else: \n mediaobject = MediaObject(\n\t\t\t parts = [\n\t\t\t PartObject(key=mediakey)\n\t\t\t ],\n\t\t\t optimized_for_streaming = True\n\t\t\t ,protocol=protocol\n\t\t\t ,container = container\n\t\t\t )\n \n return mediaobject\n\n\ndef ValidatePrefs():\n pass\n\n#####################################################################################################\ndef LiveStream(provider,title,thumb=R(ICON), include_container=False):\n vco = VideoClipObject(\n key = Callback(LiveStream, provider=provider, title=title, thumb=thumb, include_container=True),\n rating_key = provider,\n title = title,\n thumb = thumb,\n#\theight = HEIGHT,\n# width = WIDTH,\n items = [\n MediaObject(\n\tparts = [\n\t PartObject(key=HTTPLiveStreamURL(provider))\n\t ]\n\t,optimized_for_streaming = True\n\t)\n ]\n )\n if include_container:\n return ObjectContainer(objects=[vco])\n else:\n return vco\n","sub_path":"Contents/Code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"435219180","text":"#!/usr/bin/env python\n#-*-coding:utf-8 -*-\n\nimport os\nimport numpy as np \n\nfrom VLADlib import VLAD \nfrom RWoperation import rwOperation\n\nimport itertools\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import BallTree\nfrom sklearn.externals import joblib\nimport pickle\nimport glob\nimport cv2\n\n\ndef getDescriptors(src_image_feature_path):\n #read image feature for \n #path = '../datafolder/test'\n image_feature_path = os.path.join(src_image_feature_path, 'src_image_feature_path.path')\n if os.path.exists( image_feature_path ):\n path_dict = rwOperation.read_dict(image_feature_path)\n else:\n print('have no src_image_feature_path.path to read!')\n\n descriptors=list()\n\n for key in path_dict.keys()[0:100]:\n one_image_feature_path = path_dict[key]\n _, _, img_des = rwOperation.read_feature(one_image_feature_path)\n descriptors.extend(img_des.tolist())\n\n descriptors = np.asarray(descriptors)\n return descriptors\n\n # for imagePath in glob.glob(path+\"/*.jpg\"):\n # print(imagePath)\n # im=cv2.imread(imagePath)\n # kp,des = functionHandleDescriptor(im)\n # if len(des)!=0:\n # descriptors.append(des)\n # print(len(kp))\n \n # #flatten list \n # descriptors = list(itertools.chain.from_iterable(descriptors))\n #list to array\n\n# training = a set of descriptors\ndef kMeansDictionary(training, k, save_path):\n\n #K-means algorithm\n est = KMeans(n_clusters=k,init='k-means++',tol=0.0001,verbose=1).fit(training)\n joblib.dump( est, os.path.join( save_path, 'surf_cluster.pkl'))#save cluster result\n\ndef read_kmean_result(surf_cluster_path):\n est = joblib.load(os.path.join( surf_cluster_path, 'surf_cluster.pkl') )\n return est\n\ndef save_VLAD_to_proto(src_image_feature_path, visualDictionary):\n image_feature_path = os.path.join(src_image_feature_path, 'src_image_feature_path.path')\n if os.path.exists( image_feature_path ):\n path_dict = rwOperation.read_dict(image_feature_path)\n else:\n print('have no src_image_feature_path.path to read!')\n\n descriptors_dict=dict()\n\n for key in path_dict.keys():\n one_image_feature_path = path_dict[key]\n _, _, img_des = rwOperation.read_feature(one_image_feature_path)\n v=VLAD.VLAD(img_des,visualDictionary)\n descriptors_dict[key] = v\n rwOperation.save_dict_des(descriptors_dict, 
os.path.join(src_image_feature_path, 'descriptors_dict.vlad'))\n\ndef load_VLAD_from_proto(descriptor_dict_path):\n    if not os.path.exists( descriptor_dict_path ):\n        print('descriptor_dict_path does not exist!')\n    descriptors_dict = rwOperation.read_dict_des( descriptor_dict_path)\n    return descriptors_dict\n\nif __name__ == '__main__':\n\n    #test1\n    path = '../datafolder/test2'\n    train_feature = getDescriptors(path)\n    kMeansDictionary(train_feature, 100, path)\n    print( 'cluster finished!')\n    res = read_kmean_result(path)\n\n    save_VLAD_to_proto(path, res)\n    print( 'save vlad feature finished!')\n    des_dict = load_VLAD_from_proto(os.path.join(path, 'descriptors_dict.vlad'))\n\n\n    ##cluster result save and load example\n    #joblib.dump( res, 'surf_cluster.pkl')\n    #km = joblib.load('surf_cluster.pkl')\n\n","sub_path":"VLADtoproto.py","file_name":"VLADtoproto.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"269445332","text":"\ndef NameRepairing(name):\n\n    # strings are immutable, so repair the name in a list of characters\n    chars = list(name)\n\n    for i in range(0, len(chars)):\n        if i == 0:\n            chars[i] = chars[i].upper()\n        elif chars[i-1] == \" \":\n            chars[i] = chars[i].upper()\n\n    name = \"\".join(chars)\n\n    print(\"The repaired name: \", name)\n    return name\n\n\ndef main():\n\n    print(\"Enter a full name:\")\n\n    name = str(input())\n\n    NameRepairing(name)\n\n\nmain()\n","sub_path":"Intro to Python Notes/new14/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"162680580","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom batchgenerators.augmentations.crop_and_pad_augmentations import crop\n\n\ndef reshape_old(orig_img, append_value=-1024, new_shape=(512, 512, 512)):\n    reshaped_image = np.zeros(new_shape)\n    reshaped_image[...] 
= append_value\n x_offset = 0\n y_offset = 0 # (new_shape[1] - orig_img.shape[1]) // 2\n z_offset = 0 # (new_shape[2] - orig_img.shape[2]) // 2\n\n reshaped_image[x_offset:orig_img.shape[0]+x_offset, y_offset:orig_img.shape[1]+y_offset, z_offset:orig_img.shape[2]+z_offset] = orig_img\n # insert temp_img.min() as background value\n\n return reshaped_image\n\n\ndef reshape(image_seg, crop_size):\n # image_seg is numpy array shape [2, #slices, y, x]: for dim0, index 0: image and index 1: seg labels\n # this needs to be separated for the crop function.\n # But crop function expects images and segs (see below) to have shape [batch, #slices, y, x]\n # I am not using batches here so that's why i insert dummy first dim.\n # crop_size: integer (assuming new size is 2**n and width=height\n images = image_seg[0][np.newaxis]\n segs = image_seg[1][np.newaxis]\n\n data_cropped, segs_cropped = crop(images, seg=segs, crop_size=crop_size, margins=(0, 0, 0), crop_type=\"center\",\n pad_mode='constant', pad_kwargs={'constant_values': 0},\n pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0})\n # squeeze out again dummy dims\n return np.stack((np.squeeze(data_cropped), np.squeeze(segs_cropped)))","sub_path":"datasets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"67123861","text":"\n\n\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.io\nfrom imgaug import augmenters as iaa\n\nROOT_DIR = os.path.abspath(\"D:/xampp/htdocs/Mask_RCNN\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\n\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nfrom mrcnn import model as modellib\nfrom mrcnn import visualize\nglobimage_ids=[]\nclass CarConfig(Config):\n \"\"\"Configuration for training on the Car segmentation dataset.\"\"\"\n # Give the configuration a recognizable name\n NAME = \"Car\"\n\n # Adjust depending on your GPU memory\n IMAGES_PER_GPU = 1\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 2 # Background + Car\n\n # Number of training and validation steps per epoch\n STEPS_PER_EPOCH = 500\n\n # Don't exclude based on confidence. Since we have two classes\n # then 0.5 is the minimum anyway as it picks between Car and BG\n DETECTION_MIN_CONFIDENCE = 0.90\n\n # Backbone network architecture\n # Supported values are: resnet50, resnet101\n BACKBONE = \"resnet50\"\n\n # Input image resizing\n # Random crops of size 512x512\n IMAGE_RESIZE_MODE = \"crop\"\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 512\n IMAGE_MIN_SCALE = 2.0\n\n # Length of square anchor side in pixels\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n\n # ROIs kept after non-maximum supression (training and inference)\n POST_NMS_ROIS_TRAINING = 1000\n POST_NMS_ROIS_INFERENCE = 2000\n\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.9\n\n # How many anchors per image to use for RPN training\n RPN_TRAIN_ANCHORS_PER_IMAGE = 64\n\n # Image mean (RGB)\n MEAN_PIXEL = np.array([43.53, 39.56, 48.22])\n\n # If enabled, resizes instance masks to a smaller size to reduce\n # memory load. 
Recommended when using high-resolution images.\n USE_MINI_MASK = True\n MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n\n # Number of ROIs per image to feed to classifier/mask heads\n # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n # enough positive proposals to fill this and keep a positive:negative\n # ratio of 1:3. You can increase the number of proposals by adjusting\n # the RPN NMS threshold.\n TRAIN_ROIS_PER_IMAGE = 128\n\n # Maximum number of ground truth instances to use in one image\n MAX_GT_INSTANCES = 200\n\n # Max number of final detections per image\n DETECTION_MAX_INSTANCES = 6\n\n\nclass CarInferenceConfig(CarConfig):\n # Set batch size to 1 to run one image at a time\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n # Don't resize imager for inferencing\n IMAGE_RESIZE_MODE = \"pad64\"\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass CarDataset(utils.Dataset):\n globimage_ids=[]\n def getimgid(self):\n\n return self.globimage_ids\n\n def load_Car(self, dataset_dir, subset):\n\n # Add classes. We have one class.\n # Naming the dataset Car, and the class Car\n\n self.add_class(\"Car\",1,\"Damage\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n subset_dir = subset\n datasets_dir = os.path.join(dataset_dir, subset_dir)\n\n # Get image ids from directory names\n\n # Get image ids from directory names\n image_ids = next(os.walk(datasets_dir))[2]\n self.globimage_ids = image_ids\n\n\n\n for image_id in image_ids:\n image_path = os.path.join(datasets_dir, image_id)\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"Car\",\n image_id=image_id, # use file name as a unique image id\n path=image_path,\n width=width, height=height)\n\n\n\n def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"Car\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"Car\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n\n\ndef detect(dataset_dir, subset,logs):\n import matplotlib\n # Agg backend runs without a display\n\n import matplotlib.pyplot as plt\n\n import os\n import sys\n import json\n import datetime\n import numpy as np\n import skimage.io\n from imgaug import augmenters as iaa\n\n # Root directory of the project\n ROOT_DIR = os.path.abspath(\"../../\")\n path = os.path.join(dataset_dir,subset)\n # Import Mask RCNN\n sys.path.append(ROOT_DIR) # To find local version of the library\n from mrcnn.config import Config\n from mrcnn import utils\n from mrcnn import model as modellib\n from mrcnn import visualize\n\n # Path to trained weights file\n COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n # Directory to save logs and model checkpoints, if not provided\n # through the command line argument --logs\n DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n # Results directory\n # Save submission files here\n RESULTS_DIR = os.path.join(ROOT_DIR, \"results/Car/\")\n config = CarInferenceConfig()\n config.display()\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=logs)\n weights_path = \"D:/HARSH/Documents/GitHub/Mask_RCNN/logs/car20191015T1253/mask_rcnn_car_0033.h5\"\n model.load_weights(weights_path, by_name=True)\n\n \"\"\"Run detection on images in the given directory.\"\"\"\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = CarDataset()\n dataset.load_Car(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n\n\n result={}\n globimage_ids = dataset.getimgid();\n\n\n result['claim_id']=subset\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Encode image to RLE. 
Returns a string of multiple lines\n\n\n area = np.reshape(r['masks'], (-1, r['masks'].shape[-1])).astype(np.float32).sum()\n print( \"Area of Damage detected :\",area)\n h,w,c = image.shape\n perarea = (area*100)/(h*w)\n print(\"Percent : \",perarea)\n name = globimage_ids[image_id].replace(\".jpg\",\"\")\n result[name]=f\"{perarea:.2f}\"\n\n # Save image with masks\n visualize.display_instances(\n image ,name,path, r['rois'], r['masks'], r['class_ids'],\n dataset.class_names, r['scores'],\n show_bbox=True, show_mask=True,\n title=\"Predictions\")\n print(result)\n return result\n # Save to csv file\n","sub_path":"Back end/samples/Car/Car.py","file_name":"Car.py","file_ext":"py","file_size_in_byte":8355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235317762","text":"'''\nCreated on Sep 8, 2012\n\n@author: jgiles\n'''\n\nfrom django.conf.urls.defaults import patterns, include\n\nurlpatterns = patterns('colonial.views',\n (r'^api/', include('colonial.apiurls')),\n (r'^cron/', include('colonial.cronurls')),\n (r'^/?$', 'home'),\n (r'^events/?$', 'events'),\n (r'^photos/?$', 'photos'),\n (r'^about/?$', 'about'),\n (r'^officers/?$', 'officers'),\n (r'^management/?$', 'management'),\n (r'^joining/?$', 'joining'),\n (r'^history/?$', 'history'),\n (r'^feedback/?$', 'feedback'),\n (r'^meals/(?P-?\\d+)$', 'meals'),\n)\n","sub_path":"colonial/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393167438","text":"# Programmer: Rohan Kosalge\n# Date: October 29th, 2018 \n# Purpose: Find all roots of any polynomial\n\nfrom rutils import *\nfrom ralgebra import *\n\n# lol back when I totally forgot about ASCII\nletters = ['a', 'b', 'c', 'd', 'e', 'f',\n 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's',\n 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n# I should really make this program compact\n\ndef editFindList(alist, key): # special one I really needed for NR\n keyFound = False\n keyNum = 0\n blist = []\n for x in range(len(alist)):\n element = alist[len(alist)-(x+1)]\n num = element[len(element)-1]\n for y in range(len(element)):\n if element[y] == key and keyFound == False:\n keyFound = True\n keyNum = num\n break\n if keyFound == True and num == keyNum-1:\n for x in range(len(element[:len(element)-1])):\n blist.append(element[x])\n break\n\n return blist\n\ndef editEx(expression):\n syms = ['+', '-']\n lim = 0\n for x in range(len(expression)-1):\n if expression[x] == 'x' and (x!= len(expression)-1 and expression[x+1] not in syms):\n lim+=1\n for x in range(len(expression)-1+lim):\n if expression[x] == 'x' and (x!= len(expression)-1 and expression[x+1] not in syms):\n expressionList = list(expression)\n expressionList.insert(x+1, '^')\n expression = combine(expressionList)\n \n expression = expression.replace('^', '**')\n \n newx = \"(x)\"\n expression = expression.replace(\"x\", newx)\n\n lim = 0\n for x in range(len(expression)):\n if expression[x] == '(' and (expression[x-1] not in syms and x!=0):\n lim+=1\n \n for x in range(len(expression)+lim):\n if expression[x] == '(' and (expression[x-1] not in syms and x!=0):\n expressionList = list(expression)\n expressionList.insert(x+1, '*')\n expression = combine(expressionList)\n\n expression = expression.replace(\"(*\", \"*(\")\n return expression\n\n\ndef findDerivative(terms):\n syms = ['+', '-']\n newterms = []\n for x in range(len(terms)):\n term = 
terms[x]\n newterm = ''\n\n for y in range(len(term)):\n if term[y] == 'x': # assuming that variable is 'x' for now...\n coefficient = term[:y]\n exponent = term[y+1:]\n\n termfound = True\n addx = True\n addplus = False\n if 'x' not in term: \n coefficient = '0'\n exponent = '0'\n termfound = False\n \n if (coefficient in syms) or (coefficient == ''):\n coefficient += '1'\n\n if exponent == '':\n exponent = '1'\n\n coefficient = float(coefficient)\n exponent = float(exponent)\n\n coefficient*=exponent\n exponent-=1\n\n if coefficient.is_integer() == True:\n coefficient = int(coefficient)\n if exponent.is_integer() == True:\n exponent = int(exponent)\n\n if coefficient > 0 and len(newterms)!=0:\n addplus = True\n newcoefficient = str(coefficient)\n newexponent = str(exponent)\n if newexponent == '1':\n newexponent = ''\n if newexponent == '0':\n newexponent = ''\n addx = False\n \n if termfound == False:\n newterm = ''\n else:\n if addx == False:\n if addplus == True:\n newterm = '+' + str(newcoefficient)\n else:\n newterm = str(newcoefficient)\n else:\n if addplus == True:\n newterm = '+' + str(newcoefficient) + 'x' + str(newexponent)\n else:\n newterm = str(newcoefficient) + 'x' + str(newexponent)\n \n newterms.append(newterm)\n\n derivative = combine(newterms)\n return derivative\n\ndef combine(terms):\n finalstr = ''\n for x in range(len(terms)):\n finalstr += terms[x]\n return finalstr\n\ndef revCombine(terms):\n finalstr = ''\n for x in range(len(terms)):\n finalstr += terms[len(terms)-(x+1)]\n return finalstr\n\ndef reverse(alist):\n revlist = []\n for x in range(len(alist)):\n revlist.append(alist[len(alist)-(x+1)])\n return revlist\n\ndef getTerms(expression):\n syms = ['+', '-']\n terms = []\n tmin = 0\n for x in range(len(expression)):\n if expression[x] in syms:\n term = expression[tmin:x]\n terms.append(term)\n tmin = x\n if x == len(expression)-1:\n term = expression[tmin:]\n terms.append(term)\n\n return terms\n\ndef findMinDegree(terms):\n term = terms[0]\n degree = \"\"\n for x in range(len(term)):\n if term[x] in letters:\n degree = term[x+1:]\n if degree == \"\":\n degree = 1\n degree = int(degree)\n return degree\n\ndef findMaxDegree(terms):\n terms.sort()\n terms = reverse(terms)\n #print(terms)\n degree = \"\"\n degreeList = []\n for x in range(len(terms)):\n term = terms[x]\n for y in range(len(term)):\n if term[y] in letters:\n degree = term[y+1:]\n degreeList.append(degree)\n \n for x in range(len(degreeList)):\n if degreeList[x] == \"\":\n degreeList[x] = 1\n else:\n degreeList[x] = int(degreeList[x])\n\n #print(degreeList) \n maxDegree = max(degreeList)\n return maxDegree\n\ndef findDerivatives(polynomial):\n dList = [polynomial]\n maxD = findMaxDegree(getTerms(polynomial))\n #print(maxD)\n while True:\n derivative = findDerivative(getTerms(polynomial))\n dTerms = getTerms(derivative)\n deg = findMinDegree(dTerms)\n if deg==1:\n break\n else:\n polynomial = combine(dTerms)\n dList.append(polynomial)\n \n finalDerivative = combine(dTerms)\n dList.append(finalDerivative)\n\n # get the int d for linear sol\n # we only need it if degree>1 though\n lastD = findDerivative(getTerms(finalDerivative))\n \n if maxD>1:\n dList.append(lastD) \n return dList\n\ndef evaluate(val, func):\n func = editEx(func)\n func = func.replace(\"x\", str(val))\n #print(func)\n sol = float(eval(func))\n return sol\n \ndef newtonRaphson(x0, polynomial, derivative):\n #print(\"Input: \" + str(x0))\n #print(\"Derivative: \" + str(derivative) + \", and polynomial: \" + polynomial)\n fx0 = 
evaluate(x0, polynomial)\n    dfx0 = evaluate(x0, derivative)\n    if dfx0 ==0:\n        x1 = x0\n    else:\n        x1 = x0 - (fx0/dfx0)\n    #print(\"Output: \" + str(x1))\n    #print()\n    return x1\n\ndef betweenVals(vals, avals):\n    #print(\"VALS: \" + str(vals))\n    # important function because it will return\n    # list of inputs where each input fits certain range\n    # of the vals passed in this function.\n    # *really important for NR-Method*!\n\n    vals.sort()\n    \n    bVals = []\n    bVals.append(vals[0]-1)\n    for x in range(len(vals)-1):\n        val1 = vals[x] \n        val2 = vals[x+1]\n        avgVal = (val1+val2)/2\n        if avgVal in avals:\n            avgVal+=0.001\n        bVals.append(avgVal)\n    \n    bVals.append(vals[len(vals)-1]+1)\n\n    #print(len(bVals))\n    return bVals\n    \n\ndef findRoots(dList):\n    # initial guess for NR-Method.\n    # No wrong answer for a linear equation!\n    # loop goes as follows: linear, square, cubic, etc.\n    x0 = 1\n    vertexX = 1 # not needed in first loop (linear)\n    archivevertices = []\n    lastvertices = [] # keep track of the vertices before\n    useLast = False\n    vertices = [] # needed above square graphs\n    x0s = [x0]\n    for x in range(len(dList)-1):\n        for a in range(len(vertices)):\n            archivevertices.append([vertices[a], x])\n        #print(\"A: \" + str(archivevertices))\n        if len(vertices)>0:\n            if useLast == True:\n                #print(\"lv: \" + str(lastvertices))\n                #print(\"V: \" + str(vertices))\n                #print(\"A: \" + str(archivevertices))\n                lastvertices = editFindList(archivevertices, \"NO SOLUTIONS\")\n                #print(\"lv: \" + str(lastvertices))\n                x0s = betweenVals(lastvertices, archivevertices)\n            else:\n                x0s = betweenVals(vertices, archivevertices)\n            useLast = False\n        #print()\n        #print(\"Vertices: \" + str(vertices))\n        #print(\"X0s: \" + str(x0s))\n        #print()\n        lastvertices = vertices\n        vertices.clear()\n        derivative = dList[x]\n        polynomial = dList[x+1]\n        for y in range(len(x0s)):\n            x0 = x0s[y]\n            x1List = []\n            done_checking = False\n            weirdcounter = 0\n            weirdcounterlimit = 10\n            while done_checking==False:\n                x0 = newtonRaphson(x0, polynomial, derivative)\n                x0 = round(x0, 5)\n                x1List.append(x0)\n\n                for i in range(len(x1List)-1):\n                    if x1List[i-1] < x1List[i] and x1List[i] > x1List[i+1] and weirdcounter == weirdcounterlimit:\n                        vertices.append(\"NO SOLUTIONS\")\n                        useLast = True\n                        #print(useLast)\n                        done_checking = True\n                        break\n                    if x1List[i] == x1List[i+1]:\n                        vertexX = x1List[i+1]\n                        vertices.append(vertexX)\n                        done_checking = True\n                        break\n                    if x1List[i-1] < x1List[i] and x1List[i] > x1List[i+1]:\n                        weirdcounter +=1\n                \n    #print(\"AA: \" + str(archivevertices))\n    return vertices\n\n    \ndef getSolutions(polynomial): \n    dList = findDerivatives(polynomial)\n    dList = reverse(dList)\n    roots = findRoots(dList)\n    return roots\n\nprint(getSolutions(\"x4+2x3-2-x\"))\n# EQUATION IS PASSED HERE\n# THE PROGRAM WILL PASS THE SOLUTIONS IN AN ARRAY FORMAT\n# THE ARRAY MAY CONTAIN DUPLICATES.\n# THIS IS A BASIC PROGRAM AND CANNOT SOLVE ALL TYPES OF PROBLEMS.\n# POLYNOMIALS ARE PREFERRED. \n\n\n \n\n    \n\n\n\n\n\n","sub_path":"newton-raphson.py","file_name":"newton-raphson.py","file_ext":"py","file_size_in_byte":10290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558834549","text":"from idm.objects import dp, MySignalEvent, db_gen\nfrom idm.utils import find_mention_by_event, get_plural\nfrom typing import Union\nimport requests\n\nsession = None\n\nDC = 'https://IrcaDC.pythonanywhere.com'\n\nerrors = {\n    4: ('❗ This chat is missing on the remote server\\n' +\n        'You need to link the chat (on that account, not on this one)'),\n    3: '❗ Invalid session. 
Restart the duty bot',\n    2: '❗ The remote duty bot does not trust you',\n    1: '❗ Unknown error on the remote server',\n    0: '❗ The user is not registered\\nThey may be running an old version of the duty bot' # noqa\n}\n\n\ndef set_session(ses: str) -> str:\n    global session\n    session = ses\n    return ses\n\n\n@dp.longpoll_event_register('цод')\n@dp.my_signal_event_register('цод')\ndef dc(event: MySignalEvent):\n    resp = requests.post(DC, json={\n        'method': 'info',\n        'user_id': str(event.db.duty_id),\n        'session': session\n    })\n    if resp.status_code != 200:\n        if resp.status_code == 403:\n            return \"ok\"\n        event.msg_op(1, '❗ Problems with the data processing center\\n' +\n                     'Message [id332619272|this guy] if he is still alive',\n                     disable_mentions=1)\n        return \"ok\"\n    users = resp.json()['users']\n    event.msg_op(2, f'Registered {users} user{get_plural(users, \"\", \"s\", \"s\")}') # noqa\n    return \"ok\"\n\n\n@dp.longpoll_event_register('унапиши', 'у')\n@dp.my_signal_event_register('унапиши', 'у')\ndef remote_control(event: MySignalEvent) -> Union[str, dict]:\n    if db_gen.dc_auth is False:\n        event.msg_op(2, '❗ To use this command you need to ' +\n                     'allow token-based authorization in the DC (on the website)')\n        return \"ok\"\n\n    uid = find_mention_by_event(event)\n    if uid is None:\n        event.msg_op(2, '❗ You need to specify a user')\n        return \"ok\"\n\n    resp = requests.post(DC, json={\n        'method': 'remote_control',\n        'remote_user': str(uid),\n        'user_id': str(event.db.duty_id),\n        'session': session,\n        'data': {\n            'chat': event.chat.iris_id,\n            'local_id': event.msg['conversation_message_id']\n        }\n    })\n    if resp.status_code != 200:\n        event.msg_op(1, '❗ Problems with the data processing center\\n' +\n                     'Message [id332619272|this guy] if he is still alive',\n                     disable_mentions=1)\n        return \"ok\"\n\n    resp = resp.json()\n\n    if 'error' in resp:\n        code = resp['error']\n        if code == 5:\n            msg = f\"❗ VK error #{resp['code']}: {resp['msg']}\"\n        else:\n            msg = errors.get(code, '❗ Unknown error code')\n        event.msg_op(2, msg)\n        return \"ok\"\n\n    event.msg_op(3)\n    return \"ok\"\n","sub_path":"idm/my_signals/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125969297","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom classy import Class\n\nk = 1 # 1/Mpc\n\ncosmo = {'output' : 'mPk', 'k_output_values' : k, 'h' : 0.67556, 'omega_b' : 0.022032, 'omega_cdm' : 0.12038, 'A_s' : 2.215e-9, 'n_s' : 0.9619, 'tau_reio' : 0.0925, 'YHe' : 0.246, 'compute damping scale' : 'yes', 'gauge' : 'newtonian', 'ic' : 'ad&addcs', 'f_addcs' : 0.0001, 'n_addcs' : 1., 'alpha_addcs' : 0., 'phi_addcs' : 0.785}\n\nM = Class()\nM.set(cosmo)\nM.compute()\n\nall_k = M.get_perturbations()\none_k = all_k['scalar'][0]\nphi = one_k['phi']\ntau = one_k['tau [Mpc]']\na = one_k['a']\n\n#plt.xlim(10**(-1), 10**1)\nplt.loglog(tau, phi)\nplt.show()\n\n# free CLASS structures only after the perturbations have been read out\nM.struct_cleanup()\nM.empty()\n","sub_path":"python/Decay_analysis/phi_decay.py","file_name":"phi_decay.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551546240","text":"#Assignment 4a to convert a df to dict in pyspark and read the same dict with a_ prefixed to the column name\nfrom pyspark.sql import *\nfrom pyspark.sql.functions import Column,when\nfrom in_spark import createDefaultSparkSession\nimport re\nfrom pyspark.sql.functions import udf\nimport json\nif __name__ == 
\"__main__\":\n # in Spark\n spark = createDefaultSparkSession(\"Email Validator\", \"local[2]\")\n\n employee1 = Row(\"michael\", \"armbrust\", \"no-reply@berkeley.edu\", 100000)\n employee2 = Row(\"xiangrui\", \"meng\", \"no-reply@stanford.edu\", 120000)\n employee3 = Row(\"matei\", \"chung\", \"no-reply@waterloo.edu\", 140000)\n employee4 = Row(\"sandrew\", \"wendell\", \"no-reply@berk\", 160000)\n allEmps = [employee1, employee2, employee3, employee4]\n empDF = spark.createDataFrame(allEmps).toDF(\"firstname\", \"lastname\", \"email\", \"revenue\")\n #generating dict\n jsonData = empDF.toJSON().collect()\n print(jsonData)\n #printing list of dicts\n firstele=json.loads(jsonData[0])\n existingcol= firstele.keys()\n newcols=[]\n for cols in existingcol:\n if (cols==None) :\n cols=\"\"\n newcols.append(\"a_\"+cols)\n print(newcols)\n #creating new DF with new coloumn names\n newDF=spark.read.json(spark.sparkContext.parallelize(jsonData)).toDF(*newcols)\n newDF.printSchema()\n # root\n # | -- a_firstname: string(nullable=true)\n # | -- a_lastname: string(nullable=true)\n # | -- a_email: string(nullable=true)\n # | -- a_revenue: long(nullable=true)","sub_path":"in_spark/dftodict_pyspark.py","file_name":"dftodict_pyspark.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232369348","text":"from functools import partialmethod\nfrom django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('',views.index,name='index'),\n path('create/',views.create,name='create'),\n path('show/',views.show,name='show'),\n path('edit/',views.edit,name='edit'),\n path('show/',views.update,name='update'),\n path('delete/', views.destroy,name='destroy')\n]","sub_path":"travello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"17143277","text":"#\n# @lc app=leetcode id=1027 lang=python\n#\n# [1027] Longest Arithmetic Sequence\n#\n\n# @lc code=start\nclass Solution(object):\n def longestArithSeqLength(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n longest = 0\n if not A:\n return longest\n d = {}\n for i in range(len(A)):\n d[A[i]] = {}\n for j in range(i - 1, -1, -1):\n diff = A[i] - A[j]\n if diff in d[A[j]]:\n d[A[i]][diff] = d[A[j]][diff] + 1\n else:\n d[A[i]][diff] = 1\n longest = max(longest, d[A[i]][diff])\n return longest + 1\n# @lc code=end\n","sub_path":"Leetcode/1027.longest-arithmetic-sequence.py","file_name":"1027.longest-arithmetic-sequence.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"432529164","text":"import requests\nimport json\nimport pickle\n\n#api.octane.gg\n\ndef collectAllTeams():\n \n r=requests.get('https://api.octane.gg/api/search/teams/')\n json=r.json()\n return json\n\ndef collectAllPlayers():\n \n r=requests.get('https://api.octane.gg/api/search/players/')\n json=r.json()\n return json\n\ndef collectAllEvents():\n \n r=requests.get('https://api.octane.gg/api/search/events')\n json=r.json()\n return json\n\ndef collectAllMatchURLs():\n #https://api.octane.gg/api/matches?page=2 20 per page\n #Collects all played match URLs\n\n print('Collecting all avaliable match URLs')\n\n r=requests.get('https://api.octane.gg/api/matches?page=1')\n json=r.json()\n\n last_page=json['last_page']\n\n jsonData=json['data']\n\n 
all_match_urls=[jsonData[i]['match_url'] for i in range(0,len(jsonData))]\n\n    for pageNo in range(2,(last_page+1)):\n        r=requests.get(f'https://api.octane.gg/api/matches?page={pageNo}')\n        json=r.json()\n\n        jsonData=json['data']\n\n        match_urls=[jsonData[i]['match_url'] for i in range(0,len(jsonData))]\n        all_match_urls.extend(match_urls)\n\n        #Save every 20 pages\n        if pageNo % 20==0:\n            try:\n                with open('RLEsports/all_match_urls.txt', 'wb') as f:\n                    pickle.dump(all_match_urls, f)\n            except FileNotFoundError:\n                with open('all_match_urls.txt', 'wb') as f:\n                    pickle.dump(all_match_urls, f)\n            \n            print(f'Page {pageNo} complete')\n\n    try:\n        with open('RLEsports/all_match_urls.txt', 'wb') as f:\n            pickle.dump(all_match_urls, f)\n    except FileNotFoundError:\n        with open('all_match_urls.txt', 'wb') as f:\n            pickle.dump(all_match_urls, f)\n\n    print('Successfully collected and saved all match URLs')\n\ndef readAllMatchUrls():\n\n    try:\n        with open ('RLEsports/all_match_urls.txt', 'rb') as f:\n            all_match_urls = pickle.load(f)\n    except FileNotFoundError:\n        with open ('all_match_urls.txt', 'rb') as f:\n            all_match_urls = pickle.load(f)\n\n    return all_match_urls\n    \ndef collectSeriesData(match_url):\n    #https://api.octane.gg/api/match/MATCHID or https://api.octane.gg/api/series/MATCHID \n\n    r=requests.get(f'https://api.octane.gg/api/series/{match_url}')\n    JSON=r.json()\n\n    return JSON\n\ndef collectGameData(match_url,game_no):\n    #https://api.octane.gg/api/match_scoreboard_info/4770126/1\n\n    r=requests.get(f'https://api.octane.gg/api/match_scoreboard_info/{match_url}/{game_no}')\n    JSON=r.json()\n\n    return JSON\n\ndef checkMatchURLIncludingTeams(team_list,list_name):\n    \n    all_match_url=readAllMatchUrls()\n    saved_URLs=[]\n    counter=0\n\n    for match_url in all_match_url:\n        counter+=1\n        JSON=collectSeriesData(match_url)\n        try:\n            team1=JSON['data'][0]['Team1']\n            team2=JSON['data'][0]['Team2']\n        except KeyError:\n            print(f'Failed on {match_url}')\n            break\n\n        if (team1 in team_list) or (team2 in team_list):\n            saved_URLs.append(match_url)\n\n        if counter % 25==0:\n            try:\n                with open(f'RLEsports/{list_name}URLs.txt', 'wb') as f:\n                    pickle.dump(saved_URLs, f)\n            except FileNotFoundError:\n                with open(f'{list_name}URLs.txt', 'wb') as f:\n                    pickle.dump(saved_URLs, f)\n            print(f'{counter} matches checked')\n\n    try:\n        with open(f'RLEsports/{list_name}URLs.txt', 'wb') as f:\n            pickle.dump(saved_URLs, f)\n    except FileNotFoundError:\n        with open(f'{list_name}URLs.txt', 'wb') as f:\n            pickle.dump(saved_URLs, f)\n\n    print('Successfully collected all match URLs for given teams') \n\ndef readTeamURLs(list_name):\n\n    try:\n        with open(f'RLEsports/{list_name}URLs.txt', 'rb') as f:\n            savedURLs=pickle.load(f)\n    except FileNotFoundError:\n        with open(f'{list_name}URLs.txt', 'rb') as f:\n            savedURLs=pickle.load(f)\n\n    return savedURLs\n\ndef collectTeamMapInfo(match_urls,team_list):\n    collectedData={}\n\n    for team in team_list:\n        teamData={}\n        collectedData[team]=teamData\n\n    counter=0\n\n    for matchURL in match_urls:\n        counter+=1\n        JSON=collectSeriesData(matchURL)\n        total_games=JSON['data'][0]['Team1Games'] + JSON['data'][0]['Team2Games']\n\n        for i in range(1,total_games+1):\n            #Collect data\n            gameData=collectGameData(matchURL,i)\n            try:\n                mapName=gameData['data']['Map']\n                team1=gameData['data']['Team1']\n                team2=gameData['data']['Team2']\n                result=gameData['data']['Result']\n            except KeyError:\n                print(f'No game data found for match: {matchURL}, game: {i}. 
Moving onto next match...')\n                break\n\n            #Add to dictionary\n            if team1 in team_list:\n                if mapName not in collectedData[team1]:\n                    collectedData[team1][mapName]= {\n                        \"total\": 0,\n                        \"wins\": 0\n                    }\n\n                if result == team1:\n                    collectedData[team1][mapName]['wins']+=1\n\n                collectedData[team1][mapName]['total']+=1\n\n            if team2 in team_list:\n                if mapName not in collectedData[team2]:\n                    collectedData[team2][mapName]= {\n                        \"total\": 0,\n                        \"wins\": 0\n                    }\n\n                if result == team2:\n                    collectedData[team2][mapName]['wins']+=1\n\n                collectedData[team2][mapName]['total']+=1\n\n        if counter % 25==0:\n            try:\n                with open(f'RLEsports/TeamMapData.json','w') as f:\n                    json.dump(collectedData,f,indent=2)\n            except FileNotFoundError:\n                with open(f'TeamMapData.json','w') as f:\n                    json.dump(collectedData,f,indent=2)\n            print(f'{counter} matches collected')\n\n\n    #Save collected data\n    try:\n        with open(f'RLEsports/TeamMapData.json','w') as f:\n            json.dump(collectedData,f,indent=2)\n    except FileNotFoundError:\n        with open(f'TeamMapData.json','w') as f:\n            json.dump(collectedData,f,indent=2)\n\n    print('Successfully collected all match data')\n\n    return collectedData\n\n\nif __name__ == \"__main__\":\n    big_six=['Renault Vitality', 'Dignitas', 'mousesports', 'G2 Esports', 'NRG Esports', 'Spacestation Gaming']\n    savedURLs=readTeamURLs('bigSix')\n    collectTeamMapInfo(savedURLs,big_six)","sub_path":"RLEsports/Data_Collection.py","file_name":"Data_Collection.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"18625066","text":"t = int(input())\nfor _ in range(t):\n    n = int(input())\n    sets = set()\n    a = []\n    res = []\n    for _ in range(n):\n        thing,kind = input().split()\n        a.append(kind)\n        sets.add(kind)\n    for v in sets:\n        res.append(a.count(v))\n    ans = 1\n    for v in res:\n        ans*=v+1\n\n    print(ans-1)","sub_path":"Gold3/Jobs.py","file_name":"Jobs.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"398256454","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://user:password@127.0.0.1:5432/mydatabase'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nclass Alumni(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String())\n    graduation = db.Column(db.String())\n\n    # Debugging use\n    def __repr__(self):\n        return f'<Alumni {self.name}>'\n\ndb.create_all()\n\nnewAlumni = Alumni(name='Nichiren', graduation='1222')\ndb.session.add(newAlumni)\ndb.session.commit()\n\n@app.route('/')\ndef index():\n    person = Alumni.query.first()\n    return 'Hello ' + person.name\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"443797356","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nimport os\nimport bert\n\nfrom bert import run_classifier\nfrom bert import tokenization\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'\nos.environ['TFHUB_CACHE_DIR'] = '/tmp/tfhub'\n\n\nclass BertEncoder:\n    \"\"\"\n    This will get a pre-trained BERT\n    \"\"\"\n\n    # This is a path to an uncased (all lowercase) version of BERT\n    BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n    SPLIT_ID = 'case_id'\n    DATA_COLUMN_A = 'case_text'\n    DATA_COLUMN_B = 
'candidate_text'\n LABEL_COLUMN = 'candidate_is_noticed'\n LABEL_LIST = (0, 1)\n # We'll set sequences to be at most 512 tokens long.\n MAX_SEQ_LENGTH = 512\n\n def __init__(self):\n super().__init__()\n\n @staticmethod\n def __create_model__(input_ids, input_mask, segment_ids):\n \"\"\"Creates a classification model.\"\"\"\n bert_module = hub.Module(\n BertEncoder.BERT_MODEL_HUB,\n trainable=False)\n bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n return bert_outputs[\"pooled_output\"]\n\n # model_fn_builder actually creates our model function\n # using the passed parameters for num_labels, learning_rate, etc.\n @staticmethod\n def __model_fn_builder__(num_labels):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n embeddings = BertEncoder.__create_model__(input_ids, input_mask, segment_ids)\n\n return tf.estimator.EstimatorSpec(mode, predictions=embeddings)\n\n # Return the actual model function in the closure\n return model_fn\n\n @staticmethod\n def __create_tokenizer_from_hub_module__():\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(BertEncoder.BERT_MODEL_HUB)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n\n return bert.tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n\n @staticmethod\n def preprocessdata(X):\n \"\"\"\n Transform the dataset into Bert-edible features.\n\n :param X: a Pandas DataFrame with columns 'case_id', 'case_text','candidate_text', 'candidate_is_noticed'\n :return: a list of bert.run_classifier.InputFeatures\n \"\"\"\n tokenizer = BertEncoder.__create_tokenizer_from_hub_module__()\n data = X\n data_InputExamples = data.apply(lambda x: bert.run_classifier.InputExample(guid=None,\n text_a=x[BertEncoder.DATA_COLUMN_A],\n text_b=x[BertEncoder.DATA_COLUMN_B],\n label=x[BertEncoder.LABEL_COLUMN] if BertEncoder.LABEL_COLUMN in x else 0), axis=1)\n data_features = bert.run_classifier.convert_examples_to_features(\n data_InputExamples,\n BertEncoder.LABEL_LIST,\n BertEncoder.MAX_SEQ_LENGTH, tokenizer\n )\n return data_features\n\n @staticmethod\n def encode(X):\n \"\"\"\n Rank all candidate cases with regards to relevance to query case.\n X should contain NB_CANDIDATES_PER_CASE rows, each one having case_text the text of the query case, and\n candidate_text the text of the candidate case\n\n :param X: a dataframe with 2 columns : 'case_text', 'candidate_text'\n :return:\n \"\"\"\n # Convert our train and test features to InputFeatures that BERT understands.\n data_features = BertEncoder.preprocessdata(X)\n\n # Specify output directory and number of checkpoint steps to save\n run_config = tf.estimator.RunConfig()\n model_fn = BertEncoder.__model_fn_builder__(num_labels=2)\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n 
params={\"batch_size\": 64})\n\n # Create an input function\n predict_input_fn = bert.run_classifier.input_fn_builder(\n features=data_features,\n seq_length=BertEncoder.MAX_SEQ_LENGTH,\n is_training=False,\n drop_remainder=False)\n\n embeddings = list(estimator.predict(input_fn=predict_input_fn))\n return embeddings\n","sub_path":"Task_01/bert_encoder.py","file_name":"bert_encoder.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368242695","text":"import tensorflow as tf\n\n\nsess=tf.InteractiveSession()\n\n\n\nx=tf.Variable([1.0,2.0])\nxt=tf.Variable([1.0],[2.0])\nxx1=tf.Variable([[1.0,2.0]])\nxx2=tf.Variable([[1.0],[2.0]])\nxx=tf.Variable([[1.0,2.0],[3.0,4.0]])\n\na=tf.constant([3.0,3.0])\naa=tf.constant([[3.0,3.0],[3.0,3.0]])\n\n\n#initialize x\nx.initializer.run()\nxt.initializer.run()\nxx.initializer.run()\nxx1.initializer.run()\n\n\n#add a sub op\nm1=tf.subtract(x,a) #\nprint(\"m1=tf.subtract(x,a)\",m1.eval())\n\n\n#add an add op\nm2=tf.add(x,a)\nprint(\"m2=tf.add(x,a)\",m2.eval())\n\n\n#add a mul op\nm3=tf.multiply(x,a)\nprint(\"m3=tf.multiply(x,a)\",m3.eval())\n\n\n#add a div op\nm4=tf.div(x,a)\nprint(\"m4=tf.div(x,a)\",m4.eval())\n\n\n\n\n# mat mul\nmat6=tf.matmul(xx,aa)\nprint(\"mat6=tf.matmul(xx,aa)\",mat6.eval())\n\n\nmat7=tf.reduce_sum(tf.multiply(x,a))\nprint(\"tf.reduce_sum(tf.multiply(x,a))\",mat7.eval())\n\nshape1=tf.shape(xx1)\nprint(\"shape1=tf.shape(xx1)\",shape1)\nprint(\"sess.run(shape1)\",sess.run(shape1))\n\n\nshape2=tf.shape(xx2)\nprint(\"shape2=tf.shape(xx2)\",shape2)\nprint(\"sess.run(shape2)\",sess.run(shape2))\n\n\n# mat mul\nm7=tf.matmul(xx1,aa)\nprint(\"m7=tf.matmul(xx1,aa)\",m7.eval())\n\n# mat mul\n#m8=tf.matmul(x,aa)\n#print(m8.eval())\ntf.reduce_sum()\ntf.reduce_max()\ntf.reduce_mean()\ntf.reduce_min()\n\n\n\n# # 算术操作符:+ - * / %\n# tf.add(x, y, name=None) # 加法(支持 broadcasting)\n# tf.subtract(x, y, name=None) # 减法\n# tf.multiply(x, y, name=None) # 乘法\n# tf.divide(x, y, name=None) # 浮点除法, 返回浮点数(python3 除法)\n# tf.mod(x, y, name=None) # 取余\n#\n#\n# # 幂指对数操作符:^ ^2 ^0.5 e^ ln\n# tf.pow(x, y, name=None) # 幂次方\n# tf.square(x, name=None) # 平方\n# tf.sqrt(x, name=None) # 开根号,必须传入浮点数或复数\n# tf.exp(x, name=None) # 计算 e 的次方\n# tf.log(x, name=None) # 以 e 为底,必须传入浮点数或复数\n#\n#\n# # 取符号、负、倒数、绝对值、近似、两数中较大/小的\n# tf.negative(x, name=None) # 取负(y = -x).\n# tf.sign(x, name=None) # 返回 x 的符号\n# tf.reciprocal(x, name=None) # 取倒数\n# tf.abs(x, name=None) # 求绝对值\n# tf.round(x, name=None) # 四舍五入\n# tf.ceil(x, name=None) # 向上取整\n# tf.floor(x, name=None) # 向下取整\n# tf.rint(x, name=None) # 取最接近的整数\n# tf.maximum(x, y, name=None) # 返回两tensor中的最大值 (x > y ? x : y)\n# tf.minimum(x, y, name=None) # 返回两tensor中的最小值 (x < y ? 
\n#\n#\n# # Trigonometric and inverse trigonometric functions\n# tf.cos(x, name=None)\n# tf.sin(x, name=None)\n# tf.tan(x, name=None)\n# tf.acos(x, name=None)\n# tf.asin(x, name=None)\n# tf.atan(x, name=None)\n#\n#\n# # Others\n# tf.div(x, y, name=None)     # Python 2.7 division, x/y-->int or x/float(y)-->float\n# tf.truediv(x, y, name=None)   # Python 3 division, x/y-->float\n# tf.floordiv(x, y, name=None)  # Python 3 floor division, x//y-->int\n# tf.realdiv(x, y, name=None)\n# tf.truncatediv(x, y, name=None)\n# tf.floor_div(x, y, name=None)\n# tf.truncatemod(x, y, name=None)\n# tf.floormod(x, y, name=None)\n# tf.cross(x, y, name=None)\n# tf.add_n(inputs, name=None)   # inputs: A list of Tensor objects, each with same shape and type\n# tf.squared_difference(x, y, name=None)","sub_path":"scripts/tf-op/TFStandMath.py","file_name":"TFStandMath.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"329275351","text":"\"\"\"\n\"\"\"\n\nimport shapefile\nimport os\n\nimport wget\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\nimport numpy as np\nfrom matplotlib.collections import LineCollection\n\n\n\nclass Map(object):\n    def __init__(self, region=None):\n        \"\"\"\n        init class\n\n        Parameters\n\n        region : str or user defined dictionary\n        \"\"\"\n        self._set_region(region)\n        self._default_shp = 'TM_WORLD_BORDERS-0.3.shp'\n\n\n    def _set_region(self, region):\n        assert region is not None, 'Region needs to be specified!'\n        self._set_default_regions()\n        if type(region) is str:\n            # use default region\n            if region in self.regions.keys():\n                self.x1 = self.regions[region]['lon1']\n                self.x2 = self.regions[region]['lon2']\n                self.y1 = self.regions[region]['lat1']\n                self.y2 = self.regions[region]['lat2']\n                self.label = region\n            else:\n                print('ERROR: region is not in list of default regions')\n                assert False\n        else:\n            assert len(region.keys()) == 1, 'Only a single region can be specified!'\n            k = list(region.keys())[0]\n            self.x1 = region[k]['lon1']\n            self.x2 = region[k]['lon2']\n            self.y1 = region[k]['lat1']\n            self.y2 = region[k]['lat2']\n            self.label = k\n\n\n    def _set_default_regions(self):\n        r_eur = {'lon1' : -30., 'lon2' : 35., 'lat1' : 30., 'lat2' : 72.}\n        r_world = {'lon1' : -179., 'lon2' : 179., 'lat1' : -89., 'lat2' : 89.}\n        self.regions = {'europe' : r_eur, 'world' : r_world}\n\n    def _download_shape(self):\n        \"\"\"\n        download default shapefile\n\n        for some reason this does not work in automatic mode so far ...\n        \"\"\"\n        url = 'http://thematicmapping.org/downloads/TM_WORLD_BORDERS-0.3.zip'\n        filename = wget.download(url)\n\n        # still need to implement unzip here\n\n    def get_country_names(self):\n        \"\"\"\n        returns a list with country names\n        \"\"\"\n        l = []\n        for record in self.records:\n            l.append(record[4])\n        return l\n\n\n    def read_shape(self, shpname=None):\n        if shpname is None:\n            # set default shapename\n            shpname = self._default_shp\n\n        # check if shapefile existing\n        if not os.path.exists(shpname):\n            # in case that default is missing, try to download\n            if shpname == self._default_shp:\n                pass\n            else:\n                assert False, 'Can not continue with processing as shapefile not existing!'\n\n        # read shapefile\n        r = shapefile.Reader(shpname)\n        #print r.fields\n        self.shapes = r.shapes()\n        self.records = r.records()\n\n\n    def draw(self, projection='merc'):\n        self._draw_basic(projection=projection)\n\n\n    def _draw_basic(self, projection='merc'):\n        \"\"\"\n        This function draws the base map for the region configured via _set_region.\n        \"\"\"
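\n        # Editor's note (added): the llcrnr*/urcrnr* locals below are unused\n        # defaults; the Basemap call takes its bounds from self.x1/x2/y1/y2\n        # set in _set_region.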
\n        fig = plt.figure(figsize=(15.7,12.3))\n        self.ax = fig.add_subplot(111)\n\n\n        llcrnrlat=-90\n        urcrnrlat=90\n        llcrnrlon=-180\n        urcrnrlon=180\n        resolution='i'\n        lon_0 = 0.\n\n        m = Basemap(projection=projection, llcrnrlat=self.y1, urcrnrlat=self.y2, llcrnrlon=self.x1,\n                    urcrnrlon=self.x2, resolution=resolution, ax=self.ax, lon_0 = lon_0)\n        m.drawcoastlines()\n        m.drawmapboundary()\n        #m.drawcountries()\n        #m.fillcontinents(color = '#C0C0C0')\n        m.fillcontinents(color = 'lightgrey')\n\n        self.m = m\n\n    def draw_details(self, names=None, color='red'):\n\n        names1 = []\n        for n in names:\n            names1.append(n.upper())\n\n        for record, shape in zip(self.records,self.shapes):\n            #read shape\n            if len(shape.points) < 1:\n                continue\n            lons,lats = zip(*shape.points)\n            data = np.array(self.m(lons, lats)).T\n\n            #each shape may have different segments\n            if len(shape.parts) == 1:\n                segs = [data,]\n            else:\n                segs = []\n                for i in range(1,len(shape.parts)):\n                    index = shape.parts[i-1]\n                    index2 = shape.parts[i]\n                    segs.append(data[index:index2])\n                segs.append(data[index2:])\n\n            #draws the segments, and sets its properties. A colormap is used to get the gradient effect.\n            lines = LineCollection(segs,antialiaseds=(1,))\n            #lines.set_facecolors(cm.YlGn(record[-1]))\n            if record[4].upper() in names1:\n                lines.set_facecolors(color)\n            lines.set_edgecolors('k')\n            lines.set_linewidth(1)\n            self.ax.add_collection(lines)\n","sub_path":"countrymap/countrymap.py","file_name":"countrymap.py","file_ext":"py","file_size_in_byte":4891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"492487749","text":"from django.urls import path, include\nfrom django.contrib.auth import views as auth_views\n\nfrom . import views\n\napp_name = 'user'\n\nurlpatterns = [\n    path('', views.index, name = 'index'),\n    path('add/', views.addvehicle, name='add_vehicle'),\n    path('vehicles/', views.listvehicles, name = 'view_vehicles' ),\n    path('<int:id>/qrcode/', views.return_qr, name = 'return_qr'),\n    path('history/', views.vehicleparking, name = 'history'),\n    path('<int:id>/vehicles_history/', views.parking_history, name = 'vehicles_history'),\n\n]\n","sub_path":"user_dash/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"107855234","text":"import numpy as np\nimport pandas as pd\n\ndef basis_function(ind, knots_including_bounds, order):\n    \"\"\"\n    returns f(x), the ind-th basis function of the B-spline functional space with the given knots and order\n    :param ind: int, index of the basis function to return\n    :param knots_including_bounds: list of float, knots, including bounds\n    :param order: int, order of function\n    :return: a function taking one float\n    \"\"\"\n\n    # check input validity\n    if not isinstance(knots_including_bounds, list):\n        raise TypeError('knots_including_bounds is not a list!')\n    if not isinstance(knots_including_bounds[0], float):\n        raise TypeError('knots_including_bounds is not a list of float!')\n    if not (isinstance(order, int) or isinstance(order, float)):\n        raise TypeError('order is not an int or float')\n\n    knots_including_bounds.sort()\n    order = int(order)\n    num_of_knots = len(knots_including_bounds) - 2\n    lower_bound = knots_including_bounds[0]\n    upper_bound = knots_including_bounds[-1]\n    knots = [knots_including_bounds[i] for i in range(1, num_of_knots + 1)]\n    knots_expanded = [lower_bound] * order + knots + [upper_bound] * order\n\n    def B(x, i, m):\n        \"\"\"\n        returns the ith basis function of order m\n        :param x: 
float\n :param i: int : 0 <= i <= m + num_of_knots - 1\n :param m: int\n :return:\n \"\"\"\n if (i < 0) or (i > 2 * order + num_of_knots - m):\n raise ValueError('{index}th basis function does not exist!'.format(index=i))\n if m == 1:\n return 1.0 if (x >= knots_expanded[i]) and (x < knots_expanded[i + m]) else 0.0\n elif m > 1:\n if knots_expanded[i + m - 1] - knots_expanded[i] > 0:\n c1 = (x - knots_expanded[i]) / (knots_expanded[i + m - 1] - knots_expanded[i]) * B(x, i, m - 1)\n else:\n c1 = 0\n if knots_expanded[i + m] - knots_expanded[i + 1] > 0:\n c2 = (knots_expanded[i + m] - x) / (knots_expanded[i + m] - knots_expanded[i + 1]) * B(x, i + 1, m - 1)\n else:\n c2 = 0\n return c1 + c2\n else:\n raise ValueError('order cannot be negative!')\n\n func = (lambda x: B(x, ind, order))\n\n return func\n\n\ndef test_B_list():\n y = {}\n for order in [1, 2, 3, 4, 5]:\n knots_including_bounds = [0.0, 0.3, 0.5, 0.7, 1.0]\n x_list = np.linspace(start=0.0, stop=0.99, num=100)\n num_of_cols = len(knots_including_bounds) - 2 + order\n y[order] = pd.DataFrame(np.nan, index=x_list, columns=range(num_of_cols))\n for i in range(num_of_cols):\n func = basis_function(i, knots_including_bounds, order)\n y[order][i] = [func(x) for x in x_list]\n\n return y\n","sub_path":"bspline.py","file_name":"bspline.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"153852306","text":"#!/usr/bin/python3\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import NullFormatter\n\nfig, ax = plt.subplots(2, 2)\nprint(ax, type(ax), len(ax))\n\nx = [10.09950494, 16.43167673, 27.03701167]\n\nax[0][0].set(title=\"nSteps\", xlabel=\"sqrt(n_squares)\", ylabel=\"nSteps\")\ny1 = [15, 36, 185,]\nax[0][0].plot(x, y1, label=\"DFS w/o Length\", color=\"blue\", marker='o')\ny1 = [15, 36, 53,]\nax[0][0].plot(x, y1, label=\"BFS w/o Length\", color=\"red\", marker='o')\ny1 = [15, 36, 53,]\nax[0][0].plot(x, y1, label=\"UCS w/o Length\", color=\"green\", marker='o')\nax[0][0].set_ylim(bottom=0)\nax[0][0].legend()\n#fig.legend()\n\nax[0][1].set(title=\"Successors\", xlabel=\"sqrt(n_squares)\", ylabel=\"Successors\")\ny1 = [23, 58, 250, ]\nax[0][1].plot(x, y1, label=\"DFS w/o Length\", color=\"blue\", marker='o')\ny1 = [148, 2337, 428, ]\nax[0][1].plot(x, y1, label=\"BFS w/o Length\", color=\"red\", marker='o')\ny1 = [162, 2526, 436]\nax[0][1].plot(x, y1, label=\"UCS w/o Length\", color=\"green\", marker='o')\n\n\nax[1][0].set(title=\"Search Time\", xlabel=\"sqrt(n_squares)\", ylabel=\"Search Time (s)\")\ny1 = [0.019, 0.126, 1.581]\nax[1][0].plot(x, y1, label=\"DFS w/o Length\", color=\"blue\", marker='o')\ny1 = [0.167, 5.701, 2.85, ]\nax[1][0].plot(x, y1, label=\"BFS w/o Length\", color=\"red\", marker='o')\ny1 = [0.228, 7.013, 3.478]\nax[1][0].plot(x, y1, label=\"UCS w/o Length\", color=\"green\", marker='o')\n\nax[1][1].set(title=\"Memory\", xlabel=\"sqrt(n_squares)\", ylabel=\"MB\")\ny1 = [66.49, 68.04, 68.89, ]\nax[1][1].plot(x, y1, label=\"DFS w/o Length\", color=\"blue\", marker='o')\ny1 = [68.09, 71.2, 67.815, ]\nax[1][1].plot(x, y1, label=\"BFS w/o Length\", color=\"red\", marker='o')\ny1 = [66.4, 72.89, 67.851, ]\nax[1][1].plot(x, y1, label=\"UCS w/o Length\", color=\"green\", marker='o')\n\nfig.suptitle(\"Uninformed search compared\\nBody Length/History 
Excluded\")\nplt.show()\n","sub_path":"knowledgerep/Assignment1/plots/plot_uninformed_wo.py","file_name":"plot_uninformed_wo.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"577704649","text":"import random\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom string import ascii_lowercase, ascii_uppercase, digits\nfrom hashlib import sha224\n\nfrom clink.type import Service, AuthConf\nfrom clink.com import stamp\nfrom clink.dflow import verify, ExistError, NonExistError, ExpiredError\nfrom clink.model.std import acc_name as name_model, acc_pwd as pwd_model\nfrom clink.model.std import email as email_model, phone as phone_model\nfrom clink.model.acc import confirm_code as confirm_code_model\n\nfrom .authdb_sv import AuthDbSv\nfrom .type import ConfirmCodeSpec\n\n_ACT_REGISTERED = 'REGISTERED'\n_ACT_CHANGE_PWD = 'CHANGE_PWD'\n_ACT_RESET_PWD = 'RESET_PWD'\n_ACT_ADD_TO_GRP = 'ADD_TO_GRP'\n_ACT_RM_FRM_GRP = 'RM_FRM_GRP'\n\n_PWD_CHARS = ascii_lowercase + ascii_uppercase + digits\n_CODE_CHARS = ascii_uppercase\n\n\ndef _hash_pwd(password):\n return sha224(password.encode('utf-8')).hexdigest()\n\n\ndef _rand_pwd():\n return ''.join(random.sample(_PWD_CHARS, 6))\n\n\ndef rand_code():\n a = ''.join(random.sample(_CODE_CHARS, 4))\n b = ''.join(random.sample(_CODE_CHARS, 4))\n c = ''.join(random.sample(_CODE_CHARS, 4))\n d = ''.join(random.sample(_CODE_CHARS, 4))\n\n return '-'.join([a, b, c, d])\n\n\n@stamp(AuthDbSv, AuthConf)\nclass AccSv(Service):\n '''\n Manage accounts and related concepts\n '''\n\n def __init__(self, authdb_sv, auth_conf):\n '''\n :param AuthDbSv authdb_sv:\n :param AuthConf auth_conf:\n '''\n\n self._acc_doc = authdb_sv.acc_doc()\n self._grp_doc = authdb_sv.grp_doc()\n self._rpwd_doc = authdb_sv.rpwd_doc()\n self._acctmp_doc = authdb_sv.acctmp_doc()\n\n self.rpwd_time = 3600\n self.create_time = 3600\n\n root_acc = self.find_name('root')\n if root_acc is None:\n self.mk_acc('root', auth_conf.root_pwd, auth_conf.root_email)\n\n @verify(None, name_model, pwd_model, email_model, phone_model)\n def mk_acc(self, name, password, email, phone=None):\n '''\n Create new account\n\n :param str name:\n :param str password:\n :param str email:\n :param str phone:\n :rtype: bson.objectid.ObjectId\n :raise TypeError:\n '''\n\n account = {\n 'name': name,\n 'hashpwd': _hash_pwd(password),\n 'email': email,\n 'phone': phone,\n 'groups': [],\n 'created_date': datetime.utcnow(),\n 'modified_date': datetime.utcnow(),\n 'last_action': _ACT_REGISTERED\n }\n result = self._acc_doc.insert_one(account)\n return result.inserted_id\n\n @verify(None, name_model, pwd_model, email_model, phone_model)\n def mk_reg_code(self, name, password, email, phone=None):\n '''\n Create a registration code. 
Use returned code with cf_reg_code()\n        to create account\n\n        :param str name:\n        :param str password:\n        :param str email:\n        :param str phone:\n        :rtype: ConfirmCodeSpec\n        :raise TypeError:\n        :raise ExistError:\n        '''\n\n        if self._acc_doc.find_one({'name': name}) is not None:\n            raise ExistError({'name': name})\n        if self._acc_doc.find_one({'email': email}) is not None:\n            raise ExistError({'email': email})\n        if phone is not None:\n            if self._acc_doc.find_one({'phone': phone}) is not None:\n                raise ExistError({'phone': phone})\n\n        if self._acctmp_doc.find_one({'name': name}) is not None:\n            raise ExistError({'name': name})\n        if self._acctmp_doc.find_one({'email': email}) is not None:\n            raise ExistError({'email': email})\n        if phone is not None:\n            if self._acctmp_doc.find_one({'phone': phone}) is not None:\n                raise ExistError({'phone': phone})\n\n        datetime_now = datetime.utcnow().timestamp()\n        self._acctmp_doc.delete_many({'_expired_date': {'$lt': datetime_now}})\n\n        creation_code = rand_code()\n        expired_date = datetime.utcnow() + timedelta(hours=self.create_time)\n        acctmp = {\n            'name': name,\n            'hashpwd': _hash_pwd(password),\n            'email': email,\n            'phone': phone,\n            'groups': [],\n            'created_date': datetime.utcnow(),\n            'modified_date': datetime.utcnow(),\n            'last_action': _ACT_REGISTERED,\n\n            '_expired_date': expired_date.timestamp(),\n            '_creation_code': creation_code\n        }\n        self._acctmp_doc.insert_one(acctmp)\n\n        return ConfirmCodeSpec(creation_code, expired_date)\n\n    @verify(None, confirm_code_model)\n    def cf_reg_code(self, code):\n        '''\n        Use registration code to create account\n\n        :param str code:\n        :rtype: dict\n        :raise NonExistError:\n        :raise ExpiredError:\n        '''\n\n        acctmp = self._acctmp_doc.find_one({'_creation_code': code})\n        if acctmp is None:\n            raise NonExistError({'code': code})\n        if acctmp['_expired_date'] < datetime.utcnow().timestamp():\n            raise ExpiredError({'code': time()})\n        self._acctmp_doc.delete_one({'_creation_code': code})\n\n        del acctmp['_id']\n        del acctmp['_expired_date']\n        del acctmp['_creation_code']\n\n        self._acc_doc.insert_one(acctmp)\n        del acctmp['hashpwd']\n\n        return acctmp\n\n    def find_id(self, id):\n        '''\n        Find account by identity\n\n        :param bson.objectid.ObjectId id:\n        :rtype: dict\n        '''\n\n        return self._acc_doc.find_one({'_id': id})\n\n    @verify(None, name_model)\n    def find_name(self, name):\n        '''\n        Find account by name\n\n        :param str name:\n        :rtype: dict\n        :raise TypeError:\n        '''\n\n        return self._acc_doc.find_one({'name': name})\n\n    @verify(None, email_model)\n    def find_email(self, email):\n        '''\n        Find account by email\n\n        :param str email:\n        :rtype: dict\n        :raise TypeError:\n        '''\n\n        return self._acc_doc.find_one({'email': email})\n\n    @verify(None, phone_model)\n    def find_phone(self, phone):\n        '''\n        Find account by phone number\n\n        :param str phone:\n        :rtype: dict\n        :raise TypeError:\n        '''\n\n        return self._acc_doc.find_one({'phone': phone})\n\n    @verify(None, name_model, pwd_model)\n    def find_pwd(self, name, pwd):\n        '''\n        Find account by name and password\n\n        :param str name:\n        :param str pwd:\n        :rtype: dict\n        :raise TypeError:\n        '''\n\n        hashpwd = _hash_pwd(pwd)\n        return self._acc_doc.find_one({'name': name, 'hashpwd': hashpwd})\n\n    def rm_acc(self, id):\n        '''\n        Remove account by identity\n\n        :param bson.objectid.ObjectId id:\n        '''\n\n        result = self._acc_doc.delete_one({'_id': id})\n        if result.deleted_count != 1:\n            raise NonExistError({'id': id})\n\n    @verify(None, None, pwd_model)\n    def ch_pwd(self, id, new_pwd):\n        '''\n        Change password of account by identity\n\n        :param 
bson.objectid.ObjectId id:\n :param str new_pwd:\n :raise TypeError:\n '''\n\n upd = {\n '$set': {\n 'hashpwd': _hash_pwd(new_pwd),\n 'modified_date': datetime.utcnow(),\n 'last_action': _ACT_CHANGE_PWD\n }\n }\n result = self._acc_doc.update_one({'_id': id}, upd)\n\n if result.modified_count != 1:\n raise NonExistError({'id': id})\n\n @verify(None, email_model)\n def mk_rpwd_code(self, email):\n '''\n Create reset password code from email.\n Use returned code with cf_rpwd_code() to reset to new password\n\n :param str email:\n :rtype: ConfirmCodeSpec\n :raise TypeError:\n '''\n\n acc = self._acc_doc.find_one({'email': email})\n if acc is None:\n raise NonExistError({'email': email})\n self._rpwd_doc.delete_many({'acc_id': acc['_id']})\n\n reset_code = rand_code()\n exp_date = datetime.utcnow() + timedelta(hours=self.rpwd_time)\n code_spec = {\n 'code': reset_code,\n 'expired_date': exp_date.timestamp(),\n 'acc_id': acc['_id'],\n 'acc_email': acc['email']\n }\n self._rpwd_doc.insert_one(code_spec)\n\n return ConfirmCodeSpec(reset_code, exp_date)\n\n @verify(None, confirm_code_model, pwd_model)\n def cf_rpwd_code(self, code, new_pwd):\n '''\n Reset password from code\n\n :param str code:\n :param str new_pwd:\n :rtype: bson.objectid.ObjectId\n :raise TypeError:\n '''\n\n code_spec = self._rpwd_doc.find_one({'code': code})\n if code_spec is None:\n raise NonExistError({'code': code})\n if code_spec['expired_date'] < time():\n raise ExpiredError({'code': time()})\n\n acc_id = code_spec['acc_id']\n self._rpwd_doc.delete_many({'acc_id': acc_id})\n\n new_hashpwd = _hash_pwd(new_pwd)\n upd = {\n '$set': {\n 'hashpwd': new_hashpwd,\n 'last_action': _ACT_RESET_PWD\n }\n }\n self._acc_doc.update_one({'_id': acc_id}, upd)\n\n return acc_id\n\n def mk_group(self, group_name):\n '''\n Create new account group\n\n :param str group_name:\n '''\n\n self._grp_doc.insert_one({'name': group_name})\n\n def rm_group(self, group_name):\n '''\n Remove account group\n\n :param str group_name:\n '''\n\n result = self._grp_doc.delete_one({'name': group_name})\n\n if result.deleted_count != 1:\n raise NonExistError({'group_name': group_name})\n\n def add_to_group(self, acc_id, group_name):\n '''\n Put an account into group\n\n :param bson.objectid.ObjectId acc_id:\n :param str group_name:\n '''\n\n acc = self._acc_doc.find_one({'_id': acc_id})\n if acc is None:\n raise NonExistError({'id': acc_id})\n\n grp = self._grp_doc.find_one({'name': group_name})\n if grp is None:\n raise NonExistError({'group_name': group_name})\n if group_name in acc['groups']:\n raise ExistError({'group_name': group_name})\n\n upd = {\n '$push': {'groups': group_name},\n '$set': {'last_action': _ACT_ADD_TO_GRP}\n }\n self._acc_doc.update_one({'_id': acc_id}, upd)\n\n def del_fm_group(self, acc_id, group_name):\n '''\n Remove an account from group\n\n :param bson.objectid.ObjectId acc_id:\n :param str group_name:\n '''\n\n acc = self._acc_doc.find_one({'_id': acc_id})\n\n if acc is None:\n raise NonExistError({'id': acc_id})\n if group_name not in acc['groups']:\n raise NonExistError({'group_name': group_name})\n\n upd = {\n '$pull': {'groups': group_name},\n '$set': {'last_action': _ACT_RM_FRM_GRP}\n }\n self._acc_doc.update_one({'_id': acc_id}, upd)\n","sub_path":"clink/service/auth/acc_sv.py","file_name":"acc_sv.py","file_ext":"py","file_size_in_byte":11044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"415921187","text":"from django.conf.urls import include, url\nfrom . 
import views as tv\nurlpatterns = [\n url(r'^search/delete/(\\S+)', tv.do_delete),\n url(r'^index', tv.index),\n url(r'^left', tv.left),\n url(r'^main', tv.main),\n url(r'^top', tv.top),\n url(r'^search/(\\d)', tv.do_searchFile),\n\n]","sub_path":"FileManager/spider_url.py","file_name":"spider_url.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"640903761","text":"from felrun import app, db\nfrom models import Vehicles, Invoices, JO, Income, Bills, Accounts, OverSeas, Orders, Moving, Storage, Horizon\nfrom models import Autos, People, Interchange, Drivers, ChalkBoard, Proofs, Services, Moving, Drops\nfrom flask import session, logging, request\nimport datetime\nimport calendar\nimport re\nimport os\nimport shutil\nfrom systemdepfunc import myoslist, addpath, addtxt, addpath2\n\ndef isoB(indat):\n\n if request.method == 'POST':\n\n from viewfuncs import popjo, jovec, timedata, nonone, nononef, nons, numcheck, newjo, init_billing_zero, init_billing_blank\n from viewfuncs import sdiff, calendar7_weeks, txtfile, numcheckvec, d2s\n username = session['username'].capitalize()\n bill_path = 'tmp/processing/bills'\n bill, peep, cache, modata, modlink, fdata, adata, cdat, pb, passdata, vdata, caldays, daylist, weeksum, nweeks = init_billing_zero()\n filesel, docref, search11, search12, search13, search14, search21, search22, bType, bClass = init_billing_blank()\n billhold = 0\n acdata = 0\n expdata = 0\n username = session['username'].capitalize()\n\n monlvec = ['January', 'February', 'March', 'April', 'May', 'June',\n 'July', 'August', 'September', 'October', 'November', 'December']\n monsvec = ['Jan', 'Feb', 'Mar', 'Apr', 'May',\n 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n leftscreen = 1\n err = ['All is well', ' ', ' ', ' ', ' ']\n ldata = None\n today = datetime.datetime.today().strftime('%Y-%m-%d')\n leftsize = 10\n addjobselect = 0\n jobdata = 0\n modal = 0\n docref = ' '\n doctxt = ' '\n if indat == 'stay' or indat == 0:\n indat = '0'\n\n match = request.values.get('Match')\n modify = request.values.get('Modify')\n modify2 = request.values.get('Modify2')\n vmod = request.values.get('Vmod')\n viewo = request.values.get('ViewO')\n addE = request.values.get('addentity')\n addE2 = request.values.get('addentity2')\n paybill = request.values.get('PayB')\n paybill2 = request.values.get('PayB2')\n paynmake = request.values.get('paynmake')\n unpay = request.values.get('UnPay')\n printck = request.values.get('Prt')\n openbal = request.values.get('Open')\n returnhit = request.values.get('Return')\n deletehit = request.values.get('Delete')\n # hidden values\n update = request.values.get('Update')\n modlink = request.values.get('modlink')\n newbill = request.values.get('NewB')\n thisbill = request.values.get('ThisBill')\n newxfer = request.values.get('Xfer')\n thisxfer = request.values.get('ThisXfer')\n calendar = request.values.get('Calendar')\n calupdate = request.values.get('calupdate')\n incoming = request.values.get('incoming')\n incoming_O = request.values.get('incoming_O')\n datatable1 = request.values.get('datatable1')\n datatable2 = request.values.get('datatable2')\n dlist = [datatable1, datatable2]\n\n copy = request.values.get('copy')\n copy12 = request.values.get('copy12')\n qpay = request.values.get('qpay')\n\n bill = request.values.get('bill')\n cache = request.values.get('cache')\n peep = request.values.get('peep')\n\n modlink = nonone(modlink)\n bill = nonone(bill)\n peep = 
nonone(peep)\n # if modlink==9 or modlink==8:\n # peephold=peep\n\n if returnhit is not None:\n modlink = 0\n leftscreen = 1\n indat = '0'\n # Delete all vendors created but not completed\n pgone = People.query.filter(People.Ptype == 'TowCo').order_by(People.Company).all()\n for pg in pgone:\n company = pg.Company\n if company == '' or company == ' ' or company == None or company == 'None':\n People.query.filter(People.id == pg.id).delete()\n db.session.commit()\n # Put all bills in Paying Status back to unpaid\n pybills = Bills.query.filter(Bills.Status == 'Paying').all()\n for py in pybills:\n py.Status = 'Unpaid'\n db.session.commit()\n# ____________________________________________________________________________________________________________________B.QuickBillPayTowing\n if incoming is not None:\n # Came to this point from outside so nothing is translated\n bill, peep, cache, modata, modlink, fdata, adata, cdat, pb, passdata, vdata, caldays, daylist, weeksum, nweeks = init_billing_zero()\n filesel, docref, search11, search12, search13, search14, search21, search22, bType, bClass = init_billing_blank()\n\n towid = request.values.get('towid')\n towid = nonone(towid)\n adat = Autos.query.get(towid)\n pufrom = adat.Pufrom\n bdesc = 'Towing performed by \\r\\n'+adat.TowCompany\n adata = Autos.query.filter(Autos.Orderid == adat.Orderid).all()\n for dat in adata:\n bdesc = bdesc+'\\r\\n'+nons(dat.Year)+' '+nons(dat.Make) + \\\n ' '+nons(dat.Model)+' VIN: '+nons(dat.VIN)\n dat.Status = 'Paid'\n db.session.commit()\n\n cdat = People.query.filter((People.Company == adat.TowCompany) & (\n (People.Ptype == 'TowCo') | (People.Ptype == 'Vendor'))).first()\n\n if cdat is not None:\n cid = cdat.Accountid\n aid = cdat.id\n company = cdat.Company\n acdat = Accounts.query.filter(Accounts.id == cid).first()\n if acdat is not None:\n descript = acdat.Description\n baccount = acdat.Name\n btype = acdat.Type\n bcat = acdat.Category\n bsubcat = acdat.Subcategory\n else:\n bcat = 'NAY'\n bsubcat = 'NAY'\n descript = ''\n baccount = ''\n else:\n bcat = 'NAY'\n bsubcat = 'NAY'\n descript = ''\n baccount = ''\n aid = 0\n company = 'NAY'\n\n # Create the new database entry for the source document\n sdate = adat.Date2\n billno = 'TBD'+str(aid)\n ckmemo = 'Towing for Horizon Motors Car Purchase'\n bamt = d2s(adat.TowCost)\n bcomp = request.values.get('bcomp')\n bref = ''\n btype= 'Expense'\n bcat = 'Direct'\n bsubcat = 'Overseas'\n cco = 'F'\n account = 'FEL CitiBank'\n baccount = 'Towing Costs'\n #jobdata = OverSeas.query.filter(OverSeas.Status != 'Complete').all()\n\n input = Bills(Jo=billno, Pid=aid, Company=company, Memo='', Description=bdesc, bAmount=bamt, Status='Paid', Cache=0, Original='',\n Ref=bref, bDate=sdate, pDate=today, pAmount=bamt, pMulti=None, pAccount=account, bAccount=baccount, bType=btype,\n bCat=bcat, bSubcat=bsubcat, Link=pufrom, User=username, Co='F', Temp1=None, Temp2=str(towid), Recurring=0, dDate=today, pAmount2='0.00', pDate2=None)\n\n db.session.add(input)\n db.session.commit()\n\n modata = Bills.query.filter(Bills.Jo == billno).first()\n csize = People.query.filter(People.Ptype == 'TowCo').order_by(People.Company).all()\n bill = modata.id\n leftscreen = 1\n leftsize = 10\n\n err = ['All is well', ' ', ' ', ' ', ' ']\n bdata = Bills.query.order_by(Bills.bDate).all()\n cdata = People.query.filter((People.Ptype == 'Vendor') | (\n People.Ptype == 'TowCo')).order_by(People.Company).all()\n modlink = 12\n# 
____________________________________________________________________________________________________________________E.QuickBillPayTowing\n\n\n# ____________________________________________________________________________________________________________________B.UpdateDatabasesSection\n if (update is not None and modlink == 1) or modlink == 9 or modlink == 8 or modlink == 7 or modlink == 6:\n if bill > 0:\n modata = Bills.query.get(bill)\n ifxfer = modata.bType\n if ifxfer == 'XFER':\n vals = ['fromacct', 'toacct', 'pamt', 'bref',\n 'pdate', 'ckmemo', 'bdesc', 'ddate']\n a = list(range(len(vals)))\n i = 0\n for v in vals:\n a[i] = request.values.get(v)\n i = i+1\n\n if a[4] is None:\n a[4] = today\n bdol = d2s(a[2])\n modata.Memo = a[5]\n modata.Description = a[6]\n modata.bAmount = bdol\n modata.bDate = a[4]\n modata.dDate = a[7]\n modata.pAmount = bdol\n modata.pDate = a[4]\n modata.pAccount = a[0]\n modata.Ref = a[3]\n modata.User = username\n modata.Company = a[1]\n err[3] = 'Modification to Xfer ' + modata.Jo + ' completed.'\n\n else:\n\n filesel = request.values.get('FileSel')\n fdata = myoslist(bill_path)\n fdata.sort()\n leftsize = 8\n leftscreen = 0\n if update is not None and filesel != '1' and filesel != '':\n month_name = modata.bDate.strftime('%B')\n cache = modata.Cache\n cache = cache+1\n newfile = modata.Company+'_'+month_name+str(bill)+'c'+str(cache)+'.pdf'\n docold = bill_path+'/'+filesel\n docref = 'tmp/data/vbills/'+newfile\n try:\n shutil.move(addpath(docold), addpath(docref))\n shutil.move(addtxt(docold), addtxt(docref))\n except:\n err[4] = 'File already moved'\n modata.Original = docref\n modata.Cache = cache\n elif filesel != '1' and filesel != '':\n docref = bill_path+'/'+filesel\n else:\n docref = modata.Original\n\n if update is None:\n pbill = request.values.get('pbill')\n pb = nonone(pbill)\n bval = request.values.get('ctype')\n if bval is not None:\n modata.Co = bval\n db.session.commit()\n expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(bval))).all()\n else:\n\n vals = ['bdesc', 'bamt', 'bdate', 'pamt', 'pdate', 'account',\n 'bref', 'ckmemo', 'ctype', 'thiscomp', 'ddate', 'billacct']\n a = list(range(len(vals)))\n i = 0\n for v in vals:\n a[i] = request.values.get(v)\n i = i+1\n\n if a[4] is None:\n a[4] = today\n bdol = d2s(a[1])\n pdol = d2s(a[3])\n modata.Company = a[9]\n cdat = People.query.filter((People.Company == a[9]) & ((People.Ptype == 'Vendor') | (\n People.Ptype == 'TowCo') | (People.Ptype == 'Overseas'))).first()\n if cdat is not None:\n modata.Pid = cdat.id\n else:\n cdat = People.query.filter(People.Company == modata.Company).first()\n if cdat is not None:\n modata.Pid = cdat.id\n else:\n modata.Pid = 0\n modata.Memo = a[7]\n modata.Description = a[0]\n modata.bAmount = bdol\n modata.bDate = a[2]\n modata.dDate = a[10]\n modata.pAmount = pdol\n modata.pDate = a[4]\n modata.pAccount = a[5]\n modata.Ref = a[6]\n if modata.Status == 'Paying':\n if float(bdol) == float(pdol):\n modata.Status = 'Paid'\n else:\n modata.Status = 'PartPaid'\n\n modata.User = username\n modata.Co = a[8]\n modata.Original = docref\n acctname = a[11]\n acctco = a[8]\n modata.bAccount = acctname\n acdat1 = Accounts.query.filter((Accounts.Name == acctname) & (Accounts.Co == acctco)).first()\n if acdat1 is not None:\n modata.bType = acdat1.Type\n modata.bCat = acdat1.Category\n modata.bSubcat = acdat1.Subcategory\n modata.Recurring = acdat1.id\n\n # err[3]= 'Modification to Bill No ' + modata.Jo + ' completed.\n db.session.commit()\n\n if 
modal == 1:\n billno = 'Bill'+str(bill)\n input = ChalkBoard(Jo=billno, creator=username,\n comments='Auto Message Bill Paid', status=1)\n db.session.add(input)\n db.session.commit()\n calendar = 1\n modlink = 0\n\n if peep > 0:\n modata = People.query.get(peep)\n modata.Ptype = 'Vendor'\n vals = ['company', 'addr1', 'addr2', 'tid',\n 'tel', 'email', 'date1', 'billacct']\n a = list(range(len(vals)))\n i = 0\n for v in vals:\n a[i] = request.values.get(v)\n i = i+1\n modata.Company = a[0]\n modata.Addr1 = a[1]\n modata.Addr2 = a[2]\n modata.Idnumber = a[3]\n modata.Telephone = a[4]\n modata.Email = a[5]\n modata.Date1 = a[6]\n aname = a[7]\n aname = aname.strip()\n modata.Associate1 = aname\n aadat = Accounts.query.filter(Accounts.Name == aname).first()\n if aadat is not None:\n modata.Accountid = aadat.id\n modata.Associate2 = aadat.Type\n modata.Temp1 = aadat.Category\n modata.Temp2 = aadat.Subcategory\n modata.Idtype = aadat.Co\n\n db.session.commit()\n err = [' ', ' ', 'Continue New Entry for Entity ID ' + str(modata.id), ' ', ' ']\n if update is not None:\n err = [' ', ' ', 'Modification to Entity ID ' +\n str(modata.id) + ' completed.', ' ', ' ']\n # if modlink=8 then we are updating vendor while entering a new bill, time to transition back to the bill\n if modlink == 8:\n modlink = 4\n else:\n modlink = 0\n else:\n leftsize = 8\n bval = request.values.get('ctype')\n if bval is not None:\n modata.Idtype = bval\n db.session.commit()\n expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(bval))).all()\n\n if modlink == 8:\n filesel = request.values.get('FileSel')\n fdata = myoslist(bill_path)\n fdata.sort()\n leftsize = 8\n leftscreen = 0\n docref = bill_path+filesel\n\n # And now update the crossover to Storage in case the Company info changed:\n cross = Bills.query.filter(Bills.Pid == peep)\n for cros in cross:\n cros.Company = a[0]\n db.session.commit()\n\n # create return status\n if update is not None and modlink != 6:\n modlink = 0\n leftscreen = 1\n indat = '0'\n\n# ____________________________________________________________________________________________________________________B.UpdateDatabasesSection\n\n bdata = Bills.query.order_by(Bills.bDate).all()\n cdata = People.query.filter((People.Ptype == 'Vendor') | (\n People.Ptype == 'TowCo')).order_by(People.Company).all()\n\n# ____________________________________________________________________________________________________________________B.SearchFilters\n\n if modlink < 5:\n bill, peep, numchecked = numcheck(2, bdata, cdata, 0, 0, 0, ['bill', 'peep'])\n else:\n numchecked = 0\n\n\n# ____________________________________________________________________________________________________________________E.SearchFilters\n\n\n# ____________________________________________________________________________________________________________________B.Viewers\n if viewo is not None and numchecked == 1:\n err = [' ', ' ', 'There is no document available for this selection', ' ', ' ']\n if bill > 0:\n modata = Bills.query.get(bill)\n if modata.Original is not None:\n if len(modata.Original) > 5:\n docref = modata.Original\n leftscreen = 0\n leftsize = 8\n modlink = 0\n err = [' ', ' ', 'Viewing document '+docref, ' ', ' ']\n\n if viewo is not None and numchecked != 1:\n err = ['Must check exactly one box to use this option', ' ', ' ', ' ', ' ']\n\n# ____________________________________________________________________________________________________________________E.Viewers\n# 
____________________________________________________________________________________________________________________B.Modify Entries\n if (modify is not None or vmod is not None) and numchecked == 1:\n leftsize = 8\n\n if bill > 0:\n modata = Bills.query.get(bill)\n filesel = request.values.get('FileSel')\n fdata = myoslist(bill_path)\n fdata.sort()\n vendor = modata.Company\n co = modata.Co\n vdat = People.query.filter((People.Ptype == 'Vendor') & (People.Company == vendor)).first()\n # co = vdat.Idtype\n if vdat is not None:\n defexp = modata.bAccount\n if defexp is not None:\n if len(defexp) < 5:\n defexp = vdat.Associate1\n else:\n defexp = vdat.Associate1\n else:\n defexp = None\n modata.bAccount = defexp\n duedate = modata.dDate\n if duedate is None:\n modata.dDate = modata.bDate\n paccount = modata.pAccount\n print(paccount)\n if paccount is None:\n lastbill = Bills.query.filter((Bills.Company == modata.Company) & (Bills.id != modata.id)).first()\n if lastbill is not None:\n modata.pAccount = lastbill.pAccount\n db.session.commit()\n modata = Bills.query.get(bill)\n\n expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(co))).all()\n leftsize = 8\n leftscreen = 0\n docref = modata.Original\n modlink = 7\n if vmod is not None:\n err = [' ', ' ', 'There is no document available for this selection', ' ', ' ']\n if modata.Original is not None:\n if len(modata.Original) > 5:\n leftscreen = 0\n docref = modata.Original\n doctxt = txtfile(docref)\n err = ['All is well', ' ', ' ', ' ', ' ']\n\n if peep > 0:\n modata = People.query.get(peep)\n modlink = 9\n co = modata.Idtype\n if co is None:\n co = 'F'\n modata.Idtype = co\n db.session.commit()\n modata = People.query.get(peep)\n\n expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(co))).all()\n # peephold=peep\n if vmod is not None:\n err = [' ', ' ', 'There is no document available for this selection', ' ', ' ']\n\n # Modification coming from calendar\n if modify2 is not None:\n bill = nonone(modify2)\n modata = Bills.query.get(bill)\n modlink = 7\n leftsize = 8\n\n# ____________________________________________________________________________________________________________________E.Modify Entries\n# ____________________________________________________________________________________________________________________B.Add Entries\n\n if addE is not None:\n leftsize = 8\n modlink = 9\n # We will create a blank line and simply modify that by updating:\n input = People(Company='New', First=None, Middle=None, Last=None, Addr1=None, Addr2=None, Addr3=None, Idtype=None, Idnumber=None, Telephone=None,\n Email=None, Associate1=None, Associate2=None, Date1=today, Date2=None, Original=None, Ptype='Vendor', Temp1=None, Temp2=None, Accountid=None)\n db.session.add(input)\n db.session.commit()\n modata = People.query.filter((People.Company == 'New') &\n (People.Ptype == 'Vendor')).first()\n peep = modata.id\n # peephold=peep\n billhold = bill\n if bill > 0:\n filesel = request.values.get('FileSel')\n if filesel != '1':\n fdata = myoslist(bill_path)\n fdata.sort()\n leftsize = 8\n leftscreen = 0\n docref = bill_path+filesel\n err = [' ', ' ', 'Enter Data for New Entity', ' ', ' ']\n expdata = Accounts.query.filter(Accounts.Type == 'Expense').all()\n\n if addE2 is not None:\n leftsize = 8\n modlink = 8\n # We will create a blank line and simply modify that by updating:\n input = People(Company='New', First=None, Middle=None, Last=None, Addr1=None, Addr2=None, Addr3=None, Idtype=None, 
Idnumber=None, Telephone=None,\n Email=None, Associate1=None, Associate2=None, Date1=today, Date2=None, Original=None, Ptype='Vendor', Temp1=None, Temp2=None, Accountid=None)\n db.session.add(input)\n db.session.commit()\n modata = People.query.filter((People.Company == 'New') &\n (People.Ptype == 'Vendor')).first()\n peep = modata.id\n # peephold=peep\n filesel = request.values.get('FileSel')\n if filesel != '1':\n fdata = myoslist(bill_path)\n fdata.sort()\n leftsize = 8\n leftscreen = 0\n docref = bill_path+filesel\n err = [' ', ' ', 'Enter Data for New Entity', ' ', ' ']\n expdata = Accounts.query.filter(Accounts.Type == 'Expense').all()\n\n# ____________________________________________________________________________________________________________________E.Add Entries\n\n# ____________________________________________________________________________________________________________________B.Delete an Entry\n if deletehit is not None and numchecked >= 1:\n if bill > 0:\n bdat = Bills.query.get(bill)\n try:\n orderid = nonone(bdat.Temp2)\n adat = Autos.query.get(orderid)\n if adat is not None:\n adata = Autos.query.filter(Autos.Orderid == adat.Orderid).all()\n for dat in adata:\n dat.Status = 'Novo'\n except:\n err[0] = 'Delete problem'\n\n Bills.query.filter(Bills.id == bill).delete()\n db.session.commit()\n if peep > 0:\n peepvec = numcheckvec(cdata, 'peep')\n for peep in peepvec:\n People.query.filter(People.id == peep).delete()\n db.session.commit()\n\n bdata = Bills.query.order_by(Bills.bDate).all()\n cdata = People.query.filter((People.Ptype == 'Vendor') | (\n People.Ptype == 'TowCo')).order_by(People.Company).all()\n\n if deletehit is not None and numchecked == 0:\n err = [' ', ' ', 'Must have at least one item checked to use this option', ' ', ' ']\n# ____________________________________________________________________________________________________________________E.Delete an Entry\n# ____________________________________________________________________________________________________________________B.NewXfer.Billing\n if newxfer is not None:\n modlink = 3\n leftsize = 8\n leftscreen = 0\n docref = ''\n vdata = [' ', ' ', '0.00', today]\n\n if newxfer is None and modlink == 3:\n leftsize = 8\n leftscreen = 0\n docref = ''\n\n fromacct = request.values.get('fromacct')\n toacct = request.values.get('toacct')\n vdata = [' ', ' ', '0.00', today]\n\n if thisxfer is not None:\n modlink = 0\n\n sdate = request.values.get('bdate')\n if sdate is None or sdate == '':\n sdate = today\n\n fromacct = request.values.get('fromacct')\n toacct = request.values.get('toacct')\n btype = 'XFER'\n bclass = 'Non Expense'\n billno = 'New Xfer'\n ckmemo = request.values.get('ckmemo')\n bdesc = request.values.get('bdesc')\n baccount = request.values.get('baccount')\n bamt = request.values.get('bamt')\n bref = request.values.get('bref')\n bamt = d2s(bamt)\n\n input = Bills(Jo=billno, Pid=0, Company=toacct, Memo=ckmemo, Description=bdesc, bAmount=bamt, Status='Paid', Cache=0, Original=docref,\n Ref=bref, bDate=sdate, pDate=sdate, pAmount=bamt, pMulti=None, pAccount=fromacct, bAccount=baccount, bType=btype,\n bCat=bclass, bSubcat='', Link=None, User=username, Co=None, Temp1=None, Temp2=None, Recurring=0, dDate=today, pAmount2='0.00', pDate2=None)\n\n db.session.add(input)\n db.session.commit()\n\n modata = Bills.query.filter(Bills.Jo == 'New Xfer').first()\n billno = 'X'+str(modata.id)\n modata.Jo = billno\n db.session.commit()\n\n modata = Bills.query.filter(Bills.Jo == billno).first()\n csize = 
People.query.filter(People.Ptype == 'Vendor').order_by(People.Company).all()\n            bill = modata.id\n            leftscreen = 1\n            leftsize = 10\n            err = ['All is well', ' ', ' ', ' ', ' ']\n            bdata = Bills.query.order_by(Bills.bDate).all()\n# ____________________________________________________________________________________________________________________E.NewXfer.Billing\n        if copy is not None:\n            if bill > 0 and numchecked == 1:\n                # sdate=today.strftime('%Y-%m-%d')\n                bdat = Bills.query.get(bill)\n                thisdate = bdat.bDate\n                nextdate = thisdate + datetime.timedelta(days=30)\n                input = Bills(Jo=bdat.Jo, Pid=bdat.Pid, Company=bdat.Company, Memo=bdat.Memo, Description=bdat.Description, bAmount=bdat.bAmount, Status=bdat.Status, Cache=0, Original=bdat.Original,\n                              Ref=bdat.Ref, bDate=nextdate, pDate=None, pAmount='0.00', pMulti=None, pAccount=bdat.pAccount, bAccount=bdat.bAccount, bType=bdat.bType,\n                              bCat=bdat.bCat, bSubcat=bdat.bSubcat, Link=None, User=username, Co=bdat.Co, Temp1=None, Temp2='Copy', Recurring=0, dDate=today, pAmount2='0.00', pDate2=None)\n\n                db.session.add(input)\n                db.session.commit()\n\n        if copy12 is not None:\n            if bill > 0 and numchecked == 1:\n                # sdate=today.strftime('%Y-%m-%d')\n                bdat = Bills.query.get(bill)\n                thisdate = bdat.bDate\n                year = thisdate.year\n                month = thisdate.month\n                day = thisdate.day\n                while month < 12:\n                    month = month+1\n                    nextdate = datetime.datetime(year, month, day)\n                    input = Bills(Jo=bdat.Jo, Pid=bdat.Pid, Company=bdat.Company, Memo=bdat.Memo, Description=bdat.Description, bAmount=bdat.bAmount, Status=bdat.Status, Cache=0, Original=bdat.Original,\n                                  Ref=bdat.Ref, bDate=nextdate, pDate=None, pAmount='0.00', pMulti=None, pAccount=bdat.pAccount, bAccount=bdat.bAccount, bType=bdat.bType,\n                                  bCat=bdat.bCat, bSubcat=bdat.bSubcat, Link=None, User=username, Co=bdat.Co, Temp1=None, Temp2='Copy', Recurring=0, dDate=today, pAmount2='0.00', pDate2=None)\n\n                    db.session.add(input)\n                    db.session.commit()\n\n        if qpay is not None:\n            if bill > 0 and numchecked == 1:\n                bdat = Bills.query.get(bill)\n                bdat.pDate = bdat.bDate\n                bdat.pAmount = bdat.bAmount\n                bdat.Temp2 = ''\n                bdat.Status = 'Paid'\n                db.session.commit()\n# ____________________________________________________________________________________________________________________B.NewJob\n        if newbill is not None:\n            err = ['Select Source Document from List']\n            fdata = myoslist(bill_path)\n            fdata.sort()\n            modlink = 4\n            leftsize = 8\n            leftscreen = 0\n            if len(fdata) > 0:\n                bill1 = fdata[0]\n                docref = bill_path+'/'+bill1\n            else:\n                docref = ''\n            expdata = Accounts.query.filter(Accounts.Type == 'Expense').all()\n\n        if newbill is None and modlink == 4:\n            filesel = request.values.get('FileSel')\n            fdata = myoslist(bill_path)\n            fdata.sort()\n            leftsize = 8\n            leftscreen = 0\n            if filesel != '1':\n                ftxt = txtfile(filesel)\n                docref = bill_path+'/'+filesel\n                doctxt = bill_path+'/'+ftxt\n            else:\n                docref = None\n\n            thiscompany = request.values.get('thiscomp')\n            cdat = People.query.filter((People.Company == thiscompany) & (\n                (People.Ptype == 'Vendor') | (People.Ptype == 'TowCo'))).first()\n            # Get information from previous bill paid by the vendor\n            if cdat is not None:\n                co = cdat.Idtype\n                coc = request.values.get('ctype')\n                if coc != co:\n                    cdat.Idtype = coc\n                    db.session.commit()\n                    co = cdat.Idtype\n                if co is not None:\n                    expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(co))).all()\n                else:\n                    expdata = Accounts.query.filter(Accounts.Type == 'Expense').all()\n\n                ldata = Bills.query.filter(Bills.Company == cdat.Company).all()
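\n                # Editor's note (added, describing the block below): when the vendor has a\n                # prior bill, the form is pre-filled by advancing any month name found in\n                # that bill's description/memo by one month, and its date by 30 days.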
\n                if ldata:\n                    ldat = ldata[-1]\n                    last_desc = ldat.Description\n                    last_memo = ldat.Memo\n                    new_desc = ''\n                    new_memo = ''\n                    for i, x in enumerate(monlvec):\n                        if i == 11:\n                            k = 0\n                        else:\n                            k = i+1\n                        if x in last_desc:\n                            new_desc = last_desc.replace(x, monlvec[k])\n                        if x in last_memo:\n                            new_memo = last_memo.replace(x, monlvec[k])\n                    last_amt = d2s(ldat.bAmount)\n                    # last_date=datetime.datetime.strptime(ldat.bDate,\"%Y-%m-%d\")+datetime.timedelta(days=30)\n                    last_date = ldat.bDate+datetime.timedelta(days=30)\n                    next_date = last_date.strftime(\"%Y-%m-%d\")\n                    vdata = [new_desc, new_memo, last_amt, next_date]\n                    # vmemo=ldat.Memo\n                else:\n                    # if not then get the category info from the vendor data\n                    vdata = ['X', 'X', '0.00', today]\n            else:\n                expdata = Accounts.query.all()\n\n        if thisbill is not None:\n            modlink = 0\n            # Create the new database entry for the source document\n            sdate = request.values.get('bdate')\n            print('bdate:', sdate)\n            if sdate is None or sdate == '':\n                sdate = today\n\n            thiscomp = request.values.get('thiscomp')\n            cdat = People.query.filter((People.Company == thiscomp) & (\n                (People.Ptype == 'Vendor') | (People.Ptype == 'TowCo'))).first()\n            if cdat is not None:\n                acomp = cdat.Company\n                cid = cdat.Accountid\n                aid = cdat.id\n                acdat = Accounts.query.filter(Accounts.id == cid).first()\n                if acdat is not None:\n                    baccount = acdat.Name\n                    category = acdat.Category\n                    subcat = acdat.Subcategory\n                    descript = acdat.Description\n                    btype = acdat.Type\n                else:\n                    category = 'NAY'\n                    subcat = 'NAY'\n                    descript = ''\n                    btype = ''\n                    baccount = ''\n            else:\n                acomp = None\n                aid = None\n                category = 'NAY'\n                subcat = 'NAY'\n                descript = ''\n                btype = ''\n                baccount = ''\n\n            filesel = request.values.get('FileSel')\n            fdata = myoslist(bill_path)\n            fdata.sort()\n            if filesel != '1':\n                ftxt = txtfile(filesel)\n                doctxt = bill_path+'/'+ftxt\n                docold = bill_path+'/'+filesel\n                month_name = datetime.datetime.strptime(sdate, \"%Y-%m-%d\").strftime('%B')\n                newfile = thiscomp+'_'+month_name+'.pdf'\n                docref = 'tmp/data/vbills/'+newfile\n                try:\n                    shutil.move(addpath(docold), addpath(docref))\n                    shutil.move(addtxt(docold), addtxt(docref))\n                except OSError:\n                    print('File already moved')\n            else:\n                docref = None\n\n            billno = 'XFER'\n            bdesc = request.values.get('bdesc')\n            bamt = request.values.get('bamt')\n            bamt = d2s(bamt)\n            bcomp = request.values.get('bcomp')\n            cco = request.values.get('ctype')\n            account = request.values.get('crataccount')\n\n            input = Bills(Jo=billno, Pid=aid, Company=acomp, Memo='', Description=bdesc, bAmount=bamt, Status='Unpaid', Cache=0, Original=docref,\n                          Ref='', bDate=sdate, pDate=today, pAmount='0.00', pMulti=None, pAccount=account, bAccount=baccount, bType=btype,\n                          bCat=category, bSubcat=subcat, Link=None, User=username, Co=cco, Temp1=None, Temp2=None, Recurring=0, dDate=today, pAmount2='0.00', pDate2=None)\n\n            db.session.add(input)\n            db.session.commit()\n\n            # reget because we need the bill unique id number in the document\n            # (newest first, since the Jo code 'XFER' is reused across bills)\n            modata = Bills.query.filter(Bills.Jo == billno).order_by(Bills.id.desc()).first()\n            csize = People.query.filter((People.Ptype == 'Vendor') | (\n                People.Ptype == 'TowCo')).order_by(People.Company).all()\n            bill = modata.id\n            leftscreen = 1\n            leftsize = 10\n            err = ['All is well', ' ', ' ', ' ', ' ']\n            bdata = Bills.query.order_by(Bills.bDate).all()\n# ____________________________________________________________________________________________________________________E.New Bill\n        if unpay is not None:\n            bill = nonone(unpay)\n            myb = Bills.query.get(bill)\n            myb.Status = 'Unpaid'\n            billno = 'Bill'+str(bill)
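\n            # Editor's note (added, inferred from surrounding usage): ChalkBoard rows act as\n            # an audit trail keyed by 'Bill'+id; setting 'calendar' below refreshes the board.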
ChalkBoard(Jo=billno, creator=username,\n comments='Auto Message Bill Unpaid', status=1)\n db.session.add(input)\n db.session.commit()\n calendar = 1\n\n if paybill is not None or paybill2 is not None:\n err = [' ', ' ', ' ', ' ', ' ']\n exit = 0\n fdata = myoslist(bill_path)\n fdata.sort()\n if paybill2 is not None:\n bill = nonone(paybill2)\n numchecked = 1\n billno = 'Bill'+str(bill)\n input = ChalkBoard(Jo=billno, creator=username,\n comments='Auto Message Bill Pay Started', status=1)\n db.session.add(input)\n db.session.commit()\n calendar = 1\n if numchecked == 1 and bill > 0:\n myb = Bills.query.get(bill)\n status = myb.Status\n if status != 'Paid':\n myb.pDate = today\n myb.pAmount = myb.bAmount\n acct = myb.pAccount\n if acct is None or len(acct) < 4:\n myb.pAccount = 'FEL CitiBank'\n myb.Status = 'Paying'\n db.session.commit()\n co = myb.Co\n expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(co))).all()\n\n else:\n err[0] = 'Bill for single bill pay has been paid already'\n exit = 1\n\n if numchecked > 1 and bill > 0:\n # See how many bills are to be paid together\n nbill = 0\n bill_ids = []\n total = 0\n for data in bdata:\n testone = request.values.get('bill'+str(data.id))\n if testone:\n myb = Bills.query.get(data.id)\n try:\n nbill = nbill+1\n bamt = myb.bAmount\n bamt = d2s(bamt)\n amt = float(bamt)\n total = total+amt\n bill_ids.append(data.id)\n except:\n err[1] = 'Some checked bills of multi-pay already paid'\n exit = 1\n if exit == 0 and nbill > 1:\n # Create link code\n linkcode = 'Link'\n # Create a master reference from first bill\n masterbill = bill_ids[0]\n myb = Bills.query.get(masterbill)\n masterref = myb.Ref\n masteracct = myb.Account\n masterdesc = ''\n masterpid = myb.Pid\n masterpayee = myb.Company\n if masteracct is None or len(masteracct) < 4:\n masteracct = 'Industrial Bank'\n for bill in bill_ids:\n myb = Bills.query.get(bill)\n linkcode = linkcode+'+'+str(bill)\n try:\n descline = 'Bill ID:' + str(myb.id) + ' Billcode: ' + \\\n myb.Jo + ' Amount: ' + str(myb.bAmount) + '\\n'\n except:\n descline = 'Bill ID:' + str(myb.id)\n masterdesc = masterdesc+descline\n for bill in bill_ids:\n myb = Bills.query.get(bill)\n myb.Status = 'Paid-M'\n myb.Link = linkcode\n myb.pDate = today\n myb.pAmount = myb.bAmount\n myb.pMulti = \"{:.2f}\".format(total)\n myb.Ref = masterref\n myb.Account = masteracct\n myb.Description = masterdesc\n myb.Pid = masterpid\n myb.Company = masterpayee\n db.session.commit()\n\n if numchecked == 0 or bill == 0:\n err[4] = 'Must check at least one bill for this selection'\n exit = 1\n\n if exit == 0:\n modlink = 7\n leftsize = 8\n modata = Bills.query.get(bill)\n pb = 1\n else:\n err[2] = 'Could not complete billpay'\n leftside = 10\n modlink = 0\n\n bdata = Bills.query.order_by(Bills.bDate).all()\n\n if printck is not None or modlink == 6 or modlink == 12 or paynmake is not None or indat != '0':\n fdata = myoslist(bill_path)\n fdata.sort()\n if paynmake is not None:\n bill = nonone(paynmake)\n bdat = Bills.query.get(bill)\n bdat.Status = 'Paid'\n bdat.pAmount = bdat.bAmount\n ckref = request.values.get('ckref'+str(bill))\n bdat.Ref = ckref\n db.session.commit()\n numchecked = 1\n\n if indat != '0':\n bdat = Bills.query.filter(Bills.Jo == indat).first()\n bill = bdat.id\n modlink = 6\n\n if (numchecked == 1 and bill > 0) or modlink == 6 or modlink == 12:\n exit = 0\n bdat = Bills.query.get(bill)\n ifxfer = bdat.bType\n if ifxfer == 'XFER':\n acct_to = bdat.Company\n acdat = 
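A minimal sketch of the multi-pay Link code convention: the branch above concatenates bill ids into a string such as 'Link+12+37+98' that ties several bills to one master payment, and the check-printing branch further down splits it apart again. The helper names are hypothetical; the record builds and parses the string inline, and parses it with a list comprehension used only for its append side effect, which a plain comprehension expresses more idiomatically.

def build_linkcode(bill_ids):
    return 'Link' + ''.join('+' + str(i) for i in bill_ids)

def parse_linkcode(linkcode):
    return [int(part) for part in linkcode.replace('Link+', '').split('+')]

assert build_linkcode([12, 37, 98]) == 'Link+12+37+98'
assert parse_linkcode('Link+12+37+98') == [12, 37, 98]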
Accounts.query.filter(Accounts.Name == acct_to).first()\n pdat = People.query.filter((People.Ptype == 'Vendor') &\n (People.Company == acdat.Payee)).first()\n else:\n pdat = People.query.get(bdat.Pid)\n\n if bdat.Status == 'Unpaid':\n bdat.Status = 'Paid'\n db.session.commit()\n if bdat.Status == 'Paid-M':\n linkcode = bdat.Link\n sbdata = Bills.query.filter(Bills.Link == linkcode)\n link = linkcode.replace('Link+', '')\n items = link.split('+')\n links = []\n [links.append(int(item)) for item in items]\n else:\n links = 0\n sbdata = 0\n\n if exit == 0:\n cache = bdat.Cache\n if cache is None or cache == 0:\n cache = 1\n cache = cache+1\n bdat.Cache = cache\n cknum = 'Ck'+str(bdat.id)+'_R_'+bdat.Ref\n docref = 'tmp/data/vchecks/'+cknum+'c'+str(cache)+'.pdf'\n pamount = bdat.pAmount\n if pamount == '0.00':\n bdat.pAmount = bdat.bAmount\n db.session.commit()\n\n from writechecks import writechecks\n writechecks(bdat, pdat, cache, sbdata, links)\n modlink = 6\n leftsize = 8\n leftscreen = 0\n co = bdat.Co\n expdata = Accounts.query.filter((Accounts.Type == 'Expense') & (Accounts.Co.contains(co))).all()\n\n db.session.commit()\n\n bdata = Bills.query.order_by(Bills.bDate).all()\n cdata = People.query.filter((People.Ptype == 'Vendor') | (People.Ptype == 'TowCo') | (\n People.Ptype == 'Overseas')).order_by(People.Company).all()\n modata = Bills.query.get(bill)\n pb = 1\n\n else:\n err[1] = 'Must select exactly 1 Bill box to use this option.'\n\n\n# ____________________________________________________________________________________________________________________B.Matching\n if match is not None:\n if bill > 0 and peep > 0 and numchecked == 2:\n myo = Bills.query.get(bill)\n myp = People.query.get(peep)\n myo.Pid = peep\n myo.Company = myp.Company\n myo.Description = myp.Associate1\n db.session.commit()\n if numchecked != 2:\n err[1] = 'Must select exactly 2 boxes to use this option.'\n err[0] = ' '\n# ____________________________________________________________________________________________________________________E.Matching\n# ____________________________________________________________________________________________________________________B.Calendar.Billing\n if calendar is not None or calupdate is not None:\n leftscreen = 2\n if calupdate is not None:\n waft = request.values.get('waft')\n wbef = request.values.get('wbef')\n waft = nonone(waft)\n wbef = nonone(wbef)\n nweeks = [wbef, waft]\n else:\n nweeks = [2, 3]\n caldays, daylist, weeksum = calendar7_weeks('Billing', nweeks)\n\n if calupdate is not None:\n for j in range(len(daylist)):\n ilist = daylist[j]\n if ilist:\n tid = ilist[0]\n fnum = 'note'+str(tid)\n fput = request.values.get(fnum)\n if len(fput) > 3:\n billno = 'Bill'+str(tid)\n input = ChalkBoard(Jo=billno, creator=username,\n comments=fput, status=1)\n db.session.add(input)\n db.session.commit()\n\n caldays, daylist, weeksum = calendar7_weeks('Billing', nweeks)\n# ____________________________________________________________________________________________________________________E.Calendar.Billing\n\n if (modlink > 0 and bill > 0) or (modlink > 0 and peep > 0) or leftscreen == 0:\n leftsize = 8\n elif leftscreen == 2:\n leftsize = 10\n else:\n leftsize = 10\n else:\n\n # ____________________________________________________________________________________________________________________B.BillingNotPost\n from viewfuncs import timedata, nonone, init_billing_zero, init_billing_blank, nononef\n bdata = Bills.query.order_by(Bills.bDate).all()\n cdata = 
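A minimal sketch of the check-document naming used in the printing branch above: each reprint bumps a per-bill Cache counter so earlier PDFs are never overwritten. The function name is hypothetical; the path pieces come from the record itself.

def check_pdf_path(bill_id: int, ref: str, cache: int) -> str:
    cknum = 'Ck' + str(bill_id) + '_R_' + ref
    return 'tmp/data/vchecks/' + cknum + 'c' + str(cache) + '.pdf'

print(check_pdf_path(42, '1007', 2))  # tmp/data/vchecks/Ck42_R_1007c2.pdf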
People.query.filter((People.Ptype == 'Vendor') | (\n People.Ptype == 'TowCo')).order_by(People.Company).all()\n today = datetime.datetime.today().strftime('%Y-%m-%d')\n bill, peep, cache, modata, modlink, fdata, adata, cdat, pb, passdata, vdata, caldays, daylist, weeksum, nweeks = init_billing_zero()\n filesel = ''\n docref = ' '\n doctxt = ' '\n leftscreen = 1\n leftsize = 10\n addjobselect = 0\n dlist = ['on']*2\n jobdata = 0\n modal = 0\n expdata = 0\n\n err = ['All is well', ' ', ' ', ' ', ' ']\n\n today = datetime.date.today()\n critday = datetime.date.today()+datetime.timedelta(days=7)\n acdata = Accounts.query.filter((Accounts.Type == 'Bank') | (Accounts.Type == 'CC')).all()\n bdata = Bills.query.order_by(Bills.bDate).all()\n if docref is not None:\n docref = docref.replace('tmp/vbills', 'tmp/data/vbills')\n if doctxt is not None:\n doctxt = doctxt.replace('tmp/vbills', 'tmp/data/vbills')\n return bdata, cdata, bill, peep, err, modata, adata, acdata, expdata, modlink, caldays, daylist, weeksum, nweeks, addjobselect, jobdata, modal, dlist, fdata, today, cdat, pb, critday, vdata, leftscreen, docref, doctxt, leftsize, cache, filesel\n","sub_path":"iso_B.py","file_name":"iso_B.py","file_ext":"py","file_size_in_byte":49765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"417987765","text":"#import libraries\nimport pandas as pd\nimport numpy as np\n\n#Reading the dataset\ndataset=pd.read_csv('social_adv.csv')\n#dependent variable\ny=dataset.iloc[:,-1].values\nprint(y)\n\n#independent variable\nx=dataset.iloc[:,:-2].values\nprint(x)\n\n#Diving the dataset into training and testing set\nfrom sklearn.model_selection import train_test_split \nx_train,x_test,y_train,y_test=train_test_split(x,y,random_state=0,test_size = 0.25)\n\n#feature scaling scaling the data into a scale so that none of feature get dominant by other features\nfrom sklearn.preprocessing import StandardScaler\nsc_x= StandardScaler()\nx_train=sc_x.fit_transform(x_train)\nx_test=sc_x.fit_transform(x_test)\n\n#KNN classifier\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifer= KNeighborsClassifier(n_neighbors=3)\nclassifer.fit(x_train,y_train)\n#Logistic regressiomn predict the result\ny_pred = classifer.predict(x_test)\nprint(y_pred)\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test, y_pred))\n\n# Visualising the Training set results\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = x_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[: , 0].min() - 1, stop = X_set[: , 0].max() + 1, step = 0.01),\n np.arange(start = X_set[: , 1].min() - 1, stop = X_set[: , 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifer.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('KNN Classifier (Training set)')\nplt.xlabel('Marks')\nplt.ylabel('Reocmmmede Course')\nplt.legend()\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
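A note on the scaling step in main.py above: calling fit_transform on x_test refits the scaler on the test data, so the train and test sets end up standardized with different means and variances. The usual scikit-learn pattern fits on the training split only; the toy data below is illustrative.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

x, y = np.random.rand(100, 2), np.random.randint(0, 2, 100)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.25)
sc_x = StandardScaler()
x_train = sc_x.fit_transform(x_train)  # learn mean/std from the training split only
x_test = sc_x.transform(x_test)        # apply the same parameters to the test split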
+{"seq_id":"558855269","text":"#!/usr/bin/python\n\n#######################################\n# module: find_usenet_posts.py\n# description: indexing and searching usenet posts.\n# bugs to vladimir kulyukin in canvas\n#######################################\n\nimport os\nimport sys\nimport sklearn.datasets\nimport scipy as sp\nimport pickle\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport nltk.stem\n\n## define the stemmer\nenglish_stemmer = nltk.stem.SnowballStemmer('english')\n## define the vectorizer\nclass StemmedCountVectorizer(CountVectorizer):\n def build_analyzer(self):\n analyzer = super(StemmedCountVectorizer, self).build_analyzer()\n return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\n\n## define two distances\ndef euclid_dist(v1, v2):\n diff = v1 - v2\n return sp.linalg.norm(diff.toarray())\n\ndef norm_euclid_dist(v1, v2):\n \"\"\" Normalized Euclid distance b/w vectors v1 and v2 \"\"\"\n v1_normalized = v1/sp.linalg.norm(v1.toarray())\n v2_normalized = v2/sp.linalg.norm(v2.toarray())\n diff = v1_normalized - v2_normalized\n return sp.linalg.norm(diff.toarray())\n\ndef pickle_save(obj, file_name):\n with open(file_name, 'wb') as fp:\n pickle.dump(obj, fp)\n\ndef pickle_load(file_name):\n with open(file_name, 'rb') as fp:\n obj = pickle.load(fp)\n return obj\n\n## load the texts of usenet newsgroups\ndef load_usenet_data():\n \"\"\" Load USENET data \"\"\"\n print('Loading USENET data...')\n usenet_data = sklearn.datasets.fetch_20newsgroups()\n assert len(usenet_data.target_names) == 20\n print('USENET data loaded...')\n return usenet_data\n\ndef vocab_normalize_usenet_data(usenet_data):\n \"\"\"\n Normalize the vocabulary of USENET newsgoup posts with NLTK stemming \n and stoplisting.\n \"\"\"\n vectorizer = StemmedCountVectorizer(min_df=1, stop_words='english')\n feat_mat = vectorizer.fit_transform(usenet_data.data)\n ## the next two lines are for debugging purposes only.\n num_samples, num_features = feat_mat.shape\n print('number of posts: {}, number of features: {}'.format(num_samples, num_features))\n return vectorizer, feat_mat\n\ndef pickle_usenet_feat_mat(feat_mat, path):\n pickle_save(feat_mat, path)\n\ndef unpickle_usenet_feat_mat(path):\n return pickle_load(path)\n\ndef pickle_usenet_vectorizer(vectorizer, path):\n pickle_save(vectorizer, path)\n\ndef unpickle_usenet_vectorizer(path):\n return pickle_load(path)\n\n## find the closest USENET posts.\ndef find_top_n_posts(vectorizer, user_query, doc_feat_mat, dist_fun, top_n=10):\n# def find_top_n_posts():\n\n # vectorizer = unpickle_usenet_vectorizer('usenet_vectorizer.pck')\n # user_query = 'is fuel injector cleaning necessary?'\n # # feat_mat = unpickle_usenet_feat_mat('usenet_feat_mat.pck')\n # doc_feat_mat = unpickle_usenet_feat_mat('usenet_feat_mat.pck')\n # dist_fun = norm_euclid_dist\n # top_n = 5\n\n\n # 1. compute feature vector of user_query\n user_query_vec = vectorizer.transform([user_query])\n print('user query: {}'.format(user_query))\n print('user query feat vector:\\n {}'.format(user_query_vec))\n print('Searching USENET posts..')\n \n doc_match_scores = {}\n best_dist = sys.maxsize\n num_docs, _ = doc_feat_mat.shape\n for i in range(0, num_docs):\n # print(str(doc_feat_mat[i]))\n \n # 2. get the i-th feature mat (i-th row in doc_feat_math).\n ithFeatureMat = doc_feat_mat.getrow(i)\n # input(str(ithFeatureMat))\n \n \n # 3. 
compute the distance b/w user_query_vector and document vector\n # with dist_fun\n # your code here\n distanceThing = dist_fun(user_query_vec,ithFeatureMat)\n # input(\"here\" + str(distanceThing))\n\n # 4. Store the similarity coefficient in doc_match_scores dictinary\n # that maps post vector vector numbers (i.e., i's) to the\n # similarity scores.\n # your code here\n doc_match_scores[i] = distanceThing\n\n # after the for-loop is done \n # 5. convert the doc_math_scores dictionary into a list of key-val pairs,\n # your code here\n someList = []\n for key, value in doc_match_scores.items():\n temp = [key, value]\n someList.append(temp)\n \n\n # 6. sort it from smallest to largest by the second element in each pair. \n # your codee here\n someList.sort(key = lambda x: x[1])\n \n print('Searching over...')\n \n # 7. return the first top_n elements from the sorted list.\n # your code here\n return someList[0:top_n]\n\nif __name__ == \"__main__\":\n find_top_n_posts()","sub_path":"CS6600/Homework/hw11/Deliver/find_usenet_posts.py","file_name":"find_usenet_posts.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"275549623","text":"import json\nimport os\nimport logging\n\nimport pytest\nimport xero.auth\nimport xero\nimport voluptuous as vp\n\nimport xeroex\nfrom xeroex.extractor import main\n\n@pytest.fixture(scope='module')\ndef image_parameters():\n return {\n \"#consumer_key\": os.environ['XERO_PUBLIC_CONSUMER_KEY'],\n \"#consumer_secret\": os.environ['XERO_PUBLIC_CONSUMER_SECRET']\n }\n\n@pytest.fixture(scope='module')\ndef kbc_credentials():\n return {\n \"consumer_key\": os.environ['XERO_PUBLIC_CONSUMER_KEY'],\n \"consumer_secret\": os.environ['XERO_PUBLIC_CONSUMER_SECRET'],\n \"oauth_token\": os.getenv(\"XERO_KBC_OAUTH_TOKEN\"),\n \"oauth_token_secret\": os.getenv(\"XERO_KBC_OAUTH_TOKEN_SECRET\")\n }\n\n#valid configs\nCONFIGS = {\n 'get_authorization_url': {\n \"debug\": True,\n \"action\": \"get_authorization_url\",\n },\n 'verify': {\n \"debug\": True,\n \"action\": \"verify\",\n },\n 'extract': {\n \"debug\": True,\n \"action\": \"extract\",\n \"endpoints\": [\n {\n \"endpoint\": \"Contacts\",\n \"parameters\": {\"since\": \"2 years ago UTC\"}\n }\n ]\n }\n}\n\n@pytest.fixture(scope='module')\ndef xero_credentials():\n creds_type = os.environ[\"XERO_CREDENTIALS_TYPE\"]\n if creds_type == 'public':\n credentials = xero.auth.PublicCredentials(\n **json.loads(os.environ['XERO_PUBLIC_CREDENTIALS_STATE'])\n )\n elif creds_type == 'private':\n try:\n raw_rsa_key = os.environ['XERO_PRIVATE_RSA_KEY']\n rsa_key = eval(raw_rsa_key)\n except (SyntaxError, NameError):\n # travis\n rsa_key = raw_rsa_key.replace('\\\\n', '\\n')\n credentials = xero.auth.PrivateCredentials(\n os.environ['XERO_CONSUMER_KEY'],\n rsa_key)\n else:\n raise NotImplementedError(\"not yet, choose public or private\")\n # xero.auth.PartnerCredentials()\n\n return credentials\n\n\n@pytest.mark.parametrize(\"endpoint,params\", [\n (\"Accounts\", {}),\n (\"CreditNotes\",{}),\n (\"Contacts\", {}),\n (\"Currencies\", {}),\n (\"BankTransfers\", {}),\n (\"Journals\", {}),\n])\ndef test_downloading_data_all_data_paginated(xero_credentials, endpoint, params):\n ex = xeroex.extractor.XeroEx(xero_credentials)\n for chunk in ex.get_endpoint_data(endpoint, params):\n # each chunk must be at least\n assert len(chunk) >= 0 # just make sure we don't get error and a deadlock\n\n\n@pytest.mark.parametrize(\"config\",[\n 
CONFIGS['get_authorization_url'],\n CONFIGS['verify'],\n CONFIGS['extract']\n])\ndef test_validating_real_valid_configs(config):\n xeroex.utils.validate_config(config)\n\n\n@pytest.mark.skipif(not os.getenv(\"XERO_PUBLIC_CREDENTIALS_STATE\"),\n reason='requires manualy setting XERO_PUBLIC_CREDENTIALS_STATE')\ndef test_main_downloading_data(tmpdir, image_parameters):\n datadir = tmpdir.mkdir(\"data\")\n outtables = datadir.mkdir(\"out\").mkdir(\"tables\")\n instate = datadir.mkdir(\"in\").join(\"state.json\")\n instate.write(os.getenv(\"XERO_PUBLIC_CREDENTIALS_STATE\"))\n xeroex.extractor.main(datadir.strpath, CONFIGS['extract'], image_parameters)\n\n assert 'Contacts.csv' in outtables.listdir()[0].strpath\n assert os.path.isfile(os.path.join(datadir.strpath, 'out','state.json'))\n\n\n@pytest.mark.skipif(not os.getenv(\"XERO_KBC_OAUTH_TOKEN\") and not os.getenv(\"XERO_KBC_OAUTH_TOKEN_SECRET\"),\n reason=\"Needs manual setting\")\ndef test_main_downloading_data_with_kbc_token(tmpdir, kbc_credentials):\n datadir = tmpdir.mkdir(\"data\")\n outtables = datadir.mkdir(\"out\").mkdir(\"tables\")\n instate = datadir.mkdir(\"in\").join(\"state.json\").write('{}')\n xeroex.extractor.main(datadir.strpath, CONFIGS['extract'], kbc_credentials)\n\n assert 'Contacts.csv' in outtables.listdir()[0].strpath\n assert os.path.isfile(os.path.join(datadir.strpath, 'out','state.json'))\n","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185122027","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 5 11:50:19 2020\n\n@author: Sarah\n\"\"\"\n\nimport requests\nimport time\nimport pymongo \n\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nfrom pymongo import MongoClient\nfrom splinter import Browser\n\nimport feedparser\nfrom pandas.io.json import json_normalize\nimport pandas as pd\nimport requests\nimport os\n\nis_heroku = False\nif 'IS_HEROKU' in os.environ:\n is_heroku = True\n\ndef scrape_all():\n if is_heroku == False:\n executable_path = {\"executable_path\": \"/usr/local/bin/chromedriver\"}\n else:\n executable_path = {\"executable_path\": os.environ.get('CHROME_DRIVER_BIN')}\n browser = Browser('chrome', **executable_path, headless=True)\n\n url = \"https://www.cdc.gov/coronavirus/2019-ncov/cases-updates/cases-in-us.html/\"\n browser.visit(url)\n html = browser.html\n\n cdc_soup = BeautifulSoup(html, 'html.parser')\n cdc_soup = cdc_soup.find_all(\"div\", id=\"viz001_uscases\")\n cdc_viz001_uscases = cdc_soup.find_all(\"div\", class_='card_number')\n\n cdc_death = cdc_soup.find_all(\"div\", id=\"viz002_usdeaths\")\n cdc_viz002_usdeaths = cdc_death.find_all(\"div\", class_=\"card_number\")\n\n cdc_confirmed = cdc_viz001_uscases[0].text\n cdc_deaths = cdc_viz002_usdeaths[1].text\n\n # cdc_confirmed = cdc_confirmed.replace(',',\"\")\n # cdc_confirmed = int(cdc_confirmed)\n\n # cdc_deaths = cdc_deaths.replace(',',\"\")\n # cdc_deaths = int(cdc_deaths)\n\n url = 'https://coronavirus.jhu.edu/'\n browser.visit(url)\n\n html = browser.html\n jhu_soup = BeautifulSoup(html, 'html.parser')\n\n cdc_callouts = jhu_soup.find_all(\"li\", class_='FeaturedStats_stat__1MPv_')\n cdc = cdc_callouts[2].text\n\n jhu_confirmed = cdc[14:]\n # jhu_confirmed = jhu_confirmed.replace(',',\"\")\n # jhu_confirmed = int(jhu_confirmed)\n\n cdc = cdc_callouts[3].text\n 
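Stepping back to find_usenet_posts.py above: the per-row distance loop can also be expressed as one vectorized pass over the whole sparse matrix. This is a sketch, not the assignment's required solution; sklearn's normalize L2-scales each row, which matches what norm_euclid_dist does per vector.

import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.preprocessing import normalize

def find_top_n_posts_vectorized(vectorizer, user_query, doc_feat_mat, top_n=10):
    q = normalize(vectorizer.transform([user_query]))  # 1 x n_features, unit norm
    docs = normalize(doc_feat_mat)                     # n_docs x n_features
    dists = euclidean_distances(docs, q).ravel()       # one distance per post
    order = np.argsort(dists)[:top_n]
    return [[int(i), float(dists[i])] for i in order]  # same [index, score] pairs

And a note on the CDC scrape in scraping_project.py above: find_all returns a ResultSet, which has no find_all method of its own, so chaining it as written raises AttributeError. A working pattern selects a single Tag first; the id and class names are taken from the record and may no longer exist on the live page, so the HTML here is a stand-in.

from bs4 import BeautifulSoup

html = '<div id="viz001_uscases"><div class="card_number">1,234</div></div>'
soup = BeautifulSoup(html, 'html.parser')
container = soup.find('div', id='viz001_uscases')  # a single Tag, not a ResultSet
print(container.find('div', class_='card_number').text)  # 1,234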
jhu_deaths = cdc[11:]\n # jhu_deaths = jhu_deaths.replace(',',\"\")\n # jhu_deaths = int(jhu_deaths)\n \n url = 'https://covidtracking.com/'\n browser.visit(url)\n\n html = browser.html\n\n news_soup = BeautifulSoup(html, 'html.parser')\n\n atlantic_confirmed = news_soup.find_all('div', class_=\"_9083\")\n atlantic_confirmed = atlantic_confirmed[1].text\n # atlantic_confirmed = atlantic_confirmed.replace(',',\"\")\n # atlantic_confirmed = int(atlantic_confirmed)\n\n atlantic_death = news_soup.find_all('div', class_=\"_9083\")\n atlantic_death = atlantic_death[2].text\n # atlantic_death = atlantic_death.replace(',',\"\")\n # atlantic_death = int(atlantic_death)\n\n mongo_push = {\"cdc_confirmed\":cdc_confirmed,\n \"cdc_deaths\":cdc_deaths,\n \"jhu_confirmed\":jhu_confirmed,\n \"jhu_deaths\":jhu_deaths,\n \"atlantic_confirmed\":atlantic_confirmed,\n \"atlantic_death\":atlantic_death\n }\n\n return mongo_push\n\ndef google_scrape():\n\n url='https://news.google.com/rss/search?q=covid'\n news_feed = feedparser.parse(url) \n\n covid_scrape=pd.json_normalize(news_feed.entries)\n\n #Read articles links\n title =covid_scrape['title']\n title = title.to_list()\n\n link = covid_scrape['link']\n link = link.to_list()\n\n published = covid_scrape['published']\n published = published.to_list()\n\n google_search = {'Title':title,'Link':link,'Published':published}\n\n return google_search\n\n","sub_path":"scraping_project.py","file_name":"scraping_project.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"221924690","text":"# coding:utf-8\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom django.core.mail import EmailMessage\nfrom django.conf import settings\nfrom datetime import datetime, timedelta\nimport json\n\nfrom .models import Category, Product, Cart, Tag\nfrom .forms import OrderForm\n\ncart = Cart()\n\n\ndef about(request):\n categories = Category.objects.all()\n context = {'categories': categories,\n }\n return render(request, 'shop/about.html', context)\n\n\ndef categories(request):\n categories = Category.objects.all()\n context = {'categories': categories,\n }\n return render(request, 'shop/categories.html', context)\n\n\ndef category(request, category_slug):\n products = Product.objects.filter(parent=category_slug)\n categories = Category.objects.all()\n category = Category.objects.get(slug=category_slug)\n context = {'categories': categories,\n 'products': products,\n 'category': category,\n\n }\n return render(request, 'shop/category.html', context)\n\n\ndef product(request, product_slug):\n product = Product.objects.get(slug=product_slug)\n tags = Tag.objects.filter(product=product.pk)\n categories = Category.objects.all()\n context = {'categories': categories,\n 'product': product,\n 'tags': tags,\n }\n return render(request, 'shop/product.html', context)\n\n\ndef products(request, tag): # get all products with tag = tag\n pass\n\n\ndef add2cart(request, ): # ajax\n product_id = request.GET.get('product2cart').split('_')[-1]\n product_quantity = request.GET.get('quantity')\n cart._add_product(product_id, product_quantity)\n products = {i: {'id': p.id,\n 'name': p.name,\n 'quantity': p.quantity,\n 'price': p.price,\n } for i, p in enumerate(cart.products)}\n return JsonResponse({'cart': products, }, )\n\n\ndef order(request, ):\n order_form = OrderForm() # Creating a form to add an order.\n categories = Category.objects.all()\n context = 
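A note on the shop views in views.py above: the module-level cart = Cart() is shared by every request and every visitor, so concurrent shoppers would see each other's items. A common Django alternative keeps the cart in request.session; the sketch below assumes session middleware is enabled and deliberately ignores the record's Cart API.

from django.http import JsonResponse

def add2cart_session(request):
    product_id = request.GET.get('product2cart').split('_')[-1]
    quantity = int(request.GET.get('quantity', 1))
    cart = request.session.get('cart', {})    # per-user storage
    cart[product_id] = cart.get(product_id, 0) + quantity
    request.session['cart'] = cart            # reassign so the session is saved
    return JsonResponse({'cart': cart})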
{'categories': categories,\n 'order_form': order_form,\n }\n return render(request, 'shop/order.html', context)\n\n\n#\n#\n# AJAX views:\n\ndef view_cart(request, ): # ajax\n products = {i: {'id': p.id,\n 'name': p.name,\n 'quantity': p.quantity,\n 'price': p.price,\n } for i, p in enumerate(cart.products)}\n\n return JsonResponse({'cart': products, }, )\n\n\ndef clear_cart(request, ): # ajax\n cart._clear()\n products = {i: {'id': p.id,\n 'name': p.name,\n 'quantity': p.quantity,\n 'price': p.price,\n } for i, p in enumerate(cart.products)}\n return JsonResponse({'cart': products, }, )\n\n\ndef change_product_in_cart(request, ): # ajax\n axis = request.GET.get('axis')\n idx = int(request.GET.get('idx'))\n if axis == 'del':\n cart._remove_product(idx)\n elif axis in {'sub', 'add'}:\n cart._add_sub_products(idx, axis)\n products = {i: {'id': p.id,\n 'name': p.name,\n 'quantity': p.quantity,\n 'price': p.price,\n } for i, p in enumerate(cart.products)}\n return JsonResponse({'cart': products, }, )\n","sub_path":"DeShop/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"377342708","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import find_packages, setup\n\n\nwith open('README.rst') as fh:\n long_description = fh.read()\n\n\nsetup(\n name='ptb',\n install_requires=['Pygments==2.0.2'],\n packages=find_packages(),\n version='0.0.3',\n description='ptb - Python TraceBack for Humans',\n long_description=long_description,\n keywords='traceback debugging',\n author='Anand Reddy Pandikunta (@chillaranand)',\n author_email='anand21nanda@gmail.com',\n maintainer='Anand Reddy Pandikunta',\n maintainer_email='anand21nanda@gmail.com',\n url='https://github.com/chillaranand/ptb',\n data_files=[],\n classifiers=[\n 'Development Status :: 1 - Planning',\n\n 'Operating System :: POSIX',\n\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n\n 'Intended Audience :: Developers',\n\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities'\n ],\n)\n","sub_path":"pypi_install_script/ptb-0.0.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"376435508","text":"#!/usr/bin/env python\n# pylint: skip-file\n\nimport mxnet as mx\nimport numpy as np\nimport os, sys\nimport pickle as pickle\nimport logging\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.append(os.path.join(curr_path, '../common/'))\nimport models\nimport get_data\n\n# symbol net\nbatch_size = 100\ndata = mx.symbol.Variable('data')\nfc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)\nact1 = mx.symbol.Activation(fc1, name='relu1', act_type=\"relu\")\nfc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)\nact2 = mx.symbol.Activation(fc2, name='relu2', act_type=\"relu\")\nfc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)\nsoftmax = mx.symbol.Softmax(fc3, name = 'sm')\n\ndef accuracy(label, pred):\n py = np.argmax(pred, axis=1)\n return np.sum(py == label) / float(label.size)\n\nnum_round = 4\nprefix = './mlp'\n\nkv = mx.kvstore.create('dist')\nbatch_size /= kv.get_num_workers()\n\n#check 
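A note on the distributed batch sizing in test_mlp.py just above: under Python 3, batch_size /= kv.get_num_workers() produces a float, and iterator constructors generally expect an integer batch size. Floor division keeps the type; num_workers below is a stand-in for kv.get_num_workers().

batch_size = 100
num_workers = 4  # stand-in for kv.get_num_workers()
batch_size //= num_workers
assert isinstance(batch_size, int) and batch_size == 25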
data\nget_data.GetMNIST_ubyte()\n\ntrain_dataiter = mx.io.MNISTIter(\n image=\"data/train-images-idx3-ubyte\",\n label=\"data/train-labels-idx1-ubyte\",\n data_shape=(784,), num_parts=kv.get_num_workers(), part_index=kv.get_rank(),\n batch_size=batch_size, shuffle=True, flat=True, silent=False, seed=10)\nval_dataiter = mx.io.MNISTIter(\n image=\"data/t10k-images-idx3-ubyte\",\n label=\"data/t10k-labels-idx1-ubyte\",\n data_shape=(784,),\n batch_size=batch_size, shuffle=True, flat=True, silent=False)\n\ndef test_mlp():\n logging.basicConfig(level=logging.DEBUG)\n\n model = mx.model.FeedForward.create(\n softmax,\n X=train_dataiter,\n eval_data=val_dataiter,\n eval_metric=mx.metric.np(accuracy),\n ctx=[mx.cpu(i) for i in range(1)],\n num_round=num_round,\n learning_rate=0.05, wd=0.0004,\n momentum=0.9,\n kvstore=kv,\n )\n logging.info('Finish traning...')\n prob = model.predict(val_dataiter)\n logging.info('Finish predict...')\n val_dataiter.reset()\n y = np.concatenate([label.asnumpy() for _, label in val_dataiter]).astype('int')\n py = np.argmax(prob, axis=1)\n acc = float(np.sum(py == y)) / len(y)\n logging.info('final accuracy = %f', acc)\n assert(acc > 0.93)\n\nif __name__ == \"__main__\":\n test_mlp()\n","sub_path":"tests/python/distributed/test_mlp.py","file_name":"test_mlp.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"350912112","text":"from dotty_dict import dotty\nfrom tracardi_plugin_sdk.domain.register import Plugin, Spec, MetaData\nfrom tracardi_plugin_sdk.domain.result import Result\nfrom tracardi_plugin_sdk.action_runner import ActionRunner\n\nfrom app.process_engine.dot_accessor import DotAccessor\n\n\nclass CutOutPropertyAction(ActionRunner):\n\n def __init__(self, **kwargs):\n if 'property' not in kwargs:\n raise ValueError(\"Please define property in config section.\")\n\n if kwargs['property'] == 'undefined':\n raise ValueError(\"Please define property in config section. 
It has default value of undefined.\")\n\n self.property = kwargs['property']\n\n async def run(self, payload: dict):\n dot = DotAccessor(self.profile, self.session, payload, self.event, self.flow)\n return Result(port=\"property\", value=dot[self.property])\n\n\ndef register() -> Plugin:\n return Plugin(\n start=False,\n spec=Spec(\n module='app.process_engine.action.v1.properties.cut_out_property_action',\n className='CutOutPropertyAction',\n inputs=['payload'],\n outputs=[\"property\"],\n init={\n \"property\": \"undefined\"\n }\n ),\n metadata=MetaData(\n name='Cut out property',\n desc='Returns defined property from payload.',\n type='flowNode',\n width=200,\n height=100,\n icon='property',\n group=[\"Customer Data\"]\n )\n )\n","sub_path":"app/process_engine/action/v1/properties/cut_out_property_action.py","file_name":"cut_out_property_action.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64487328","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn\nseaborn.set_style(\"darkgrid\")\nseaborn.set_style(\"whitegrid\")\n\nfrom ruffus import pipeline_printout\nfrom ruffus import *\nimport ruffus.cmdline as cmdline\nimport time\nfrom pathlib import Path\nimport os\nimport re\nimport argparse\nfrom glob import glob\nfrom itertools import islice\nimport datetime\nimport subprocess\nimport pandas as pd\nfrom send_socket_message import update_analysis\n\nfrom index_genome_files_bowtie2 import index_genome_files_bowtie2\nfrom mwTools.general import glob_file_list\nfrom mwTools.general import open_by_suffix\n\n# --------------------------------------------------\n\npipeline_name = 'pipeline_roesti'\npipelineDoc = pipeline_name + \"\"\"\n\nPipeline to analyze RNA-seq data.\nPython 3.5 script. |\n\nAuthor: Marc Weber |\nLast updated: 2019.04.16 |\nAffiliation: Center for Genomic Regulation, Luis Serrano's lab |\nemail: webermarcolivier@gmail.com; marc.weber@crg.eu |\n\nRemark: we refer to insert as the original RNA fragment flanked by adapters and read by sequencing (as opposed to \"insert\" as the inner interval between two paired reads).\n\n\"\"\"\n\n# Command-line arguments\nformatter_class = argparse.RawDescriptionHelpFormatter\nformatter_class = argparse.ArgumentDefaultsHelpFormatter\nparser = cmdline.get_argparse(description=pipelineDoc, formatter_class=formatter_class)\nparser.add_argument('-g', '--ref-genbank', dest='refGenbank', type=str, nargs='+',\n help=\"List of filenames for reference DNA sequences in Genbank format. Wildcards such as ? * and [] can be used. Annotations in genbank format will be used to create the CDS and rRNA BED files.\")\nparser.add_argument('-f', '--ref-fasta', dest='refFasta', type=str, nargs='+',\n help=\"List of filenames for reference DNA sequences in fasta format. Wildcards such as ? * and [] can be used.\")\nparser.add_argument('--ref-output-name', dest='refOutputName', type=str,\n help=\"Base name of the indexed genome and BED files. 
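Stepping back to the CutOutPropertyAction record above, here is a minimal sketch of the dotted-path lookup it delegates to DotAccessor: walk nested dicts one key at a time. The real accessor also resolves the profile/session/payload/event/flow sources; this toy version handles plain dicts only and the helper name is hypothetical.

def get_dotted(data: dict, path: str):
    value = data
    for key in path.split('.'):
        value = value[key]  # KeyError if any segment is missing
    return value

assert get_dotted({'payload': {'user': {'email': 'a@b.c'}}}, 'payload.user.email') == 'a@b.c'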
By default, the base name is set to the concatenated id's found in the Genbank or fasta files.\")\nparser.add_argument('--ref-output-dir', dest='refOutputDir', type=str, default='.',\n help=\"Directory of the indexed genome and BED files.\")\nparser.add_argument('-i', '--fastq-files', dest='input_fastq_files', default=['*.fastq*'], nargs='+',\n help=\"List of input fastq files separated by white spaces. Wildcards such as ? * and [] can be used.\")\nparser.add_argument('-o', '--output-dir', dest='output_dir', default='.',\n help='Output directory.')\nparser.add_argument('--test', dest='run_test', action='store_true',\n help='Run a test by taking as input the first 25k reads of the first fasta file.')\nparser.add_argument('--run-locally', dest='run_locally', action='store_true',\n help='Run the pipeline on local machine (as opposed to submitting jobs to the cluster). Note that multiple threads could be used.')\nparser.add_argument('--pipeline-name', dest='pipeline_name', default=pipeline_name,\n help='Name of the pipeline. Important: the history of up-to-date files is kept in a database of the same name in the output folder, .pipeline_roesti.ruffus_history.sqlite.')\nparser.add_argument('--library-type', dest='library_type', default='rna-seq', choices=['ribo-seq', 'rna-seq', 'hydro-trna-seq'],\n help=\"Type of RNA-seq library. In ribosome profiling data analysis, additional fragment filtering is applied in order to select ribosome footprints.\")\nparser.add_argument('--seq-end', dest='seq_end', default='paired-end', choices=['single-end', 'paired-end'],\n help=\"Single-end or paired-end sequencing data.\")\nparser.add_argument('--adapter-seq-fw', dest='trim_adapter_seq_forward', default='',\n help='Adapter sequence forward. Default will be set to TruSeq Universal Adapter 5’ AGATCGGAAGAGCACACGTCT')\nparser.add_argument('--adapter-seq-rv', dest='trim_adapter_seq_reverse', default='',\n help='Adapter sequence reverse. Default will be set to TruSeq Universal Adapter 5’ AGATCGGAAGAGCACACGTCT for rna-seq library, and to Illumina RNA PCR Primer GATCGTCGGACTGTAGAACTCTGAACGTGTAGATCTCGGTGGTCGCCGTA for ribo-seq library.')\nparser.add_argument('--no-trimming', dest='no_trimming', action='store_true',\n help='Do not trim reads before alignment. By default bad quality bases are trimmmed from read ends.')\nparser.add_argument('--remove-rRNA', dest='remove_rRNA', action='store_true',\n help='Remove reads that align to the rRNA.')\nparser.add_argument('--align-quality-threshold', dest='filter_alignments_quality_threshold', default=0, type=int,\n help='Filter out reads with alignment quality score MAPQ smaller than the threshold.')\nparser.add_argument('--indexed-ref-genome', dest='align_indexed_ref_genome_path', default='/users/lserrano/mweber/RNA-seq_data/bowtie2_indexed_genome/Mpn/NC_000912',\n help=\"Path to the basename of the index for the reference genome built with bowtie2-build.\")\nparser.add_argument('--rRNA-bedfile', dest='rRNA_bedfile',\n default=\"/users/lserrano/mweber/Research_cloud/Mycoplasma_pneumoniae_experimental_data/Annotation/mpn_rRNA.bed\",\n help=\"Path to the BED file of rRNA regions. Reads aligning in the first rRNA region will be used to determine the strandness.\")\nparser.add_argument('--rRNA-tRNA-bedfile', dest='rRNA_tRNA_bedfile',\n default=\"/users/lserrano/mweber/Research_cloud/Mycoplasma_pneumoniae_experimental_data/Annotation/mpn_rRNA_tRNA.bed\",\n help=\"Path to the BED file of rRNAs and tRNAs regions of the genome. 
If the option remove-rRNA is set, all reads aligning in these regions will be filtered out.\")\nparser.add_argument('--genome-bedfile', dest='genomeBedFile',\n default=\"/users/lserrano/mweber/Research_cloud/Mycoplasma_pneumoniae_experimental_data/Genome/NC_000912.1.genome\",\n help=\"Path to the BED file genome. Simple BED file that lists the names of the chromosomes (or scaffolds, etc.) and their size (in basepairs).\")\nparser.add_argument('--genome-CDS-bedfile', dest='genomeCDSBedFile',\n default=\"/users/lserrano/mweber/Research_cloud/Mycoplasma_pneumoniae_experimental_data/Annotation/mpn_CDS.bed\",\n help=\"Path to the BED file for all CDS. Will be used to count mRNA fragments for each gene. If set to empty string \\\"\\\", the computation of fragment count per CDS will be skipped.\")\nparser.add_argument('--nthreads', dest='nThreads', default=12, type=int,\n help='Number of threads to use in each cluster node (shared memory). This will reduce computational time, in particular for bowtie2 (alignment).')\nparser.add_argument('--njobs', dest='njobs', default=200, type=int,\n help='Number of concurrent jobs to launch on the cluster (each job will use nThreads nodes in shared memory).')\nparser.add_argument('--bash-profile', dest='bash_profile', default='',\n help='Bash profile is executed before each job on the cluster in order to load the dependencies. By default bash profile path is automatically detected in user\\'s home directory, this option sets the path manually.')\nparser.add_argument('--analysisId', default=None, type=str,\n help=\"String that identifies the overall pipeline run. It is independent from the jobids of the job submissions on the cluster grid engine.\")\nparser.add_argument('--deleteIntermediateFiles', dest='delete_intermediate_files', action='store_true', default=False)\nparser.add_argument('--sendMessageToWebServer', action='store_true',\n help=\"Send a message to the webserver dbspipe when the pipeline has finished. Only for pipeline launched by the web server application.\")\nparser.add_argument('--host', dest='host', default='cluster', type=str)\noptions = parser.parse_args()\n\n\n## Global\n\npipeline_name = options.pipeline_name\nrun_locally = options.run_locally\n# If running locally, only run 1 job\nif run_locally:\n if options.njobs > 1:\n print(\"WARNING: you are running the pipeline in local mode with njobs > 1. \"\n \"Computations will be run in parallel on the same machine.\")\n\n# Glob fastq files following list of patterns\nfastqFiles = [fn for pattern in options.input_fastq_files for fn in glob(pattern) if Path(fn).is_file()]\nif len(fastqFiles) == 0:\n raise ValueError(\"ERROR: no input file exists with path(s) {}\".format(options.input_fastq_files))\n raise SystemExit\n# Remove duplicated files from list (can happen if multiple patterns match same files)\nfastqFiles = sorted(list(set(fastqFiles)))\n# Filter out files that start with \"test\"\n# Note: This is not strictly necessary. 
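A minimal sketch of the fastq-collection step above: expand each glob pattern, keep only regular files, and deduplicate (several patterns can match the same file) while restoring a deterministic order.

from glob import glob
from pathlib import Path

def collect_inputs(patterns):
    files = [fn for pattern in patterns for fn in glob(pattern) if Path(fn).is_file()]
    return sorted(set(files))  # drop duplicates, keep a stable order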
Maybe we want to test the pipeline with already existing\n# test fastq files.\n# fastqFiles = [fn for fn in fastqFiles if not re.match(r'test.*', Path(fn).name)]\nfastqPath = Path(fastqFiles[0]).parent\nprint(\"fastqFiles:\", fastqFiles)\n\n\ndef group_paired_end_fastq_files(fastqFiles):\n \"\"\"Group paired-end fastq files together\"\"\"\n fastqFilesPE = []\n for f1 in fastqFiles:\n f1match = re.match(r\"^(.+)(read1|r1|R1)(_\\d\\d\\d)?\\.fastq(\\.gz)?$\", Path(f1).name)\n if f1match:\n read1Filename = f1\n read1FastaBasename = f1match.group(1)\n for f2 in fastqFiles:\n if re.match(r'^' + read1FastaBasename + r\"(read2|r2|R2)(_\\d\\d\\d)?\\.fastq(\\.gz)?$\", Path(f2).name):\n read2Filename = f2\n fastqFilesPE.append((read1Filename, read2Filename))\n return fastqFilesPE\n\n\n# Paths\nrootPath = Path('/users/lserrano/mweber')\nscriptPath = rootPath / 'Research_cloud' / 'RNA-seq_data_analysis' / 'src'\n# Note: here we must use the full pathname!!!\noutputPath = Path(options.output_dir).resolve()\nos.chdir(str(outputPath))\n# Make pipeline folder\npipeline_path = outputPath\npipeline_path.mkdir(exist_ok=True)\npipeline_folder = pipeline_path.name\n\n\n# The isis filesystem on the cluster can be very slow to propagate file changes, which can be a problem in the pipeline\n# because we frequently write and read files. We introduce a waiting loop in between file creation and file opening in\n# order to let the filesystem update the file changes. The loop will keep checking for the existence of the file every X\n# seconds.\nsleepTimeFilesystem = 2 # seconds\n\n\n# Cluster queues\nshort_queue = 'short-sl7'\nlong_queue = 'long-sl7'\n\n# The bash profile together with the load_dependencies script will be executed before each command in the pipeline.\n# This is necessary when running computation on the nodes of the cluster.\nif options.bash_profile == '':\n if (Path.home() / '.bash_profile').exists():\n options.bash_profile = Path.home() / '.bash_profile'\n elif (Path.home() / '.bashrc').exists():\n options.bash_profile = Path.home() / '.bashrc'\n else:\n print(\"ERROR: bash profile could not be found.\")\n raise SystemError\nloadDependenciesScriptPath = scriptPath / \"load_dependencies.sh\"\nif options.host == 'cluster':\n # cmd_source_bash_profile = \". {} && {} && cd {} &&\".format(options.bash_profile,\n # str(loadDependenciesScriptPath),\n # str(outputPath))\n cmd_source_bash_profile = \". 
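A hedged usage example for group_paired_end_fastq_files above; the file names are invented. The regex pairs names that differ only in the read1/read2 (or R1/R2, r1/r2) token, with an optional _NNN suffix before .fastq or .fastq.gz.

pairs = group_paired_end_fastq_files([
    'sampleA_R1.fastq.gz', 'sampleA_R2.fastq.gz',
    'sampleB_read1_001.fastq', 'sampleB_read2_001.fastq',
])
# -> [('sampleA_R1.fastq.gz', 'sampleA_R2.fastq.gz'),
#     ('sampleB_read1_001.fastq', 'sampleB_read2_001.fastq')]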
\\\"{}\\\" && cd \\\"{}\\\" &&\".format(str(loadDependenciesScriptPath),\n str(outputPath))\nelse:\n cmd_source_bash_profile = \"cd \\\"{}\\\" &&\".format(str(outputPath))\n\n# Global bowtie2 options\noptions.phredEncoding = 'phred33'\n\n## trim adapter\noptions.no_trimming\noptions.trim_adapter_min_length = 12\noptions.trim_adapter_trim_end_quality = 10\noptions.trim_adapter_nthreads = min(4, options.nThreads)\nif options.library_type == 'ribo-seq':\n if options.trim_adapter_seq_forward == '':\n options.trim_adapter_seq_forward = 'AGATCGGAAGAGCACACGTCT'\n if options.trim_adapter_seq_reverse == '':\n options.trim_adapter_seq_reverse = 'GATCGTCGGACTGTAGAACTCTGAACGTGTAGATCTCGGTGGTCGCCGTA'\nelif options.library_type == 'rna-seq':\n if options.trim_adapter_seq_forward == '':\n options.trim_adapter_seq_forward = 'AGATCGGAAGAGCACACGTCT'\n if options.trim_adapter_seq_reverse == '':\n options.trim_adapter_seq_reverse = 'AGATCGGAAGAGCACACGTCT'\nelif options.library_type == 'hydro-trna-seq':\n options.trim_adapter_min_length = 10\n if options.trim_adapter_seq_forward == '':\n options.trim_adapter_seq_forward = 'AGATCGGAAGAGCACACGTCT'\n if options.trim_adapter_seq_reverse == '':\n options.trim_adapter_seq_reverse = 'AGATCGGAAGAGCACACGTCT'\n\n## align\noptions.align_alignmentMode = 'end-to-end'\n\n# As a reference, the default options for bowtie2 in the mode --very-sensitive are:\n# -D 20 -R 3 -N 0 -L 20 -i S,1,0.50\nif options.library_type == 'ribo-seq':\n options.align_nMismatches = 1\n options.align_seedLength = 14\n options.align_seedInterval = 'S,1,0.50' # mode very sensitive. For read length of 50, seed interval is 1 + 0.5*sqrt(50) = 4.53\nelif options.library_type == 'rna-seq':\n options.align_nMismatches = 0\n options.align_seedLength = 20\n options.align_seedInterval = 'S,1,1.15' # mode sensitive. For read length of 50, seed interval is 1 + 1.15*sqrt(50) = 9.13\nelif options.library_type == 'hydro-trna-seq':\n options.align_nMismatches = 1\n options.align_seedLength = 14\n options.align_seedInterval = 'S,1,0.50' # mode very sensitive. 
For read length of 50, seed interval is 1 + 0.5*sqrt(50) = 4.53\n\noptions.align_maxAlignAttempts = 20 # very sensitive: -D 20\noptions.align_maxReSeed = 3 # very sensitive: -R 3\n\nif options.library_type == 'ribo-seq':\n options.align_maxInsertLength = 200\nelif options.library_type == 'rna-seq':\n options.align_maxInsertLength = 1200 # This is the theoretical maximum fragment length in the library preparation\nelif options.library_type == 'hydro-trna-seq':\n options.align_maxInsertLength = 200\noptions.align_max_reported_alignments = 0 # set to 0 to deactivate the option\n\n## convert_sam_to_bam\nif run_locally:\n options.samtools_sort_tmp_dir = '.'\nelse:\n options.samtools_sort_tmp_dir = '$TMPDIR'\noptions.samtools_sort_max_mem = 34000 # M\noptions.samtools_sort_nthread = 4\n# Note: maximum memory per thread **has to be an integer**, otherwise it is interpreted as bytes\n# Note: we have to allocate some free memory for the main samtools thread, probably for the\n# file merging, otherwise the job will reach memory limit.\noptions.samtools_sort_max_mem_per_thread = int((options.samtools_sort_max_mem - 7000) / options.samtools_sort_nthread)\n\n## filter_alignments\n# ...\n\n## Write information about the pipeline in a text file\n# pipelineDocFile = pipeline_path / (pipeline_name + '.{:d}.readme'.format(options.jobid))\npipelineDocFile = pipeline_path / (pipeline_name + '.readme')\n\n# Standard python logger which can be synchronised across concurrent Ruffus tasks\n# options.log_file = str(pipeline_path / (pipeline_name + '.{:d}.log'.format(options.jobid)))\noptions.log_file = str(pipeline_path / (pipeline_name + '.log'))\noptions.verbose = 2\nlogger, logger_mutex = cmdline.setup_logging(pipeline_name, options.log_file, options.verbose)\nwith logger_mutex:\n logger.debug(\"Pipeline options:\\n\" + str(options))\npipelineDoc += \"Type of RNA-seq data:\" + options.library_type + \"\\n\\n\"\nprint(\"\\n\\n########## TYPE OF RNA-SEQ DATA: \",options.library_type, \"\\n\\n\")\npipelineDoc += \"Pipeline options:\\n\\n\"\nfor key, value in vars(options).items():\n pipelineDoc += '{}:{}\\n'.format(key, value)\n\n\n# Generate test files\nwriteTestFiles = options.run_test\n# takes 25k first reads in the first pair of fastq files\nif writeTestFiles:\n nReads = int(50e3)\n if options.seq_end == 'single-end':\n exampleFiles = [fastqFiles[0]]\n elif options.seq_end == 'paired-end':\n fastqFilesPE = group_paired_end_fastq_files(fastqFiles)\n print(\"fastqFilesPE\", fastqFilesPE)\n if len(fastqFilesPE) == 0:\n raise ValueError(\"No fastq file pair has been found within the list of input files. 
Check that the filenames are identical for read1 and read2.\")\n exampleFiles = fastqFilesPE[0]\n \n for i, fastqFilename in enumerate(exampleFiles):\n print(\"Writing test files from head of file: \",fastqFilename)\n with open_by_suffix(str(fastqFilename)) as fastqFile:\n head = list(islice(fastqFile, 4*nReads)) # Note: each read is 4 lines in fastq format\n if options.seq_end == 'single-end':\n testFilename = str(outputPath / \"test.fastq\")\n elif options.seq_end == 'paired-end':\n testFilename = str(outputPath / (\"test_read{:d}.fastq\".format(i + 1)))\n with open(testFilename, \"w\") as testFile:\n testFile.writelines(head)\n fastqPath = outputPath\n\nif options.run_test:\n # Test fastq files with 25k first reads\n print(\"Taking as a test the 25k first reads of the first (pair of) fastq file(s).\")\n fastqFiles = [str(filepath.resolve()) for filepath in fastqPath.glob('test*.fastq') if filepath.is_file()]\n\nprint(\"fastqFiles: \", fastqFiles)\n\n\n#############################################################################\n\n\n# Tools\ndef estimateNbLines(filename, learn_size_bytes=1024*1024):\n \"\"\" Estimate the number of lines in the given file without reading the whole file.\"\"\"\n\n file_size = os.path.getsize(filename)\n learn_size_bytes = min(learn_size_bytes, file_size)\n\n if Path(filename).suffix == '.gz':\n # Rough approximation for gzipped fasta file of RNA-seq reads,\n # ~40k reads per 1MB of gz fasta file, each read is 4 lines\n numLines = (file_size/(1024*1024)) * 40e3 * 4\n else:\n with open(filename, 'rb') as file:\n buf = file.read(learn_size_bytes)\n numLines = file_size / (len(buf) // buf.count(b'\\n'))\n\n return numLines\n\n\ndef printTimeDelta(time_delta):\n s = time_delta.seconds + time_delta.days*24*3600\n hours, remainder = divmod(s, 3600)\n minutes, seconds = divmod(remainder, 60)\n return \"{:d}:{:d}:{:f}\".format(hours, minutes, seconds)\n\n\ndef plot_trimmed_reads_length_dist(summaryFilePath):\n histData = []\n with open(summaryFilePath,'r') as summaryFile:\n for line in summaryFile:\n if line.strip() == 'Read length distribution after trimming:' \\\n or line.strip() == 'Length distribution of reads after trimming:':\n break\n for line in summaryFile:\n regexSearch = re.search(r'([0-9]+):?\\s*([0-9]+)', line.strip())\n if regexSearch:\n length = int(regexSearch.group(1))\n counts = int(regexSearch.group(2))\n histData.append((length, counts))\n \n x, y = zip(*histData)\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_subplot(111)\n ax.plot(x, y)\n ax.set_xlabel('length trimmed read (insert)')\n fig.savefig(summaryFilePath + '.length_dist.png', dpi=200)\n \n return histData\n\n\ndef wait_for_file(filename, sleepTime=2):\n filePath = Path(filename)\n while not filePath.exists():\n time.sleep(sleepTime)\n return\n\n\ndef wait_for_any_of_files(filenameList, sleepTime=2):\n filePathList = [Path(fn) for fn in filenameList]\n while not any([fp.exists() for fp in filePathList]):\n print(\"sleeping...\")\n time.sleep(sleepTime)\n return\n\n\n#############################################################################\n\n\n# Start DRMAA session\nif not run_locally:\n # Start shared drmaa session for all jobs / tasks in pipeline\n # drmaa Open Grid Forum API\n import drmaa\n drmaa_session = drmaa.Session()\n drmaa_session.initialize()\nelse:\n drmaa_session = None\n\nfrom ruffus.drmaa_wrapper import run_job, error_drmaa_job\n\niTask = 0\n\n#############################################################################\n\ncmdJobRunningRequest = '{} --analysisId 
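A minimal sketch of the test-file trick above: a fastq record is exactly four lines, so the first N reads are the first 4*N lines, taken lazily with itertools.islice. Unlike the record's open_by_suffix helper, plain open here handles uncompressed files only, and the function name is illustrative.

from itertools import islice

def write_fastq_head(src_path: str, dst_path: str, n_reads: int) -> None:
    # Copy the first n_reads records (4 lines each) without loading the whole file.
    with open(src_path) as src, open(dst_path, 'w') as dst:
        dst.writelines(islice(src, 4 * n_reads))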
{} --status 1 {}'.format(str(scriptPath / 'send_socket_message.py'),\n options.analysisId,\n '--sendMessageToWebServer' if options.sendMessageToWebServer else '')\n\ncmdProgressRequest = '{} --analysisId {} --type 1 {} '.format(str(scriptPath / 'send_socket_message.py'),\n options.analysisId,\n '--sendMessageToWebServer' if options.sendMessageToWebServer else '')\n\n#############################################################################\n\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"\nTask: index_genome_files\nIndexing reference DNA sequences for bowtie2 aligner.\n\nSoftware: bowtie2-build version 2.2.9\n\nParameters used: all default\\n\\n\"\"\"\npipelineDoc += infoStr\n\n\ncomputeIndexedGenome = (options.refGenbank is not None) or (options.refFasta is not None)\ntask_name = \"index_genome_files\"\n\nif computeIndexedGenome:\n\n genbankFileList = glob_file_list(options.refGenbank)\n fastaFileList = glob_file_list(options.refFasta)\n \n with logger_mutex:\n genbankFileListPrint = genbankFileList if genbankFileList is not None else [\"\"]\n fastaFileListPrint = fastaFileList if fastaFileList is not None else [\"\"]\n logger.debug(\"genbankFileList:\\n\" + \"\\n\".join(genbankFileListPrint))\n logger.debug(\"fastaFiles:\\n\" + \"\\n\".join(fastaFileListPrint))\n\n refOutputDir = options.refOutputDir\n \n indexedGenomePath = index_genome_files_bowtie2(genbankFileList=genbankFileList, fastaFileList=fastaFileList,\n outputName=options.refOutputName, outputDir=refOutputDir)\n with logger_mutex:\n logger.debug(\"### Indexed genome path:\\n\" + indexedGenomePath)\n\n\n options.align_indexed_ref_genome_path = indexedGenomePath\n options.rRNA_bedfile = indexedGenomePath + '_rRNA.bed'\n options.rRNA_tRNA_bedfile = indexedGenomePath + '_rRNA_tRNA.bed'\n options.genomeBedFile = indexedGenomePath + '_genome.bed'\n options.genomeCDSBedFile = indexedGenomePath + '_CDS.bed'\n\n\n#############################################################################\n\n\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"\nTask: trim_adapter_PE_reads.\nTrimming adapters from PE reads and extracting insert.\n\nSoftware: SeqPurge 0.1-478-g3c8651b\n\nParameters used:\\n\\n\"\"\"\ninfoStr += \"-min_len Minimum read length after adapter trimming. Shorter reads are discarded. Default value: '15'\\n\"\ninfoStr += \"min_len \" + str(options.trim_adapter_min_length) + \"\\n\\n\"\ninfoStr += \"-a1 Forward adapter sequence (at least 15 bases). Default value: 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCACGAGTTA'\\n\"\ninfoStr += \"a1 \" + options.trim_adapter_seq_forward + \"\\n\\n\"\ninfoStr += \"-a2 Reverse adapter sequence (at least 15 bases). 
Default value: 'AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTC'\\n\"\ninfoStr += \"a2 \" + options.trim_adapter_seq_reverse + \"\\n\\n\"\ninfoStr += \"-threads \" + str(options.nThreads) + \"\\n\\n\"\npipelineDoc += infoStr\n\nintermediateTaskPathList = []\niTask += 1\ntask_name = \"trim_adapter_PE_reads\"\ntask_path = pipeline_path / \"Task{:02d}_{}\".format(iTask, task_name)\nintermediateTaskPathList.append(task_path)\n@follows(mkdir(str(task_path)))\n# Only run this analysis for paired-end library type\n@active_if(options.seq_end == 'paired-end')\n# Group together file pairs\n@collate(\n fastqFiles,\n \n formatter(r'^/?(.+/)*(?P.+)(read[12]|[Rr][12])(_\\d\\d\\d)?\\.fastq(\\.gz)?$'),\n\n # Create output parameter supplied to next task\n [str(task_path) + \"/{SAMPLENAME[0]}read1.trimmed.fastq.gz\", # paired file 1\n str(task_path) + \"/{SAMPLENAME[0]}read2.trimmed.fastq.gz\", # paired file 2\n str(task_path) + \"/{SAMPLENAME[0]}.trimmed.nreads\"],\n\n # Extra parameters for our own convenience and use\n [str(task_path) + \"/{SAMPLENAME[0]}unpaired.read1.fastq.gz\", # unpaired file 1\n str(task_path) + \"/{SAMPLENAME[0]}unpaired.read2.fastq.gz\"], # unpaired file 2\n\n task_name, logger, logger_mutex)\ndef trim_adapter_PE_reads(input_files,\n output_paired_files,\n discarded_unpaired_files,\n task_name, logger, logger_mutex):\n if len(input_files) != 2:\n raise Exception(\"One of read pairs %s missing\" % (input_files,))\n\n if options.no_trimming:\n with logger_mutex:\n logger.debug(\"Input files:\")\n logger.debug(input_files[0])\n logger.debug(input_files[1])\n\n # Create symbolic links to the original read files\n Path(output_paired_files[0]).symlink_to(Path(input_files[0]).resolve())\n Path(output_paired_files[1]).symlink_to(Path(input_files[1]).resolve())\n\n # Compute nb of valid reads and write in file\n cmd = \"wc -l < \" + output_paired_files[0]\n cmd_output = subprocess.check_output(cmd, shell=True)\n cmd_output = cmd_output.decode().strip()\n print(cmd_output)\n nValidReads = int(cmd_output)\n\n metadata_filename = output_paired_files[2]\n with open(metadata_filename,'w') as metadata_file:\n metadata_file.write(str(nValidReads) + '\\n')\n\n else:\n\n nReadsApprox = max([estimateNbLines(input_file, 100*1024*1024) / 4. 
for input_file in input_files])\n # Approximate computation time in seconds per reads per thread measured for test run\n comp_time = (nReadsApprox*(((2*60+59)*16)/12.5e6))/options.trim_adapter_nthreads\n with logger_mutex:\n logger.debug(\"trim_adapter_PE_reads, nReadsApprox \" + str(nReadsApprox))\n logger.debug(\"trim_adapter_PE_reads, comp_time \" + str(comp_time))\n\n summaryFilename = re.search(r'(.+)\\.fastq.*', output_paired_files[0]).group(1) + \".summary\"\n\n # We send the message to the web server from the first node of computation when the job has started.\n cmd = (cmd_source_bash_profile +\n cmdJobRunningRequest + \" && \" +\n cmdProgressRequest + '--progress \"Trim adapter PE reads\" --n 1 && ' +\\\n \" SeqPurge \" +\n \" -in1 {} -in2 {} \".format(input_files[0], input_files[1]) +\n \" -out1 {} -out2 {} \".format(output_paired_files[0], output_paired_files[1]) +\n \" -a1 \" + options.trim_adapter_seq_forward + \" -a2 \" + options.trim_adapter_seq_reverse + \" \" +\n \" -min_len \" + str(options.trim_adapter_min_length) +\n \" -threads \" + str(options.trim_adapter_nthreads) +\n \" -summary \" + summaryFilename)\n with logger_mutex:\n logger.debug(cmd)\n logger.debug(\"estimated computation time \" + str(datetime.timedelta(seconds=comp_time)))\n\n try:\n stdout_res, stderr_res = \"\",\"\"\n walltime = datetime.timedelta(seconds=max(4*comp_time, 1*60*60))\n if walltime < datetime.timedelta(hours=6):\n job_queue_name = short_queue\n else:\n job_queue_name = long_queue\n job_other_options = \" -pe smp \" + str(options.trim_adapter_nthreads) +\\\n \" -q \" + job_queue_name +\\\n \" -l h_rt=\" + printTimeDelta(walltime) +\\\n \" -l h_vmem=3G,virtual_free=3G\" +\\\n \" -cwd\"\n\n # ruffus.drmaa_wrapper.run_job\n stdout_res, stderr_res = run_job(cmd_str=cmd, job_name=task_name, logger=logger,\n drmaa_session=drmaa_session, run_locally=run_locally,\n job_other_options=job_other_options, retain_job_scripts=False)\n\n # relay all the stdout, stderr, drmaa output to diagnose failures\n except error_drmaa_job as err:\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, -1)\n raise Exception(\"\\n\".join(map(str,[\"Failed to run:\", cmd, err, stdout_res, stderr_res])))\n\n std_err_string = \"\".join([line.decode() if isinstance(line, bytes) else line for line in stderr_res])\n with logger_mutex:\n logger.debug(std_err_string)\n\n wait_for_file(summaryFilename, sleepTimeFilesystem)\n print(\"summary file found\", summaryFilename)\n\n # Extract nb of valid reads from the summary file and write to a small metadata file\n with open(summaryFilename,'r') as summary_file:\n nValidReads = None\n nRemovedReads = None\n nRawReads = None\n for line in summary_file:\n regex = re.search(r'Reads \\(forward \\+ reverse\\): ([0-9]+).*', line)\n if regex:\n nRawReads = int(regex.group(1))\n print('nRawReads', nRawReads)\n regex = re.search(r'Removed reads: ([0-9]+) of .*', line)\n if regex:\n nRemovedReads = int(regex.group(1))\n print('nRemovedReads', nRemovedReads)\n if nRawReads is not None and nRemovedReads is not None:\n nValidReads = nRawReads - nRemovedReads\n\n metadata_filename = output_paired_files[2]\n with open(metadata_filename,'w') as metadata_file:\n metadata_file.write(str(nValidReads) + '\\n')\n\n # Small analysis that will be run on the login node\n plot_trimmed_reads_length_dist(summaryFilename)\n\n with logger_mutex:\n logger.debug(\"Trimming of pair-end reads 
finished.\")\n\n\n\n#############################################################################\n\n\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"\nTask: trim_adapter_SE_reads\nTrimming adapters from SE reads and extracting insert.\n\nSoftware: skewer-0.2.2\n\nParameters used:\\n\\n\"\"\"\ninfoStr += \"-x Adapter sequence/file (AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC)\\n\"\ninfoStr += \"-x \" + options.trim_adapter_seq_forward + \"\\n\\n\"\n\ninfoStr += \"-l, --min The minimum read length allowed after trimming; (18)\\n\"\ninfoStr += \"-l \" + str(options.trim_adapter_min_length) + \"\\n\\n\"\n\ninfoStr += \"-q, --end-quality Trim 3' end until specified or higher quality reached; (0)\\n\"\ninfoStr += \"-q \" + str(options.trim_adapter_trim_end_quality) + \"\\n\\n\"\n\ninfoStr += \"-t, --threads Number of concurrent threads [1, 32]; (1)\\n\"\ninfoStr += \"-t \" + str(options.trim_adapter_nthreads) + \"\\n\\n\"\n\npipelineDoc += infoStr\n\n\niTask += 1\ntask_name = \"trim_adapter_SE_reads\"\ntask_path = pipeline_path / \"Task{:02d}_{}\".format(iTask, task_name)\nintermediateTaskPathList.append(task_path)\n@follows(mkdir(str(task_path)))\n# Only run this analysis for single-end library type\n@active_if(options.seq_end == 'single-end')\n@transform(fastqFiles,\n\n formatter(r'^/?(.+/)*(?P.+)\\.fastq(\\.gz)?$'),\n\n # Create output parameter supplied to next task\n [str(task_path) + \"/{SAMPLENAME[0]}.trimmed.fastq\",\n str(task_path) + \"/{SAMPLENAME[0]}.trimmed.nreads\"],\n\n task_name, task_path, logger, logger_mutex)\ndef trim_adapter_SE_reads(input_file,\n output_files,\n task_name, task_path, logger, logger_mutex):\n\n if type(input_file) is not str:\n raise Exception(\"Input file should be unique, input_file: %s\" % (input_file,))\n\n if options.no_trimming:\n with logger_mutex:\n logger.debug(\"Input files:\")\n logger.debug(input_file)\n\n # Create symbolic links to the original read files\n Path(output_files[0]).symlink_to(Path(input_file).resolve())\n\n # Compute nb of valid reads and write in file\n cmd = \"wc -l < \" + output_files[0]\n cmd_output = subprocess.check_output(cmd, shell=True)\n cmd_output = cmd_output.decode().strip()\n print(cmd_output)\n nValidReads = int(cmd_output)\n\n metadata_filename = output_files[1]\n with open(metadata_filename,'w') as metadata_file:\n metadata_file.write(str(nValidReads) + '\\n')\n\n else:\n\n\n nReadsApprox = estimateNbLines(input_file, 100*1024*1024) / 4.\n # Approximate computation time in seconds per reads per thread measured for test run\n comp_time = (nReadsApprox * (((2*60) * 1) / 5e4)) / options.trim_adapter_nthreads\n\n # We send the message to the web server from the first node of computation when the job has started.\n cmd = (cmd_source_bash_profile +\n cmdJobRunningRequest + \" && \" +\n cmdProgressRequest + '--progress \"Trim adapter SE reads\" --n 2 && ' +\\\n \" skewer \" +\n \" \" + input_file +\n \" -x \" + options.trim_adapter_seq_forward +\n \" -l \" + str(options.trim_adapter_min_length) +\n \" -q \" + str(options.trim_adapter_trim_end_quality) +\n \" -t \" + str(options.trim_adapter_nthreads) +\n \" --quiet\" +\n \" -1 > \" + output_files[0])\n\n with logger_mutex:\n logger.debug(cmd)\n logger.debug(\"estimated computation time \" + str(datetime.timedelta(seconds=comp_time)))\n\n try:\n stdout_res, stderr_res = \"\",\"\"\n walltime = datetime.timedelta(seconds=max(4*comp_time, 1*60*60))\n if walltime < datetime.timedelta(hours=6):\n job_queue_name = short_queue\n else:\n job_queue_name = 
long_queue\n job_other_options = \" -pe smp \" + str(options.trim_adapter_nthreads) +\\\n \" -q \" + job_queue_name +\\\n \" -l h_rt=\" + printTimeDelta(walltime) +\\\n \" -l h_vmem=3G,virtual_free=3G\" +\\\n \" -cwd\"\n\n # ruffus.drmaa_wrapper.run_job\n stdout_res, stderr_res = run_job(cmd_str=cmd, job_name=task_name, logger=logger,\n drmaa_session=drmaa_session, run_locally=run_locally,\n job_other_options=job_other_options, retain_job_scripts=False)\n\n # relay all the stdout, stderr, drmaa output to diagnose failures\n except error_drmaa_job as err:\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, -1)\n raise Exception(\"\\n\".join(map(str,[\"Failed to run:\", cmd, err, stdout_res, stderr_res])))\n\n std_err_string = \"\".join([line.decode() if isinstance(line, bytes) else line for line in stderr_res])\n with logger_mutex:\n logger.debug(std_err_string)\n\n trimmedFilename = output_files[0]\n\n # Compute nb of valid reads and write in file\n cmd = \"wc -l < \" + trimmedFilename\n cmd_output = subprocess.check_output(cmd, shell=True)\n cmd_output = cmd_output.decode().strip()\n print(cmd_output)\n nValidReads = int(cmd_output)\n\n metadata_filename = output_files[1]\n with open(metadata_filename,'w') as metadataFile:\n metadataFile.write(str(nValidReads) + '\\n')\n\n with logger_mutex:\n logger.debug(cmd_output)\n logger.debug(\"Trimming of single-end reads finished.\")\n\n\n\n#############################################################################\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"Align reads to genome.\n\nAligning reads to the reference genome.\n\nWe align all the trimmed raw reads to the genome, including rRNA and tRNAs, which we will filter in the next stage. Note that we get tons of very short reads (<20 bp) that will virtually map everywhere on the genome but with a very low mapping quality (MAPQ). These will give rise to paired aligned reads that align discordantly, most often because they get assigned random positions on the genome and do not satisfy the expected insert size or relative orientation (Forward-Reverse). We will only analyze the reads that align concordantly in the next stage.\n\nIn the case of ribosome profiling, footprints are very short inserts. A sensitive alignment tool might be better at aligning those. We use a small seed length and a smaller seed step, in order to improve alignment of shorter reads and allow more mismatches.\n\nSoftware: bowtie2-align-s version 2.2.9\n\nParameters used: \n\n\"\"\"\n\ninfoStr += \"options.align_indexed_ref_genome_path: \" + str(options.align_indexed_ref_genome_path) + \"\\n\\n\"\n\ninfoStr += \"--phred33 Input qualities are ASCII chars equal to the Phred quality plus 33. This is also called the \\\"Phred+33\\\" encoding, which is used by the very latest Illumina pipelines.\\n\"\ninfoStr += \"Phred quality encoding: \" + options.phredEncoding + \"\\n\\n\"\n\ninfoStr += \"--end-to-end In this mode, Bowtie 2 requires that the entire read align from one end to the other, without any trimming (or \\\"soft clipping\\\") of characters from either end. The match bonus --ma always equals 0 in this mode, so all alignment scores are less than or equal to 0, and the greatest possible alignment score is 0. This is mutually exclusive with --local. --end-to-end is the default mode.\\n\"\ninfoStr += \"--local In this mode, Bowtie 2 does not require that the entire read align from one end to the other. 
Rather, some characters may be omitted (\\\"soft clipped\\\") from the ends in order to achieve the greatest possible alignment score. The match bonus --ma is used in this mode, and the best possible alignment score is equal to the match bonus (--ma) times the length of the read. Specifying --local and one of the presets (e.g. --local --very-fast) is equivalent to specifying the local version of the preset (--very-fast-local). This is mutually exclusive with --end-to-end. --end-to-end is the default mode.\\n\"\ninfoStr += \"options.align_alignmentMode: \" + options.align_alignmentMode + \"\\n\\n\"\n\ninfoStr += \"\"\"-N <int>\nSets the number of mismatches allowed in a seed alignment during multiseed alignment. Can be set to 0 or 1. Setting this higher makes alignment slower (often much slower) but increases sensitivity. Default: 0.\\n\"\"\"\ninfoStr += \"-N \" + str(options.align_nMismatches) + \"\\n\\n\"\n\ninfoStr += \"-L Sets the length of the seed substrings to align during multiseed alignment. Smaller values make alignment slower but more sensitive. Default: the --sensitive preset is used by default, which sets -L to 20 both in --end-to-end mode and in --local mode.\\n\"\ninfoStr += \"-L \" + str(options.align_seedLength) + \"\\n\\n\"\n\ninfoStr += \"-D Up to <int> consecutive seed extension attempts can \\\"fail\\\" before Bowtie 2 moves on, using the alignments found so far. A seed extension \\\"fails\\\" if it does not yield a new best or a new second-best alignment. This limit is automatically adjusted up when -k or -a are specified. Default: 15.\\n\"\ninfoStr += \"Default value in the mode very sensitive: 20\\n\"\ninfoStr += \"-D \" + str(options.align_maxAlignAttempts) + \"\\n\\n\"\n\ninfoStr += '-R <int> is the maximum number of times Bowtie 2 will \"re-seed\" reads with repetitive seeds. When \"re-seeding,\" Bowtie 2 simply chooses a new set of reads (same length, same number of mismatches allowed) at different offsets and searches for more alignments. A read is considered to have repetitive seeds if the total number of seed hits divided by the number of seeds that aligned at least once is greater than 300. Default: 2.\\n'\ninfoStr += \"Default value in the mode very sensitive: 3\\n\"\ninfoStr += \"-R \" + str(options.align_maxReSeed) + \"\\n\\n\"\n\ninfoStr += \"\"\"-i <func>\nSets a function governing the interval between seed substrings to use during multiseed alignment. For instance, if the read has 30 characters, and seed length is 10, and the seed interval is 6, the seeds extracted will be:\n\nRead: TAGCTACGCTCTACGCTATCATGCATAAAC\nSeed 1 fw: TAGCTACGCT\nSeed 1 rc: AGCGTAGCTA\nSeed 2 fw: CGCTCTACGC\nSeed 2 rc: GCGTAGAGCG\nSeed 3 fw: ACGCTATCAT\nSeed 3 rc: ATGATAGCGT\nSeed 4 fw: TCATGCATAA\nSeed 4 rc: TTATGCATGA\nSince it's best to use longer intervals for longer reads, this parameter sets the interval as a function of the read length, rather than a single one-size-fits-all number. For instance, specifying -i S,1,2.5 sets the interval function f to f(x) = 1 + 2.5 * sqrt(x), where x is the read length. See also: setting function options. If the function returns a result less than 1, it is rounded up to 1. 
Default: the --sensitive preset is used by default, which sets -i to S,1,1.15 in --end-to-end mode and to -i S,1,0.75 in --local mode.\\n\"\"\"\ninfoStr += \"-i \" + options.align_seedInterval + \" Note: we use the same function as in the --very-sensitive preset, f(x) = 1 + 0.5 * sqrt(x)\\n\\n\"\n\ninfoStr += \"\"\"-X/--maxins <int>\nThe maximum fragment length for valid paired-end alignments. E.g. if -X 100 is specified and a paired-end alignment consists of two 20-bp alignments in the proper orientation with a 60-bp gap between them, that alignment is considered valid (as long as -I is also satisfied). A 61-bp gap would not be valid in that case. If trimming options -3 or -5 are also used, the -X constraint is applied with respect to the untrimmed mates, not the trimmed mates.\n\nThe larger the difference between -I and -X, the slower Bowtie 2 will run. This is because larger differences between -I and -X require that Bowtie 2 scan a larger window to determine if a concordant alignment exists. For typical fragment length ranges (200 to 400 nucleotides), Bowtie 2 is very efficient.\n\nDefault: 500.\\n\"\"\"\ninfoStr += \"-X \" + str(options.align_maxInsertLength) + \"\\n\\n\"\n\ninfoStr += \"\"\"-p/--threads NTHREADS\nLaunch NTHREADS parallel search threads (default: 1). Threads will run on separate processors/cores and synchronize when parsing reads and outputting alignments. Searching for alignments is highly parallel, and speedup is close to linear. Increasing -p increases Bowtie 2's memory footprint. E.g. when aligning to a human genome index, increasing -p from 1 to 8 increases the memory footprint by a few hundred megabytes. This option is only available if bowtie is linked with the pthreads library (i.e. if BOWTIE_PTHREADS=0 is not specified at build time).\\n\"\"\"\ninfoStr += \"-p \" + str(options.nThreads) + \"\\n\\n\"\n\ninfoStr += \"\"\"-k <int>\nBy default, bowtie2 searches for distinct, valid alignments for each read. When it finds a valid alignment, it continues looking for alignments that are nearly as good or better. The best alignment found is reported (randomly selected from among best if tied). Information about the best alignments is used to estimate mapping quality and to set SAM optional fields, such as AS:i and XS:i.\n\nWhen -k is specified, however, bowtie2 behaves differently. Instead, it searches for at most <int> distinct, valid alignments for each read. The search terminates when it can't find more distinct valid alignments, or when it finds <int>, whichever happens first. All alignments found are reported in descending order by alignment score. The alignment score for a paired-end alignment equals the sum of the alignment scores of the individual mates. Each reported read or pair alignment beyond the first has the SAM 'secondary' bit (which equals 256) set in its FLAGS field. For reads that have more than <int> distinct, valid alignments, bowtie2 does not guarantee that the alignments reported are the best possible in terms of alignment score. 
-k is mutually exclusive with -a.\n\nNote: Bowtie 2 is not designed with large values for -k in mind, and when aligning reads to long, repetitive genomes large -k can be very, very slow.\\n\"\"\"\ninfoStr += \"Note: if k=0, we do not include this option in the command.\\n\"\ninfoStr += \"-k \" + str(options.align_max_reported_alignments) + \"\\n\\n\"\n\npipelineDoc += infoStr\n\n\niTask += 1\ntask_name = 'align_seq'\ntask_path = pipeline_path / \"Task{:02d}_{}\".format(iTask, task_name)\nintermediateTaskPathList.append(task_path)\nif options.seq_end == 'single-end':\n regexInputFiles = r'^(.+/)*(?P<SAMPLENAME>.+)\\.trimmed\\.fastq(\\.gz)?.*'\nelif options.seq_end == 'paired-end':\n regexInputFiles = r'^(.+/)*(?P<SAMPLENAME>.+?)_?read[12]\\.trimmed\\.fastq(\\.gz)?$'\n@follows(trim_adapter_PE_reads, trim_adapter_SE_reads, mkdir(str(task_path)))\n@transform([trim_adapter_PE_reads, trim_adapter_SE_reads],\n\n # Match any of SE or PE trimmed reads fastq file\n formatter(regexInputFiles),\n\n # Output parameter supplied to next task\n str(task_path) + \"/{SAMPLENAME[0]}.sam\",\n\n # Extra parameters\n str(task_path) + \"/{SAMPLENAME[0]}.summary\",\n task_name, logger, logger_mutex)\ndef align_seq(input_files,\n sam_file,\n summary_file,\n task_name, logger, logger_mutex):\n\n if options.seq_end == 'single-end' and len(input_files) != 2:\n raise Exception(\"Number of input fastq files incorrect for single-end RNA-seq alignment, input_files: %s\" % (input_files,))\n elif options.seq_end == 'paired-end' and len(input_files) != 3:\n raise Exception(\"Number of input fastq files incorrect for paired-end RNA-seq alignment, input_files: %s\" % (input_files,))\n\n with logger_mutex:\n logger.debug(\"align_seq, input_files: \" + \", \".join(input_files))\n\n if options.seq_end == 'single-end':\n readU = input_files[0]\n nValidReadsFilename = input_files[1]\n elif options.seq_end == 'paired-end':\n read1 = input_files[0]\n read2 = input_files[1]\n nValidReadsFilename = input_files[2]\n \n with open(nValidReadsFilename, 'r') as nreadsFile:\n nreads = int(re.match(r'^([0-9.]+).*', nreadsFile.readline()).group(1))\n with logger_mutex:\n logger.debug(\"align_seq, nreads \" + str(nreads))\n\n ## Decompress the input file\n #with gzip.GzipFile(read1_file, mode='r') as gzip_file:\n #input_file_unzipped = re.search(r'(.+)\\.gz', read1_file).group(1)\n #print(input_file_unzipped)\n #with open(input_file_unzipped, 'wb') as gunzip_file:\n #gunzip_file.write(gzip_file.read())\n #nReadsApprox = estimateNbLines(input_file_unzipped, 10*1024*1024) / 4.\n ## Unzipped fastq file can be deleted to save storage\n #os.remove(input_file_unzipped)\n # Approximate computation time in seconds per read per thread measured for test run\n comp_time = ( (3.3 if options.align_nMismatches == 1 else 1)*\n (2.7 if options.align_max_reported_alignments > 0 else 1)*\n ((nreads*(((30.1)*16)/(2*2e5)))/options.nThreads) )\n\n if options.seq_end == 'single-end':\n bowtieInputOptions = '-U ' + readU\n elif options.seq_end == 'paired-end':\n bowtieInputOptions = \" -1 {} -2 {} \".format(read1, read2)\n\n cmd = cmd_source_bash_profile +\\\n cmdProgressRequest + '--progress \"Align reads to genome\" --n 3 && ' +\\\n \"bowtie2 \" +\\\n \" -x \\\"{}\\\" \".format(str(options.align_indexed_ref_genome_path)) +\\\n bowtieInputOptions +\\\n \" -S \\\"{}\\\" \".format(sam_file) +\\\n \" --{} \".format(options.phredEncoding) +\\\n \" --{} \".format(options.align_alignmentMode) +\\\n \" -N \" + str(options.align_nMismatches) +\\\n \" -L \" + str(options.align_seedLength) +\\\n 
\" -i {} \".format(options.align_seedInterval) +\\\n \" -D {} \".format(options.align_maxAlignAttempts) +\\\n \" -R {} \".format(options.align_maxReSeed) +\\\n \" -X \" + str(options.align_maxInsertLength) +\\\n \" -p \" + str(options.nThreads)\n if options.align_max_reported_alignments > 0:\n cmd += \" -k \" + str(options.align_max_reported_alignments)\n\n with logger_mutex:\n logger.debug(cmd)\n logger.debug(\"estimated computation time \" + str(datetime.timedelta(seconds=comp_time)))\n\n try:\n stdout_res, stderr_res = \"\",\"\"\n walltime = datetime.timedelta(seconds=max(2.5*comp_time, 60))\n if walltime < datetime.timedelta(hours=6):\n job_queue_name = short_queue\n else:\n job_queue_name = long_queue\n job_other_options = \" -pe smp \" + str(options.nThreads) +\\\n \" -q \" + job_queue_name +\\\n \" -l h_rt=\" + printTimeDelta(walltime) +\\\n \" -l h_vmem=8G,virtual_free=8G\" +\\\n \" -cwd\"\n with logger_mutex:\n logger.debug(\"Submitting job, cmd:\\n\" + cmd + \"\\njob options:\" + job_other_options)\n # ruffus.drmaa_wrapper.run_job\n stdout_res, stderr_res = run_job(cmd_str=cmd, job_name=task_name, logger=logger,\n drmaa_session=drmaa_session, run_locally=run_locally,\n job_other_options=job_other_options, retain_job_scripts=False)\n\n # relay all the stdout, stderr, drmaa output to diagnose failures\n except error_drmaa_job as err:\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, -1)\n raise Exception(\"\\n\".join(map(str,[\"Failed to run:\", cmd, err, stdout_res, stderr_res])))\n\n std_err_string = \"\".join([line.decode() if isinstance(line, bytes) else line for line in stderr_res])\n with logger_mutex:\n logger.debug(std_err_string)\n\n with open(summary_file, 'w') as summaryFile:\n summaryFile.write(str(std_err_string))\n\n with logger_mutex:\n logger.debug(\"Alignment of reads finished.\")\n\n\n\n#############################################################################\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"Convert alignment file SAM to sorted BAM.\n\nSoftware: samtools Version: 1.3.1 (using htslib 1.3.1)\n\n\"\"\"\npipelineDoc += infoStr\n\n\niTask += 1\ntask_name = 'convert_sam_to_bam'\ntask_path = pipeline_path / \"Task{:02d}_{}\".format(iTask, task_name)\nintermediateTaskPathList.append(task_path)\n@follows(align_seq, mkdir(str(task_path)))\n@transform(align_seq,\n\n # sam file\n formatter(r'^(.+/)*(?P<SAMPLENAME>.+)\\.sam$'),\n\n # sorted bam file\n str(task_path) + \"/{SAMPLENAME[0]}_sorted.bam\",\n\n # Extra parameters\n \"{SAMPLENAME[0]}\",\n task_name, logger, logger_mutex)\ndef convert_sam_to_bam(sam_file,\n sorted_bam_file,\n sample_name,\n task_name, logger, logger_mutex):\n\n # Remark: beware of the option -m XXXG that sets memory limit for samtools sort,\n # if a float is passed, it seems that samtools takes the value as bytes and will create\n # hundreds of thousands of temporary files, potentially collapsing the filesystem.\n cmd = cmd_source_bash_profile +\\\n cmdProgressRequest + '--progress \"Convert alignment file SAM to sorted BAM\" --n 4 && ' +\\\n \" samtools view -b -h -u \\\"{}\\\"\".format(sam_file) +\\\n \" | samtools sort -@ {:d} -m {:d}M -T \\\"{}\\\" -o \\\"{}\\\"\".format(options.samtools_sort_nthread,\n options.samtools_sort_max_mem_per_thread,\n options.samtools_sort_tmp_dir,\n sorted_bam_file) +\\\n \" && samtools index \\\"{}\\\"\".format(sorted_bam_file)\n with logger_mutex:\n logger.debug(cmd)\n\n try:\n stdout_res, stderr_res = \"\",\"\"\n walltime = datetime.timedelta(hours=5)\n if 
walltime < datetime.timedelta(hours=6):\n job_queue_name = short_queue\n else:\n job_queue_name = long_queue\n job_other_options = \" -pe smp \" + str(options.samtools_sort_nthread) +\\\n \" -q \" + job_queue_name +\\\n \" -l h_rt=\" + printTimeDelta(walltime) +\\\n \" -l virtual_free={}M\".format(options.samtools_sort_max_mem)\n\n # ruffus.drmaa_wrapper.run_job\n stdout_res, stderr_res = run_job(cmd_str=cmd, job_name=task_name, logger=logger,\n drmaa_session=drmaa_session, run_locally=run_locally,\n job_other_options=job_other_options, retain_job_scripts=False)\n\n std_err_string = \"\".join([line.decode() if isinstance(line, bytes) else line for line in stderr_res])\n with logger_mutex:\n logger.debug(std_err_string)\n\n # relay all the stdout, stderr, drmaa output to diagnose failures\n except error_drmaa_job as err:\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, -1)\n raise Exception(\"\\n\".join(map(str,[\"Failed to run:\" + cmd, err, stdout_res, stderr_res])))\n\n with logger_mutex:\n logger.debug(task_name + \" finished.\")\n\n\n\n#############################################################################\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"\n\nFilter alignments by quality and size and report statistics.\n\nWe filter reads that do not align concordantly on the genome (mostly very short reads that align almost randomly).\n\nIn order to properly detect the strand of the RNA reads, we compare the number of Forward reads and Reverse reads in the first rRNA region. The strand with the higher number of reads is the + strand. We convert the BAM file to BEDPE file (BED with paired-end information with two read mates in the same record) or directly to BED file. Using the correct strand information, we run the python script pipeline_roesti_bedpe2bed.py to convert the paired-end reads to single BED reads corresponding to the physical RNA fragments with the strand correctly assigned. 
Then, we remove aligned reads that overlap with the rRNA or tRNA regions (optional).\n\nSoftware: samtools Version: 1.3.1 (using htslib 1.3.1)\nSoftware: bedtools v2.26.0\nSoftware: sort (GNU coreutils) 8.4\n\n\"\"\"\npipelineDoc += infoStr\n\n\niTask += 1\ntask_name = 'filter_alignments'\ntask_path = pipeline_path / \"Task{:02d}_{}\".format(iTask, task_name)\nintermediateTaskPathList.append(task_path)\n@follows(convert_sam_to_bam, mkdir(str(task_path)))\n@transform(convert_sam_to_bam,\n\n # sorted bam file\n formatter(r'^(.+/)*(?P<SAMPLENAME>.+)_sorted\\.bam$'),\n\n # output files\n [str(task_path) + '/{SAMPLENAME[0]}.filtered.bed',\n str(task_path) + '/{SAMPLENAME[0]}.filtered.bed.nreads'],\n\n # Sample name\n \"{SAMPLENAME[0]}\",\n # Input path\n \"{path[0]}\",\n # Output path\n str(task_path),\n task_name, logger, logger_mutex)\ndef filter_alignments(sorted_bam_file,\n output_files,\n sample_name,\n input_path,\n output_path,\n task_name, logger, logger_mutex):\n\n filter_script_filename = str(scriptPath / 'pipeline_roesti_filter_script.sh')\n filter_alignments_nthreads = min(options.nThreads, 8)\n\n cmd = cmd_source_bash_profile +\\\n cmdProgressRequest +\\\n '--progress \"Filter alignments by quality and size and report statistics\" --n 5 && ' +\\\n \" \\\"{}\\\"\".format(filter_script_filename) +\\\n \" \\\"{}\\\"\".format(sample_name) +\\\n \" \\\"{}\\\"\".format(input_path) +\\\n \" \\\"{}\\\"\".format(output_path) +\\\n \" \" + str(options.filter_alignments_quality_threshold) +\\\n \" false\" +\\\n \" \\\"{}\\\"\".format(options.rRNA_bedfile) +\\\n \" \\\"{}\\\"\".format(options.rRNA_tRNA_bedfile) +\\\n \" \\\"{}\\\"\".format(str(scriptPath)) +\\\n \" \" + (\"true\" if options.remove_rRNA else \"false\") +\\\n \" \" + options.seq_end +\\\n \" \" + str(filter_alignments_nthreads)\n with logger_mutex:\n logger.debug(cmd)\n\n try:\n stdout_res, stderr_res = \"\",\"\"\n walltime = datetime.timedelta(hours=20)\n if walltime < datetime.timedelta(hours=6):\n job_queue_name = short_queue\n else:\n job_queue_name = long_queue\n job_other_options = \" -pe smp \" + str(filter_alignments_nthreads) +\\\n \" -q \" + job_queue_name +\\\n \" -l h_rt=\" + printTimeDelta(walltime) +\\\n \" -l h_vmem=24G,virtual_free=24G\"\n\n # ruffus.drmaa_wrapper.run_job\n stdout_res, stderr_res = run_job(cmd_str=cmd, job_name=task_name, logger=logger,\n drmaa_session=drmaa_session, run_locally=run_locally,\n job_other_options=job_other_options, retain_job_scripts=False)\n\n std_err_string = \"\".join([line.decode() if isinstance(line, bytes) else line for line in stderr_res])\n with logger_mutex:\n logger.debug(std_err_string)\n\n # relay all the stdout, stderr, drmaa output to diagnose failures\n except error_drmaa_job as err:\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, -1)\n raise Exception(\"\\n\".join(map(str,[\"Failed to run:\" + cmd, err, stdout_res, stderr_res])))\n\n time.sleep(sleepTimeFilesystem)\n\n # Read results from samtools read counts\n nreads_filename = output_path + '/' + sample_name + '.bed.nreads'\n wait_for_file(nreads_filename, sleepTimeFilesystem)\n with open(nreads_filename) as nreads_file:\n nreads_bed = int(next(nreads_file).split()[0])\n os.remove(nreads_filename)\n\n # nreads_filename = output_path + '/' + sample_name + '.filtered.bed.nreads'\n nreads_filename = output_files[1]\n wait_for_file(nreads_filename, sleepTimeFilesystem)\n with open(nreads_filename) as nreads_file:\n nreads_bed_filtered = int(next(nreads_file).split()[0])\n\n # total nb of raw reads\n if 
options.seq_end == 'paired-end':\n nreads_raw_filename = output_path + '/../Task01_trim_adapter_PE_reads/' + sample_name + '_.trimmed.nreads'\n if not Path(nreads_raw_filename).exists():\n nreads_raw_filename = output_path + '/../Task01_trim_adapter_PE_reads/' + sample_name + '.trimmed.nreads'\n elif options.seq_end == 'single-end':\n nreads_raw_filename = output_path + '/../Task02_trim_adapter_SE_reads/' + sample_name + '_.trimmed.nreads'\n if not Path(nreads_raw_filename).exists():\n nreads_raw_filename = output_path + '/../Task02_trim_adapter_SE_reads/' + sample_name + '.trimmed.nreads'\n wait_for_file(nreads_raw_filename, sleepTimeFilesystem)\n with open(nreads_raw_filename) as nreads_file:\n nreads_raw = int(next(nreads_file).split()[0])\n\n # Write percentage of reads passing the filter in CSV file\n with open(output_path + '/' + sample_name + '.nreads','w') as nreads_file:\n nreads_file.write('nreads_raw,' + str(nreads_raw) + '\\n')\n nreads_file.write('nreads_bed,' + str(nreads_bed) + '\\n')\n nreads_file.write('nreads_bed_filtered,' + str(nreads_bed_filtered) + '\\n')\n nreads_file.write('nreads_bed_filtered/nreads_bed,' + str(nreads_bed_filtered/nreads_bed) + '\\n')\n\n with logger_mutex:\n logger.debug(task_name + \" finished.\")\n\n\n\n#############################################################################\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"\nCompute genome coverage and fragment count of reads (mRNA).\n\n[script pipeline_roesti_genome_coverage.sh] We compute the fragment count per CDS using `bedtools intersect`, with strand specific option and setting the fractional overlap threshold to 0.5, meaning that at least half of the fragment has to overlap with the feature to be counted. We compute the per base coverage for plus and minus strand using `bedtools genomecov`.\n\n[script pipeline_roesti_mean_coverage_per_CDS.py] We compute the average coverage per base for all CDS by integrating the per-base coverage along the feature and dividing by the length of the feature. 
This is in theory more precise than just counting the fragments, since fragment size distribution may vary between genes.\n\"\"\"\npipelineDoc += infoStr\n\n\n# Note that we need to sort the BED file, otherwise bedtools intersect will use huge memory (70G)\n# when computing the number of reads per gene and is much slower.\n# We also have to make sure that the CDS BED file is also sorted.\n# We sort the CDS BED file here, from the python ruffus script, and not in the task function below,\n# because we only need to do it once, otherwise each parallel job will try to sort the CDS BED file,\n# resulting in error at runtime.\nscript_filename = str(scriptPath / 'pipeline_roesti_sort_CDS_BED.sh')\ncmd = cmd_source_bash_profile +\\\n \" \\\"{}\\\"\".format(script_filename) +\\\n \" \\\"{}\\\"\".format(options.genomeCDSBedFile)\nprint(cmd)\ncmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)\ncmd_output = re.sub(r'\\\\n','\\n', str(cmd_output))\nprint(cmd_output)\n\n\niTask += 1\ntask_name = 'genome_coverage_fragment_count'\ntask_path = pipeline_path / \"Task{:02d}_{}\".format(iTask, task_name)\n# Only run this analysis for rna data type\nif options.library_type in ['rna-seq', 'hydro-trna-seq', 'ribo-seq']:\n task_path.mkdir(exist_ok=True)\n finalResultsPath = task_path\n@active_if(options.library_type in ['rna-seq', 'hydro-trna-seq', 'ribo-seq'])\n@follows(filter_alignments, mkdir(str(task_path)))\n@transform(filter_alignments,\n\n # BED file of reads (inserts)\n formatter(r'^(.+/)*(?P<SAMPLENAME>.+)\\.filtered\\.bed$'),\n\n # output file\n str(task_path) + \"/{SAMPLENAME[0]}.strandp_coverage.bed\",\n\n # Sample name\n \"{SAMPLENAME[0]}\",\n # Input path\n \"{path[0]}\",\n # Output path\n str(task_path),\n task_name, logger, logger_mutex)\ndef genome_coverage_fragment_count(reads_bed_file,\n genome_coverage_file,\n sample_name,\n input_path,\n output_path,\n task_name, logger, logger_mutex):\n\n # For some reason the formatter filtering does not work and we have to apply a regex again to choose the .bed file only.\n if len(reads_bed_file) != 1:\n reads_bed_file = [fn for fn in reads_bed_file if re.search(r'^(.+/)*(?P<SAMPLENAME>.+)\\.filtered\\.bed$', fn)][0]\n with logger_mutex:\n logger.debug(task_name + \", filtered input file: \" + reads_bed_file)\n\n nreads_filename = sample_name + '.filtered.bed.nreads'\n nreads_filepath = Path(input_path) / nreads_filename\n if nreads_filepath.is_file():\n with nreads_filepath.open() as f:\n nreads = int(next(f).split()[0])\n\n nreads_filename = sample_name + '.nreads'\n nreads_filepath = Path(input_path) / nreads_filename\n if nreads_filepath.is_file():\n nreads_df = pd.read_csv(nreads_filepath, index_col=0, header=None)\n print(\"nreads_df:\\n\", nreads_df)\n nreads_bed = int(nreads_df.loc['nreads_bed', 1]) # this is without removing the rRNA reads\n print(\"nreads_bed:\", nreads_bed)\n else:\n nreads = 10e6\n print(\"sample:\", sample_name, \"nreads:\", nreads)\n\n filter_script_filename = str(scriptPath / 'pipeline_roesti_genome_coverage.sh')\n\n cmd = cmd_source_bash_profile +\\\n cmdProgressRequest + '--progress \"Compute genome coverage and fragment count of reads (mRNA)\" --n 7 && ' +\\\n \" \\\"{}\\\"\".format(filter_script_filename) +\\\n \" \\\"{}\\\"\".format(reads_bed_file) +\\\n \" \\\"{}\\\"\".format(sample_name) +\\\n \" \\\"{}\\\"\".format(input_path) +\\\n \" \\\"{}\\\"\".format(output_path) +\\\n \" false\" +\\\n \" \\\"{}\\\"\".format(str(scriptPath)) +\\\n \" \\\"{}\\\"\".format(str(options.genomeBedFile)) +\\\n \" 
\\\"{}\\\"\".format(str(options.genomeCDSBedFile)) +\\\n \" \\\"{}\\\"\".format(str(nreads_bed)) +\\\n \" \\\"{}\\\"\".format(str(options.rRNA_bedfile))\n with logger_mutex:\n logger.debug(cmd)\n\n try:\n stdout_res, stderr_res = \"\",\"\"\n walltime = datetime.timedelta(hours=20)\n if walltime < datetime.timedelta(hours=6):\n job_queue_name = short_queue\n else:\n job_queue_name = long_queue\n # Large number of reads might require a fairly large memory for the bedtools intersect,\n # even if the reads are sorted. Last example case was 55M reads and required 16G of memory.\n # small benchmark test for bacterial genome M. feriruminatoris, using /usr/bin/time -v\n # that reports maximum resident memory (approximate):\n # 100k reads, 57M\n # 1M reads, 530M\n # 5M reads, 2670M\n # 20M reads, 10'600M\n # approximate memory usage is 550M per 1M reads\n memory = int((nreads/1e6)*600 + 2000)\n with logger_mutex:\n logger.debug(\"Launching job, virtual_free={:d}M\".format(memory))\n job_other_options = \" -pe smp \" + str(1) +\\\n \" -q \" + job_queue_name +\\\n \" -l h_rt=\" + printTimeDelta(walltime) +\\\n \" -l virtual_free={:d}M\".format(memory)\n\n # ruffus.drmaa_wrapper.run_job\n stdout_res, stderr_res = run_job(cmd_str=cmd, job_name=task_name, logger=logger,\n drmaa_session=drmaa_session, run_locally=run_locally,\n job_other_options=job_other_options, retain_job_scripts=False)\n\n std_err_string = \"\".join([line.decode() if isinstance(line, bytes) else line for line in stderr_res])\n with logger_mutex:\n logger.debug(std_err_string)\n\n except error_drmaa_job as err:\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, -1)\n raise Exception(\"\\n\".join(map(str,[\"Failed to run:\" + cmd, err, stdout_res, stderr_res])))\n\n with logger_mutex:\n logger.debug(task_name + \" finished.\")\n\n finalResultsPath = task_path\n\n\n\n#############################################################################\ninfoStr = \"\\n\\n#############################\\n\"\ninfoStr += \"\"\"\n\nPCR duplicates.\nNote: In the ribosome profiling data analysis, we do not try to remove PCR duplicates. PCR duplicates can only be detected with high confidence when the probability that the library contains RNA inserts (fragments) of the same length mapping to the exact same position in the genome is very small. In the ribosome footprint library, fragment lengths are distributed around 30 nt, and we expect many footprints to map to the same position. Alternative approaches to detect PCR duplicates include adding short randomized nucleotides at both ends of the fragments during library preparation. See Lecanda, A., Nilges, B. S., Sharma, P., Nedialkova, D. D., Schwarz, J., Vaquerizas, J. M., & Leidel, S. A. (2016). Dual randomization of oligonucleotides to reduce the bias in ribosome-profiling libraries. 
Methods, 10–12.\n\nNote: In the RNA-seq data analysis, we do not remove PCR duplicates either.\n\n\n\"\"\"\npipelineDoc += infoStr\n\n\n#############################################################################\niTask += 1\ntask_name = 'delete_intermediate_files'\n\n\n@follows(genome_coverage_fragment_count)\n@active_if(options.delete_intermediate_files)\ndef delete_intermediate_files():\n\n print(\"Delete intermediate files.\")\n for p in intermediateTaskPathList:\n for f in p.glob('*'):\n print(\"deleting file:\", f)\n f.unlink()\n Path(p).rmdir()\n\n with logger_mutex:\n logger.debug(task_name + \" finished.\")\n\n\n\n#############################################################################\niTask += 1\ntask_name = 'write_jobid_files'\n\n\n@follows(delete_intermediate_files)\ndef write_jobid_files():\n\n # Check that final output files exist\n # task_path is the last task's output directory\n fileList0 = [f for f in Path(task_path).iterdir() if f.is_file()]\n comments = ''\n status = 2\n\n # We just check that the coverage files for the plus and minus strands exist\n # and sum up at least one read.\n for strand in ['plus', 'minus']:\n if strand == 'plus':\n regex = re.compile(r'^(.+/)*(?P<SAMPLENAME>.+)\\.strandp_coverage\\.bed$')\n elif strand == 'minus':\n regex = re.compile(r'^(.+/)*(?P<SAMPLENAME>.+)\\.strandm_coverage\\.bed$')\n fileList = [f for f in fileList0 if regex.search(f.name)]\n if len(fileList) > 0:\n file = fileList[0]\n df = pd.read_table(str(file))\n nReads = df.iloc[:, 2].sum()\n if nReads < 1:\n status = -1\n comments = comments + 'strand {} coverage file sums to 0 reads.\\n'.format(strand)\n else:\n status = -1\n comments = comments + 'strand {} coverage file not found.\\n'.format(strand)\n\n # We also test that the CDS_values file exists, though we do not consider\n # it an error if it does not exist. 
The user might only want to compute the coverage,\n # with no annotation count values.\n regex = re.compile(r'^(.+/)*(?P<SAMPLENAME>.+)\\.CDS_values\\.csv$')\n fileList = [f for f in fileList0 if regex.search(f.name)]\n if len(fileList) == 0:\n # Not an error: only record a comment and leave the status unchanged.\n comments = comments + 'CDS_values.csv file not found.\\n'\n\n # filename = 'pipeline_done.{:d}.txt'.format(options.jobid)\n # with (outputPath / filename).open('w') as f:\n # f.write('')\n\n if options.sendMessageToWebServer:\n update_analysis(options.analysisId, status)\n\n#############################################################################\n\n\npipelineDocFile.write_text(pipelineDoc)\n\n# Change the history file for Ruffus in order to use several different pipelines in the same root folder\nhistory_file = \".\" + pipeline_name + \".ruffus_history.sqlite\"\noptions.history_file = history_file\npipeline_printout(history_file=history_file)\n# gnu_make_maximal_rebuild_mode = True\ngnu_make_maximal_rebuild_mode = False\ncmdline.run(options, multithread=options.njobs, logger=logger, verbose=options.verbose,\n gnu_make_maximal_rebuild_mode=gnu_make_maximal_rebuild_mode)\nif not run_locally:\n drmaa_session.exit()\n","sub_path":"src/pipeline_roesti.py","file_name":"pipeline_roesti.py","file_ext":"py","file_size_in_byte":70709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"533549921","text":"a=int(input(\"Enter first number:\"))\nb=int(input(\"Enter second number:\"))\nc=int(input(\"Enter third number:\"))\ndiv=int(input(\"Enter a number to check the divisibility in the list: \"))\nd=[]\ne=[]\nd.append(a)\nd.append(b)\nd.append(c)\nfor i in range(0,3):\n for j in range(0,3):\n for k in range(0,3):\n if(i!=j and j!=k and k!=i):\n # build the three-digit number d[i]d[j]d[k] from three distinct positions\n m=(d[i]*1000+d[j]*100+d[k]*10)\n p=m//10\n e.append(p)\nprint(e)\nle=len(e)\nfor l in range(0,le):\n if(e[l]%div==0):\n print(\"the list element [*\",e[l],\"*] is divisible by\",div)\n print(e[l])\n print(end=\"\")\n\n \n \n\n","sub_path":"task3-check the divisibility.py","file_name":"task3-check the divisibility.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"102247465","text":"#! 
python3\n#-*- encoding: utf-8 -*-\n\nimport os\nimport tkinter as tk\nfrom tkinter import filedialog\nimport lxml.etree as ET\n\ndef OpenFile():\n root=tk.Tk()\n root.withdraw()\n \n file_path = filedialog.askopenfilename(title = \"选择font.xml所在的路径\",filetypes=[(\"font.xml\", \"font.xml\")])\n return file_path\n\n#----------------------------------------------------------------------\ndef TransformXml(fontfile):\n \"\"\"\"\"\"\n parser = ET.XMLParser(remove_blank_text=True)\n tree = ET.parse(fontfile, parser)\n root=tree.getroot()\n \n chars=[]\n for node in root:\n new_value=ord(node.attrib['attr_key'])\n #char=(node.attrib['attr_key'],node.attrib['attr_value'],new_value)\n ori_name=node.attrib['attr_value']+\".png\"\n new_name=str(new_value)+\".png\"\n char=(ori_name,new_name)\n chars.append(char)\n \n node.attrib['attr_value']=str(new_value)\n #print(node.attrib['attr_key'])\n tree.write(fontfile,encoding=\"UTF-8\",pretty_print=True,xml_declaration=True)\n for i in range(10):\n new_value=ord(str(i))\n #char=(node.attrib['attr_key'],node.attrib['attr_value'],new_value)\n ori_name=str(i)+\".png\"\n new_name=str(new_value)+\".png\"\n char=(ori_name,new_name)\n chars.append(char)\n for ch in range(65, 91): \n new_value=str(ch)\n #char=(node.attrib['attr_key'],node.attrib['attr_value'],new_value)\n ori_name=chr(ch)+\".png\"\n new_name=str(new_value)+\".png\"\n char=(ori_name,new_name)\n chars.append(char)\n for ch in range(97, 123): \n new_value=str(ch)\n #char=(node.attrib['attr_key'],node.attrib['attr_value'],new_value)\n ori_name=chr(ch)+\".png\"\n new_name=str(new_value)+\".png\"\n char=(ori_name,new_name)\n chars.append(char) \n return chars\n\n#----------------------------------------------------------------------\ndef RenamePng(dirpath,changevalue):\n \"\"\"\"\"\"\n for root,ds,fs in os.walk(dirpath):\n #print(fs)\n for filename in fs:\n filepath=\"%s/%s\"%(root,filename)\n for old,new in changevalue:\n if old==filename:\n oldfile=\"%s/%s\"%(root,filename)\n newfile=\"%s/%s\"%(root,new)\n os.rename(oldfile,newfile)\n #for root1, dirs,files in os.walk(ds):\n #for file in files:\n #print(file)\n\n#----------------------------------------------------------------------\ndef main():\n \"\"\"\"\"\"\n print('go')\n filepath=OpenFile()\n changevalue=TransformXml(filepath)\n dirpath,a = os.path.split(filepath)\n dirpath=os.path.normpath(os.path.join(dirpath,\"../../res/ui/image/font/\"))\n #dirpath=os.path.relpath(\"../..\",dirpath)\n print(os.path.normpath(dirpath))\n RenamePng(dirpath,changevalue)\n print(ord(\"大\"))\n\nif __name__==\"__main__\":\n main()\n","sub_path":"otherdata/Tool/billyfont/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"229721879","text":"class Application:\r\n def __init__(self, urls, middlewares):\r\n self.urls = urls\r\n self.middlewares = middlewares\r\n\r\n def __call__(self, environ, start_response):\r\n \"\"\"\r\n :param environ: dictionary of request data passed in by the server\r\n :param start_response: function used to send the response status and headers back to the server\r\n \"\"\"\r\n url = environ['PATH_INFO']\r\n\r\n for middleware in self.middlewares:\r\n url = middleware(url=url)\r\n\r\n if url in self.urls:\r\n view = self.urls[url]\r\n\r\n code_response, body_response = view()\r\n start_response(code_response, [('Content-Type', 'text/html')])\r\n return [body_response.encode('utf-8')]\r\n else:\r\n start_response('404 Not Found', [('Content-Type', 'text/html')])\r\n return [b\"PAGE NOT 
FOUND\"]\r\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"298831078","text":"import fleet\nimport weaponsystems\nclass players():\n def __init__(self,name,addr,port):\n self.address = addr.split(',')\n self.address = self.address[0][2:-1]\n self.owned_fleets = []\n self.name = name\n self.port = port\n print(\"\\nMade player! Name \" + self.name + \" address \" + self.address)\n\n def add_fleet(self, fleet):\n self.owned_fleets.append(fleet)\n\n def populatefleet(self,fleetname): #Output is a populated fleet\n\n new_fleet = fleet.fleet(str(self.name) + \" \" + str(fleetname))\n\n fleetfile = open(str(fleetname),'r') #Read fleet file\n lines = fleetfile.readlines()\n\n #2nd line is number of fleets\n for i in range(0,int(lines[1])):\n parsed_fleet = lines[i+2].split(\" \")\n for j in range(0,int(parsed_fleet[1])): #pass\n new_fleet.add_ship_to_fleet(self.parse_ship_and_into_fleet(new_fleet,parsed_fleet))\n #should be put into its own fleet somewhere\n #3rd+ are fleets\n\n\n #make a ship\n #generate ship\n return new_fleet\n\n def parse_ship_and_into_fleet(self, new_fleet,parsed_fleet):\n import ship\n shipfile = open(parsed_fleet[0].replace('.fleet','.ship'),'r')\n shiplines = shipfile.readlines()\n name, hitpoints, targettingrange, speed, inertia, signature,weapon = map(str, shiplines)\n parsed_weapon = weaponsystems.parse_weapon(weapon)\n ship = ship.ship(int(hitpoints),50,int(targettingrange),int(speed),int(inertia),name,0,0,0,new_fleet,parsed_weapon)\n # hitpoints, damage, targettingrange, speed, inertia, name, x, y, z, fleet, weapons):\n\n return ship\n# shipspecs.append(input(\"Name of - $ \"))\n# shipspecs.append(input(\"Hitpoints of Ship $ \"))\n# shipspecs.append(input(\"Targetting Range of Ship $ \"))\n# shipspecs.append(input(\"Speed of Ship $ \"))\n# shipspecs.append(input(\"Inertia of Ship $ \"))\n# shipspecs.append(input(\"Signature of Ship $ \"))\n","sub_path":"players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4281334","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n##\n# Imports\n# For more information, see https://www.python.org/dev/peps/pep-0008/#imports\n##\n\nimport os\nimport json\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nfrom palette import Palette\n\n\nclass Copic(Palette):\n # Dictionary holding fetched colors\n sets = {\n 'copic': []\n }\n\n # Identifier\n identifier = 'copic'\n\n # Copyright notices\n copyright = {\n 'xml': '\\n Copic® and related trademarks are the property of\\n Too Marker Corporation (https://www.toomarker.co.jp/en)\\n ',\n 'gpl': '##\\n# Copic® and related trademarks are the property of\\n# Too Marker Corporation (https://www.toomarker.co.jp/en)\\n##\\n',\n }\n\n\n def __init__(self):\n super().__init__()\n\n\n ##\n # Fetches Copic® colors\n #\n # Valid `set_name` parameter:\n # - 'copic', currently 289 colors\n ##\n def fetch(self, set_name='copic'):\n # One baseURL to rule them all\n base_url = 'https://www.copicmarker.com/collections/collect'\n\n # Scraping Copic® colors from HTML\n html = urlopen(base_url)\n soup = BeautifulSoup(html, 'lxml')\n\n for color_tile in soup.find('div', {'class': 'collection-color--desktop'}).find_all('div', {'class': 'product-item-hex'}):\n data_name = color_tile['data-name']\n data_list = 
data_name.split(' ')\n\n hexadecimal = color_tile['style'][12:-8]\n rgb_list = tuple(int(hexadecimal.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))\n rgb = [str(i) for i in rgb_list]\n\n color = {}\n color['code'] = data_list.pop(0)\n color['rgb'] = 'rgb(' + ','.join(rgb) + ')'\n color['hex'] = hexadecimal.upper()\n color['name'] = ' '.join(data_list)\n\n self.sets[set_name].append(color)\n\n print('Loading ' + color['code'] + ' in set \"' + set_name + '\" .. done')\n","sub_path":"lib/copic.py","file_name":"copic.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"233778730","text":"def main():\n problem1()\n problem2()\n problem3()\n problem4()\n\n # Created an array and used built-in functions to remove items from the array\n\n\ndef problem1():\n arrayForProblem2 = ['Kenn', 'Kevin', 'Erin', 'Meka']\n print(arrayForProblem2[2])\n print(len(arrayForProblem2))\n print(arrayForProblem2.pop(1))\n print(arrayForProblem2.pop(2))\n\n#Python program that creates a function that has a loop that quits with ‘q’. If the user doesn't enter 'q', ask them to input another string.\n\n\n\ndef problem2():\n userInput = \"\"\n while(userInput != 'q'):\n userInput = input(\"Enter something to quit the program\")\n\ndef problem3():\n myPeople = {\n \"Jonathan\": \"John\",\n \"Michael\":\"Mike\",\n \"William\":\"Bill\",\n \"Robert\":\"Rob\"\n }\n print(myPeople)\n print(myPeople[\"William\"])\n\ndef problem4():\n numArray = [1,2,3,4,5]\n for i in range( len(numArray) - 1, -1, -1) :\n print(i)\n\n\ndef problem5():\n\n totalArray = [1,2,3,4,5,6,7,8,9,10]\n\n playerOne = int(input(\"how many numbers in an array are higher, lower, or equal to it.\"))\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"graded_classwork.py","file_name":"graded_classwork.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"401427146","text":"'''\r\nCreated on 2 Dec 2017\r\n\r\n@author: marashid\r\n'''\r\nimport os, sys\r\nfrom datetime import datetime, timedelta, date\r\nimport logging\r\n\r\nfrom dropbox.exceptions import ApiError\r\n\r\nimport csv_processor_for_drpbx_files_backup\r\nimport file_metadata_reader\r\nimport dropbox_api_gateway\r\nfrom my_logging import MyFileHandler\r\n\r\n##### Function Definitions #####\r\ndef _get_file_metadata(_local_file, required_metadata):\r\n '''\r\n returns ONLY the required_metadata for a given _local_file\r\n \r\n @param _local_file :str -the _local_file to enquire a metadata for\r\n @param required_metadata :str -metadata required for the program\r\n '''\r\n try:\r\n loc_file_meta_data = (file_metadata_reader.get_metadata_for_single_file(_local_file))\r\n except Exception as exp:\r\n logging.error(\"Couldn't get metadata for the local _local_file {} due to following exception: {}\".format(_local_file, exp.__str__()))\r\n print(\"Couldn't get metadata for the local _local_file {} due to following exception: {}\".format(_local_file, exp.__str__()))\r\n return None\r\n \r\n# print(datetime.fromtimestamp(loc_file_meta_data['modify_time']))\r\n metadata = datetime.fromtimestamp(loc_file_meta_data[required_metadata])\r\n return metadata\r\n\r\n\r\ndef _get_drpbx_file_metadata(_backup_file, required_metadata, _local_file):\r\n '''\r\n returns ONLY the required_metadata for a given _backup_file, if the file doesn't exist on the server then create it\r\n \r\n @param _backup_file :str -the _backup_file 
to enquire a metadata for\r\n @param required_metadata :str -metadata required for the program\r\n '''\r\n try:\r\n drpbx_file_meta_data = dropbox_api_gateway.get_my_drpbx_file_metadata(_backup_file)\r\n \r\n except ApiError as err:\r\n ## if the _backup_file doesn't exist on dropbox, we will create the _backup_file for first time\r\n if(dropbox_api_gateway.no_such_file_on_dropbox(err)):\r\n logging.warning(\"specified _backup_file is not found on Dropbox, creating dropbox _backup_file {}...\".format(_backup_file))\r\n print(\"specified _backup_file is not found on Dropbox, creating dropbox _backup_file {}...\".format(_backup_file))\r\n dropbox_api_gateway.backup(_local_file, _backup_file)\r\n _update_uploaded_files_counter()\r\n else:\r\n logging.error(\"error returned from ApiError: \" + err.error.__str__())\r\n print(\"error returned from ApiError: \" + err.error.__str__())\r\n return None\r\n \r\n except Exception as exp:\r\n logging.error(\"Couldn't get metadata for the dropbox path {} due to following exception: {}\".format(_backup_file, exp.__str__()))\r\n print(\"Couldn't get metadata for the dropbox path {} due to following exception: {}\".format(_backup_file, exp.__str__()))\r\n return None\r\n \r\n metadata = drpbx_file_meta_data[required_metadata]\r\n return metadata\r\n\r\n\r\ndef _get_leading_time(loc_modtime, drpbx_modtime):\r\n '''\r\n the dropbox server time will always be ahead of local time as uploading file from client will take a few seconds or minutes\r\n we will ignore updates in less than 5 hours, so add 5 hours to the local modify time\r\n '''\r\n loc_modtime_added_5hrs = loc_modtime + timedelta(hours=5)\r\n \r\n if(loc_modtime > drpbx_modtime):\r\n return loc_modtime\r\n ## ignore dropbox lead time if it's less than 5 hours\r\n elif(drpbx_modtime > loc_modtime_added_5hrs): \r\n return drpbx_modtime\r\n else:\r\n return None\r\n\r\n \r\ndef _backup_local2_drpbx(_local_path, _backup_path):\r\n '''\r\n executive function that checks both local and backup files' modified time, backs up the local file if more than 5 hours ahead using the dropbox api-gateway script\r\n '''\r\n logging.info(\"Local path: {}. 
Backup path: {}\".format(_local_path, _backup_path))\r\n print(\"local path: {}\\nbackup path: {}\".format(_local_path, _backup_path))\r\n\r\n ## get the modified time for both local file and server file \r\n loc_file_modified_time = _get_file_metadata(_local_path, \"modify_time\") ## get the local file's modify_time\r\n logging.debug(\"local file modify_time: \" + str(loc_file_modified_time))\r\n print(\"local file modify_time: \" + str(loc_file_modified_time))\r\n if loc_file_modified_time is None:\r\n return ## do not execute rest of the code, go to the next record\r\n \r\n drpbx_file_modified_time = _get_drpbx_file_metadata(_backup_path, \"modify_time\", _local_path) ## get the backup file's modify_time, if doesn't exist create it on the server\r\n logging.debug(\"dropbox file server_modified time: \" + str(drpbx_file_modified_time))\r\n print(\"dropbox file server_modified time: \" + str(drpbx_file_modified_time))\r\n if drpbx_file_modified_time is None:\r\n return ## do not execute rest of the code, go to the next record\r\n \r\n ## find out whether local file was modified since the server's last update, or vice-versa ignoring the last 5 hours update on dropbox\r\n leading_time = _get_leading_time(loc_file_modified_time, drpbx_file_modified_time)\r\n if(leading_time == loc_file_modified_time):\r\n logging.info(\"local file is ahead of server, uploading latest local file to dropbox...\")\r\n print(\"local file is ahead of server, uploading latest local file to dropbox...\")\r\n \r\n try:\r\n dropbox_api_gateway.backup(_local_path, _backup_path)\r\n _update_uploaded_files_counter()\r\n \r\n except ApiError as err:\r\n if err.user_message_text:\r\n logging.error(err.user_message_text + \", cannot backup {} to {}\".format(_local_path, _backup_path))\r\n print(err.user_message_text + \", cannot backup {} to {}\".format(_local_path, _backup_path))\r\n \r\n except Exception as exp:\r\n logging.error(exp.__str__() + \", cannot backup {} to {}\".format(_local_path, _backup_path)) \r\n print(exp.__str__() + \", cannot backup {} to {}\".format(_local_path, _backup_path)) \r\n \r\n elif(leading_time == drpbx_file_modified_time):\r\n logging.info(\"dropbox file is ahead of the local copy, downloading latest file from dropbox...[download skipped]\") ## ***yet to implement***\r\n print(\"dropbox file is ahead of the local copy, downloading latest file from dropbox...[download skipped]\") ## ***yet to implement***\r\n \r\n else:\r\n logging.info(\"both files are same (ignoring updates in last 5 hours). No upload/download has taken place.\")\r\n print(\"both files are same (ignoring updates in last 5 hours). 
No upload/download has taken place.\")\r\n \r\n\r\ndef _update_uploaded_files_counter():\r\n global _uploaded_file_count\r\n _uploaded_file_count += 1 \r\n\r\n##### End of Function Definitions #####\r\n\r\n## enable logging\r\nmy_logfile_handler = MyFileHandler(\"./Logs/{}{}.log\".format(date.today().__format__(\"%Y%m%d\"), os.path.basename(__file__)))\r\nlogging.basicConfig(filename=my_logfile_handler.baseFilename, level=logging.INFO,\r\n format='%(asctime)s %(module)s.%(funcName)s line:%(lineno)s: %(levelname)-8s [%(process)d] %(message)s')\r\n\r\n## initiate the dropbox object\r\ndropbox_api_gateway.initiate_drpbx_obj()\r\n\r\n## read all Local files and their corresponding dropbox locations from a csv file\r\ncsv_file = \"./data_files/files_to_backup.csv\"\r\ntry:\r\n my_backup_files_list = csv_processor_for_drpbx_files_backup.process(csv_file) \r\nexcept Exception as err:\r\n logging.error(\"Error reading csv file {}. Program is exiting due to the following error: {}\".format(csv_file, err.__str__()))\r\n sys.exit(\"Error reading csv file {}. Program is exiting due to the following error: {}\".format(csv_file, err.__str__()))\r\n\r\n## if any of the key fields is missing in csv file, then program cannot execute further and should terminate\r\nif ('LocalFile' not in my_backup_files_list.__getitem__(0).keys()) or ('BackUpPath' not in my_backup_files_list.__getitem__(0).keys()):\r\n logging.error(\"Missing one or more required key fields in the csv file, ensure to have 'LocalFile' and 'BackUpPath' keys in the given csv\")\r\n sys.exit(\"Missing one or more required key fields in the csv file, ensure to have 'LocalFile' and 'BackUpPath' keys in the given csv\")\r\n\r\n_uploaded_file_count = 0 # to access this counter in a function level, keyword 'global' is to be used\r\n\r\n## now iterate through each item in the list and perform operations\r\nfor record in my_backup_files_list:\r\n print(\"=\"*50)\r\n local_path = record['LocalFile'] ## local file path with the file at the end of the path\r\n backup_path = record['BackUpPath'] ## corresponding dropbox file path with the file at the end of the path\r\n\r\n## TODO: Verify if local path has any negative symbol or wildcard leftover OR if backup path is None, terminate if so.\r\n\r\n logging.debug(\"CSV local path to backup: \" + local_path)\r\n print(\"CSV local path to backup: \" + local_path)\r\n logging.debug(\"CSV dropbox backup path: \" + backup_path)\r\n print(\"CSV dropbox backup path: \" + backup_path)\r\n \r\n _backup_local2_drpbx(local_path, backup_path)\r\n\r\nprint(\"*\"*30 + \"Upload completed. {} files have been uploaded.\".format(_uploaded_file_count) + \"*\"*30)\r\nlogging.info(\"*\"*30 + \"Upload completed. 
{} files have been uploaded.\".format(_uploaded_file_count) + \"*\"*30)","sub_path":"Python_Exercise/tools/backup_my_listed_files_to_drpbx.py","file_name":"backup_my_listed_files_to_drpbx.py","file_ext":"py","file_size_in_byte":9347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67330915","text":"import torch\nimport tqdm\nimport numpy as np\nimport torch.nn as nn\nimport torchvision\nfrom torch.optim import lr_scheduler\nfrom torchvision import datasets, models, transforms\nfrom utils.misc import *\nfrom utils.process_fp import process_inputs_fp\n\ncur_features = []\nref_features = []\nold_scores = []\nnew_scores = []\n\ndef get_ref_features(self, inputs, outputs):\n global ref_features\n ref_features = inputs[0]\n\ndef get_cur_features(self, inputs, outputs):\n global cur_features\n cur_features = inputs[0]\n\ndef get_old_scores_before_scale(self, inputs, outputs):\n global old_scores\n old_scores = outputs\n\ndef get_new_scores_before_scale(self, inputs, outputs):\n global new_scores\n new_scores = outputs\n\ndef map_labels(order_list, Y_set):\n map_Y = []\n for idx in Y_set:\n map_Y.append(order_list.index(idx))\n map_Y = np.array(map_Y)\n return map_Y\n\n\ndef incremental_train_and_eval(the_args, epochs, fusion_vars, ref_fusion_vars, b1_model, ref_model, b2_model, ref_b2_model, tg_optimizer, tg_lr_scheduler, fusion_optimizer, fusion_lr_scheduler, trainloader, testloader, balancedloader, iteration, start_iteration, X_protoset_cumuls, Y_protoset_cumuls, order_list,lamda, dist, K, lw_mr, fix_bn=False, weight_per_class=None, device=None):\n\n if device is None:\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n ref_model.eval()\n\n num_old_classes = ref_model.fc.out_features\n handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features)\n handle_cur_features = b1_model.fc.register_forward_hook(get_cur_features)\n handle_old_scores_bs = b1_model.fc.fc1.register_forward_hook(get_old_scores_before_scale)\n handle_new_scores_bs = b1_model.fc.fc2.register_forward_hook(get_new_scores_before_scale)\n if iteration > start_iteration+1:\n ref_b2_model.eval()\n\n for epoch in range(epochs):\n b1_model.train()\n b2_model.train()\n\n if fix_bn:\n for m in b1_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n train_loss = 0\n train_loss1 = 0\n train_loss2 = 0\n train_loss3 = 0\n correct = 0\n total = 0\n\n tg_lr_scheduler.step()\n fusion_lr_scheduler.step()\n\n print('\\nEpoch: %d, learning rate: ' % epoch, end='')\n print(tg_lr_scheduler.get_lr()[0])\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n\n inputs, targets = inputs.to(device), targets.to(device)\n\n tg_optimizer.zero_grad()\n\n outputs, _ = process_inputs_fp(the_args, fusion_vars, b1_model, b2_model, inputs)\n\n if iteration == start_iteration+1:\n ref_outputs = ref_model(inputs)\n loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), torch.ones(inputs.shape[0]).to(device)) * lamda\n else:\n ref_outputs, ref_features_new = process_inputs_fp(the_args, ref_fusion_vars, ref_model, ref_b2_model, inputs)\n loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features_new.detach(), torch.ones(inputs.shape[0]).to(device)) * lamda\n loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n outputs_bs = torch.cat((old_scores, new_scores), dim=1)\n assert(outputs_bs.size()==outputs.size())\n gt_index = torch.zeros(outputs_bs.size()).to(device)\n gt_index = gt_index.scatter(1, targets.view(-1,1), 
1).ge(0.5)\n gt_scores = outputs_bs.masked_select(gt_index)\n max_novel_scores = outputs_bs[:, num_old_classes:].topk(K, dim=1)[0]\n hard_index = targets.lt(num_old_classes)\n hard_num = torch.nonzero(hard_index).size(0)\n if hard_num > 0:\n gt_scores = gt_scores[hard_index].view(-1, 1).repeat(1, K)\n max_novel_scores = max_novel_scores[hard_index]\n assert(gt_scores.size() == max_novel_scores.size())\n assert(gt_scores.size(0) == hard_num)\n loss3 = nn.MarginRankingLoss(margin=dist)(gt_scores.view(-1, 1), max_novel_scores.view(-1, 1), torch.ones(hard_num*K).to(device)) * lw_mr\n else:\n loss3 = torch.zeros(1).to(device)\n loss = loss1 + loss2 + loss3\n\n loss.backward()\n tg_optimizer.step()\n\n train_loss += loss.item()\n train_loss1 += loss1.item()\n train_loss2 += loss2.item()\n train_loss3 += loss3.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n print('Train set: {}, train loss1: {:.4f}, train loss2: {:.4f}, train loss3: {:.4f}, train loss: {:.4f} accuracy: {:.4f}'.format(len(trainloader), train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss3/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total))\n \n b1_model.eval()\n b2_model.eval()\n \n for batch_idx, (inputs, targets) in enumerate(balancedloader):\n fusion_optimizer.zero_grad()\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, _ = process_inputs_fp(the_args, fusion_vars, b1_model, b2_model, inputs)\n loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n loss.backward()\n fusion_optimizer.step()\n\n b1_model.eval()\n b2_model.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, _ = process_inputs_fp(the_args, fusion_vars, b1_model, b2_model, inputs)\n loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n print('Test set: {} test loss: {:.4f} accuracy: {:.4f}'.format(len(testloader), test_loss/(batch_idx+1), 100.*correct/total))\n\n print(\"Removing register forward hook\")\n handle_ref_features.remove()\n handle_cur_features.remove()\n handle_old_scores_bs.remove()\n handle_new_scores_bs.remove()\n return b1_model, b2_model\n\ndef incremental_train_and_eval_first_phase(the_args, epochs, b1_model, ref_model, \\\n tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iteration, \\\n lamda, dist, K, lw_mr, fix_bn=False, weight_per_class=None, device=None):\n\n if device is None:\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n for epoch in range(epochs):\n b1_model.train()\n\n if fix_bn:\n for m in b1_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n train_loss = 0\n train_loss1 = 0\n train_loss2 = 0\n train_loss3 = 0\n correct = 0\n total = 0\n\n tg_lr_scheduler.step()\n\n print('\\nEpoch: %d, learning rate: ' % epoch, end='')\n print(tg_lr_scheduler.get_lr()[0])\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n\n inputs, targets = inputs.to(device), targets.to(device)\n tg_optimizer.zero_grad()\n outputs = b1_model(inputs)\n loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n loss.backward()\n tg_optimizer.step()\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += 
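The scatter / ge(0.5) / masked_select sequence above builds a one-hot mask from the targets and uses it to pull each sample's ground-truth score out of outputs_bs, which then feeds the margin-ranking loss. A tiny worked example of the same indexing:

import torch

scores = torch.tensor([[0.1, 0.9, 0.3],
                       [0.7, 0.2, 0.4]])
targets = torch.tensor([1, 0])

# one-hot mask marking each sample's ground-truth class
mask = torch.zeros(scores.size()).scatter(1, targets.view(-1, 1), 1).ge(0.5)
gt_scores = scores.masked_select(mask)   # tensor([0.9000, 0.7000])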
predicted.eq(targets).sum().item()\n\n print('Train set: {}, train loss: {:.4f} accuracy: {:.4f}'.format(len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))\n\n b1_model.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = b1_model(inputs)\n loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n print('Test set: {} test loss: {:.4f} accuracy: {:.4f}'.format(len(testloader), test_loss/(batch_idx+1), 100.*correct/total))\n\n return b1_model\n\n","sub_path":"meta-aggregation-networks/trainer/incremental_lucir.py","file_name":"incremental_lucir.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174564912","text":"import sys\nsys.path.insert(0, '../../')\nfrom model.util import train \nfrom model.exp_helpers import make_exp_plot, save_loss_history\n\nnetwork_architecture = \\\n dict(n_hidden_recog_1=12, # 1st layer encoder neurons\n n_hidden_recog_2=6, # 2nd layer encoder neurons\n n_hidden_gener_1=6, # 1st layer decoder neurons\n n_hidden_gener_2=12, # 2nd layer decoder neurons\n n_input=14, # MNIST data input (img shape: 28*28)\n n_z=1) # dimensionality of latent space\n\n# train and get test loss\nVAE, train_loss_history, valid_loss_history, test_loss = train(network_architecture, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t dataset_size=5e5, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t remove_prop=0.35)\n\n# save loss information\nsave_loss_history(train_loss_history, valid_loss_history, test_loss, 10, \"exp0001\", \n\t\t\t\t optional_desc=\"Dataset size was 5e5 examples, removed 35 percent\")\n\n# make plot\nmake_exp_plot(train_loss_history, valid_loss_history, test_loss, 10, \"exp0001\")\n\n","sub_path":"experiments/exp0001/exp0001.py","file_name":"exp0001.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"399377447","text":"import gzip\nimport docker\nimport logging\nimport os\nimport tarfile\nimport uuid\n\nfrom collections import OrderedDict\nfrom io import BytesIO\nfrom textwrap import dedent\n\nfrom .Cluster import Cluster\nfrom ..flow_serialization.Minifi_flow_yaml_serializer import Minifi_flow_yaml_serializer\nfrom ..flow_serialization.Nifi_flow_xml_serializer import Nifi_flow_xml_serializer\n\n\nclass SingleNodeDockerCluster(Cluster):\n \"\"\"\n A \"cluster\" which consists of a single docker node. 
Useful for\n testing or use-cases which do not span multiple compute nodes.\n \"\"\"\n\n def __init__(self):\n self.minifi_version = os.environ['MINIFI_VERSION']\n self.nifi_version = '1.7.0'\n self.engine = 'minifi-cpp'\n self.flow = None\n self.name = None\n self.vols = {}\n self.minifi_root = '/opt/minifi/nifi-minifi-cpp-' + self.minifi_version\n self.nifi_root = '/opt/nifi/nifi-' + self.nifi_version\n self.kafka_broker_root = '/opt/kafka'\n self.network = None\n self.containers = OrderedDict()\n self.images = []\n self.tmp_files = []\n\n # Get docker client\n self.client = docker.from_env()\n\n def __del__(self):\n \"\"\"\n Clean up ephemeral cluster resources\n \"\"\"\n\n # Containers and networks are expected to be freed outside of this function\n\n # Clean up images\n for image in reversed(self.images):\n logging.info('Cleaning up image: %s', image[0].id)\n self.client.images.remove(image[0].id, force=True)\n\n # Clean up tmp files\n for tmp_file in self.tmp_files:\n os.remove(tmp_file)\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def set_engine(self, engine):\n self.engine = engine\n\n def get_engine(self):\n return self.engine\n\n def get_flow(self):\n return self.flow\n\n def set_flow(self, flow):\n self.flow = flow\n\n def set_directory_bindings(self, bindings):\n self.vols = bindings\n\n @staticmethod\n def create_docker_network():\n net_name = 'minifi_integration_test_network-' + str(uuid.uuid4())\n logging.info('Creating network: %s', net_name)\n return docker.from_env().networks.create(net_name)\n\n def set_network(self, network):\n self.network = network\n\n def deploy_flow(self):\n \"\"\"\n Compiles the flow to a valid config file and overlays it into a new image.\n \"\"\"\n\n if self.vols is None:\n self.vols = {}\n\n if self.name is None:\n self.name = self.engine + '-' + str(uuid.uuid4())\n logging.info('Flow name was not provided; using generated name \\'%s\\'', self.name)\n\n logging.info('Deploying %s flow \\\"%s\\\"...', self.engine, self.name)\n\n # Create network if necessary\n if self.network is None:\n self.set_network(self.create_docker_network())\n\n if self.engine == 'nifi':\n self.deploy_nifi_flow()\n elif self.engine == 'minifi-cpp':\n self.deploy_minifi_cpp_flow()\n elif self.engine == 'kafka-broker':\n self.deploy_kafka_broker()\n elif self.engine == 'http-proxy':\n self.deploy_http_proxy()\n elif self.engine == 's3-server':\n self.deploy_s3_server()\n elif self.engine == 'azure-storage-server':\n self.deploy_azure_storage_server()\n else:\n raise Exception('invalid flow engine: \\'%s\\'' % self.engine)\n\n def deploy_minifi_cpp_flow(self):\n\n # Build configured image\n dockerfile = dedent(\"\"\"FROM {base_image}\n USER root\n ADD config.yml {minifi_root}/conf/config.yml\n RUN chown minificpp:minificpp {minifi_root}/conf/config.yml\n RUN sed -i -e 's/INFO/DEBUG/g' {minifi_root}/conf/minifi-log.properties\n USER minificpp\n \"\"\".format(base_image='apacheminificpp:' + self.minifi_version,\n minifi_root=self.minifi_root))\n\n serializer = Minifi_flow_yaml_serializer()\n test_flow_yaml = serializer.serialize(self.flow)\n logging.info('Using generated flow config yml:\\n%s', test_flow_yaml)\n\n conf_file_buffer = BytesIO()\n\n try:\n conf_file_buffer.write(test_flow_yaml.encode('utf-8'))\n conf_file_len = conf_file_buffer.tell()\n conf_file_buffer.seek(0)\n\n context_files = [\n {\n 'name': 'config.yml',\n 'size': conf_file_len,\n 'file_obj': conf_file_buffer\n }\n ]\n\n configured_image = 
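The engine dispatch in deploy_flow is an if/elif chain over self.engine; a dict of bound methods expresses the same mapping in one place. A sketch of the dispatch step only, keeping the name and network setup above unchanged:

def deploy_flow(self):
    deployers = {
        'nifi': self.deploy_nifi_flow,
        'minifi-cpp': self.deploy_minifi_cpp_flow,
        'kafka-broker': self.deploy_kafka_broker,
        'http-proxy': self.deploy_http_proxy,
        's3-server': self.deploy_s3_server,
        'azure-storage-server': self.deploy_azure_storage_server,
    }
    try:
        deployers[self.engine]()
    except KeyError:
        raise Exception('invalid flow engine: \'%s\'' % self.engine)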
self.build_image(dockerfile, context_files)\n\n finally:\n conf_file_buffer.close()\n\n container = self.client.containers.run(\n configured_image[0],\n detach=True,\n name=self.name,\n network=self.network.name,\n volumes=self.vols)\n self.network.reload()\n\n logging.info('Adding container \\'%s\\'', container.name)\n self.containers[container.name] = container\n\n def deploy_nifi_flow(self):\n dockerfile = dedent(r\"\"\"FROM {base_image}\n USER root\n ADD flow.xml.gz {nifi_root}/conf/flow.xml.gz\n RUN chown nifi:nifi {nifi_root}/conf/flow.xml.gz\n RUN sed -i -e 's/^\\(nifi.remote.input.host\\)=.*/\\1={name}/' {nifi_root}/conf/nifi.properties\n RUN sed -i -e 's/^\\(nifi.remote.input.socket.port\\)=.*/\\1=5000/' {nifi_root}/conf/nifi.properties\n USER nifi\n \"\"\".format(name=self.name,\n base_image='apache/nifi:' + self.nifi_version,\n nifi_root=self.nifi_root))\n\n serializer = Nifi_flow_xml_serializer()\n test_flow_xml = serializer.serialize(self.flow, self.nifi_version)\n logging.info('Using generated flow config xml:\\n%s', test_flow_xml)\n\n conf_file_buffer = BytesIO()\n\n try:\n with gzip.GzipFile(mode='wb', fileobj=conf_file_buffer) as conf_gz_file_buffer:\n conf_gz_file_buffer.write(test_flow_xml.encode())\n conf_file_len = conf_file_buffer.tell()\n conf_file_buffer.seek(0)\n\n context_files = [\n {\n 'name': 'flow.xml.gz',\n 'size': conf_file_len,\n 'file_obj': conf_file_buffer\n }\n ]\n\n configured_image = self.build_image(dockerfile, context_files)\n\n finally:\n conf_file_buffer.close()\n\n logging.info('Creating and running docker container for flow...')\n\n container = self.client.containers.run(\n configured_image[0],\n detach=True,\n name=self.name,\n hostname=self.name,\n network=self.network.name,\n volumes=self.vols)\n\n logging.info('Adding container \\'%s\\'', container.name)\n self.containers[container.name] = container\n\n def deploy_kafka_broker(self):\n logging.info('Creating and running docker containers for kafka broker...')\n zookeeper = self.client.containers.run(\n self.client.images.pull(\"wurstmeister/zookeeper:3.4.6\"),\n detach=True,\n name='zookeeper',\n network=self.network.name,\n ports={'2181/tcp': 2181})\n logging.info('Adding container \\'%s\\'', zookeeper.name)\n self.containers[zookeeper.name] = zookeeper\n\n test_dir = os.environ['PYTHONPATH'].split(':')[-1] # Based on DockerVerify.sh\n broker_image = self.build_image_by_path(test_dir + \"/resources/kafka_broker\", 'minifi-kafka')\n broker = self.client.containers.run(\n broker_image[0],\n detach=True,\n name='kafka-broker',\n network=self.network.name,\n ports={'9092/tcp': 9092, '29092/tcp': 29092},\n environment=[\n \"KAFKA_BROKER_ID=1\",\n 'ALLOW_PLAINTEXT_LISTENER: \"yes\"',\n \"KAFKA_LISTENERS=PLAINTEXT://kafka-broker:9092,SSL://kafka-broker:9093,PLAINTEXT_HOST://0.0.0.0:29092\",\n \"KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,SSL:SSL\",\n \"KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka-broker:9092,SSL://kafka-broker:9093,PLAINTEXT_HOST://localhost:29092\",\n \"KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181\"])\n logging.info('Adding container \\'%s\\'', broker.name)\n self.containers[broker.name] = broker\n\n def deploy_http_proxy(self):\n logging.info('Creating and running http-proxy docker container...')\n dockerfile = dedent(\"\"\"FROM {base_image}\n RUN apt -y update && apt install -y apache2-utils\n RUN htpasswd -b -c /etc/squid/.squid_users {proxy_username} {proxy_password}\n RUN echo 'auth_param basic program /usr/lib/squid3/basic_ncsa_auth 
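deploy_nifi_flow compresses the generated flow XML entirely in memory before handing it to the build context, recording the compressed length for the tar header. The core of that pattern in isolation:

import gzip
from io import BytesIO

buf = BytesIO()
with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
    gz.write(b'<flowController/>')
size = buf.tell()    # compressed length, needed for the TarInfo size field
buf.seek(0)          # rewind before handing the buffer to tarfile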
/etc/squid/.squid_users' > /etc/squid/squid.conf && \\\n echo 'auth_param basic realm proxy' >> /etc/squid/squid.conf && \\\n echo 'acl authenticated proxy_auth REQUIRED' >> /etc/squid/squid.conf && \\\n echo 'http_access allow authenticated' >> /etc/squid/squid.conf && \\\n echo 'http_port {proxy_port}' >> /etc/squid/squid.conf\n ENTRYPOINT [\"/sbin/entrypoint.sh\"]\n \"\"\".format(base_image='sameersbn/squid:3.5.27-2', proxy_username='admin', proxy_password='test101', proxy_port='3128'))\n configured_image = self.build_image(dockerfile, [])\n consumer = self.client.containers.run(\n configured_image[0],\n detach=True,\n name='http-proxy',\n network=self.network.name,\n ports={'3128/tcp': 3128})\n self.containers[consumer.name] = consumer\n\n def deploy_s3_server(self):\n server = self.client.containers.run(\n \"adobe/s3mock:2.1.28\",\n detach=True,\n name='s3-server',\n network=self.network.name,\n ports={'9090/tcp': 9090, '9191/tcp': 9191},\n environment=[\"initialBuckets=test_bucket\"])\n self.containers[server.name] = server\n\n def deploy_azure_storage_server(self):\n server = self.client.containers.run(\n \"mcr.microsoft.com/azure-storage/azurite:3.13.0\",\n detach=True,\n name='azure-storage-server',\n network=self.network.name,\n ports={'10000/tcp': 10000, '10001/tcp': 10001})\n self.containers[server.name] = server\n\n def build_image(self, dockerfile, context_files):\n conf_dockerfile_buffer = BytesIO()\n docker_context_buffer = BytesIO()\n\n try:\n # Overlay conf onto base nifi image\n conf_dockerfile_buffer.write(dockerfile.encode())\n conf_dockerfile_buffer.seek(0)\n\n with tarfile.open(mode='w', fileobj=docker_context_buffer) as docker_context:\n dockerfile_info = tarfile.TarInfo('Dockerfile')\n dockerfile_info.size = conf_dockerfile_buffer.getbuffer().nbytes\n docker_context.addfile(dockerfile_info,\n fileobj=conf_dockerfile_buffer)\n\n for context_file in context_files:\n file_info = tarfile.TarInfo(context_file['name'])\n file_info.size = context_file['size']\n docker_context.addfile(file_info,\n fileobj=context_file['file_obj'])\n docker_context_buffer.seek(0)\n\n logging.info('Creating configured image...')\n configured_image = self.client.images.build(fileobj=docker_context_buffer,\n custom_context=True,\n rm=True,\n forcerm=True)\n logging.info('Created image with id: %s', configured_image[0].id)\n self.images.append(configured_image)\n\n finally:\n conf_dockerfile_buffer.close()\n docker_context_buffer.close()\n\n return configured_image\n\n def build_image_by_path(self, dir, name=None):\n try:\n logging.info('Creating configured image...')\n configured_image = self.client.images.build(path=dir,\n tag=name,\n rm=True,\n forcerm=True)\n logging.info('Created image with id: %s', configured_image[0].id)\n self.images.append(configured_image)\n return configured_image\n except Exception as e:\n logging.info(e)\n raise\n","sub_path":"docker/test/integration/minifi/core/SingleNodeDockerCluster.py","file_name":"SingleNodeDockerCluster.py","file_ext":"py","file_size_in_byte":12948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5413886","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 13:12:59 2019\n\n@author: Karn Tiwari\n\"\"\"\n\n#Recursive Function Definition\ndef fun(n):\n x=int(n/10)\n \n if x==0:\n return n\n else:\n return (n%10)+fun(x)\n\n#Taking Input as Number\nn=int(input('Enter the number to find the Sum of Digits: '))\n\n#Calling Recursive Function\ny=fun(n)\n\n#Printing the 
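build_image above packs the Dockerfile and any extra files into a tar archive held entirely in a BytesIO, which docker-py consumes via fileobj plus custom_context=True. A minimal standalone version of the same idea:

import tarfile
from io import BytesIO

def make_context(dockerfile_bytes):
    # Build an in-memory tar archive containing only a Dockerfile.
    ctx = BytesIO()
    with tarfile.open(mode='w', fileobj=ctx) as tar:
        info = tarfile.TarInfo('Dockerfile')
        info.size = len(dockerfile_bytes)
        tar.addfile(info, fileobj=BytesIO(dockerfile_bytes))
    ctx.seek(0)
    return ctx

# client.images.build(fileobj=make_context(b'FROM alpine\n'), custom_context=True)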
Result\nprint('The Sum of Digits of Number is: ',y)\n","sub_path":"Assignment 2/Question 2.py","file_name":"Question 2.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"173953623","text":"from oic.oic.consumer import Consumer\nfrom oic.utils.keyio import KeyChain, KeyJar\n\n__author__ = 'rohe0002'\nfrom fakeoicsrv import MyFakeOICServer\n\nCLIENT_SECRET = \"abcdefghijklmnop\"\nCLIENT_ID = \"client_1\"\n\nRSAPUB = \"../oc3/certs/mycert.key\"\n\nKC_HMAC_VS = KeyChain({\"hmac\": CLIENT_SECRET}, usage=[\"ver\", \"sig\"])\nKC_RSA = KeyChain(source=\"file://%s\" % RSAPUB, type=\"rsa\", usage=[\"ver\", \"sig\"])\nKC_HMAC_S = KeyChain({\"hmac\": CLIENT_SECRET}, usage=[\"sig\"])\n\nSRVKEYS = KeyJar()\nSRVKEYS[\"\"] = [KC_RSA]\nSRVKEYS[\"client_1\"] = [KC_HMAC_VS, KC_RSA]\n\nc = Consumer(None, None)\nmfos = MyFakeOICServer(\"http://example.com\")\nmfos.keyjar = SRVKEYS\nc.http_request = mfos.http_request\n\nprincipal = \"foo@example.com\"\n\nres = c.discover(principal)\ninfo = c.provider_config(res)\nassert info.type() == \"ProviderConfigurationResponse\"\n","sub_path":"tests/debug_2.py","file_name":"debug_2.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"416701961","text":"#!/usr/bin/env python\r\n\r\n# -*- encoding: utf-8 -*-\r\n\r\n'''\r\n@Author : HY\r\n@Software: PyCharm\r\n@File : modelTree.py\r\n@Time : 2019/5/25 19:18\r\n@Desc : Implementation of a model tree.\r\n Unlike CART, whose leaf nodes each hold a single value (the mean), a model tree's leaves hold the linear-regression coefficients ws.\r\n\r\n'''\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef linearSolve(dataSet):\r\n \"\"\"\r\n Description: fit a linear regression, i.e. solve for the regression coefficients\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 19:50\r\n \"\"\"\r\n m,n=np.shape(dataSet)\r\n X=np.mat(np.ones((m,n)));Y=np.mat(np.ones((m,1)))\r\n X[:,1:n]=dataSet[:,0:n-1];Y=dataSet[:,-1]\r\n xTx=X.T*X\r\n if np.linalg.det(xTx)==0:\r\n raise ValueError('The inverse matrix does not exist (xTx is singular)')\r\n ws=xTx.I*X.T*Y\r\n return ws,X,Y\r\n\r\n\r\ndef modelLeaf(dataSet):\r\n \"\"\"\r\n Description: a leaf node is just a set of regression coefficients\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 19:51\r\n \"\"\"\r\n ws, X, Y=linearSolve(dataSet)\r\n return ws\r\n\r\ndef modelError(dataSet):\r\n \"\"\"\r\n Description: compute the squared error of the linear fit\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 19:51\r\n \"\"\"\r\n ws, X, Y = linearSolve(dataSet)\r\n Yhat=X*ws\r\n return (Y-Yhat).T*(Y-Yhat)\r\n\r\n\r\ndef loadDataSet(fileName):\r\n with open(fileName) as file:\r\n con = file.readlines()\r\n dataSet=[]\r\n for ele in con:\r\n line=ele.strip().split('\\t')\r\n line=[float(l) for l in line]\r\n dataSet.append(line)\r\n return dataSet\r\ndef binSplitDataTest(dataSet,featureIdex,featureVal):\r\n \"\"\"\r\n Description: binary-split the dataset on a feature value, as used by tree regression\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 16:13\r\n \"\"\"\r\n #note how np.nonzero selects the matching rows\r\n rightMat=dataSet[np.nonzero(dataSet[:,featureIdex]>featureVal)[0]]\r\n leftMat=dataSet[np.nonzero(dataSet[:,featureIdex]<=featureVal)[0]]\r\n return rightMat,leftMat\r\n\r\n\r\ndef chooseBestSplit(dataSet,leafType=modelLeaf,errorType=modelError,ops=(1,4)):\r\n \"\"\"\r\n Description: iterate over every feature and feature value to find the best split point\r\n Params:\r\n\r\n Return:\r\n\r\n Author:\r\n HY\r\n Modify:\r\n 2019/5/25 16:14\r\n \"\"\"\r\n tolS=ops[0];tolN=ops[1]#stopping conditions: the error tolerance and the minimum number of rows per split\r\n if len(set(dataSet[:,-1].T.tolist()[0]))==1:#stop when all remaining target values are identical\r\n return 
None,leafType(dataSet) #return the leaf value\r\n S=errorType(dataSet)\r\n bestFeatureIndex=0;bestFeatureVal=0\r\n m,n =np.shape(dataSet)\r\n smallestS=float('inf')\r\n for feaIn in range(n-1):#iterate over all features\r\n for splitVal in set(dataSet[:,feaIn].T.A.tolist()[0]):#iterate over this feature's values\r\n rightMat, leftMat=binSplitDataTest(dataSet,feaIn,splitVal)#get the two split subsets\r\n if np.shape(rightMat)[0];\\n\".format(id, m[id]['name']))\n f.write(\"\\t\\tidMap[parseInt({})] = (<{} parentProps={{this.props}} notify={{this.props.notify}}/>);\\n\".format(id, m[id]['name']))\n f.write(\"\\t\\tif (idMap[parseInt(this.props.componentId)]) return idMap[parseInt(this.props.componentId)];\\n\")\n f.write(\"\\t\\tvar nameMap = {};\\n\")\n for id in m.keys():\n # f.write(\"\\t\\tif (this.props.componentName == '{}') return <{} parentProps={{this.props}} notify={{this.props.notify}} />;\\n\".format(m[id]['name'], m[id]['name']))\n f.write(\"\\t\\tnameMap[{}] = (<{} parentProps={{this.props}} notify={{this.props.notify}}/>);\\n\".format(m[id]['name'], m[id]['name']))\n f.write(\"\\t\\tif (nameMap[this.props.componentName]) return nameMap[this.props.componentName];\\n\")\n f.write(\"\\t\\treturn
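linearSolve above fits y = X*ws by the normal equations, ws = (X^T X)^-1 X^T y, with a column of ones acting as the intercept, and binSplitDataTest selects rows with np.nonzero. Both steps checked on tiny inputs:

import numpy as np

# normal equations, as in linearSolve
X = np.mat([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])   # first column is the intercept
Y = np.mat([[1.0], [3.0], [5.0]])                   # exactly y = 1 + 2x
ws = (X.T * X).I * (X.T * Y)
assert np.allclose(ws, np.linalg.lstsq(X, Y, rcond=None)[0])  # [[1.], [2.]]

# boolean row selection with np.nonzero, as in binSplitDataTest
data = np.mat([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
right = data[np.nonzero(data[:, 0] > 1.5)[0]]    # rows whose feature 0 exceeds 1.5
left = data[np.nonzero(data[:, 0] <= 1.5)[0]]    # the remaining row
assert right.shape == (2, 2) and left.shape == (1, 2)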
<div>UNKNOWN COMPONENT</div>
;\\n\");\n f.write(\"\\t}\\n\")#end render\n f.write(\"}\\n\")#end class\n f.close()\n outFileName = config['src'] + os.sep + config['config']\n with open(outFileName, \"w+\") as f:\n for id in m.keys():\n f.write(\"import {} from '{}';\\n\".format(m[id]['name'], m[id]['path']).replace(\"\\\\\", \"/\"))\n f.write(\"const MODULES = {\\n\")\n max_id = max(m.keys()) + 1\n for id in range(max_id):\n if id in m and m[id]:\n f.write(\"\\t{}:\\\"{}\\\",\\n\".format(id, m[id]['name']))\n f.write(\"};\\n\\nexport default MODULES;\\n\")\n f.close()\n\ndef __main__():\n showConfig()\n walk(config['src'])\n generate(commponentMap)\n\nif __name__ == \"__main__\":\n __main__()\n","sub_path":"gen-cc.py","file_name":"gen-cc.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"271893099","text":"# -*- coding: utf-8 -*-\nimport getpass\nimport requests\nimport lxml.html as html\nimport re\nimport os\nimport platform\n\n\nclass LeetCode:\n def __init__(self, user=None, password=None):\n if user:\n self.user = user\n else:\n self.user = input('Username:')\n if password:\n self.password = password\n else:\n self.password = getpass.getpass('Password:')\n self.session = requests.Session()\n\n def login(self):\n url = 'https://leetcode.com/accounts/login/'\n loginPage = self.session.get(url)\n pattern = \"//input[@name='csrfmiddlewaretoken']/@value\"\n token = html.fromstring(loginPage.text).xpath(pattern)[0]\n postData = {\n 'login': self.user,\n 'password': self.password,\n 'csrfmiddlewaretoken': token\n }\n headers = {\n #'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 '\n # '(KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0',\n 'Referer': 'https://leetcode.com/accounts/login/',\n }\n self.session.headers.update(headers)\n self.session.post(url, data=postData)\n print('Login as %s' % self.user)\n if not os.path.exists(self.user):\n os.mkdir(self.user)\n\n def fetch(self, url):\n page = self.session.get(url)\n return bytearray(page.text, encoding='utf-8')\n\n def getSubmissionsByProblem(self):\n problemPage = self.fetch('https://leetcode.com/problemset/algorithms/')\n pattern = '//table[@id=\"problemList\"]/tbody/tr'\n problemList = html.fromstring(str(problemPage)).xpath(pattern)\n print('%d problems detected' % len(problemList))\n for problemNode in problemList:\n problemSeq = int(problemNode[1].text)\n problemLink = problemNode[2][0].attrib['href']\n problemName = problemLink[1:-1].split('/')[-1]\n problemDifficulty = problemNode[5].text[0]\n problemDir = '%s/%04d-%s-[%s]' % (self.user, problemSeq, problemDifficulty, problemName)\n if not os.path.exists(problemDir):\n os.mkdir(problemDir)\n submissionLink = 'https://leetcode.com%ssubmissions/' % problemLink\n print('Start collecting accepted submissions for problem No.%d: %s' % (problemSeq, problemName))\n self.getSubmissionsList(submissionLink, problemDir)\n\n def getSubmissionsList(self, url, dirName):\n submissionsPage = self.fetch(url)\n # pattern = '//a[@class=\"text-danger status-accepted\"]/@href'\n pattern = '//tbody/tr'\n submissions = html.fromstring(str(submissionsPage)).xpath(pattern)\n acceptedListLogName = '%s/accepted_list.log' % dirName\n if os.path.exists(acceptedListLogName):\n acceptedListFile = open(acceptedListLogName, 'r').read()\n acceptedListLog = acceptedListFile.strip().split('\\n')\n else:\n acceptedListLog = []\n 
print('\\t%d submissions found' % len(submissions))\n for node in submissions:\n href = node[2][0].attrib['href']\n status = node[2][0].attrib['class']\n if 'status-accepted' in status:\n submissionId = href[1:-1].split('/')[-1]\n timeCost = node[3].text.strip().split()[1] + '_ms'\n if submissionId not in acceptedListLog:\n acceptedLink = 'https://leetcode.com' + href\n self.getCode(acceptedLink, dirName, timeCost)\n acceptedListLog.append(submissionId)\n else:\n print('\\t\\tAccepted submission %s already collected' % submissionId)\n open(acceptedListLogName, 'w').write('\\n'.join(acceptedListLog))\n\n def getCode(self, url, dirName, time):\n codePage = self.fetch(url)\n codeReg = r\"getLangDisplay: \\\\'(\\w+)\\\\',\\\\n submissionCode: \\\\'(.+)\\\\',\\\\n editCodeUrl:\"\n codeGroup = re.findall(codeReg, str(codePage))\n codeType = codeGroup[0][0]\n codeStr = codeGroup[0][1]\n unicodeSet = re.findall(r'\\\\\\\\u\\w{4}', codeStr)\n for ch in set(unicodeSet):\n codeStr = codeStr.replace(ch, chr(int(ch[3:7], 16)))\n if codeType == 'python':\n suffix = 'py'\n elif codeType == 'c':\n suffix = 'c'\n else:\n suffix = ''\n codeFileName = '%s/%s_%s.%s' % (dirName, url[:-1].split('/')[-1], time, suffix)\n codeFile = open(codeFileName, 'w')\n codeFile.write(codeStr.replace('\\r\\n', '\\n'))\n codeFile.close()\n print('\\t\\tAccepted submission %s collected as %s' % (url[:-1].split('/')[-1], codeFileName))\n\n def update(self):\n directories = list(os.walk(self.user))[0][1]\n cwd = os.getcwd()\n collection = '%s/0000-[all-submissions]' % self.user\n if platform.system() == 'Windows':\n copyCmd = 'copy'\n else:\n copyCmd = 'cp'\n\n if not os.path.exists(collection):\n os.mkdir(collection)\n\n for folder in directories:\n user_info = list(os.walk('%s/%s' % (self.user, folder)))[0]\n if '0000' in user_info[0]:\n continue\n submissions = user_info[2]\n submissions = [file for file in submissions if file != 'accepted_list.log']\n if submissions:\n file = min(submissions, key=lambda line: int(line.split('_')[1]))\n prePath = '%s/%s/%s' % (cwd, self.user, folder)\n cmd = '%s \"%s/%s\" \"%s/%s/%s.py\"' % (copyCmd, prePath, file, cwd, collection, folder)\n print(cmd)\n cmdStatus = os.popen(cmd)\n print(cmdStatus.read(), end='')\n\n def task(self):\n self.login()\n self.getSubmissionsByProblem()\n self.update()\n\nif __name__ == '__main__':\n lc = LeetCode()\n lc.task()\n","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"148144822","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport time\nimport pyautogui\n\nif __name__ == \"__main__\":\n loopIdx = 0\n loopLimit = 12000\n\n while True:\n # uptempo = \"https://smartstore.naver.com/neulhaerangmask/products/4632987981?site_preference=device&NaPm=\"\n uptempo = 'https://smartstore.naver.com/hana-water/products/4832110630?NaPm='\n req = urllib.request.Request(uptempo)\n res = urllib.request.urlopen(req)\n data = res.read()\n\n soup = BeautifulSoup(data.decode(\"utf-8\"), 'html.parser')\n ready = False\n\n for span in soup.find_all(\"span\"):\n if span.get('class') == None:\n continue\n\n if span.get('class')[0] == 'cart':\n for s in span:\n if s.get('class')[0] == 'mask2':\n continue\n elif s.get('class')[0] == '_stopDefault':\n ready = False\n else:\n ready = True\n break\n\n if ready:\n x, y = pyautogui.position()\n print('x: {}, y: {}'.format(x, y))\n pyautogui.moveTo(x=793, y=910)\n pyautogui.click()\n 
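getCode repairs non-ASCII characters that arrive as doubly backslashed \uXXXX sequences inside the scraped JavaScript; the slice ch[3:7] skips the two backslashes and the 'u' to reach the four hex digits. The trick in isolation:

import re

code = 'print("caf\\\\u00e9")'           # two literal backslashes before u00e9
for ch in set(re.findall(r'\\\\u\w{4}', code)):
    code = code.replace(ch, chr(int(ch[3:7], 16)))
print(code)                               # print("café")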
break\n\n else:\n print(str(loopIdx) + \" : 아직 준비안됨\")\n\n loopIdx += 1\n time.sleep(0.5)\n\n if loopIdx > loopLimit:\n break\n","sub_path":"activity list/MOSAIC(Koreatech Start Up)/Crawling/Naver/macro.py","file_name":"macro.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"197984225","text":"#!/usr/bin/python\n# -*-coding: utf-8 -*-\n\n\nclass Local():\n local_numero = 0\n\n def __init__(self, Pgsql):\n Local.local_numero += 1\n self.idlocal = Local.local_numero\n self.Pgsql = Pgsql\n self.period = 0\n self.nom = \"\"\n self.max_hygro = 0\n self.min_hygro = 0\n self.max_temp = 0\n self.min_temp = 0\n self.last_temp = 0\n self.last_hygro = 0\n self.GetValues()\n\n def GetValues(self):\n '''\n Initialise les seuils en récupérant les valeurs depuis la BDD\n '''\n chaine = 'SELECT * FROM locaux WHERE id='+str(self.idlocal)\n try:\n values = self.Pgsql.bddSelect(chaine)\n self.idlocal = values[0][0]\n self.max_hygro = values[0][2]\n self.min_hygro = values[0][1]\n self.min_temp = values[0][3]\n self.max_temp = values[0][4]\n self.period = values[0][5]\n self.nom = values[0][6]\n except Exception as ex:\n print(\"Erreur paramètrage des locaux - {}\".format(ex))\n\n def LastValues(self):\n '''\n Récupère les dernières valeurs Temp/Hygro depuis la BDD\n enregistrées dans le local selectionné\n '''\n chaine = 'select hygro, temp from history_values, \\\n locaux where history_values.idlocal = locaux.id and locaux.id =' \\\n + str(self.idlocal) + 'order by history_values.date_start desc limit 1'\n try:\n values = self.Pgsql.bddSelect(chaine)\n self.last_temp = values[0][1]\n self.last_hygro = values[0][0]\n except Exception:\n self.last_temp = 00.00\n self.last_hygro = 00.00\n return self.last_temp, self.last_hygro\n","sub_path":"decoupeuse/Supervision/IHM/locaux.py","file_name":"locaux.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67781324","text":"str = input()\nnum=[]\nfor i in str:\n\tnum.append(int(i))\n\t\nret = 0\t\nfor n in num:\n\tif ret <=1 or n<=1:\n\t\tret += n\n\telse:\n\t\tret *= n\nprint(ret)\t \t\n\t \t\n\t\n","sub_path":"Chapter11_그리디 문제/Q02.py","file_name":"Q02.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420707457","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport os.path\nif(os.path.isfile('sqlalchemy_table.db')):\n os.remove('sqlalchemy_table.db')\nfrom sqlalchemy.event import listen\nfrom CreationTable import * \nfrom sqlalchemy import inspect,update\n\n\ninstancesToDelete = []\nerrorMessages = {}\n\n#@event.listens_for(ParamsClass, 'before_insert')\n#def checkClass(mapper, connect, self):\n# parameterInstance = session.query(ParameterTable).get(self.idParameter) \n# #dictionary\n# options = { 1:\"One\",2:\"Two\",3:\"Three\",4:\"Four\",5:\"Five\"}\n# if(parameterInstance.classOrType == \"type\"):\n# #raise ValueError (\"Couldn't be of type \\\"type\\\"\")\n# #d'apres ce qui est ecrit dans le warning de ce lien\n# #http://docs.sqlalchemy.org/en/improve_toc/orm/events.html\n# #on ne peut pas changer le state d'un objet dans le before\n# #insert event je le fait donc dans le before commit\n# #ce event me sert juste a reperer les erreurs\n# #cette event me sert aussi a remplir le champ \"tableName\"\n# errorMessages[self] = 
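The greedy rule in the last snippet, add when the running total or the next digit is 0 or 1 and multiply otherwise, maximizes the value built from a digit string. The same logic as a function with a worked case:

def max_of_digits(s):
    # add when the running total or the next digit is <= 1, else multiply
    ret = 0
    for n in (int(c) for c in s):
        ret = ret + n if ret <= 1 or n <= 1 else ret * n
    return ret

print(max_of_digits('02984'))  # ((((0+2)*9)*8)*4) = 576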
\"incompatible type\"\n# self.tableName = options[parameterInstance.numberOfAtoms]+\"Type\"\n# instancesToDelete.append(self)\n# if(parameterInstance.classOrType == \"class\"):\n# self.tableName = options[parameterInstance.numberOfAtoms]+\"Class\"\n\n#@event.listens_for(ParamsType, 'before_insert')\n#def checkType(mapper, connect, self):\n# parameterInstance = session.query(ParameterTable).get(self.idParameter) \n# #dictionary\n# options = { 1:\"One\",2:\"Two\",3:\"Three\",4:\"Four\",5:\"Five\"}\n# if(parameterInstance.classOrType == \"type\"):\n# #raise ValueError (\"Couldn't be of type \\\"type\\\"\")\n# #d'apres ce qui est ecrit dans le warning de ce lien\n# #http://docs.sqlalchemy.org/en/improve_toc/orm/events.html\n# #on ne peut pas changer le state d'un objet dans le before\n# #insert event je le fait donc dans le before commit\n# #ce event me sert juste a reperer les erreurs\n# #cette event me sert aussi a remplir le champ \"tableName\"\n# self.tableName = options[parameterInstance.numberOfAtoms]+\"Type\"\n# if(parameterInstance.classOrType == \"class\"):\n# errorMessages[self] = \"incompatible class\"\n# self.tableName = options[parameterInstance.numberOfAtoms]+\"Class\"\n# instancesToDelete.append(self)\n#\n#listen(ParamsClass, 'before_insert', checkClass)\n#listen(ParamsType, 'before_insert', checkType)\n\ndef incompatibleRow(objList,session):\n for instance in objList:\n className = instance.__class__.__name__\n x =session.query(eval(className)).get(inspect(instance).identity)\n session.delete(x)\n print(bcolors.OKBLUE + 'instance of Table \\\"' + className + '\\\" with primary key '+ str(inspect(instance).identity) +' have been deleted' + bcolors.ENDC)\n print(bcolors.OKBLUE + 'ValueError: ' + errorMessages[instance] + bcolors.ENDC +'\\n')\n \n\n#Pour mettre un peut de couleur au output\n#http://stackoverflow.com/questions/22886353/printing-colors-in-python-terminal\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n def disable(self):\n self.HEADER = ''\n self.OKBLUE = ''\n self.OKGREEN = ''\n self.WARNING = ''\n self.FAIL = ''\n self.ENDC = ''\n\n\nengine = create_engine('sqlite:///sqlalchemy_table.db')\n\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n@event.listens_for(session, 'before_commit')\ndef receive_before_commit(session):\n print('------------------ERROR MESSAGE------------------------\\n')\n incompatibleRow(instancesToDelete,session)\n print('------------------------------------------------------\\n')\n\n#creation de ForceField\namberFF98 = ForceField('AMBER_FF98')\ncharmm19 = ForceField('CHARMM19')\ncharmm27 = ForceField('CHARMM27')\nopls_aa = ForceField('OPLS-AA')\nameoba_water = ForceField('AMOEBA-WATER')\nsession.add(amberFF98)\nsession.add(charmm19)\nsession.add(charmm27)\nsession.add(opls_aa)\nsession.add(ameoba_water)\n\n#creation de user \ndaniel = User('Daniel','Haim','danielmo','danielmoisehaim@gmail.com',password='K2baLLet',social_network='facebook')\nraph = User('Raphael','Attal','rattal','raphaelattal1991@gmail.com','secret_key_raph')\nyoel = User('Yoel','Levy','levyoel','yoel_levy_1@hotmail.fr','secret_key_yoel')\n\n#creation de l'instance many_to_many\n#recap:\n#daniel --> amberFF98 , charm19 , amoeba_water\n#raph --> amberFF98 , charm 27 , amoeba_water\n#yoel --> charm27 , opls_aa\ndaniel_ff1 = UserForceField()\ndaniel_ff1.ff_instance = amberFF98\ndaniel_ff1.user_instance = 
daniel\ndaniel_ff1.isAuthor = True\ndaniel_ff2 = UserForceField()\ndaniel_ff2.ff_instance = charmm19\ndaniel_ff2.user_instance = daniel\ndaniel_ff2.isAuthor = False\ndaniel_ff3 = UserForceField()\ndaniel_ff3.ff_instance = ameoba_water\ndaniel_ff3.user_instance = daniel\ndaniel_ff3.isAuthor = False\nraph_ff1 = UserForceField()\nraph_ff1.ff_instance = amberFF98\nraph_ff1.user_instance = raph\nraph_ff1.isAuthor = False\nraph_ff2 = UserForceField()\nraph_ff2.ff_instance = charmm27\nraph_ff2.user_instance = raph\nraph_ff2.isAuthor = True\nraph_ff3 = UserForceField()\nraph_ff3.ff_instance = ameoba_water\nraph_ff3.user_instance = raph\nraph_ff3.isAuthor = False\nyoel_ff1 = UserForceField()\nyoel_ff1.ff_instance = charmm27\nyoel_ff1.user_instance = yoel\nyoel_ff1.isAuthor = False\nyoel_ff2 = UserForceField()\nyoel_ff2.ff_instance = opls_aa\nyoel_ff2.user_instance = yoel\nyoel_ff2.isAuthor = True\n\n#creation de Parameter\nvan_der_walls = ParameterTable('Van Der Waals','Parameter',1,'class')\nbond_stretching = ParameterTable('Bond Stretching','Parameter',2,'class')\npartial_charge = ParameterTable('Atomic Partial Charge','Parameter',1,'type')\nangle_bending = ParameterTable('Angle Bending','Parameter',3,'class')\nimproperTorsion = ParameterTable('Improper Torsion','Parameter',4,'class')\nurey_bradley = ParameterTable('Urey-Bradley','Parameter', 3, 'class')\nurey_bradley.columnsName = \"KB, Distance\"\natomic_multipole = ParameterTable('Atomic Multipole','Parameter', 1, 'type')\natomic_multipole.columnsName = \"Axis Types, Frame, Multipoles (M-D-Q)\"\ndipole_polarizability = ParameterTable('Dipole Polarizability','Parameter', 1, 'type')\ndipole_polarizability.columnsName = \" Alpha, Group Atom Types\"\n#9\nAtomic_Partial_Charge_SF = ParameterTable('Atomic Partial Charge Scaling Factors','SF',None,None)\n#10\nVan_der_Waals_SF = ParameterTable('Van Der Waals Scaling Factors','SF',None,None)\n#11\nAtomic_Multipole_SF = ParameterTable('Atomic Multipole Scaling Factors','SF',None,None)\n#12\ndirect_polarizability_SF = ParameterTable('Direct Polarizability Scaling Factors','SF',None,None)\n#13\nmutual_polarizability_SF = ParameterTable('Mutual Polarizability Scaling Factors','SF',None,None)\n#14\npolarizability_energy_SF = ParameterTable('Polarizability Energy Scaling Factors','SF',None,None)\n#15\nhigher_order_stretching_C = ParameterTable('Higher Order Stretching Constants','Constant',None,None)\n#16\nhigher_order_bending_C = ParameterTable('Higher Order Bending Constants','Constant',None,None)\n#jeTest = ParameterTable(nameParameter='Je Test',classOrType='Test')\n#session.add(jeTest) // le test du Enum marche !!\nsession.add(van_der_walls)\nsession.add(bond_stretching)\nsession.add(partial_charge)\nsession.add(angle_bending)\nsession.add(improperTorsion)\nsession.add(urey_bradley)\nsession.add(atomic_multipole)\nsession.add(dipole_polarizability)\nsession.add(Atomic_Partial_Charge_SF)\nsession.add(Van_der_Waals_SF)\nsession.add(Atomic_Multipole_SF)\nsession.add(direct_polarizability_SF)\nsession.add(mutual_polarizability_SF)\nsession.add(polarizability_energy_SF)\nsession.add(higher_order_stretching_C)\nsession.add(higher_order_bending_C)\n\n#Ajout dans la ManyToMany ParametersOfForceField\nParametersOfForceFieldInstance1 = ParametersOfForceField()\nParametersOfForceFieldInstance1.forcefieldInstance = session.query(ForceField).get(1)\nParametersOfForceFieldInstance1.parametersInstance = session.query(ParameterTable).get(1)\nParametersOfForceFieldInstance2 = 
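UserForceField is used above as an association object: the many-to-many link row carries its own column (isAuthor) besides the two foreign keys. The mapping itself lives in CreationTable, which is not shown here; a hypothetical sketch of its shape, with guessed table and column names:

from sqlalchemy import Column, Integer, Boolean, ForeignKey
from sqlalchemy.orm import relationship

# Hypothetical mapping, assuming the declarative Base from CreationTable.
class UserForceField(Base):
    __tablename__ = 'user_forcefield'
    idUser = Column(Integer, ForeignKey('user.idUser'), primary_key=True)
    idForceField = Column(Integer, ForeignKey('forcefield.idForceField'), primary_key=True)
    isAuthor = Column(Boolean, default=False)      # extra data on the link row
    user_instance = relationship('User', backref='forcefield_links')
    ff_instance = relationship('ForceField', backref='user_links')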
ParametersOfForceField()\nParametersOfForceFieldInstance2.forcefieldInstance = session.query(ForceField).get(1)\nParametersOfForceFieldInstance2.parametersInstance = session.query(ParameterTable).get(2)\nParametersOfForceFieldInstance3 = ParametersOfForceField()\nParametersOfForceFieldInstance3.forcefieldInstance = session.query(ForceField).get(1)\nParametersOfForceFieldInstance3.parametersInstance = session.query(ParameterTable).get(3)\nParametersOfForceFieldInstance4 = ParametersOfForceField()\nParametersOfForceFieldInstance4.forcefieldInstance = session.query(ForceField).get(1)\nParametersOfForceFieldInstance4.parametersInstance = session.query(ParameterTable).get(4)\nParametersOfForceFieldInstance5 = ParametersOfForceField()\nParametersOfForceFieldInstance5.forcefieldInstance = session.query(ForceField).get(1)\nParametersOfForceFieldInstance5.parametersInstance = session.query(ParameterTable).get(5)\nParametersOfForceFieldInstance6 = ParametersOfForceField()\nParametersOfForceFieldInstance6.forcefieldInstance = session.query(ForceField).get(2)\nParametersOfForceFieldInstance6.parametersInstance = session.query(ParameterTable).get(1)\nParametersOfForceFieldInstance7 = ParametersOfForceField()\nParametersOfForceFieldInstance7.forcefieldInstance = session.query(ForceField).get(2)\nParametersOfForceFieldInstance7.parametersInstance = session.query(ParameterTable).get(3)\nParametersOfForceFieldInstance8 = ParametersOfForceField()\nParametersOfForceFieldInstance8.forcefieldInstance = session.query(ForceField).get(3)\nParametersOfForceFieldInstance8.parametersInstance = session.query(ParameterTable).get(1)\nParametersOfForceFieldInstance9 = ParametersOfForceField()\nParametersOfForceFieldInstance9.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance9.parametersInstance = session.query(ParameterTable).get(1)\nParametersOfForceFieldInstance10 = ParametersOfForceField()\nParametersOfForceFieldInstance10.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance10.parametersInstance = session.query(ParameterTable).get(2)\nParametersOfForceFieldInstance11 = ParametersOfForceField()\nParametersOfForceFieldInstance11.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance11.parametersInstance = session.query(ParameterTable).get(4)\nParametersOfForceFieldInstance12 = ParametersOfForceField()\nParametersOfForceFieldInstance12.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance12.parametersInstance = session.query(ParameterTable).get(6)\nParametersOfForceFieldInstance13 = ParametersOfForceField()\nParametersOfForceFieldInstance13.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance13.parametersInstance = session.query(ParameterTable).get(7)\nParametersOfForceFieldInstance14 = ParametersOfForceField()\nParametersOfForceFieldInstance14.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance14.parametersInstance = session.query(ParameterTable).get(8)\nParametersOfForceFieldInstance15 = ParametersOfForceField()\nParametersOfForceFieldInstance15.forcefieldInstance = session.query(ForceField).get(1)\nParametersOfForceFieldInstance15.parametersInstance = session.query(ParameterTable).get(9)\nParametersOfForceFieldInstance16 = ParametersOfForceField()\nParametersOfForceFieldInstance16.forcefieldInstance = session.query(ForceField).get(2)\nParametersOfForceFieldInstance16.parametersInstance = 
session.query(ParameterTable).get(10)\nParametersOfForceFieldInstance17 = ParametersOfForceField()\nParametersOfForceFieldInstance17.forcefieldInstance = session.query(ForceField).get(2)\nParametersOfForceFieldInstance17.parametersInstance = session.query(ParameterTable).get(9)\nParametersOfForceFieldInstance18 = ParametersOfForceField()\nParametersOfForceFieldInstance18.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance18.parametersInstance = session.query(ParameterTable).get(10)\nParametersOfForceFieldInstance19 = ParametersOfForceField()\nParametersOfForceFieldInstance19.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance19.parametersInstance = session.query(ParameterTable).get(11)\nParametersOfForceFieldInstance20 = ParametersOfForceField()\nParametersOfForceFieldInstance20.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance20.parametersInstance = session.query(ParameterTable).get(12)\nParametersOfForceFieldInstance21 = ParametersOfForceField()\nParametersOfForceFieldInstance21.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance21.parametersInstance = session.query(ParameterTable).get(13)\nParametersOfForceFieldInstance22 = ParametersOfForceField()\nParametersOfForceFieldInstance22.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance22.parametersInstance = session.query(ParameterTable).get(14)\nParametersOfForceFieldInstance23 = ParametersOfForceField()\nParametersOfForceFieldInstance23.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance23.parametersInstance = session.query(ParameterTable).get(15)\nParametersOfForceFieldInstance24 = ParametersOfForceField()\nParametersOfForceFieldInstance24.forcefieldInstance = session.query(ForceField).get(5)\nParametersOfForceFieldInstance24.parametersInstance = session.query(ParameterTable).get(16)\n\n\n\n\n#creation de ScalingFactor\nAtomic_Partial_Charge_SF_1 = ScalingFactorTable(1,9,'1-2 Atoms',0.000)\nAtomic_Partial_Charge_SF_2 = ScalingFactorTable(1,9,'1-3 Atoms',0.000)\nAtomic_Partial_Charge_SF_3 = ScalingFactorTable(1,9,'1-4 Atoms',0.833)\nAtomic_Partial_Charge_SF_4 = ScalingFactorTable(1,9,'1-5 Atoms',1.000)\nVan_der_Waals_SF_1 = ScalingFactorTable(2,10,'1-2 Atoms',0.000)\nVan_der_Waals_SF_2 = ScalingFactorTable(2,10,'1-3 Atoms',0.000)\nVan_der_Waals_SF_3 = ScalingFactorTable(2,10,'1-4 Atoms',1.000)\nVan_der_Waals_SF_4 = ScalingFactorTable(2,10,'1-5 Atoms',1.000)\nAtomic_Partial_Charge_SF_5 = ScalingFactorTable(2,9,'1-2 Atoms',0.000)\nAtomic_Partial_Charge_SF_6 = ScalingFactorTable(2,9,'1-3 Atoms',0.000)\nAtomic_Partial_Charge_SF_7 = ScalingFactorTable(2,9,'1-4 Atoms',1.000)\nAtomic_Partial_Charge_SF_8 = ScalingFactorTable(2,9,'1-5 Atoms',1.000)\nVan_der_Waals_SF_5 = ScalingFactorTable(5,10,'1-2 Atoms',0.000)\nVan_der_Waals_SF_6 = ScalingFactorTable(5,10,'1-3 Atoms',0.000)\nVan_der_Waals_SF_7 = ScalingFactorTable(5,10,'1-4 Atoms',1.000)\nVan_der_Waals_SF_8 = ScalingFactorTable(5,10,'1-5 Atoms',1.000)\nAtomic_Multipole_SF_1 = ScalingFactorTable(5,11,'1-2 Atoms',0.000)\nAtomic_Multipole_SF_2 = ScalingFactorTable(5,11,'1-3 Atoms',0.000)\nAtomic_Multipole_SF_3 = ScalingFactorTable(5,11,'1-4 Atoms',1.000)\nAtomic_Multipole_SF_4 = ScalingFactorTable(5,11,'1-5 Atoms',1.000)\ndirect_polarizability_SF_1 = ScalingFactorTable(5,12,'1-1 Groups',0.000)\ndirect_polarizability_SF_2 = ScalingFactorTable(5,12,'1-2 Groups',1.000)\ndirect_polarizability_SF_3 = 
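The two dozen ParametersOfForceField blocks above differ only in a pair of ids, so the same links can be generated from a list of (forcefield_id, parameter_id) tuples. A sketch reusing the mapped classes:

links = [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
         (2, 1), (2, 3), (3, 1),
         (5, 1), (5, 2), (5, 4), (5, 6), (5, 7), (5, 8)]  # ...and so on

for ff_id, param_id in links:
    assoc = ParametersOfForceField()
    assoc.forcefieldInstance = session.query(ForceField).get(ff_id)
    assoc.parametersInstance = session.query(ParameterTable).get(param_id)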
ScalingFactorTable(5,12,'1-3 Groups',1.000)\ndirect_polarizability_SF_4 = ScalingFactorTable(5,12,'1-4 Groups',1.000)\nmutual_polarizability_SF_1 = ScalingFactorTable(5,13,'1-1 Groups',1.000)\nmutual_polarizability_SF_2 = ScalingFactorTable(5,13,'1-2 Groups',1.000)\nmutual_polarizability_SF_3 = ScalingFactorTable(5,13,'1-3 Groups',1.000)\nmutual_polarizability_SF_4 = ScalingFactorTable(5,13,'1-4 Groups',1.000)\npolarizability_energy_SF_1 = ScalingFactorTable(5,14,'1-2 Atoms',0.000)\npolarizability_energy_SF_2 = ScalingFactorTable(5,14,'1-3 Atoms',0.000)\npolarizability_energy_SF_3 = ScalingFactorTable(5,14,'1-4 Atoms',1.000)\npolarizability_energy_SF_4 = ScalingFactorTable(5,14,'1-5 Atoms',1.000)\nsession.add(Atomic_Partial_Charge_SF_1)\nsession.add(Atomic_Partial_Charge_SF_2)\nsession.add(Atomic_Partial_Charge_SF_3)\nsession.add(Atomic_Partial_Charge_SF_4)\nsession.add(Atomic_Partial_Charge_SF_5)\nsession.add(Atomic_Partial_Charge_SF_6)\nsession.add(Atomic_Partial_Charge_SF_7)\nsession.add(Atomic_Partial_Charge_SF_8)\nsession.add(Van_der_Waals_SF_1)\nsession.add(Van_der_Waals_SF_2)\nsession.add(Van_der_Waals_SF_3)\nsession.add(Van_der_Waals_SF_4)\nsession.add(Van_der_Waals_SF_5)\nsession.add(Van_der_Waals_SF_6)\nsession.add(Van_der_Waals_SF_7)\nsession.add(Van_der_Waals_SF_8)\nsession.add(Atomic_Multipole_SF_1)\nsession.add(Atomic_Multipole_SF_2)\nsession.add(Atomic_Multipole_SF_3)\nsession.add(Atomic_Multipole_SF_4)\nsession.add(direct_polarizability_SF_1)\nsession.add(direct_polarizability_SF_2)\nsession.add(direct_polarizability_SF_3)\nsession.add(direct_polarizability_SF_4)\nsession.add(mutual_polarizability_SF_1)\nsession.add(mutual_polarizability_SF_2)\nsession.add(mutual_polarizability_SF_3)\nsession.add(mutual_polarizability_SF_4)\nsession.add(polarizability_energy_SF_1)\nsession.add(polarizability_energy_SF_2)\nsession.add(polarizability_energy_SF_3)\nsession.add(polarizability_energy_SF_4)\n\n\n#creation de constants\nhigher_order_stretching_C_1 = ConstantTable(5,15,'Cubic',3)\nhigher_order_stretching_C_2 = ConstantTable(5,15,'Quartic',4)\nhigher_order_bending_C_1 = ConstantTable(5,16,'Cubic',-0.014)\nhigher_order_bending_C_2 = ConstantTable(5,16,'Quartic',0.000056)\nhigher_order_bending_C_3 = ConstantTable(5,16,'Pentic',-0.0000007)\nhigher_order_bending_C_4 = ConstantTable(5,16,'Sextic',0.000000022)\nsession.add(higher_order_stretching_C_1)\nsession.add(higher_order_stretching_C_2)\nsession.add(higher_order_bending_C_1)\nsession.add(higher_order_bending_C_2)\nsession.add(higher_order_bending_C_3)\nsession.add(higher_order_bending_C_4)\n\n\n#creation de classAtom\nclassAtomInstance1 = ClassAtom(14, 1,'N', 7, 14.010, 3)\nclassAtomInstance2 = ClassAtom(1, 1,'CT', 6, 12.010, 4)\nclassAtomInstance3 = ClassAtom(2, 1,'C', 6, 12.010, 3)\nclassAtomInstance4 = ClassAtom(29, 1, 'H', 1, 1.008, 1)\nclassAtomInstance5 = ClassAtom(4,1,'CM',6,12.010,3)\nclassAtomInstance6 = ClassAtom(3,1,'CA',6,12.010,3)\nclassAtomInstance7 = ClassAtom(22,1,'OH',8,16.000,2)\nclassAtomInstance8 = ClassAtom(1, 2,'H', 1, 1.008, 1)\nclassAtomInstance9 = ClassAtom(2, 2, 'HC', 1, 1.008, 1)\nclassAtomInstance10 = ClassAtom( 3, 2, 'HT', 1, 1.008, 1)\nclassAtomInstance11 = ClassAtom( 4, 2, 'C', 6, 12.011, 3)\nclassAtomInstance12 = ClassAtom( 1, 3, 'HA', 1, 1.008, 1)\nclassAtomInstance13 = ClassAtom(2, 3,'HP', 1, 1.008, 1)\nclassAtomInstance14 = ClassAtom(3 , 3,'H', 1, 1.008, 1)\nclassAtomInstance15 = ClassAtom(4, 3,'HB',1, 1.008, 1)\nclassAtomInstance16 = ClassAtom(1 , 5,'O', 8 ,15.995, 2)\nclassAtomInstance17 = 
ClassAtom(2, 5,'H',1, 1.008, 1)\nsession.add(classAtomInstance1)\nsession.add(classAtomInstance2)\nsession.add(classAtomInstance3)\nsession.add(classAtomInstance4)\nsession.add(classAtomInstance5)\nsession.add(classAtomInstance6)\nsession.add(classAtomInstance7)\nsession.add(classAtomInstance8)\nsession.add(classAtomInstance9)\nsession.add(classAtomInstance10)\nsession.add(classAtomInstance11)\nsession.add(classAtomInstance12)\nsession.add(classAtomInstance13)\nsession.add(classAtomInstance14)\nsession.add(classAtomInstance15)\nsession.add(classAtomInstance16)\nsession.add(classAtomInstance17)\n\n#creation de AtomType\natomsTypeInstance1 = AtomsType(idAtomType = 1, idClassAtom = 14,idForceField = 1, description = 'Glycine N' )\natomsTypeInstance2 = AtomsType(idAtomType = 2, idClassAtom = 1, idForceField = 1, description = 'Glycine CA' )\natomsTypeInstance3 = AtomsType(idAtomType = 3, idClassAtom = 2, idForceField = 1, description = 'Glycine C' )\natomsTypeInstance4 = AtomsType(idAtomType = 4, idClassAtom = 29,idForceField = 1, description = 'Glycine HN' )\natomsTypeInstance5 = AtomsType(idAtomType = 1, idClassAtom = 1, idForceField = 2, description = 'Amide CONHR Hydrogen')\natomsTypeInstance6 = AtomsType(idAtomType = 2, idClassAtom = 1, idForceField = 2, description = 'Amide CONH2 Hydrogen')\natomsTypeInstance7 = AtomsType(idAtomType = 3, idClassAtom = 1, idForceField = 2, description = 'HIP Imidazolium HN' )\natomsTypeInstance8 = AtomsType(idAtomType = 4, idClassAtom = 1, idForceField = 2, description = 'Hydroxyl Hydrogen' )\natomsTypeInstance9 = AtomsType(idAtomType = 5, idClassAtom = 2, idForceField = 2, description = 'LYS/ARG/N-Term H' )\natomsTypeInstance10 = AtomsType(idAtomType = 6, idClassAtom = 3, idForceField = 2, description = 'Modified TIP3P H' )\natomsTypeInstance11 = AtomsType(idAtomType = 7, idClassAtom = 4, idForceField = 2, description = 'Amide Carbon' )\natomsTypeInstance12 = AtomsType(idAtomType = 1, idClassAtom = 1, idForceField = 3, description = 'Nonpolar Hydrogen' )\natomsTypeInstance13 = AtomsType(idAtomType = 2, idClassAtom = 2, idForceField = 3, description = 'Aromatic Hydrogen' )\natomsTypeInstance14 = AtomsType(idAtomType = 3, idClassAtom = 3, idForceField = 3, description = 'Peptide Amide HN' )\natomsTypeInstance15 = AtomsType(idAtomType = 4, idClassAtom = 4, idForceField = 3, description = 'Peptide HCA' )\natomsTypeInstance16 = AtomsType(idAtomType = 1, idClassAtom = 1, idForceField = 5, description = 'AMOEBA Water O' )\natomsTypeInstance17 = AtomsType(idAtomType = 2, idClassAtom = 2, idForceField = 5, description = 'AMOEBA Water H' )\nsession.add(atomsTypeInstance1)\nsession.add(atomsTypeInstance2)\nsession.add(atomsTypeInstance3)\nsession.add(atomsTypeInstance4)\nsession.add(atomsTypeInstance5)\nsession.add(atomsTypeInstance6)\nsession.add(atomsTypeInstance7)\nsession.add(atomsTypeInstance8)\nsession.add(atomsTypeInstance9)\nsession.add(atomsTypeInstance10)\nsession.add(atomsTypeInstance11)\nsession.add(atomsTypeInstance12)\nsession.add(atomsTypeInstance13)\nsession.add(atomsTypeInstance14)\nsession.add(atomsTypeInstance15)\nsession.add(atomsTypeInstance16)\nsession.add(atomsTypeInstance17)\n\n#creation de ParamsClass\nparamsClassInstance1 = ParamsClass(idParameter=1,idForceField=1) #VDW\nparamsClassInstance2 = ParamsClass(idParameter=2,idForceField=1) #BS\nparamsClassInstance3 = ParamsClass(idParameter=1,idForceField=1) #VDW\nparamsClassInstance4 = ParamsClass(idParameter=3,idForceField=2) #Angle Bending\nparamsClassInstance5 = 
ParamsClass(idParameter=4,idForceField=1) #Angle Bending\nparamsClassInstance6 = ParamsClass(idParameter=5,idForceField=1) #Improper Torsion\nparamsClassInstance7 = ParamsClass(idParameter=1,idForceField=2) #VDW\nparamsClassInstance8 = ParamsClass(idParameter=1,idForceField=3) #VDW\nparamsClassInstance9 = ParamsClass(idParameter=2,idForceField=1) #BS\nparamsClassInstance10 = ParamsClass(idParameter=1,idForceField=5) #VDW FOR AMOEBA-WATER \nparamsClassInstance11 = ParamsClass(idParameter=1,idForceField=5) #VDW FOR AMOEBA-WATER \nparamsClassInstance12 = ParamsClass(idParameter=2,idForceField=5) #BS FOR AMOEBA-WATER \nparamsClassInstance13 = ParamsClass(idParameter=4,idForceField=5) #ANGLE BENDING FOR AMOEBA-WATER \nparamsClassInstance14 = ParamsClass(idParameter=6,idForceField=5) #Urey-Bradley FOR AMOEBA-WATER \nsession.add(paramsClassInstance1)\nsession.add(paramsClassInstance2)\nsession.add(paramsClassInstance3)\nsession.add(paramsClassInstance4)\nsession.add(paramsClassInstance5)\nsession.add(paramsClassInstance6)\nsession.add(paramsClassInstance7)\nsession.add(paramsClassInstance8)\nsession.add(paramsClassInstance9)\nsession.add(paramsClassInstance10)\nsession.add(paramsClassInstance11)\nsession.add(paramsClassInstance12)\nsession.add(paramsClassInstance13)\nsession.add(paramsClassInstance14)\n\n#,tableName = \"OneClass\") \n#,tableName = \"TwoClass\") \n#,tableName = \"OneClass\") \n#,tableName = \"OneType\") \n#,tableName = \"ThreeClass\")\n#,tableName = \"FourClass\") \n#,tableName = \"OneClass\") \n#,tableName = \"OneClass\") \n#,tableName = \"TwoClass\") \n\n#creation de oneClass //premiere ligne de VDW\n#pour le FF AMBER_FF98\nOneClassInstance1 = ValueClass(idParam=1,key = \"Radius\",value=1.908)\nOneClassInstance2 = ValueClass(idParam=1,key = \"Epsilon\",value=0.109)\nOneClassInstance3 = ValueClass(idParam=1,key = \"Reduction\",value=0.000)\nsession.add(OneClassInstance1)\nsession.add(OneClassInstance2)\nsession.add(OneClassInstance3)\n\n#creation de ClassAtom_ParamsClass (ManyToMany)\nClassAtom_ParamsClassInstance1 = ClassAtom_ParamsClass()\n#on lie le class_atom idclassAtom=1 et forcefield=1 avec l'instance ManyToMany\nClassAtom_ParamsClassInstance1.classAtomsInstance = session.query(ClassAtom).get((1,1))\n#on lie le paramClass avec idParam=1 avec l'instance ManyToMany\nClassAtom_ParamsClassInstance1.paramsClassInstance = session.query(ParamsClass).get(1)\n\n\n#creation de OneClass again //deuxieme ligne de VDW\n#pour FF AMBER_FF98\nOneClassInstance4 = ValueClass(idParam=3,key = \"Radius\",value=1.908)\nOneClassInstance5 = ValueClass(idParam=3,key = \"Epsilon\",value=0.086)\nOneClassInstance6 = ValueClass(idParam=3,key = \"Reduction\",value=0.000)\nsession.add(OneClassInstance4)\nsession.add(OneClassInstance5)\nsession.add(OneClassInstance6)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance2 = ClassAtom_ParamsClass()\nClassAtom_ParamsClassInstance2.classAtomsInstance = session.query(ClassAtom).get((2,1))\nClassAtom_ParamsClassInstance2.paramsClassInstance = session.query(ParamsClass).get(3)\n\n#creation de OneClass //premiere ligne de VDW\n#pour le FF CHARM19\nOneClassInstance7 = ValueClass(idParam=7,key = \"Radius\",value=0.800)\nOneClassInstance8 = ValueClass(idParam=7,key = \"Epsilon\",value=0.050)\nOneClassInstance9 = ValueClass(idParam=7,key = \"Reduction\",value=0.000)\nsession.add(OneClassInstance7)\nsession.add(OneClassInstance8)\nsession.add(OneClassInstance9)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance3 = 
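ValueClass stores each parameter as key/value rows (Radius, Epsilon, Reduction, ...), an entity-attribute-value layout, so reading one parameter back means folding its rows into a dict. A sketch, assuming the idParam/key/value columns used by the constructors above:

def param_values(session, id_param):
    # Collapse the key/value rows of one ParamsClass entry into a dict,
    # e.g. {'Radius': 1.908, 'Epsilon': 0.109, 'Reduction': 0.0}
    rows = session.query(ValueClass).filter_by(idParam=id_param).all()
    return {row.key: row.value for row in rows}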
ClassAtom_ParamsClass()\nClassAtom_ParamsClassInstance3.classAtomsInstance = session.query(ClassAtom).get((1,2))\nClassAtom_ParamsClassInstance3.paramsClassInstance = session.query(ParamsClass).get(7)\n\n\n#create OneClass //first row of VDW\n#for the CHARM27 FF\nOneClassInstance10 = ValueClass(idParam=8,key = \"Radius\",value=0.800)\nOneClassInstance11 = ValueClass(idParam=8,key = \"Epsilon\",value=0.050)\nOneClassInstance12 = ValueClass(idParam=8,key = \"Reduction\",value=0.000)\nsession.add(OneClassInstance10)\nsession.add(OneClassInstance11)\nsession.add(OneClassInstance12)\n\n#Add into the ManyToMany\nClassAtom_ParamsClassInstance4 = ClassAtom_ParamsClass()\nClassAtom_ParamsClassInstance4.classAtomsInstance = session.query(ClassAtom).get((1,3))\nClassAtom_ParamsClassInstance4.paramsClassInstance = session.query(ParamsClass).get(8)\n\n\n#create OneClass // first row of VDW for AMOEBA-WATER\nOneClassInstance13 = ValueClass(idParam=10,key = \"Radius\",value=3.405)\nOneClassInstance14 = ValueClass(idParam=10,key = \"Epsilon\",value=0.110)\nOneClassInstance15 = ValueClass(idParam=10,key = \"Reduction\",value=0.000)\nsession.add(OneClassInstance13)\nsession.add(OneClassInstance14)\nsession.add(OneClassInstance15)\n\n#Add into the ManyToMany\nClassAtom_ParamsClassInstance5 = ClassAtom_ParamsClass()\n#link the class_atom with idclassAtom=1 and forcefield=5 to the ManyToMany instance\nClassAtom_ParamsClassInstance5.classAtomsInstance = session.query(ClassAtom).get((1,5))\n#link the paramClass with idParam=10 to the ManyToMany instance\nClassAtom_ParamsClassInstance5.paramsClassInstance = session.query(ParamsClass).get(10)\n\n\n#create OneClass // second row of VDW for AMOEBA-WATER\nOneClassInstance16 = ValueClass(idParam=11,key = \"Radius\",value=2.655)\nOneClassInstance17 = ValueClass(idParam=11,key = \"Epsilon\",value=0.013)\nOneClassInstance18 = ValueClass(idParam=11,key = \"Reduction\",value=0.910)\nsession.add(OneClassInstance16)\nsession.add(OneClassInstance17)\nsession.add(OneClassInstance18)\n\n#Add into the ManyToMany (renamed from a reused Instance5 to keep names unique)\nClassAtom_ParamsClassInstance6 = ClassAtom_ParamsClass()\n#link the class_atom with idclassAtom=2 and forcefield=5 to the ManyToMany instance\nClassAtom_ParamsClassInstance6.classAtomsInstance = session.query(ClassAtom).get((2,5))\n#link the paramClass with idParam=11 to the ManyToMany instance\nClassAtom_ParamsClassInstance6.paramsClassInstance = session.query(ParamsClass).get(11)\n\n\n#create TwoClass //second row of BS\n#ForceField AMBER_FF98\ntwoClassInstance1 = ValueClass(idParam=2,key = \"KS\",value=317.000)\ntwoClassInstance2 = ValueClass(idParam=2,key = \"Length\",value=1.5220)\nsession.add(twoClassInstance1)\nsession.add(twoClassInstance2)\n\n#Add into the ManyToMany\nClassAtom_ParamsClassInstance7 = ClassAtom_ParamsClass(description=\"1-2\")\nClassAtom_ParamsClassInstance7.classAtomsInstance = session.query(ClassAtom).get((1,1))\nClassAtom_ParamsClassInstance7.paramsClassInstance = session.query(ParamsClass).get(2)\nClassAtom_ParamsClassInstance8 = ClassAtom_ParamsClass(description=\"1-2\")\nClassAtom_ParamsClassInstance8.classAtomsInstance = session.query(ClassAtom).get((2,1))\nClassAtom_ParamsClassInstance8.paramsClassInstance = session.query(ParamsClass).get(2)\n\n#create TwoClass //first row of BS\n#ForceField AMBER_FF98\ntwoClassInstance3 = ValueClass(idParam=9,key = \"KS\",value=317.000)\ntwoClassInstance4 = ValueClass(idParam=9,key = 
\"Length\",value=1.5220)\nsession.add(twoClassInstance3)\nsession.add(twoClassInstance4)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance7 = ClassAtom_ParamsClass(description=\"1-1\")\nClassAtom_ParamsClassInstance7.classAtomsInstance = session.query(ClassAtom).get((1,1))\nClassAtom_ParamsClassInstance7.paramsClassInstance = session.query(ParamsClass).get(9)\n\n\n#creation de twoClass //premiere ligne de BS\n#ForceField AMOEBA-WATER\ntwoClassInstance5 = ValueClass(idParam=12,key = \"KS\",value=529.600)\ntwoClassInstance6 = ValueClass(idParam=12,key = \"Length\",value=0.9572)\nsession.add(twoClassInstance5)\nsession.add(twoClassInstance6)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance8 = ClassAtom_ParamsClass(description=\"1-2\")\nClassAtom_ParamsClassInstance8.classAtomsInstance = session.query(ClassAtom).get((1,5))\nClassAtom_ParamsClassInstance8.paramsClassInstance = session.query(ParamsClass).get(12)\nClassAtom_ParamsClassInstance9 = ClassAtom_ParamsClass(description=\"1-2\")\nClassAtom_ParamsClassInstance9.classAtomsInstance = session.query(ClassAtom).get((2,5))\nClassAtom_ParamsClassInstance9.paramsClassInstance = session.query(ParamsClass).get(12)\n\n\n#creation de ThreeClass //Angle Bending du AMBER_FF98\n#1ere ligne\nthreeClassInstance1 = ValueClass(idParam=5,key='KB',value=40.000)\nthreeClassInstance2 = ValueClass(idParam=5,key='Value1(R-X-R)',value=109.500)\nsession.add(threeClassInstance1)\nsession.add(threeClassInstance2)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance8 = ClassAtom_ParamsClass(description=\"1-1-1\")\nClassAtom_ParamsClassInstance8.classAtomsInstance = session.query(ClassAtom).get((1,1))\nClassAtom_ParamsClassInstance8.paramsClassInstance = session.query(ParamsClass).get(5)\n\n\n#creation de ThreeClass //Angle Bending du AMOEBA-WATER\n#1ere ligne\nthreeClassInstance3 = ValueClass(idParam=13,key='KB',value=34.050)\nthreeClassInstance4 = ValueClass(idParam=13,key='Value1(R-X-R)',value=108.500)\nsession.add(threeClassInstance3)\nsession.add(threeClassInstance4)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance9 = ClassAtom_ParamsClass(description=\"2-1-2\")\nClassAtom_ParamsClassInstance9.classAtomsInstance = session.query(ClassAtom).get((1,5))\nClassAtom_ParamsClassInstance9.paramsClassInstance = session.query(ParamsClass).get(13)\nClassAtom_ParamsClassInstance10 = ClassAtom_ParamsClass(description=\"2-1-2\")\nClassAtom_ParamsClassInstance10.classAtomsInstance = session.query(ClassAtom).get((2,5))\nClassAtom_ParamsClassInstance10.paramsClassInstance = session.query(ParamsClass).get(13)\n\n\n#creation de ThreeClass //Urey-Bradley du AMOEBA-WATER\n#1ere ligne\nthreeClassInstance5 = ValueClass(idParam=14,key='KB',value=38.250)\nthreeClassInstance6 = ValueClass(idParam=14,key='Distance',value=1.5537)\nsession.add(threeClassInstance5)\nsession.add(threeClassInstance6)\n\n#Ajout dans la ManyToMany\nClassAtom_ParamsClassInstance11 = ClassAtom_ParamsClass(description=\"2-1-2\")\nClassAtom_ParamsClassInstance11.classAtomsInstance = session.query(ClassAtom).get((1,5))\nClassAtom_ParamsClassInstance11.paramsClassInstance = session.query(ParamsClass).get(14)\nClassAtom_ParamsClassInstance12 = ClassAtom_ParamsClass(description=\"2-1-2\")\nClassAtom_ParamsClassInstance12.classAtomsInstance = session.query(ClassAtom).get((2,5))\nClassAtom_ParamsClassInstance12.paramsClassInstance = session.query(ParamsClass).get(14)\n\n\n#creation de FourClass //Improper Torsion du Amber_ff98\n#premiere ligne\nfourClassInstance1 = 
ValueClass(idParam=6,key='KTI',value=1.100)\nfourClassInstance2 = ValueClass(idParam=6,key='Value1',value=180.0)\nfourClassInstance3 = ValueClass(idParam=6,key='Value2',value=2)\nsession.add(fourClassInstance1)\nsession.add(fourClassInstance2)\nsession.add(fourClassInstance3)\n\n\n#Add into the ManyToMany\nClassAtom_ParamsClassInstance17 = ClassAtom_ParamsClass(description=\"3-3-2-22\")\nClassAtom_ParamsClassInstance17.classAtomsInstance = session.query(ClassAtom).get((3,1))\nClassAtom_ParamsClassInstance17.paramsClassInstance = session.query(ParamsClass).get(6)\nClassAtom_ParamsClassInstance18 = ClassAtom_ParamsClass(description=\"3-3-2-22\")\nClassAtom_ParamsClassInstance18.classAtomsInstance = session.query(ClassAtom).get((2,1))\nClassAtom_ParamsClassInstance18.paramsClassInstance = session.query(ParamsClass).get(6)\nClassAtom_ParamsClassInstance19 = ClassAtom_ParamsClass(description=\"3-3-2-22\")\nClassAtom_ParamsClassInstance19.classAtomsInstance = session.query(ClassAtom).get((22,1))\nClassAtom_ParamsClassInstance19.paramsClassInstance = session.query(ParamsClass).get(6)\n\n#create the ParamsType entries\nparamTypeInstance1 = ParamsType(idParameter=3,idForceField=1)\nparamTypeInstance2 = ParamsType(idParameter=3,idForceField=2)\nparamTypeInstance3 = ParamsType(idParameter=3,idForceField=1)\nparamTypeInstance4 = ParamsType(idParameter=3,idForceField=2)\nparamTypeInstance5 = ParamsType(idParameter=7,idForceField=5)\nparamTypeInstance6 = ParamsType(idParameter=7,idForceField=5)\nparamTypeInstance7 = ParamsType(idParameter=8,idForceField=5)\nparamTypeInstance8 = ParamsType(idParameter=8,idForceField=5)\nsession.add(paramTypeInstance1)\nsession.add(paramTypeInstance2)\nsession.add(paramTypeInstance3)\nsession.add(paramTypeInstance4)\nsession.add(paramTypeInstance5)\nsession.add(paramTypeInstance6)\nsession.add(paramTypeInstance7)\nsession.add(paramTypeInstance8)\n#,tableName = \"OneType\"\n#,tableName = \"OneType\"\n#,tableName = \"OneType\"\n#,tableName = \"OneType\"\n\n#create OneType //Partial charge of Amber_ff98\n#first row\noneTypeInstance1 = ValueType('Partial Chg', 1, -0.416)\nsession.add(oneTypeInstance1)\n#second row\noneTypeInstance3 = ValueType('Partial Chg', 3, -0.025)\nsession.add(oneTypeInstance3)\n\n#create OneType //Partial charge of Charm19\n#first row\noneTypeInstance2 = ValueType('Partial Chg', 2, 0.250)\nsession.add(oneTypeInstance2)\n#second row\noneTypeInstance4 = ValueType('Partial Chg', 4, 0.300)\nsession.add(oneTypeInstance4)\n\n\n#create OneType //Atomic Multipole of AMOEBA-WATER\n#first row\noneTypeInstance5 = ValueType('Axis Types', 5, 22)\noneTypeInstance6 = ValueType('Frame', 5, -0.51966)\noneTypeInstance7 = ValueType('Multipoles (M-D-Q)', 5, 0.14279)\nsession.add(oneTypeInstance5)\nsession.add(oneTypeInstance6)\nsession.add(oneTypeInstance7)\n#second row\noneTypeInstance8 = ValueType('Axis Types', 6, 12)\noneTypeInstance9 = ValueType('Frame', 6, 0.25983)\noneTypeInstance10 = ValueType('Multipoles (M-D-Q)', 6, -0.05818)\nsession.add(oneTypeInstance8)\nsession.add(oneTypeInstance9)\nsession.add(oneTypeInstance10)\n\n#create OneType //Dipole Polarizability of AMOEBA-WATER\n#first row\noneTypeInstance11 = ValueType('Alpha', 7, 0.837)\noneTypeInstance12 = ValueType('Group Atom Types', 7, 2)\nsession.add(oneTypeInstance11)\nsession.add(oneTypeInstance12)\n#second row\noneTypeInstance13 = ValueType('Alpha', 8, 0.496)\noneTypeInstance14 = ValueType('Group Atom Types', 8, 
1)\nsession.add(oneTypeInstance13)\nsession.add(oneTypeInstance14)\n\n\n#link via the many to many AtomsType_ParamType\n#OneTypeInstance1 is linked with ParamsTypeInstance1\n#OneTypeInstance1 belongs to the ForceField AMBER_FF98\n#and is linked with idAtomType=1 of that ForceField, hence\n\n#paramTypeInstance1 is linked:\n#idForceField = 1 =>[AMBER_FF98] \n#idAtomType = 1 => [Type = 1]\n#paramTypeInstance1.atomsTypeParents.append(session.query(AtomsType).get((1,1)))\nAtomsType_ParamTypeInstance1 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance1.atomsTypeInstance = session.query(AtomsType).get((1,1))\nAtomsType_ParamTypeInstance1.paramsTypeInstance = paramTypeInstance1 #session.query(ParamsType).get(1)\n#paramTypeInstance3 is linked:\n#idForceField = 1 =>[AMBER_FF98] \n#idAtomType = 2 => [Type = 2]\n#paramTypeInstance3.atomsTypeParents.append(session.query(AtomsType).get((2,1)))\nAtomsType_ParamTypeInstance2 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance2.atomsTypeInstance = session.query(AtomsType).get((2,1))\nAtomsType_ParamTypeInstance2.paramsTypeInstance = paramTypeInstance3 #session.query(ParamsType).get(3)\n#paramTypeInstance2 is linked:\n#idForceField = 2 =>[CHARM19] \n#idAtomType = 1 => [Type = 1]\n#paramTypeInstance2.atomsTypeParents.append(session.query(AtomsType).get((1,2)))\nAtomsType_ParamTypeInstance3 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance3.atomsTypeInstance = session.query(AtomsType).get((1,2))\nAtomsType_ParamTypeInstance3.paramsTypeInstance = paramTypeInstance2 #session.query(ParamsType).get(2)\n\n#paramTypeInstance4 is linked:\n#idForceField = 2 =>[CHARM19] \n#idAtomType = 2 => [Type = 2]\n#paramTypeInstance4.atomsTypeParents.append(session.query(AtomsType).get((2,2)))\nAtomsType_ParamTypeInstance4 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance4.atomsTypeInstance = session.query(AtomsType).get((2,2))\nAtomsType_ParamTypeInstance4.paramsTypeInstance = paramTypeInstance4 #session.query(ParamsType).get(4)\n\n\n#paramTypeInstance5 is linked:\n#idForceField = 5 =>[AMOEBA-WATER] \n#idAtomType = 1 and 2 => [Type = 1, 2]\n#paramTypeInstance4.atomsTypeParents.append(session.query(AtomsType).get((2,2)))\nAtomsType_ParamTypeInstance5 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance5.atomsTypeInstance = session.query(AtomsType).get((1,5))\nAtomsType_ParamTypeInstance5.paramsTypeInstance = paramTypeInstance5 #session.query(ParamsType).get(5)\nAtomsType_ParamTypeInstance6 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance6.atomsTypeInstance = session.query(AtomsType).get((2,5))\nAtomsType_ParamTypeInstance6.paramsTypeInstance = paramTypeInstance6 #session.query(ParamsType).get(6)\n\n\n#paramTypeInstance7 and 8 are linked:\n#idForceField = 5 =>[AMOEBA-WATER] \n#idAtomType = 1 and 2 => [Type = 1, 2]\n#paramTypeInstance4.atomsTypeParents.append(session.query(AtomsType).get((2,2)))\nAtomsType_ParamTypeInstance7 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance7.atomsTypeInstance = session.query(AtomsType).get((1,5))\nAtomsType_ParamTypeInstance7.paramsTypeInstance = paramTypeInstance7 #session.query(ParamsType).get(7)\nAtomsType_ParamTypeInstance8 = AtomsType_ParamsType()\nAtomsType_ParamTypeInstance8.atomsTypeInstance = session.query(AtomsType).get((2,5))\nAtomsType_ParamTypeInstance8.paramsTypeInstance = paramTypeInstance8 #session.query(ParamsType).get(8)\n\n\nsession.commit()\n\n\nprint(bcolors.FAIL + '             ForceFieldTable' + bcolors.ENDC+'\\n')\nprint('idForceField      nameForceField \\n')\nfor instance in session.query(ForceField):\n    print('{:^12}      
{}'.format(instance.idForceField,instance.nameForceField))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             ParameterTable' + bcolors.ENDC+'\\n')\nprint('{:^11} {:<40} {:^13} {} \\n'.format('idParameter','nameParameter','numberOfAtoms','classOrType'))\nfor instance in session.query(ParameterTable):\n    print('{:^11} {:<40} {:^13} {}'.format(instance.idParameter,instance.nameParameter,str(instance.numberOfAtoms),str(instance.classOrType)))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             ScalingFactorTable ' + bcolors.ENDC+'\\n')\nprint('{:<40} {:^11} {:<12} {:<12} {}\\n'.format('nameScalingFactor','idForceField','idParameter','Description','Value'))\nfor instance in session.query(ScalingFactorTable):\n    print('{:<40} {:^11} {:<12} {:<12} {}'.format(session.query(ParameterTable).get(instance.idParameter).nameParameter,instance.idForceField,instance.idParameter,instance.key,instance.value)) \n    \nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             ConstantTable ' + bcolors.ENDC+'\\n')\nprint('{:<40} {:^10} {:^20} {:<10} {:<13}\\n'.format('nameConstant','idForceField','idParameter','key','value'))\nfor instance in session.query(ConstantTable):\n    print('{:<40} {:^10} {:^20} {:<13} {:<13}'.format(session.query(ParameterTable).get(instance.idParameter).nameParameter,instance.idForceField,instance.idParameter, instance.key, instance.value))\n    \nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             classAtomTable ' + bcolors.ENDC+'\\n')\nprint('idClassAtom  idForceField  symbol  atomicNumber  atomicWeight  valence\\n')\nfor instance in session.query(ClassAtom):\n    print('{:^12}{:^14} {:<8}{:^13} {:<9}{:^9}'.format(instance.idClassAtom,instance.idForceField,instance.symbol,instance.atomicNumber,instance.atomicWeight,instance.valence))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             AtomTypeTable ' + bcolors.ENDC+'\\n')\nprint('{0} {1} {2} {3}\\n'.format('idClassAtom','idForceField','idAtomType','Description'))\nfor instance in session.query(AtomsType):\n    print('{:^11}{:^14}{:^11}{} '.format(instance.idClassAtom,instance.idForceField,instance.idAtomType,instance.description))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             ParamsClassTable ' + bcolors.ENDC+'\\n')\nprint('idParam  idParameter  idForceField  numberOfAtoms  nameParameter\\n')\nfor instance in session.query(ParamsClass):\n    print('{:^7} {:^13} {:^12} {:<10} {}'.format(instance.idParam,instance.idParameter,instance.idForceField, session.query(ParameterTable).get(instance.idParameter).numberOfAtoms,session.query(ParameterTable).get(instance.idParameter).nameParameter))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             ClassAtom_ParamsClass Table ' + bcolors.ENDC+'\\n')\nprint('{} {} {} {}\\n'.format('idClassAtom','idForceField','idParam','description'))\nfor instance in session.query(ClassAtom_ParamsClass):\n    print('{:^11} {:^11} {:^7} {}'.format(instance.idClassAtom,instance.idForceField,instance.idParam,instance.description))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             OneClassTable ' + bcolors.ENDC+'\\n')\nprint('idParam   Key         Value\\n')\nfor instance in session.query(ValueClass):\n    idParameter = session.query(ParamsClass).get(instance.idParam).idParameter\n    if(session.query(ParameterTable).get(idParameter).numberOfAtoms == 1):\n        print('{:^8}  
{:<12}{}'.format(instance.idParam,instance.key,instance.value))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +' TwoClassTable ' + bcolors.ENDC+'\\n')\nprint('idParam Key Value\\n')\nfor instance in session.query(ValueClass):\n idParameter = session.query(ParamsClass).get(instance.idParam).idParameter\n if(session.query(ParameterTable).get(idParameter).numberOfAtoms == 2):\n print('{:^8} {:<12}{}'.format(instance.idParam,instance.key,instance.value))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +' ThreeClassTable ' + bcolors.ENDC+'\\n')\nprint('idParam Key Value\\n')\nfor instance in session.query(ValueClass):\n idParameter = session.query(ParamsClass).get(instance.idParam).idParameter\n if(session.query(ParameterTable).get(idParameter).numberOfAtoms == 3):\n print('{:^8} {:<12}{}'.format(instance.idParam,instance.key,instance.value))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +' FourClassTable ' + bcolors.ENDC+'\\n')\nprint('idParam Key Value\\n')\nfor instance in session.query(ValueClass):\n idParameter = session.query(ParamsClass).get(instance.idParam).idParameter\n if(session.query(ParameterTable).get(idParameter).numberOfAtoms == 4):\n print('{:^8} {:<12}{}'.format(instance.idParam,instance.key,instance.value))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +' ParamType Table' + bcolors.ENDC+'\\n')\nprint('idParam idParameter idForceField tableName nameParameter\\n')\nfor instance in session.query(ParamsType):\n print('{:^7} {:^13} {:^13} {:<10} {}'.format(instance.idParam,instance.idParameter,instance.idForceField,session.query(ParameterTable).get(instance.idParameter).numberOfAtoms,session.query(ParameterTable).get(instance.idParameter).nameParameter))\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +' OneTypeTable' + bcolors.ENDC+'\\n')\nprint('idParam Key Value\\n')\nfor instance in session.query(ValueType):\n idParameter = session.query(ParamsType).get(instance.idParam).idParameter\n if(session.query(ParameterTable).get(idParameter).numberOfAtoms == 1):\n print('{:^8} {:<12}{}'.format(instance.idParam,instance.key,instance.value))\n\n#Affichage de tous les SF qui appartiennent au AMBER_FF98\n#utilisation du one to many entre ScalingFactorTable et Forcefield \nx = session.query(ForceField).get(1).childrenScalingFactor\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +' ScalingFactor belong to AMBER_FF98' + bcolors.ENDC+'\\n')\nprint('idForceField idParameter Description Value\\n')\nfor instance in x:\n print('{:^13} {:^11} {:<12} {}'.format(instance.idForceField,instance.idParameter,instance.key,instance.value))\n\n#Apres les changement cette requette ne peut plus se faire\n#Affichage de tous les SF qui sont lier a AtomicPartialCharge\n#utilisation du one to many entre ScalingFactor et ParameterTable\n#x = session.query(ParameterTable).get(3).childrenScalingFactor\n#print('----------------------------------------------------')\n#print(bcolors.FAIL +' ScalingFactor of AtomicPartialCharge\\'s Parameter' + bcolors.ENDC+'\\n')\n#print('idForceField idParameter Description Value\\n')\n#for instance in x:\n# print('{:^13} {:^11} {:<12} {}'.format(instance.idForceField,instance.idParameter,instance.key,instance.value))\n\n#Affichage de tous le AtomType qui sont lier au ClassAtom 1 du ForceField 2\n#utilisation du one to many entre AtomType et ClassAtom 
\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Atoms Type of CHARM19 belonging to ClassAtom number 1' + bcolors.ENDC+'\\n')\nprint('{0} {1} {2}\\n'.format('ClassAtom','ForceField','Description'))\nx = session.query(ClassAtom).get((1,2)).atoms_typeChildren\nfor instance in x:\n    print('{0:^9}{1:^12}{2} '.format(instance.idClassAtom,instance.idForceField,instance.description))\n\n\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many To Many between AtomsType/ParamsType ' + bcolors.ENDC+'\\n')\nprint(bcolors.FAIL +'             ParamType belong to ForceField Amber_ff98 ' + bcolors.ENDC+'\\n')\n#Display the many to many table between AtomType and ParamType\nx = session.query(ParamsType).filter(ParamsType.atomsTypeParents.any(idForceField=1)).all()\nfor instance in x:\n    print(instance.idParam,instance.idParameter,session.query(ParameterTable).get(instance.idParameter).nameParameter)\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many To Many between AtomsType/ParamsType' + bcolors.ENDC+'\\n')\nprint(bcolors.FAIL +'             ParamType bound to idAtomType = 1' + bcolors.ENDC+'\\n')\n#Display the many to many table between AtomType and ParamType\nx = session.query(ParamsType).filter(ParamsType.atomsTypeParents.any(idAtomType=1)).all()\nfor instance in x:\n    print(instance.idParam,instance.idParameter,session.query(ParameterTable).get(instance.idParameter).nameParameter)\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many To Many between AtomsType/ParamsType' + bcolors.ENDC+'\\n')\nprint(bcolors.FAIL +'ParamType belong to AMBER_FF98 and bound to idAtomType = 1' + bcolors.ENDC+'\\n')\n#Display the many to many table between AtomType and ParamType\nx = session.query(ParamsType).filter(ParamsType.atomsTypeParents.any(idForceField=1,idAtomType=1)).all()\nfor instance in x:\n    print(instance.idParam,instance.idParameter,session.query(ParameterTable).get(instance.idParameter).nameParameter)\n\n\n\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many to Many between ClassAtom/ParamsClass' + bcolors.ENDC+'\\n')\nprint(bcolors.FAIL +'             idParam bound to idClassAtom=1 && idForceField=1' + bcolors.ENDC+'\\n')\nx = session.query(ClassAtom).get((1,1)).paramsClassChildren\nfor instance in x:\n    print('{} '.format(instance.idParam),end=\"\")\nprint('\\n')\n\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many to Many between ClassAtom/ParamsClass' + bcolors.ENDC+'\\n')\nprint(bcolors.FAIL +'             idClassAtom belong to idParam 6' + bcolors.ENDC+'\\n')\nx = session.query(ParamsClass).get(6).classAtomsParents\nfor instance in x:\n    print('{} '.format(instance.idClassAtom),end=\"\")\nprint('\\n')\n\n\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             idParam belong to Forcefield amberFF98'+ bcolors.ENDC+'\\n')\nx = session.query(ForceField).get(1).childrenParamClass\nfor instance1 in x:\n    paramInstance = session.query(ParamsClass).get(instance1.idParam)\n    parameterInstance = session.query(ParameterTable).get(paramInstance.idParameter)\n    #check that the idParam really is of type class\n    if(parameterInstance.classOrType == \"class\"):\n        print('idParam: {}'.format(instance1.idParam))\n        xx = session.query(ParameterTable).get(instance1.idParameter).nameParameter\n        xx = xx + \":\"\n        print('{:<20}'.format(xx),end=\"\")\n        valueParams = 
session.query(ValueClass).filter(ValueClass.idParam == paramInstance.idParam)\n        for instance3 in valueParams:\n            print('{} = {}, '.format(instance3.key,instance3.value),end=\"\")\n        y = session.query(ParamsClass).get(instance1.idParam).classAtomsParents\n        description = session.query(ClassAtom_ParamsClass).filter(ClassAtom_ParamsClass.idParam == instance1.idParam).first().description\n        print('')\n        for instance2 in y:\n            x = session.query(ClassAtom).get((instance2.idClassAtom,1))\n            print('ClassAtoms: {:<3} {:<3} {} {} {}'.format(x.idClassAtom,x.symbol,x.atomicNumber,x.atomicWeight,x.valence)) \n        if(description != None):\n            print('Description: ' + description)\n        print('')\n\n#to display all the parameters of a forcefield, here \n#forcefield number 1\nprint('\\n----------------------------------------------------')\nprint(bcolors.FAIL +'Display all Parameter(without SF/C) of AMBER_FF98'+ bcolors.ENDC+'\\n')\nlistOfParams = session.query(ForceField).get(1).childrenParamClass\nlistOfParameter = [x.idParameter for x in listOfParams]\nlistOfParameter = list(set(listOfParameter))\nfor number in listOfParameter:\n    print (session.query(ParameterTable).get(number).nameParameter)\n\nprint('')\n\n#To display the many to many between forcefield and parameterTable\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many To Many between ForceField/ParameterTable' + bcolors.ENDC+'\\n')\n#print(bcolors.FAIL +'Parameter belong to AMOEBA-WATER' + bcolors.ENDC+'\\n')\n#Display the many to many table between AtomType and ParamType\n#x = session.query(ParameterTable).filter(ParameterTable.associatedForcefield.any(idForceField=5)).all()\nprint('{:<14} {}\\n'.format('ForceField','Parameter'))\nfor instance in session.query(ParametersOfForceField):\n    print('{:<14} {}'.format(session.query(ForceField).get(instance.idForceField).nameForceField,session.query(ParameterTable).get(instance.idParameter).nameParameter))\n\n\n#To display the many to many between forcefield and parameterTable\nprint('----------------------------------------------------')\nprint(bcolors.FAIL +'             Many To Many between ForceField/ParameterTable' + bcolors.ENDC+'\\n')\nprint(bcolors.FAIL +'Parameter belong to AMOEBA-WATER' + bcolors.ENDC+'\\n')\nx = session.query(ForceField).get(5).childrenParameter\n# x is a list of parametersOfForceField objects\nprint('{:<14} {}\\n'.format('ForceField','Parameter'))\nfor instance in x:\n    print('{:<14} {}'.format(session.query(ForceField).get(instance.idForceField).nameForceField,session.query(ParameterTable).get(instance.idParameter).nameParameter))\n\n#to display the SFs of AMOEBA-WATER\nprint('\\n----------------------------------------------------')\nprint(bcolors.FAIL +'scaling Factor belong to AMOEBA-WATER' + bcolors.ENDC+'\\n')\nprint('{:<14} {}\\n'.format('ForceField','ScalingFactor'))\n# x is a list of objects from the ParameterTable table\nx = session.query(ForceField).get(5).childrenParameter\nfor instance in x:\n    parameterInstance = session.query(ParameterTable).get(instance.idParameter)\n    if(parameterInstance.parameterType == 'SF'):\n        print('{:<14} {}'.format(session.query(ForceField).get(5).nameForceField,parameterInstance.nameParameter))\n\n\n#to display the constants of AMOEBA-WATER\nprint('\\n----------------------------------------------------')\nprint(bcolors.FAIL +'constant belong to AMOEBA-WATER' + bcolors.ENDC+'\\n')\nprint('{:<14} {}\\n'.format('ForceField','constantName'))\n# x is a list of objects from the ParameterTable table\nx = 
session.query(ForceField).get(5).childrenParameter\nfor instance in x:\n    parameterInstance = session.query(ParameterTable).get(instance.idParameter)\n    if(parameterInstance.parameterType == 'Constant'):\n        print('{:<14} {}'.format(session.query(ForceField).get(5).nameForceField,parameterInstance.nameParameter))\n\n#to display the Users and their FFs\nprint('\\n----------------------------------------------------')\nprint(bcolors.FAIL +'\\tUser and their FF' + bcolors.ENDC+'\\n')\nusers = session.query(User).all()\nfor user_instance in users:\n    print(user_instance.firstname,user_instance.lastname,end=\"\")\n    print(' --> ',end=\"\")\n    for ff_list in user_instance.forcefield_list:\n        print(session.query(ForceField).get(ff_list.idForceField).nameForceField,end=\"\")\n        print(\"(\" + str(ff_list.isAuthor) +\")\", end=\"\")\n        print(\" \",end=\"\")\n    print(' ')\nprint(\" \")\n\nuser = session.query(User).get(1)\nsession.query(User).filter(User.user_id==1).update({'username':'danielmo'})\n\nusers = session.query(User).all()\nfor user_instance in users:\n    print(user_instance.username)\n\n\n\n\n#to display all the values of a table \n#for a given parameter and forcefield\n#here I took AMBER_FF98 and VDW\n\n#http://stackoverflow.com/questions/23654652/how-to-retrieve-data-from-tables-with-relationships-many-to-many-sqlalchemy\n","sub_path":"InsertTable.py","file_name":"InsertTable.py","file_ext":"py","file_size_in_byte":54973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"153909616","text":"import matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom adjustText import adjust_text\nimport pandas as pd\nimport Globals\n\n\ndef plot(csv_paths):\n    fig = plt.figure(figsize=(9, 9))\n    brewer = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a','#919114', '#b15928']\n\n    # Read dataframes\n    ar_df = pd.read_csv(csv_paths[0], index_col=0)\n\n    stab_dfs = []\n    for path in csv_paths[1:]:\n        stab_dfs.append(pd.read_csv(path, index_col=0))\n\n    stab_df = pd.concat(stab_dfs)\n    stab_df = stab_df.groupby(level=0).mean()\n    texts = []\n    for i, tech in enumerate(stab_df.index):\n\n        ar_mean = (ar_df.loc[[tech],:]).mean(axis=1)\n        st_mean = (stab_df.loc[[tech],:]).mean(axis=1)\n        for dataset in stab_df.columns:\n            ar_ob = ar_df[dataset][tech]\n            st_ob = stab_df[dataset][tech]\n            y_line = [ar_mean, ar_ob]\n            x_line = [st_mean, st_ob]\n            plt.plot(x_line, y_line, c=brewer[i], alpha=0.4, zorder=1)\n\n        plt.scatter(st_mean, ar_mean, s=80, c=brewer[i], label=tech, linewidth=2, zorder=10)\n        t = plt.text(st_mean, ar_mean, Globals.acronyms[tech], ha='center', va='center', zorder=11,\n                     fontsize=14, fontweight='bold')\n        texts.append(t)\n\n    # adjust_text(texts)\n    adjust_text(texts, force_points=1.0, force_text=1.0, expand_points=(1, 1), expand_text=(1, 1))\n    plt.savefig(Globals.plot_subdir + 'star.png')\n\n\n\n    # Initialize counter\n    # columns = [str(i+1) for i in range(len(dfs[0].index))]\n    # counter = pd.DataFrame(0, index=dfs[0].index, columns=columns)\n    #\n    # # Count\n    # for df in dfs:\n    #     for column in df.columns:\n    #         sorted = df.sort_values(column, ascending=False)\n    #         for position, tech in enumerate(sorted.index):\n    #             counter[str(position + 1)][tech] += 1\n    #\n    # counter.index = [Globals.acronyms[i] for i in counter.index]\n    # counter.to_csv(Globals.plot_subdir + 
'table.csv')","sub_path":"Figures/Code/StarGlyph.py","file_name":"StarGlyph.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"513738192","text":"import json\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponsePermanentRedirect\n\nfrom devhub import models\n\n\ndef dumps(value):\n return json.dumps(value, default=lambda o: None)\n\n\nclass RequestMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n\n self.process_request_response(request, response)\n\n return response\n\n def process_request_response(self, request, response):\n\n if request.path.endswith('/favicon.ico'):\n return response\n\n if isinstance(\n response, HttpResponsePermanentRedirect) and settings.APPEND_SLASH:\n new_location = response.get('location', None)\n content_length = response.get('content-length', None)\n\n if new_location and content_length is '0':\n new_parsed = urlparse(new_location)\n\n old = (('http', 'https')[request.is_secure()], request.get_host(), '{0}/'.format(request.path),\n request.META['QUERY_STRING'])\n new = (\n new_parsed.scheme,\n new_parsed.netloc,\n new_parsed.path,\n new_parsed.query)\n\n if old == new:\n # dont log - it's just adding a /\n return response\n try:\n self.save(request, response)\n except Exception as e:\n print(\"Error saving request log\", e)\n\n return response\n\n def save(self, request, response):\n if hasattr(request, 'user'):\n user = request.user if isinstance(request.user, User) else None\n else:\n user = None\n\n meta = request.META.copy()\n meta.pop('QUERY_STRING', None)\n meta.pop('HTTP_COOKIE', None)\n remote_addr_fwd = None\n\n if 'HTTP_X_FORWARDED_FOR' in meta:\n remote_addr_fwd = meta['HTTP_X_FORWARDED_FOR'].split(\",\")[\n 0].strip()\n if remote_addr_fwd == meta['HTTP_X_FORWARDED_FOR']:\n meta.pop('HTTP_X_FORWARDED_FOR')\n\n uri = request.build_absolute_uri()\n # if request.POST and uri != '/login/':\n # post = dumps(request.POST)\n\n if request.path.startswith('/admin'):\n return\n\n models.Request(\n host=request.get_host(),\n path=request.path,\n method=request.method,\n uri=request.build_absolute_uri(),\n status_code=response.status_code,\n user_agent=meta.pop('HTTP_USER_AGENT', None),\n remote_addr=meta.pop('REMOTE_ADDR', None),\n remote_addr_fwd=remote_addr_fwd,\n meta=None if not meta else dumps(meta),\n cookies=None if not request.COOKIES else dumps(request.COOKIES),\n get=None if not request.GET else dumps(request.GET),\n # post=None if (not request.POST or getattr(request, 'hide_post') == True) else dumps(request.POST),\n # raw_post=None if getattr(request, 'hide_post') else request.raw_post_data,\n is_secure=request.is_secure(),\n is_ajax=request.is_ajax(),\n user=user\n ).save()\n","sub_path":"src/devhub/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522010794","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
build/bdist.linux-x86_64/egg/pyams_form/group.py\n# Compiled at: 2020-02-23 12:53:51\n# Size of source mod 2**32: 3835 bytes\n\"\"\"PyAMS_form.group module\n\nThis module handles groups of widgets within forms.\n\"\"\"\nfrom zope.interface import implementer\nfrom pyams_form.events import DataExtractedEvent\nfrom pyams_form.form import BaseForm, get_form_weight\nfrom pyams_form.interfaces.form import IGroup, IGroupForm, IGroupManager\nfrom pyams_form.interfaces.widget import IWidgets\n__docformat__ = 'restructuredtext'\n\n@implementer(IGroupManager)\nclass GroupManager:\n    __doc__ = 'Base groups manager mixin class'\n    groups = ()\n\n    def update(self):\n        \"\"\"See interfaces.IForm\"\"\"\n        self.update_widgets()\n        groups = []\n        for group_class in self.groups:\n            if IGroup.providedBy(group_class):\n                group = group_class\n            else:\n                group = group_class(self.context, self.request, self)\n            groups.append(group)\n\n        registry = self.request.registry\n        for group in sorted((adapter for name, adapter in registry.getAdapters((self.context, self.request, self), IGroup)), key=get_form_weight):\n            groups.append(group)\n\n        [group.update() for group in groups]\n        self.groups = tuple(groups)\n\n    def extract_data(self, set_errors=True):\n        \"\"\"See interfaces.IForm\"\"\"\n        data, errors = super(GroupManager, self).extract_data(set_errors=set_errors)\n        for group in self.groups:\n            group_data, group_errors = group.extract_data(set_errors=set_errors)\n            data.update(group_data)\n            if group_errors:\n                if errors:\n                    errors += group_errors\n                else:\n                    errors = group_errors\n\n        registry = self.request.registry\n        registry.notify(DataExtractedEvent(data, errors, self))\n        return (data, errors)\n\n\n@implementer(IGroup)\nclass Group(GroupManager, BaseForm):\n    __doc__ = 'Group of field widgets within form'\n\n    def __init__(self, context, request, parent_form):\n        self.context = context\n        self.request = request\n        self.parent_form = self.__parent__ = parent_form\n\n    def update_widgets(self, prefix=None):\n        \"\"\"See interfaces.IForm\"\"\"\n        registry = self.request.registry\n        self.widgets = registry.getMultiAdapter((self, self.request, self.get_content()), IWidgets)\n        for attr_name in ('mode', 'ignore_request', 'ignore_context', 'ignore_readonly'):\n            value = getattr(self.parent_form.widgets, attr_name)\n            setattr(self.widgets, attr_name, value)\n\n        if prefix is not None:\n            self.widgets.prefix = prefix\n        self.widgets.update()\n\n\n@implementer(IGroupForm)\nclass GroupForm(GroupManager):\n    __doc__ = 'A mix-in class for add and edit forms to support groups.'\n\n    def update(self):\n        \"\"\"See interfaces.IForm\"\"\"\n        GroupManager.update(self)\n        self.update_actions()\n        self.actions.execute()","sub_path":"pycfiles/pyams_form-1.0.2-py3.5/group.cpython-35.py","file_name":"group.cpython-35.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"223876032","text":"# Copyright 2017 D-Wave Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport 
sys\nimport time\nimport random\nimport logging\nimport platform\nimport itertools\nimport numbers\n\nfrom collections import abc, OrderedDict\nfrom urllib.parse import urljoin\nfrom datetime import datetime\nfrom dateutil.tz import UTC\nfrom functools import wraps\nfrom pkg_resources import iter_entry_points\n\nimport click\nimport requests\n\n# Use numpy if available for fast decoding\ntry:\n    import numpy as np\n    _numpy = True\nexcept ImportError:  # pragma: no cover\n    _numpy = False\n\n__all__ = ['evaluate_ising', 'uniform_iterator', 'uniform_get',\n           'default_text_input', 'click_info_switch', 'datetime_to_timestamp',\n           'utcnow', 'epochnow', 'tictoc']\n\nlogger = logging.getLogger(__name__)\n\n\ndef evaluate_ising(linear, quad, state):\n    \"\"\"Calculate the energy of a state given the Hamiltonian.\n\n    Args:\n        linear: Linear Hamiltonian terms.\n        quad: Quadratic Hamiltonian terms.\n        state: Vector of spins describing the system state.\n\n    Returns:\n        Energy of the state evaluated by the given energy function.\n    \"\"\"\n\n    # If we were given a numpy array cast to list\n    if _numpy and isinstance(state, np.ndarray):\n        return evaluate_ising(linear, quad, state.tolist())\n\n    # Accumulate the linear and quadratic values\n    energy = 0.0\n    for index, value in uniform_iterator(linear):\n        energy += state[index] * value\n    for (index_a, index_b), value in quad.items():\n        energy += value * state[index_a] * state[index_b]\n    return energy\n\n\ndef active_qubits(linear, quadratic):\n    \"\"\"Calculate a set of all active qubits. Qubit is \"active\" if it has\n    bias or coupling attached.\n\n    Args:\n        linear (dict[variable, bias]/list[variable, bias]):\n            Linear terms of the model.\n\n        quadratic (dict[(variable, variable), bias]):\n            Quadratic terms of the model.\n\n    Returns:\n        set:\n            Active qubits' indices.\n    \"\"\"\n\n    active = {idx for idx,bias in uniform_iterator(linear)}\n    for edge, _ in quadratic.items():\n        active.update(edge)\n    return active\n\n\ndef generate_random_ising_problem(solver, h_range=None, j_range=None):\n    \"\"\"Generates an Ising problem formulation valid for a particular solver,\n    using all qubits and all couplings and linear/quadratic biases sampled\n    uniformly from `h_range`/`j_range`.\n    \"\"\"\n\n    if h_range is None:\n        h_range = solver.properties.get('h_range', [-1, 1])\n    if j_range is None:\n        j_range = solver.properties.get('j_range', [-1, 1])\n\n    lin = {qubit: random.uniform(*h_range) for qubit in solver.nodes}\n    quad = {edge: random.uniform(*j_range) for edge in solver.undirected_edges}\n\n    return lin, quad\n\n\ndef generate_const_ising_problem(solver, h=1, j=-1):\n    return generate_random_ising_problem(solver, h_range=[h, h], j_range=[j, j])\n\n\ndef uniform_iterator(sequence):\n    \"\"\"Uniform (key, value) iteration on a `dict`,\n    or (idx, value) on a `list`.\"\"\"\n\n    if isinstance(sequence, abc.Mapping):\n        return sequence.items()\n    else:\n        return enumerate(sequence)\n\n\ndef uniform_get(sequence, index, default=None):\n    \"\"\"Uniform `dict`/`list` item getter, where `index` is interpreted as a key\n    for maps and as numeric index for lists.\"\"\"\n\n    if isinstance(sequence, abc.Mapping):\n        return sequence.get(index, default)\n    else:\n        return sequence[index] if index < len(sequence) else default\n\n\ndef reformat_qubo_as_ising(qubo):\n    \"\"\"Split QUBO coefficients into linear and quadratic terms (the Ising form).\n\n    Args:\n        qubo (dict[(int, int), float]):\n            Coefficients of a quadratic unconstrained binary optimization\n            (QUBO) model.\n\n    Returns:\n        (dict[int, 
float], dict[(int, int), float])\n\n \"\"\"\n\n lin = {u: bias for (u, v), bias in qubo.items() if u == v}\n quad = {(u, v): bias for (u, v), bias in qubo.items() if u != v}\n\n return lin, quad\n\n\ndef strip_head(sequence, values):\n \"\"\"Strips elements of `values` from the beginning of `sequence`.\"\"\"\n values = set(values)\n return list(itertools.dropwhile(lambda x: x in values, sequence))\n\n\ndef strip_tail(sequence, values):\n \"\"\"Strip `values` from the end of `sequence`.\"\"\"\n return list(reversed(list(strip_head(reversed(sequence), values))))\n\n\ndef input_with_default(prompt, default, optional):\n line = ''\n while not line:\n line = input(prompt)\n if not line:\n line = default\n if not line:\n if optional:\n break\n click.echo(\"Input required, please try again.\")\n return line\n\n\ndef default_text_input(prompt, default=None, optional=True):\n if default:\n prompt = \"{} [{}]: \".format(prompt, default)\n else:\n if optional:\n prompt = \"{} [skip]: \".format(prompt)\n else:\n prompt = \"{}: \".format(prompt)\n\n return input_with_default(prompt, default, optional)\n\n\ndef click_info_switch(f):\n \"\"\"Decorator to create eager Click info switch option, as described in:\n http://click.pocoo.org/6/options/#callbacks-and-eager-options.\n\n Takes a no-argument function and abstracts the boilerplate required by\n Click (value checking, exit on done).\n\n Example:\n\n @click.option('--my-option', is_flag=True, callback=my_option,\n expose_value=False, is_eager=True)\n def test():\n pass\n\n @click_info_switch\n def my_option()\n click.echo('some info related to my switch')\n \"\"\"\n\n @wraps(f)\n def wrapped(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n f()\n ctx.exit()\n return wrapped\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Convert timezone-aware `datetime` to POSIX timestamp and\n return seconds since UNIX epoch.\n\n Note: similar to `datetime.timestamp()` in Python 3.3+.\n \"\"\"\n\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n return (dt - epoch).total_seconds()\n\n\ndef utcnow():\n \"\"\"Returns tz-aware now in UTC.\"\"\"\n return datetime.utcnow().replace(tzinfo=UTC)\n\n\ndef epochnow():\n \"\"\"Returns now as UNIX timestamp.\n\n Invariant:\n epochnow() ~= datetime_to_timestamp(utcnow())\n\n \"\"\"\n return time.time()\n\n\ndef strtrunc(s, maxlen=60):\n s = str(s)\n return s[:(maxlen-3)]+'...' if len(s) > maxlen else s\n\n\nclass TimeoutingHTTPAdapter(requests.adapters.HTTPAdapter):\n \"\"\"Sets a default timeout for all adapter (think session) requests. It is\n overridden with per-request timeout. 
But it cannot be reset back to\n    infinite wait (``None``).\n\n    Usage:\n\n        s = requests.Session()\n        s.mount(\"http://\", TimeoutingHTTPAdapter(timeout=5))\n        s.mount(\"https://\", TimeoutingHTTPAdapter(timeout=5))\n\n        s.get('http://httpbin.org/delay/6')             # -> timeouts after 5sec\n        s.get('http://httpbin.org/delay/6', timeout=10) # -> completes after 6sec\n\n    The alternative is to set ``timeout`` on each request manually/explicitly,\n    subclass ``Session``, or monkeypatch ``Session.request()``.\n    \"\"\"\n\n    def __init__(self, timeout=None, *args, **kwargs):\n        self.timeout = timeout\n        super(TimeoutingHTTPAdapter, self).__init__(*args, **kwargs)\n\n    def send(self, *args, **kwargs):\n        # can't use setdefault because caller always sets timeout kwarg\n        kwargs['timeout'] = self.timeout\n        return super(TimeoutingHTTPAdapter, self).send(*args, **kwargs)\n\n\n# Note: BaseUrlSession is taken from https://github.com/requests/toolbelt under\n# an Apache 2 license. This simple extension didn't warrant a new dependency.\n# If we later decide to use additional features from `requests-toolbelt`,\n# remove it from here.\n\nclass BaseUrlSession(requests.Session):\n    \"\"\"A Session with a URL that all requests will use as a base.\"\"\"\n\n    base_url = None\n\n    def __init__(self, base_url=None):\n        if base_url:\n            self.base_url = base_url\n        super(BaseUrlSession, self).__init__()\n\n    def request(self, method, url, *args, **kwargs):\n        \"\"\"Send the request after generating the complete URL.\"\"\"\n        url = self.create_url(url)\n        return super(BaseUrlSession, self).request(\n            method, url, *args, **kwargs\n        )\n\n    def create_url(self, url):\n        \"\"\"Create the URL based off this partial path.\"\"\"\n        return urljoin(self.base_url, url)\n\n\ndef user_agent(name=None, version=None):\n    \"\"\"Return User-Agent ~ \"name/version language/version interpreter/version os/version\".\"\"\"\n\n    def _interpreter():\n        name = platform.python_implementation()\n        version = platform.python_version()\n        bitness = platform.architecture()[0]\n        if name == 'PyPy':\n            version = '.'.join(map(str, sys.pypy_version_info[:3]))\n        full_version = [version]\n        if bitness:\n            full_version.append(bitness)\n        return name, \"-\".join(full_version)\n\n    tags = []\n\n    if name and version:\n        tags.append((name, version))\n\n    # extend (not reassign) so an explicit name/version tag stays first\n    tags.extend([\n        (\"python\", platform.python_version()),\n        _interpreter(),\n        (\"machine\", platform.machine() or 'unknown'),\n        (\"system\", platform.system() or 'unknown'),\n        (\"platform\", platform.platform() or 'unknown'),\n    ])\n\n    # add platform-specific tags\n    tags.extend(get_platform_tags())\n\n    return ' '.join(\"{}/{}\".format(name, version) for name, version in tags)\n\n\nclass CLIError(Exception):\n    \"\"\"CLI command error that includes the error code in addition to the\n    standard error message.\"\"\"\n\n    def __init__(self, message, code):\n        super(CLIError, self).__init__(message)\n        self.code = code\n\n\nclass cached(object):\n    \"\"\"Caching decorator with max-age/expiry, forced refresh, and\n    per-arguments-combo keys.\n\n    Example:\n        Cache for 5 minutes::\n\n            @cached(maxage=300)\n            def get_solvers(**features):\n                return requests.get(...)\n\n        Populate the cache on the first hit for a specific arguments combination::\n\n            get_solvers(name='asd', count=5)\n\n        Cache hit (note a different ordering of arguments)::\n\n            get_solvers(count=5, name='asd')\n\n        Not in cache::\n\n            get_solvers(count=10, name='asd')\n\n        But cache is refreshed, even on a hit, if ``refresh_=True``::\n\n            get_solvers(count=5, name='asd', refresh_=True)\n\n    \"\"\"\n\n    def argshash(self, 
args, kwargs):\n \"Hash mutable arguments' containers with immutable keys and values.\"\n a = repr(args)\n b = repr(sorted((repr(k), repr(v)) for k, v in kwargs.items()))\n return a + b\n\n def __init__(self, maxage=None):\n self.maxage = maxage or 0\n self.cache = {}\n\n def __call__(self, fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n refresh_ = kwargs.pop('refresh_', False)\n now = epochnow()\n\n key = self.argshash(args, kwargs)\n data = self.cache.get(key, {})\n\n if not refresh_ and data.get('expires', 0) > now:\n val = data.get('val')\n else:\n val = fn(*args, **kwargs)\n self.cache[key] = dict(expires=now+self.maxage, val=val)\n\n return val\n\n # expose the cache for testing and debugging\n wrapper._cache = self.cache\n wrapper._maxage = self.maxage\n\n return wrapper\n\n\nclass retried(object):\n \"\"\"Decorator that retries running the wrapped function `retries` times,\n logging exceptions along the way.\n\n Args:\n retries (int, default=1):\n Decorated function is allowed to fail `retries` times.\n\n backoff (number/List[number]/callable, default=0):\n Delay (in seconds) before a retry.\n\n Example:\n Retry up to three times::\n\n import random\n\n def f(thresh):\n r = random.random()\n if r < thresh:\n raise ValueError\n return r\n\n retried_f = retried(3)(f)\n\n retried_f(0.5)\n \"\"\"\n\n def __init__(self, retries=1, backoff=0):\n self.retries = retries\n\n # normalize `backoff` to callable\n if isinstance(backoff, numbers.Number):\n self.backoff = lambda retry: backoff\n elif isinstance(backoff, abc.Sequence):\n it = iter(backoff)\n self.backoff = lambda retry: next(it)\n else:\n self.backoff = backoff\n\n def __call__(self, fn):\n if not callable(fn):\n raise TypeError(\"decorated object must be callable\")\n\n @wraps(fn)\n def wrapped(*args, **kwargs):\n for retries_left in range(self.retries, -1, -1):\n try:\n return fn(*args, **kwargs)\n\n except Exception as exc:\n fn_name = getattr(fn, '__name__', 'unnamed')\n logger.debug(\n \"Running %s(*%r, **%r) failed with %r. 
Retries left: %d\",\n fn_name, args, kwargs, exc, retries_left)\n\n if retries_left == 0:\n raise exc\n\n retry = self.retries - retries_left + 1\n delay = self.backoff(retry)\n logger.debug(\"Sleeping for %s seconds before retrying.\", delay)\n time.sleep(delay)\n\n return wrapped\n\n\nclass tictoc(object):\n \"\"\"Timer as a context manager.\"\"\"\n\n def __enter__(self):\n self.tick = time.perf_counter()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.dt = time.perf_counter() - self.tick\n\n\ndef parse_loglevel(level_name, default=logging.NOTSET):\n \"\"\"Resolve numeric and symbolic log level names to numeric levels.\"\"\"\n\n try:\n level_name = str(level_name or '').strip().lower()\n except:\n return default\n\n # note: make sure `TRACE` level is added to `logging` before calling this\n known_levels = {\n 'notset': logging.NOTSET,\n 'trace': logging.TRACE,\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'warn': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL,\n 'fatal': logging.CRITICAL\n }\n\n try:\n level = int(level_name)\n except ValueError:\n level = known_levels.get(level_name, default)\n\n return level\n\n\ndef set_loglevel(logger, level_name):\n level = parse_loglevel(level_name)\n logger.setLevel(level)\n logger.info(\"Log level for %r namespace set to %r\", logger.name, level)\n\n\ndef get_contrib_config():\n \"\"\"Return all registered contrib (non-open-source) Ocean packages.\"\"\"\n\n contrib = [ep.load() for ep in iter_entry_points('dwave_contrib')]\n return contrib\n\n\ndef get_contrib_packages():\n \"\"\"Combine all contrib packages in an ordered dict. Assumes package names\n are unique.\n \"\"\"\n\n contrib = get_contrib_config()\n\n packages = OrderedDict()\n for dist in contrib:\n for pkg in dist:\n packages[pkg['name']] = pkg\n\n return packages\n\n\ndef get_platform_tags():\n \"\"\"Return a list of platform tags generated from registered entry points.\"\"\"\n\n fs = [ep.load() for ep in iter_entry_points('dwave.common.platform.tags')]\n tags = list(filter(None, [f() for f in fs]))\n return tags\n","sub_path":"dwave/cloud/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70794446","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render, redirect\nfrom .models import Courses, Description\n\ndef index(request):\n\tcontext = {\n\t\t'courses': Courses.objects.all()\n\t}\n\treturn render(request, 'coursesApp/index.html', context)\n\ndef add(request):\n\tdescription = Description(description=request.POST['description'])\n\tdescription.save()\n\tCourses.objects.create(name=request.POST['courseName'], description=description)\n\treturn redirect('/')\n\ndef remove(request, id):\n\tcontext = {\n\t\t'courses': Courses.objects.get(id=id)\n\t}\n\treturn render(request, 'coursesApp/remove.html', context)\n\ndef confirm(request, id):\n\tif request.POST.get('yes'):\n\t\tCourses.objects.filter(id=id).delete()\n\treturn redirect('/')\n","sub_path":"Python Django/Django/courses/apps/coursesApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"361511978","text":"import time\nimport random\n\nimport pytest\nimport cattle\nfrom cattle import ApiError\n\n\n@pytest.fixture\ndef 
client(request):\n url = 'http://localhost:9501/v1/schemas'\n c = cattle.from_env(url=url)\n request.addfinalizer(lambda: cleanup(c))\n return cleanup(c)\n\n\ndef cleanup(client):\n v = client.list_volume()[0]\n if v.replicaCount != 0:\n v = v.shutdown()\n for r in client.list_replica():\n client.delete(r)\n return client\n\n\n@pytest.fixture\ndef random_str():\n return 'random-{0}-{1}'.format(random_num(), int(time.time()))\n\n\ndef random_num():\n return random.randint(0, 1000000)\n\n\ndef test_replica_list(client):\n replicas = client.list_replica()\n assert len(replicas) == 0\n\n\ndef test_replica_create(client):\n f = 'file://' + random_str()\n replica = client.create_replica(address=f)\n assert replica.address == f\n\n client.create_replica(address=f)\n client.create_replica(address=f)\n\n r = client.list_replica()\n assert len(r) == 1\n assert r[0].address == f\n assert r[0].mode == 'WO'\n\n f2 = 'file://' + random_str()\n with pytest.raises(ApiError) as e:\n client.create_replica(address=f2)\n assert e.value.error.status == 500\n assert e.value.error.message == 'Can only have one WO replica at a time'\n\n r = client.update(r[0], mode='RW')\n assert r.mode == 'RW'\n\n replica2 = client.create_replica(address=f2)\n assert replica2.address == f2\n\n r = client.list_replica()\n assert len(r) == 2\n\n\ndef test_replica_delete(client):\n f = 'file://' + random_str()\n r1 = client.create_replica(address=f+'1')\n client.update(r1, mode='RW')\n r2 = client.create_replica(address=f+'2')\n client.update(r2, mode='RW')\n r3 = client.create_replica(address=f+'3')\n client.update(r3, mode='RW')\n\n r = client.list_replica()\n assert len(r) == 3\n\n client.delete(r1)\n r = client.list_replica()\n assert len(r) == 2\n\n client.delete(r1)\n r = client.list_replica()\n assert len(r) == 2\n\n client.delete(r2)\n r = client.list_replica()\n assert len(r) == 1\n\n client.delete(r3)\n r = client.list_replica()\n assert len(r) == 0\n\n\ndef test_replica_change(client):\n f = 'file://' + random_str()\n r1 = client.create_replica(address=f)\n assert r1.mode == 'WO'\n\n r1 = client.update(r1, mode='RW')\n assert r1.mode == 'RW'\n\n r1 = client.reload(r1)\n assert r1.mode == 'RW'\n\n\ndef test_start(client):\n vs = client.list_volume()\n assert len(vs) == 1\n\n v = vs[0]\n assert v.replicaCount == 0\n\n addresses = ['file://' + random_str(), 'file://' + random_str()]\n v = v.start(replicas=addresses)\n\n rs = client.list_replica()\n assert len(rs) == 2\n assert v.replicaCount == 2\n\n found_addresses = [r.address for r in rs]\n assert set(found_addresses) == set(addresses)\n\n\ndef test_shutdown(client):\n vs = client.list_volume()\n assert len(vs) == 1\n v = vs[0]\n assert v.replicaCount == 0\n\n addresses = ['file://' + random_str(), 'file://' + random_str()]\n v = v.start(replicas=addresses)\n assert v.replicaCount == 2\n\n v = v.shutdown()\n assert v.replicaCount == 0\n\n r = client.list_replica()\n assert len(r) == 0\n","sub_path":"integration/core/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"632442862","text":"from tkinter import *\r\nfrom time import strftime\r\nfrom tkinter import messagebox\r\nfrom PIL import Image, ImageTk\r\nimport random\r\nimport threading\r\nimport time\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Numerical Computing Implementation\") # title of the application\r\nw, h = root.winfo_screenwidth(), root.winfo_screenheight() # getting 
the screen height and width\r\nroot.geometry(\"%dx%d\" % (w * 0.80, h * 0.80)) # setting the Window Size to 80% of the screen size\r\nroot.resizable(False, False)\r\n\r\n\r\ndef toggle_fullscreen(event): # Toggling Full screen ON when F11 is Pressed\r\n    root.attributes(\"-fullscreen\", True)\r\n    return \"break\"\r\n\r\n\r\ndef end_fullscreen(event): # Toggling Full screen OFF when Esc is Pressed\r\n    root.attributes(\"-fullscreen\", False)\r\n    return \"break\"\r\n\r\n\r\ndef on_closing(): # Confirmation to Exit the Program\r\n    if messagebox.askyesnocancel(\"Confirm Exit \", \"Are you sure you want to exit ?\"):\r\n        root.destroy()\r\n\r\n\r\ndef resize_image(event): # Function to resize images according to Screen Resolution\r\n    new_width = event.width\r\n    new_height = event.height\r\n    path = copy_of_image.resize((new_width, new_height))\r\n    photo1 = ImageTk.PhotoImage(path)\r\n    background_label.config(image=photo1)\r\n    background_label.image = photo1 # avoid garbage collection\r\n\r\n\r\ndef new_img(): # Generates Random number for images and returns the path of the image generated\r\n\r\n    number = random.randint(1, 44) # Pick a new number\r\n    path = 'D:/NCproject/Background_Images/' + str(number) + '.png'\r\n    return path\r\n\r\n\r\nimage = Image.open(new_img())\r\ncopy_of_image = image.copy()\r\nphoto = ImageTk.PhotoImage(image)\r\n\r\n# background_label = Label(root, image=photo)\r\n# background_label.bind('<Configure>', resize_image)\r\n# background_label.grid(row=0, column=0) # Placement of the image\r\n\r\nside_panel = Frame(root, bg=\"#3498db\", width=100, bd=5)\r\nside_panel.grid(row=0, column=0, sticky=S+W)\r\n\r\nexit_button = Button(side_panel, text=\"Exit\", padx=10, pady=10, width=15, relief=RAISED, bd=5, command=on_closing)\r\nexit_button.grid(row=1, column=1, sticky=\"s\")\r\n\r\nHelp = Button(side_panel, text=\"Help\", padx=10, pady=10, width=15, relief=RAISED, bd=5)\r\nHelp.grid(row=2, column=1, sticky=S+W)\r\n# Help = Button(side_panel, text=\"Help\", padx=10, pady=15, width=15, relief=RAISED, bd=5, bitmap=\"question\")\r\n# Help.grid(row=2, column=1, sticky=S+W)\r\n#\r\nsettings = Button(side_panel, text=\"Settings\", padx=10, pady=10, width=15, relief=RAISED, bd=5)\r\nsettings.grid(row=3, column=1, sticky=S+W)\r\n#\r\n# top_frame = Frame(root, height=20)\r\n# top_frame.pack(fill=X, side=TOP)\r\n#\r\n# label = Label(top_frame, text=\"Welcome To NC Computing \", height=5, bd=5, relief=SUNKEN, font=\"Times 18 bold italic\")\r\n# label.pack(fill=X)\r\n# main_frame = Frame(root)\r\n# main_frame.pack()\r\n#\r\n# chap2 = Button(main_frame, text=\"Solution(Root) of equations\", padx=10, pady=10, width=70, relief=RAISED, bd=5)\r\n# chap2.pack(anchor=CENTER)\r\n# chap2 = Button(main_frame, text=\"Solution(Root) of equations\", padx=10, pady=10, width=70, relief=RAISED, bd=5)\r\n# chap2.pack(anchor=CENTER)\r\n# chap2 = Button(main_frame, text=\"Solution(Root) of equations\", padx=10, pady=10, width=70, relief=RAISED, bd=5)\r\n# chap2.pack(anchor=CENTER)\r\n# chap2 = Button(main_frame, text=\"Solution(Root) of equations\", padx=10, pady=10, width=70, relief=RAISED, bd=5)\r\n# chap2.pack(anchor=CENTER)\r\n# chap2 = Button(main_frame, text=\"Solution(Root) of equations\", padx=10, pady=10, width=70, relief=RAISED, bd=5)\r\n# chap2.pack(anchor=CENTER)\r\n# chap2 = Button(main_frame, text=\"Solution(Root) of equations\", padx=10, pady=10, width=70, relief=RAISED, bd=5)\r\n# chap2.pack(anchor=CENTER)\r\n#\r\n#\r\n# root.bind(\"<F11>\", toggle_fullscreen)\r\n# root.bind(\"<Escape>\", end_fullscreen)\r\n\r\n# 
root.protocol(\"WM_DELETE_WINDOW\", on_closing) # Modifying close button function\r\n\r\nroot.mainloop()\r\nexit()\r\nfrom tkinter import *\r\nfrom time import strftime\r\nfrom tkinter import messagebox\r\nfrom PIL import Image, ImageTk\r\nimport random\r\n\r\nroot = Tk()\r\nroot.title(\"Numerical Computing Implementation\") # title of the application\r\nw, h = root.winfo_screenwidth(), root.winfo_screenheight() # getting the screen height and width\r\nroot.geometry(\"%dx%d\" % (w * 0.75, h * 0.750)) # setting the Window Size to 80% of the screen size\r\n\r\n\r\ndef callback():\r\n lbl = Label(root, text=e.get()).pack()\r\n\r\n\r\ndef click():\r\n e.delete(0, END)\r\n\r\n\r\ndef clear_search(event): # Clearing the entry box upon clicking on it\r\n e.delete(0, END)\r\n\r\n\r\ndef on_enter(e): # hover on button color change\r\n b['background'] = '#3498db'\r\n b.config(relief='raised')\r\n\r\n\r\ndef on_leave(e): # leaving hover on button getting normal\r\n b['background'] = 'SystemButtonFace'\r\n b.config(relief='flat')\r\n\r\n\r\nb = Button(root, text=\"Submit\", width=50, height=5, font='Rockwell', relief=RIDGE, bd=10, activebackground=\"green\", command=callback)\r\nb.place(relx=0.4, rely=0.5)\r\n# b1 = Button(root, text=\"Submit\", width=50, bg=\"#C8F9C4\", height=5, relief='raised', font='Rockwell', bd=10, activebackground=\"green\", command=callback)\r\n# b1.place(relx=0.5, rely=0.6)\r\n\r\ne = Entry(root, width=50,)\r\ne.pack()\r\ne.insert(0, \"Enter an Expression\")\r\ntext = e.get()\r\ne.bind(\"\", clear_search)\r\n\r\nb.bind(\"\", on_enter)\r\nb.bind(\"\", on_leave)\r\n#!/usr/bin/env python3\r\n\r\n\"\"\"\r\nZetCode Tkinter tutorial\r\n\r\nIn this script, we use the grid\r\nmanager to create a more complicated Windows\r\nlayout.\r\n\r\nAuthor: Jan Bodnar\r\nLast modified: April 2019\r\nWebsite: www.zetcode.com\r\n\"\"\"\r\n\r\nfrom tkinter import Tk, Text, BOTH, W, N, E, S\r\nfrom tkinter.ttk import Frame, Button, Label, Style\r\n\r\n\r\nclass Example(Frame):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.initUI()\r\n\r\n\r\n def initUI(self):\r\n\r\n self.master.title(\"Windows\")\r\n self.pack(fill=BOTH, expand=True)\r\n\r\n self.columnconfigure(1, weight=1)\r\n self.columnconfigure(3, pad=7)\r\n self.rowconfigure(3, weight=1)\r\n self.rowconfigure(5, pad=7)\r\n\r\n lbl = Label(self, text=\"Windows\")\r\n lbl.grid(sticky=W, pady=4, padx=5)\r\n\r\n area = Text(self)\r\n area.grid(row=1, column=0, columnspan=2, rowspan=4,\r\n padx=5, sticky=E+W+S+N)\r\n\r\n abtn = Button(self, text=\"Activate\")\r\n abtn.grid(row=1, column=3)\r\n\r\n cbtn = Button(self, text=\"Close\")\r\n cbtn.grid(row=2, column=3, pady=4)\r\n\r\n hbtn = Button(self, text=\"Help\")\r\n hbtn.grid(row=5, column=0, padx=5)\r\n\r\n obtn = Button(self, text=\"OK\")\r\n obtn.grid(row=5, column=3)\r\n\r\n\r\ndef main():\r\n\r\n root = Tk()\r\n root.geometry(\"350x300+300+300\")\r\n app = Example()\r\n root.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n# root.mainloop()\r\n\r\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530645724","text":"# -*- coding: utf-8 -*-\n\n\nclass Cells:\n def __init__(self, height, width):\n self.height = height\n self.width = width\n self.max_idx = height * width - 1\n\n def get_neighbours(self, idx):\n col, row = self._idx_cord(idx)\n up = (col, row - 1)\n right_1 = (col + 1, row)\n left_1 = (col - 1, row)\n if col 
% 2 == 0:\n right_2 = (col + 1, row - 1)\n left_2 = (col - 1, row - 1)\n else:\n right_2 = (col + 1, row + 1)\n left_2 = (col - 1, row + 1)\n down = (col, row + 1)\n lefts = sorted([left_1, left_2], key=lambda x: -x[1])\n rights = sorted([right_1, right_2], key=lambda x: x[1])\n res = [self._cord_idx(*cord) for cord in [up] + rights + [down] + lefts]\n return res\n\n def _idx_cord(self, idx):\n if idx > self.max_idx or idx < 0:\n raise Exception\n else:\n col = idx // self.height\n row = idx % self.height\n return (col, row)\n\n def _cord_idx(self, col, row):\n if not (0 <= col < self.width and 0 <= row < self.height):\n return -1\n res = self.height * col + row\n if res > self.max_idx:\n return -1\n return res\n\n","sub_path":"Practice/Interview/square/cells/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52843866","text":"\nimport sys\nsys.path.insert(1, '/tmp/Projects2021/depth_estimation/final-project-monodepth-ccny/dataloaders/')\nimport tensorflow as tf\nfrom dataloaders import *\nfrom tensorflow import keras\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom keras import backend as K\n \n\nargv = sys.argv\ndataset_path = '/tmp/Projects2021/rgbd_dataset/indoor_test'\nmodel = keras.models.load_model('unet128_indoor.hdf5', compile=False)\ndtloader = dataloader_rgbd(dataset_path, 38, image_size=[128, 128])\nX_test, y_test = dtloader.get_testing_sample()\ny_pred = model.predict(X_test)\ny_pred = y_pred[:,:,:,0]*255\ny_true = y_test*255\n\ndef mae(y_true, y_pred):\n error = y_pred-y_true\n return np.mean(np.abs(error))\n\ndef mse(y_true, y_pred):\n error = y_pred-y_true\n return np.mean(np.power(error,2))\n\ndef rmse(y_true, y_pred):\n error = y_pred-y_true\n return np.sqrt(np.mean(np.power(error,2)))\n\ndef log_rmse(y_true, y_pred):\n error = np.log(1+y_pred)-np.log(1+y_true)\n return np.sqrt(np.mean(np.power(error,2)))\n\nprint(\"MAE: \", mae(y_true, y_pred))\n# print(\"MSE: \", mse(y_test, y_pred))\nprint(\"RMSE: \", rmse(y_true, y_pred))\nprint(\"LogRMSE: \", log_rmse(y_true, y_pred))\n","sub_path":"models/eval_errors.py","file_name":"eval_errors.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403442603","text":"\"\"\"\nDjango settings for system project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport tempfile\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'cv*2n!28j*_p(+6%nn%)&@d^x1w75$bimq9bxfcv9hl(&&t=!7'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\nUSE_TZ=True\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.sites',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_filters',\n #'proyects',\n 
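eval_errors.py grades predicted depth maps with MAE, RMSE, and log-RMSE. A toy numpy check of the same formulas; the arrays here are made-up values, and np.log1p(x) is just np.log(1 + x):

import numpy as np

y_true = np.array([1.0, 2.0, 4.0])
y_pred = np.array([1.5, 1.0, 5.0])

mae = np.mean(np.abs(y_pred - y_true))            # (0.5 + 1.0 + 1.0) / 3 = 0.833...
rmse = np.sqrt(np.mean((y_pred - y_true) ** 2))   # sqrt(0.75) = 0.866...
log_rmse = np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))
print(mae, rmse, log_rmse)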
#'payments',\n #'customers',\n #'developers',\n 'sections',\n 'compute',\n 'postman',\n 'fileupload',\n 'ckeditor',\n 'allaccess', \n 'djrill',\n\n #'pinax.notifications',\n)\n\nSITE_ID = 1\n\nAUTHENTICATION_BACKENDS = (\n # Default backend\n 'django.contrib.auth.backends.ModelBackend',\n # Additional backend\n 'allaccess.backends.AuthorizedServiceBackend',\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nLOGIN_REDIRECT_URL = '/customer/process'\n\nTEMPLATE_DIRS = (\n BASE_DIR + '/templates/',\n)\n\nROOT_URLCONF = 'system.urls'\n\nWSGI_APPLICATION = 'system.wsgi.application'\n\nPOSTMAN_DISALLOW_ANONYMOUS = True\nPOSTMAN_DISALLOW_COPIES_ON_REPLY = True\nPOSTMAN_AUTO_MODERATE_AS = True\n#POSTMAN_NOTIFIER_APP = 'pinax.notifications'\nPOSTMAN_MAILER_APP = 'django.core.mail' \n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\n\nDEFAULT_FROM_EMAIL = 'contacto@serverticsup.com'\n#Email Setting\nMANDRILL_API_KEY = \"tuK68gZttFdaUESJZ-brSA\"\nEMAIL_BACKEND = \"djrill.mail.backends.djrill.DjrillBackend\"\n#EMAIL_BACKEND = 'django_mailer.smtp_queue.EmailBackend'\n# Host for sending e-mail.\n#EMAIL_HOST = 'smtp.mandrillapp.com'\n\n# Port for sending e-mail.\n#EMAIL_PORT = 587\n\n# Optional SMTP authentication information for EMAIL_HOST.\n#EMAIL_HOST_USER = 'albertisfu@gmail.com'\n#EMAIL_HOST_PASSWORD = 'tuK68gZttFdaUESJZ-brSA'\n#EMAIL_USE_TLS = True\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-es'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n\nMEDIA_ROOT = BASE_DIR\nMEDIA_URL = '/'\n\n#CKE Editor\nCKEDITOR_CONFIGS = {\n 'text': {\n 'language' :'es-es',\n 'uiColor': '#F3F3F4',\n\n 'disableNativeSpellChecker': False,\n'removePlugins': 'contextmenu,liststyle,tabletools',\n'toolbar': [\n { 'name': 'basicstyles', 'groups': [ 'basicstyles', 'cleanup' ], 'items': [ 'Bold', 'Italic', 'Underline', 'Strike',] },\n [ 'Paste', 'PasteFromWord', '-', 'Undo', 'Redo' ], \n { 'name': 'paragraph', 'groups': [ 'list', 'indent', 'blocks', 'align', 'bidi' ], 'items': [ 'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', ] },\n { 'name': 'links', 'items': [ 'Link', 'Unlink', ] },\n { 'name': 'insert', 'items': [ 'Image', 'Table', 'Smiley', 'SpecialChar', ] },\n '/',\n { 'name': 'styles', 'items': [ 'Styles', 'Format', 'FontSize' ] },\n { 'name': 'colors', 'items': [ 'TextColor', 'BGColor' ] },\n { 'name': 'document', 'items': [ 'Source',] }, \n ],\n\n },\n}\nCKEDITOR_UPLOAD_PATH = BASE_DIR + '/static/uploads'\n#STATIC_ROOT = os.path.join(tempfile.gettempdir(), 'ck_static')\n#MEDIA_ROOT = os.path.join(tempfile.gettempdir(), 'ck_media')\n#CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\n#encripted\nENCRYPTED_FIELDS_KEYDIR = BASE_DIR + '/static/fieldkeys'\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 
'static'),\n\n)\n","sub_path":"system/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"167706405","text":"import os,random,math\nfrom nltk import word_tokenize as tokenize\nimport operator\n\ndef get_training_testing(training_dir,split=0.5):\n\n filenames=os.listdir(training_dir)\n n=len(filenames)\n print(\"There are {} files in the training directory: {}\".format(n,training_dir))\n random.seed(53) #if you want the same random split every time\n random.shuffle(filenames)\n index=int(n*split)\n trainingfiles=filenames[:index]\n heldoutfiles=filenames[index:]\n return trainingfiles,heldoutfiles\n\n\nclass language_model():\n \n def __init__(self,trainingdir,files=[]):\n self.training_dir=trainingdir\n self.files=files\n self.train()\n \n def train(self): \n self.unigram={}\n self.bigram={}\n \n self._processfiles()\n self._make_unknowns()\n self._discount()\n self._kneser_ney()\n \n \n self._convert_to_probs()\n \n \n def _processline(self,line):\n tokens=[\"__START\"]+tokenize(line)+[\"__END\"]\n previous=\"__END\"\n for token in tokens:\n self.unigram[token]=self.unigram.get(token,0)+1\n current=self.bigram.get(previous,{})\n current[token]=current.get(token,0)+1\n self.bigram[previous]=current\n previous=token\n \n \n def _processfiles(self):\n for afile in self.files:\n print(\"Processing {}\".format(afile))\n try:\n with open(os.path.join(self.training_dir,afile)) as instream:\n for line in instream:\n line=line.rstrip()\n if len(line)>0:\n self._processline(line)\n except UnicodeDecodeError:\n print(\"UnicodeDecodeError processing {}: ignoring rest of file\".format(afile))\n \n \n def _convert_to_probs(self):\n \n self.unigram={k:v/sum(self.unigram.values()) for (k,v) in self.unigram.items()}\n self.bigram={key:{k:v/sum(adict.values()) for (k,v) in adict.items()} for (key,adict) in self.bigram.items()}\n self.kn={k:v/sum(self.kn.values()) for (k,v) in self.kn.items()}\n \n ###adjust __UNK probabilities to include probability of an individual unknown word (1/number_unknowns)\n \n self.unigram[\"__UNK\"]=self.unigram.get(\"__UNK\",0)/self.number_unknowns\n self.bigram[\"__UNK\"]={k:v/self.number_unknowns for (k,v) in self.bigram.get(\"__UNK\",{}).items()}\n for key,adict in self.bigram.items():\n adict[\"__UNK\"]=adict.get(\"__UNK\",0)/self.number_unknowns\n self.bigram[key]=adict\n self.kn[\"__UNK\"]=self.kn.get(\"__UNK\",0)/self.number_unknowns\n \n def get_prob(self,token,context=\"\",methodparams={}):\n if methodparams.get(\"method\",\"unigram\")==\"unigram\":\n return self.unigram.get(token,self.unigram.get(\"__UNK\",0))\n else:\n if methodparams.get(\"smoothing\",\"kneser-ney\")==\"kneser-ney\":\n unidist=self.kn\n else:\n unidist=self.unigram\n bigram=self.bigram.get(context[-1],self.bigram.get(\"__UNK\",{}))\n big_p=bigram.get(token,bigram.get(\"__UNK\",0))\n lmbda=bigram[\"__DISCOUNT\"]\n uni_p=unidist.get(token,unidist.get(\"__UNK\",0))\n #print(big_p,lmbda,uni_p)\n p=big_p+lmbda*uni_p \n return p\n \n \n def nextlikely(self,k=1,current=\"\",method=\"unigram\"):\n #use probabilities according to method to generate a likely next sequence\n #choose random token from k best\n blacklist=[\"__START\",\"__UNK\",\"__DISCOUNT\"]\n \n if method==\"unigram\":\n dist=self.unigram\n else:\n dist=self.bigram.get(current,self.bigram.get(\"__UNK\",{}))\n \n #sort the tokens by unigram probability\n 
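The settings.py record above keeps a literal SECRET_KEY and Mandrill API key in source, right next to Django's own "keep the secret key used in production secret" warning. One conventional remedy, sketched here and not taken from the original project, is to read them from the environment with harmless fallbacks for development:

import os

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "dev-only-insecure-key")
MANDRILL_API_KEY = os.environ.get("MANDRILL_API_KEY", "")
DEBUG = os.environ.get("DJANGO_DEBUG", "0") == "1"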
mostlikely=sorted(list(dist.items()),key=operator.itemgetter(1),reverse=True)\n #filter out any undesirable tokens\n filtered=[w for (w,p) in mostlikely if w not in blacklist]\n #choose one randomly from the top k\n res=random.choice(filtered[:k])\n return res\n \n def generate(self,k=1,end=\"__END\",limit=20,method=\"bigram\",methodparams={}):\n if method==\"\":\n method=methodparams.get(\"method\",\"bigram\")\n current=\"__START\"\n tokens=[]\n while current!=end and len(tokens)0:\n p,N=self.compute_prob_line(line,methodparams=methodparams)\n total_p+=p\n total_N+=N\n except UnicodeDecodeError:\n print(\"UnicodeDecodeError processing file {}: ignoring rest of file\".format(afile))\n return total_p,total_N\n \n def compute_perplexity(self,filenames=[],methodparams={\"method\":\"bigram\",\"smoothing\":\"kneser-ney\"}):\n \n #compute the probability and length of the corpus\n #calculate perplexity\n #lower perplexity means that the model better explains the data\n \n p,N=self.compute_probability(filenames=filenames,methodparams=methodparams)\n #print(p,N)\n pp=math.exp(-p/N)\n return pp \n \n def _make_unknowns(self,known=2):\n unknown=0\n self.number_unknowns=0\n for (k,v) in list(self.unigram.items()):\n if v 255:\n raise Exception(\"Messages cannot contain > 255 bytes [%s]\" % n)\n return chr(n) + bs + chr(checksum(bs))\n\n\nclass Comando(object):\n def __init__(self, stream, protocols=None):\n self.stream = stream\n self.protocols = {}\n self.message_callback = None\n self.error_protocol = -1\n if protocols is not None:\n [self.register_protocol(i, p) for (i, p) in enumerate(protocols)]\n\n def handle_stream(self, poll=True):\n if poll and hasattr(self.stream, 'inWaiting'):\n while self.stream.inWaiting():\n self.handle_stream(poll=False)\n return\n n = ord(self.stream.read(1))\n if n != '\\x00':\n bs = self.stream.read(n)\n else:\n bs = \"\"\n if len(bs) != n:\n raise Exception(\n \"Invalid message length of bytes %s != %s\" %\n (len(bs), n))\n cs = self.stream.read(1)\n if cs != chr(checksum(bs)):\n raise Exception(\n \"Invalid message checksum [%s != %s]\" %\n (chr(checksum(bs)), cs))\n self.receive_message(bs)\n\n def register_protocol(self, index, protocol):\n self.protocols[index] = protocol\n protocol.assign_comm(self)\n protocol.index = index\n\n def set_error_protocol(self, pid):\n self.error_protocol = pid\n\n def register_message_callback(self, f):\n self.message_callback = f\n\n def unregister_message_callback(self, f):\n self.message_callback = None\n\n def send_error(self, bs):\n if self.error_protocol != -1:\n self.protocols[self.error_protocol].send_message(bs)\n\n def send_message(self, bs):\n self.stream.write(build_message(bs))\n\n def receive_message(self, bs):\n if self.message_callback is not None:\n return self.message_callback(bs)\n if (len(bs) < 1):\n raise Exception(\"Invalid message, missing protocol\")\n pid = ord(bs[0])\n if pid not in self.protocols:\n raise Exception(\"Unknown protocol: %s\" % pid)\n self.protocols[pid].receive_message(bs[1:])\n","sub_path":"pycomando/comando.py","file_name":"comando.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"338331272","text":"from imgproc import *\nimport cv2 as cv\nimport numpy as np\nfilepath='/home/pi/webcam/'\n\n\n\ndef GetImage(imageName):\n img= cv.imread(imageName)\n return img\n \ndef GetMaxXY(img):\n maxIntensity=0.0\n height, width, channels = img.shape\n for x in range(0,width,10):\n for y in 
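comando.py frames every message as one length byte, the payload, and a trailing checksum byte, and the receiver verifies all three. A round-trip sketch of that wire format; the checksum body is not visible in the record, so the sum-of-byte-values-mod-256 used here is an assumption that is merely consistent with the record's chr(checksum(bs)) comparison:

def checksum(bs):
    # assumed implementation: sum of character codes, reduced to one byte
    return sum(ord(c) for c in bs) % 256

def build_message(bs):
    n = len(bs)
    if n > 255:
        raise Exception("Messages cannot contain > 255 bytes [%s]" % n)
    return chr(n) + bs + chr(checksum(bs))

def parse_message(msg):
    n = ord(msg[0])
    body, cs = msg[1:1 + n], msg[1 + n]
    assert cs == chr(checksum(body)), "bad checksum"
    return body

wire = build_message("\x01hello")      # one protocol-id byte plus payload
assert parse_message(wire) == "\x01hello"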
range(0,height-100,10):\r\n            red=float(img[y,x,0])\r\n            blue=float(img[y,x,1]) \r\n            green=float(img[y,x,2])\r\n            intensity=round(np.cbrt(red*green*blue),2)\r\n            if intensity>maxIntensity:\r\n                maxIntensity=intensity\r\n                MaxXY=[y, x]\r\n    return MaxXY\r\n\r\ndef GetAvgIntensity(img, MaxXY):\r\n    height, width, channels = img.shape\r\n    counter=0.0\r\n    avgIntensity=0.0\r\n    for x in range(0, width,10):\r\n        counter=counter+1.0\r\n        red=float(img[MaxXY[0],x,0])\r\n        blue=float(img[MaxXY[0],x,1]) \r\n        green=float(img[MaxXY[0],x,2])\r\n        intensity=round(np.cbrt(red*green*blue),2)\r\n        avgIntensity+=intensity\r\n\r\n    avgIntensity=round(avgIntensity/counter,2)\r\n    print(\"AvgIntensity. \"+str(avgIntensity))\r\n    return avgIntensity\r\n    \r\ndef GetAndSaveFocusImg(img, MaxXY, filename):\r\n    height, width, channels = img.shape\r\n    xrangeMin=MaxXY[1]-150\r\n    if(xrangeMin<0):\r\n        xrangeMin=0\r\n\r\n    xrangeMax=MaxXY[1]+150\r\n    if(xrangeMax>width):\r\n        xrangeMax=width\r\n\r\n    yrangeMin=MaxXY[0]-150\r\n    if(yrangeMin<0):\r\n        yrangeMin=0\r\n\r\n    yrangeMax=MaxXY[0]+150\r\n    if(yrangeMax>height):\r\n        yrangeMax=height \r\n\r\n    print( str(yrangeMin)+\" \" +str(yrangeMax) + \" \"+str(xrangeMin)+\" \" +str(xrangeMax))\r\n    focus =img[yrangeMin:yrangeMax,xrangeMin:xrangeMax]\r\n    cv.imwrite(filename+'_Focus',focus)\r\n\r\n    \r\n","sub_path":"InspectPic.py","file_name":"InspectPic.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"524550863","text":"# -*-coding:utf-8-*-\nfrom django.shortcuts import render\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom django.http import HttpResponse\nfrom getnba.models import Player\nfrom string import lowercase\n# Create your views here.\ndef playerUrl(url):\n\tsoup = BeautifulSoup(urllib.urlopen(url))\n\tplayers = soup.find(id='players').contents[5].find_all('tr')\n\tactive = []\n\tfor tr in players:\n\t\ttd = tr.find_all('td')[0]\n\t\tif td.find('strong'):\n\t\t\tactive.append(td.find('a')['href'])\n\treturn active\ndef playerData(player_url):\n\turl = 'http://www.basketball-reference.com' + player_url\n\tsoup = BeautifulSoup(urllib.urlopen(url))\n\t# player info\n\tplayerdic = {\n\t\t'name' : soup.find('h1').string,\n\t\t'exp' : '',\n\t\t'height' : '',\n\t\t'weight' : '',\n\t}\n\tspans = soup.find(id=\"info_box\").find_all('span', 'bold_text')\n\tfor span in spans:\n\t\tif span.string == 'Height:':\n\t\t\tplayerdic['height'] = span.next_sibling[1:-3]\n\t\tif span.string == 'Weight:':\n\t\t\tplayerdic['weight'] = span.next_sibling[1:8]\n\t\tif span.string == 'Experience:':\n\t\t\ts = span.next_sibling[1:]\n\t\t\tplayerdic['exp'] = s.split(' ')[0]\n\t# into db\n\tplayer = Player.objects.create(**playerdic)\ndef gainBbrData(request):\n\tabc = lowercase[:23]\n\tabc = ''.join([abc, lowercase[24:]]) # the NBA has no player names starting with X\n\tplayer_urls = []\n\tfor s in abc:\n\t\turl = 'http://www.basketball-reference.com/players/'+ s +'/'\n\t\tplayer_urls.extend(playerUrl(url))\n\tfor url in player_urls:\n\t\tplayerData(url)\n\treturn HttpResponse('gained')\n","sub_path":"mypro/getnba/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"466584242","text":"import numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom mathFunc import getDetec,xcorrSimple,xcorrComplex,flat\nfrom numba import jit,float32, int64\nfrom scipy import fftpack,interpolate\nfrom fk import FK,getSourceSacName,FKL\nimport os \nfrom scipy import io as sio\nimport obspy\nfrom multiprocessing import Process, 
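GetAndSaveFocusImg clamps a 300x300 crop window so it cannot leave the image. The same bounds, written with min/max (a sketch with toy numbers; cy/cx stand for the bright-spot row and column):

def crop_bounds(cy, cx, height, width, half=150):
    # clamp the crop window so it stays inside the image
    y0, y1 = max(cy - half, 0), min(cy + half, height)
    x0, x1 = max(cx - half, 0), min(cx + half, width)
    return y0, y1, x0, x1

print(crop_bounds(20, 700, 480, 640))  # (0, 170, 550, 640)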
Manager\n'''\nthe specific meaning of them you can find in Chen Xiaofei's paper\n(A systematic and efficient method of computing normal modes for multilayered half-space)\n'''\nclass layer:\n '''\n class for layered media;\n the p velocity(vp), s velocity(vs), density(rho), [top depth, bottom depth](z) is needed; \n p and s 's Q(Qp, Qs) is optional(default is 1200,600)\n After specifying the above parameters, the lame parameter(lambda, miu), zeta and xi would\n be calculate as class's attributes. \n '''\n def __init__(self,vp,vs,rho,z=[0,0],Qp=1200,Qs=600):\n self.z = np.array(z)\n self.vp = np.array(vp)\n self.vs = np.array(vs)\n self.rho = np.array(rho)\n self.Qp = Qp\n self.Qs = Qs\n self.lamb, self.miu = self.getLame()\n self.zeta = self.getZeta()\n self.xi = self.getXi()\n @jit\n def getLame(self):\n miu = self.vs**2*self.rho\n lamb = self.vp**2*self.rho-2*miu\n return lamb, miu\n @jit\n def getZeta(self):\n return 1/(self.lamb + 2*self.miu)\n @jit\n def getXi(self):\n zeta = self.getZeta()\n return 4*self.miu*(self.lamb + self.miu)*zeta\n @jit\n def getNu(self, k, omega):\n return (k**2-(omega/self.vs.astype(np.complex))**2)**0.5\n @jit\n def getGamma(self, k,omega):\n return (k**2-(omega/self.vp.astype(np.complex))**2)**0.5\n @jit\n def getChi(self, k,omega):\n ### is it right\n nu = self.getNu(k, omega)\n return k**2 + nu**2\n #return k**2 + np.abs(nu)**2\n @jit\n def getEA(self, k,omega, z,mode = 'PSV'):\n nu = self.getNu(k, omega)\n gamma = self.getGamma(k, omega)\n chi = self.getChi(k,omega)\n alpha = self.vp\n beta = self.vs\n miu = self.miu\n if mode == 'PSV':\n E = 1/omega*np.array(\\\n [[ alpha*k, beta*nu, alpha*k, beta*nu],\\\n [ alpha*gamma, beta*k, -alpha*gamma, -beta*k], \\\n [ -2*alpha*miu*k*gamma, -beta*miu*chi, 2*alpha*miu*k*gamma, beta*miu*chi],\n [ -alpha*miu*chi, -2*beta*miu*k*nu, -alpha*miu*chi, -2*beta*miu*k*nu]])\n A = np.array(\\\n [[np.exp(-gamma*(z-self.z[0])), 0, 0, 0],\\\n [0, np.exp(-nu*(z-self.z[0])), 0, 0],\\\n [0, 0, np.exp(gamma*(z-self.z[1])), 0],\\\n [0, 0, 0, np.exp(nu*(z-self.z[1]))]\\\n ])\n elif mode == 'SH':\n E = np.array(\\\n [[1, 1],\\\n [ -miu*nu, miu*nu]])\n A = np.array(\\\n [[np.exp(-nu*(z-self.z[0])), 0],\\\n [ 0, np.exp(nu*(z-self.z[1]))]])\n return E, A\n\nclass surface:\n '''\n class for surface of layer\n the layers above and beneath(layer0, layer1) is needed\n Specify the bool parameters isTop and isBottom, if the surface is the first or last one; the default is false\n the default waveform mode(mode) is 'PSV', you can set it to 'SH'\n '''\n def __init__(self, layer0, layer1,mode='PSV',isTop = False, isBottom = False):\n self.layer0 = layer0\n self.layer1 = layer1\n #if not isTop:\n self.z = layer0.z[-1]\n self.Td = 0\n self.Tu = 0\n self.Rud = 0\n self.Rdu = 0\n self.TTd = 0\n self.TTu = 0\n self.RRud = 0\n self.RRdu = 0\n self.mode = mode\n self.isTop = isTop\n self.isBottom = isBottom\n self.E = [None,None]\n self.A = [None,None]\n @jit\n def submat(self,M):\n shape = M.shape\n lenth = int(shape[0]/2)\n newM = M.reshape([2, lenth, 2, lenth])\n newM = newM.transpose([0,2,1,3])\n return newM\n @jit\n def setTR(self, k, omega):\n E0, A0 = self.layer0.getEA(k, omega, self.z, self.mode)\n E1, A1 = self.layer1.getEA(k, omega, self.z,self.mode)\n E0 = self.submat(E0)\n #print(E0[0][0].shape)\n E1 = self.submat(E1)\n A0 = self.submat(A0)\n A1 = self.submat(A1)\n self.E = [E0, E1]\n self.A = [A0, A1]\n EE0 = self.toMat([[E1[0][0], -E0[0][1]],\\\n [ E1[1][0], -E0[1][1]]])\n EE1 = self.toMat([[E0[0][0], -E1[0][1]],\\\n [ E0[1][0], 
-E1[1][1]]])\n AA = self.toMat([[A0[0][0], A0[0][0]*0],\\\n [ A1[0][0]*0, A1[1][1]]])\n #print(AA)\n TR = EE0**(-1)*EE1*AA\n TR = self.submat(np.array(TR))\n self.Td = TR[0][0]\n self.Rdu = TR[1][0]\n self.Rud = TR[0][1]\n self.Tu = TR[1][1]\n if self.isTop:\n self.Rud = -E1[1][0]**(-1)*E1[1][1]*(A1[1][1])\n self.Td = self.Rud*0\n self.Tu = self.Rud*0\n @jit\n def toMat(self,l):\n shape0 = len(l)\n shape1 = len(l[0])\n shape = np.zeros(2).astype(np.int64)\n #print(l[0][0].shape)\n shape[0] = l[0][0].shape[0]\n shape[1] = l[0][0].shape[1]\n SHAPE = shape+0\n SHAPE[0] *=shape0\n SHAPE[1] *=shape1\n M = np.zeros(SHAPE,np.complex)\n for i in range(shape0):\n for j in range(shape1):\n i0 = i*shape[0]\n i1 = (i+1)*shape[0]\n j0 = j*shape[1]\n j1 = (j+1)*shape[1]\n #print(i0,i1,j0,j1)\n M[i0:i1,j0:j1] = l[i][j]\n return np.mat(M)\n @jit\n def setTTRRD(self, surface1 = 0):\n if self.isBottom :\n RRdu1 = np.mat(self.Rdu*0)\n #return 0\n else:\n RRdu1 = surface1.RRdu\n self.TTd = (np.mat(np.eye(self.Rud.shape[0])) - np.mat(self.Rud)*np.mat(RRdu1))**(-1)*np.mat(self.Td)\n self.RRdu = np.mat(self.Rdu) + np.mat(self.Tu)*np.mat(RRdu1)*self.TTd\n @jit\n def setTTRRU(self, surface0 = 0):\n if self.isTop :\n self.RRud = self.Rud\n return 0\n self.TTu = (np.mat(np.eye(self.Rud.shape[0])) - np.mat(self.Rdu)*np.mat(surface0.RRud))**(-1)*np.mat(self.Tu)\n self.RRud = np.mat(self.Rud) + np.mat(self.Td)*np.mat(surface0.RRud)*self.TTu\n\nclass model:\n '''\n class for layered media model\n modeFile is the media parameter model File, there are tow mods\n if layerMode == 'norm':\n '0 18 2.80 6.0 3.5'\n layer's top depth, layer's bottom depth, density, p velocity, svelocity\n if layerMode =='prem':\n '0.00 5.800 3.350 2.800 1400.0 600.0'\n depth, p velocity, s velocity, density, Qp, Qs\n mode is for PSV and SH\n getMode is the way to get phase velocity:\n norm is enough to get phase velocity\n new is to get fundamental phase velocity for PSV\n '''\n def __init__(self,modelFile, mode='PSV',getMode = 'norm',layerMode ='norm',layerN=10000,isFlat=False,R=6371,flatM=-2,\\\n pog='p'):\n #z0 z1 rho vp vs Qkappa Qmu\n #0 1 2 3 4 5 6\n self.modelFile = modelFile\n self.getMode = getMode\n self.isFlat =isFlat\n data = np.loadtxt(modelFile)\n layerN=min(data.shape[0]+1,layerN+1)\n layerL=[None for i in range(layerN)]\n if layerMode == 'norm':\n layerL[0] = layer(1.7, 1, 0.0001,[-100,0])\n for i in range(1,layerN):\n layerL[i] = layer(data[i-1,3], data[i-1,4], data[i-1,2], data[i-1,:2])\n elif layerMode == 'prem':\n layerL[0] = layer(1.7, 1, 0.0001,[-100,0])\n zlast = 0\n for i in range(1,layerN):\n #100.0 7.95 4.45 3.38 200.0 80.0\n #0 1 2 3 4 5\n #vp,vs,rho,z=[0,0],Qp=1200,Qs=600\n vp=data[i-1,1]\n vs=data[i-1,2]\n rho=data[i-1,3]\n if data.shape[1] == 6:\n Qp=data[i-1,4]\n Qs=data[i-1,5]\n else:\n Qp= 1200\n Qs=600\n z =np.array([data[i-1,0],data[min(i+1-1,layerN-2),0]])\n if isFlat:\n z,vp,vs,rho = flat(z,vp,vs,rho,m=flatM,R=R)\n layerL[i] = layer(vp,vs,rho,z,Qp,Qs)\n surfaceL = [None for i in range(layerN-1)]\n for i in range(layerN-1):\n isTop = False\n isBottom = False\n if i == 0:\n isTop = True\n if i == layerN-2:\n isBottom = True\n surfaceL[i] = surface(layerL[i], layerL[i+1], mode, isTop, isBottom)\n self.layerL = layerL\n self.surfaceL = surfaceL\n self.layerN = layerN\n @jit\n def set(self, k,omega):\n for s in self.surfaceL:\n s.setTR(k,omega)\n for i in range(self.layerN-1-1,-1,-1):\n #print(i)\n s = self.surfaceL[i]\n if i == self.layerN-1-1:\n s.setTTRRD(self.surfaceL[0])\n else:\n 
s.setTTRRD(self.surfaceL[i+1])\n for i in range(self.layerN-1):\n #print(i)\n s = self.surfaceL[i]\n if i == 0:\n s.setTTRRU(self.surfaceL[0])\n else:\n s.setTTRRU(self.surfaceL[i-1])\n @jit\n def get(self, k, omega):\n self.set(k, omega)\n RRud0 = self.surfaceL[0].RRud\n RRdu1 = self.surfaceL[1].RRdu\n if self.getMode == 'norm':\n M = np.mat(np.eye(RRud0.shape[0])) - RRud0*RRdu1\n elif self.getMode == 'new':\n #-E1[1][0]**(-1)*E1[1][1]*(A1[1][1])\n M = self.surfaceL[0].E[1][1][0]+self.surfaceL[0].E[1][1][1]*self.surfaceL[0].A[1][1][1]*RRdu1\n return np.linalg.det(M)\n @jit\n def plot(self, omega, dv=0.01):\n #k = np.arange(0,1,dk)\n v, k ,det = self.calList(omega, dv)\n plt.plot(v,np.real(det),'-k')\n plt.plot(v,np.imag(det),'-.k')\n plt.plot(v,np.abs(det),'r')\n plt.show()\n @jit\n def calList(self,omega,dv=0.01):\n vs0 = self.layerL[1].vs\n vp0 = self.layerL[1].vp\n v = np.arange(vs0-0.499,vs0+5,dv)\n k = omega/v\n det = k.astype(np.complex)*0\n for i in range(k.shape[0]):\n det[i] = self.get(k[i], omega)\n return v, k, det\n @jit\n def __call__(self,omega,calMode='fast'):\n return self.calV(omega,order = 0, dv=0.002, DV = 0.008,calMode=calMode,threshold=0.1)\n def calV(self, omega,order = 0, dv=0.002, DV = 0.008,calMode='norm',threshold=0.1,vStart = -1):\n if calMode =='norm':\n v, k ,det = self.calList(omega, dv)\n iL, detL = getDetec(-np.abs(det), minValue=-0.1, minDelta=int(DV /dv))\n i0 = iL[order]\n v0 = v[i0]\n det0 = -detL[0]\n elif calMode == 'fast':\n v0,det0=self.calVFast(omega,order=order,dv=dv,DV=DV,threshold=threshold,vStart=vStart)\n '''\n ddv = 0.001 \n for i in range(5):\n step = 1e-3*(5-i)\n v1 = v0 + ddv\n det1 = np.abs(self.get(omega/v1, omega))\n k = (det1-det0)/ddv\n v0 = v0 - k*step\n print(k)\n det0 = np.abs(self.get(omega/v1, omega))\n '''\n return v0,det0\n @jit\n def calVFast(self,omega,order=0,dv=0.01,DV=0.008,threshold=0.1,vStart=-1):\n if self.getMode == 'new':\n v = self.layerL[1].vs/2+1e-8\n else:\n v = self.layerL[1].vs+1e-8\n #print(vStart,v)\n if vStart >0:\n v = max(self.layerL[1].vs+1e-8,vStart - 0.02)\n dv = 0.001\n v0 = v\n det0=10\n for i in range(100000):\n v1 = i*dv+v\n #print(v1)\n det1 =np.abs(self.get(omega/v1, omega))\n if det1det0:\n return v0, det0\n @jit\n def calDispersion(self, order=0,calMode='norm',threshold=0.1,T= np.arange(1,100,5).astype(np.float),pog='p'):\n f = 1/T\n omega = 2*np.pi*f\n v = omega*0\n for i in range(omega.size):\n if pog =='p':\n V =np.abs(self.calV(omega[i],order=order,calMode=calMode,threshold=threshold))[0]\n v[i]=np.abs(self.calV(omega[i],order=order,calMode=calMode,threshold=threshold,vStart=V))[0]\n elif pog =='g' :\n omega0 = omega[i]*0.9\n omega1 = omega[i]*1.1\n V=np.abs(self.calV(omega1,order=order,calMode=calMode,threshold=threshold))[0]\n v0=np.abs(self.calV(omega0,order=order,calMode=calMode,threshold=threshold,vStart=V))[0]\n v1=np.abs(self.calV(omega1,order=order,calMode=calMode,threshold=threshold,vStart=V))[0]\n dOmega = omega1 - omega0\n dK = omega1/v1 - omega0/v0\n v[i] = dOmega/dK \n #print(omega[i],v[i])\n return f,v\n def test(self):\n self.plot(2*np.pi)\n def testDispersion(self):\n f,v = self.calDispersion()\n plt.plot(f,v)\n plt.show()\n def compare(self,dv=0.01):\n self.getMode = 'norm'\n v, k ,det = self.calList(6.28, dv)\n plt.plot(v,np.abs(det)/np.abs(det).max(),'k')\n self.getMode = 'new'\n v, k ,det = self.calList(6.28, dv)\n plt.plot(v,np.abs(det)/np.abs(det).max(),'r')\n plt.show()\n def covert2Fk(self, fkMode=0):\n if fkMode == 0:\n filename = self.modelFile+'fk0'\n else:\n 
filename = self.modelFile+'fk1'\n if self.isFlat:\n filename+='_flat'\n with open(filename,'w+') as f:\n for i in range(1,self.layerN):\n layer = self.layerL[i]\n thickness = layer.z[1] - layer.z[0]\n vp = layer.vp.copy()\n vs = layer.vs.copy()\n rho = layer.rho\n if fkMode == 0:\n vp/=vs\n print('%.2f %.2f %.2f %.2f 1200 600'%(thickness, vs, vp, rho))\n f.write('%.2f %.2f %.2f %.2f 1200 600'%(thickness, vs, vp, rho))\n if i!= self.layerN-1:\n f.write('\\n')\n\n\n\nclass disp:\n '''\n traditional method to calculate the dispersion curve\n then should add some sac to handle time difference\n '''\n def __init__(self,nperseg=300,noverlap=298,fs=1,halfDt=150,xcorrFunc = xcorrComplex):\n self.nperseg=nperseg\n self.noverlap=noverlap\n self.fs = fs\n self.halfDt = halfDt\n self.halfN = np.int(halfDt*self.fs)\n self.xcorrFunc = xcorrFunc\n @jit\n def cut(self,data):\n maxI = np.argmax(data)\n i0 = max(maxI - self.halfN,0)\n i1 = min(maxI + self.halfN,data.shape[0])\n print(i0,i1)\n return data[i0:i1],i0,i1\n @jit\n def xcorr(self,data0, data1,isCut=True):\n if isCut:\n data1,i0,i1 = self.cut(data1)\n #print(data0.shape,data1.shape1)\n xx = self.xcorrFunc(data0,data1)\n return xx,i0,i1\n @jit\n def stft(self,data):\n F,t,zxx = scipy.signal.stft(np.real(data),fs=self.fs,nperseg=self.nperseg,\\\n noverlap=self.noverlap)\n F,t,zxxj = scipy.signal.stft(np.imag(data),fs=self.fs,nperseg=self.nperseg,\\\n noverlap=self.noverlap)\n zxx = zxx+zxxj*1j\n zxx /= np.abs(zxx).max(axis=1,keepdims=True)\n return F,t,zxx\n def show(self,F,t,zxx,data,timeL,isShow=True):\n plt.subplot(2,1,1);plt.pcolor(t,F,np.abs(zxx));plt.subplot(2,1,2);plt.plot(timeL,data);\n if isShow:\n plt.show()\n def sacXcorr(self,sac0,sac1,isCut=True):\n fs = sac0.stats['sampling_rate']\n self.fs=fs\n self.halfN = np.int(self.halfDt*self.fs)\n data0 = sac0.data\n time0 = sac0.stats.starttime.timestamp\n dis0 = sac0.stats['sac']['dist']\n data1 = sac1.data\n time1 = sac1.stats.starttime.timestamp\n dis1 = sac1.stats['sac']['dist']\n xx,i0,i1 = self.xcorr(data0,data1,isCut)\n time1New = time1+i0/fs\n dTime = time0 -time1New\n timeL = np.arange(xx.size)/fs+dTime\n dDis = dis0 - dis1\n #print(np.imag(xx))\n return corr(xx,timeL,dDis,fs)\n def test(self,data0,data1,isCut=True):\n xx = self.xcorr(data0,data1,isCut=isCut)\n F,t,zxx = self.stft(xx)\n self.show(F,t,zxx,xx)\n def testSac(self,sac0,sac1,isCut=True,fTheor=[],vTheor=[]):\n xx,timeL,dDis,fs = self.sacXcorr(sac0,sac1,isCut=True).output()\n F,t,zxx = self.stft(xx)\n print(t)\n t = t+timeL[0]+0*self.nperseg/self.fs\n self.show(F,t,zxx,xx,timeL,isShow=False)\n if len(fTheor)>0:\n timeTheorL =dDis/vTheor\n plt.subplot(2,1,1);plt.plot(timeTheorL,fTheor)\n return xx, zxx, F, t\n\nclass fv:\n '''\n class for dispersion result\n it have two attributes f and v, each element in v accosiate with an \n element in v \n '''\n def __init__(self,input,mode='num'):\n if mode == 'num':\n self.f = input[0]\n self.v = input[1]\n if mode == 'file':\n fvM = np.loadtxt(input)\n self.f = fvM[:,0]\n self.v = fvM[:,1]\n self.interp = self.genInterp()\n def genInterp(self):\n return interpolate.interp1d(self.f,self.v,kind='linear')\n def __call__(self,f):\n return self.interp(f)\n def save(self,filename):\n np.savetxt(filename, np.concatenate([self.f.reshape([-1,1]),\\\n self.v.reshape([-1,1])],axis=1))\n\n\n\n\nclass corr:\n \"\"\"docstring for \"\"\"\n def __init__(self,xx=np.arange(0,dtype=np.complex),timeL=np.arange(0),dDis=0,fs=0,\\\n 
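The fv class above wraps a dispersion curve in scipy's interp1d and is then called like a function. Standalone usage with made-up frequency/velocity pairs:

import numpy as np
from scipy import interpolate

f = np.array([0.01, 0.02, 0.05, 0.1])   # Hz (toy values)
v = np.array([3.9, 3.7, 3.4, 3.1])      # km/s (toy values)
curve = interpolate.interp1d(f, v, kind='linear')
print(float(curve(0.03)))  # -> 3.6, linear between the 0.02 and 0.05 samples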
az=np.array([0,0]),dura=0,M=np.array([0,0,0,0,0,0,0]),dis=np.array([0,0]),\\\n dep = 10,modelFile='',name0='',name1='',srcSac=''):\n self.maxCount = -1\n maxCount = xx.shape[0]\n self.dtype = self.getDtype(maxCount)\n self.xx = xx.astype(np.complex)\n self.timeL = timeL\n self.dDis = dDis\n self.fs = fs\n self.az = az\n self.dura = dura\n self.M = M\n self.dis = dis\n self.dep = dep\n self.modelFile=modelFile\n self.name0 = name0\n self.name1 = name1\n self.srcSac= srcSac\n def output(self):\n return self.xx,self.timeL,self.dDis,self.fs\n def toDict(self):\n return {'xx':self.xx, 'timeL':self.timeL, 'dDis':self.dDis, 'fs':self.fs,\\\n 'az':self.az, 'dura':self.dura,'M':self.M,'dis':self.dis,'dep':self.dep,\\\n 'modelFile':self.modelFile,'name0':self.name0,'name1':self.name1,\\\n 'srcSac':self.srcSac}\n def toMat(self):\n self.getDtype(self.xx.shape[0])\n return np.array((self.xx, self.timeL, self.dDis,self.fs,self.az, self.dura\\\n ,self.M,self.dis,self.dep,self.modelFile,self.name0,self.name1,\\\n self.srcSac),self.dtype)\n def setFromFile(self,file):\n mat = scipy.io.load(file)\n self.setFromDict(mat)\n def setFromDict(self,mat):\n self.xx = mat['xx'] \n self.timeL = mat['timeL']\n self.dDis = mat['dDis']\n self.fs = mat['fs']\n self.az = mat['az']\n self.dura = mat['dura']\n self.M = mat['M']\n self.dis = mat['dis']\n self.dep = mat['dep']\n self.modelFile = str(mat['modelFile'])\n self.name0 = str(mat['name0'])\n self.name1 = str(mat['name1'])\n self.srcSac = str(mat['srcSac'])\n return self\n def save(self,fileName):\n sio.savemat(fileName,self.toMat())\n def show(self,d,FV):\n linewidth=0.3\n F,t,zxx = d.stft(self.xx)\n t = t+self.timeL[0]\n ylim=[0,0.2]\n xlim = [t[0],t[-1]]\n ax=plt.subplot(3,2,1)\n plt.plot(self.timeL,np.real(self.xx),'b',linewidth=linewidth)\n plt.plot(self.timeL,np.imag(self.xx),'r',linewidth=linewidth)\n plt.xlabel('t/s')\n plt.ylabel('corr')\n plt.xlim(xlim)\n ax=plt.subplot(3,2,3)\n plt.pcolor(t,F,np.abs(zxx))\n fTheor = FV.f\n timeTheorL = self.dDis/FV.v\n #print(timeTheorL)\n plt.plot(timeTheorL,fTheor,'r')\n plt.xlabel('t/s')\n plt.ylabel('f/Hz')\n plt.xlim(xlim)\n plt.ylim(ylim)\n ax=plt.subplot(3,2,2)\n sac0 = obspy.read(self.name0)[0]\n sac1 = obspy.read(self.name1)[0]\n plt.plot(getSacTimeL(sac0),sac0,'b',linewidth=linewidth)\n plt.plot(getSacTimeL(sac1),sac1,'r',linewidth=linewidth)\n plt.xlabel('time/s')\n ax=plt.subplot(3,2,4)\n mat = np.loadtxt(self.modelFile)\n ax.invert_yaxis() \n #'0.00 5.800 3.350 2.800 1400.0 600.0'\n plt.plot(mat[:,1],mat[:,0],'b',linewidth=linewidth)\n plt.plot(mat[:,2],mat[:,0],'r',linewidth=linewidth)\n plt.ylim([900,-10])\n plt.subplot(3,2,5)\n timeDis = self.outputTimeDis(FV)\n plt.pcolor(self.timeL,self.T,timeDis.transpose())\n def getDtype(self,maxCount):\n if maxCount == self.maxCount:\n return self.dtype\n else:\n self.maxCount=maxCount\n corrType = np.dtype([ ('xx' ,np.complex,maxCount),\\\n ('timeL' ,np.float64,maxCount),\\\n ('dDis' ,np.float64,1),\\\n ('fs' ,np.float64,1),\\\n ('az' ,np.float64,2),\\\n ('dura' ,np.float64,1),\\\n ('M' ,np.float64,7),\\\n ('dis' ,np.float64,2),\\\n ('dep' ,np.float64,1),\\\n ('modelFile',np.str,200),\\\n ('name0' ,np.str,200),\\\n ('name1' ,np.str,200),\\\n ('srcSac' ,np.str,200)\\\n ])\n return corrType\n def outputTimeDis(self,FV,T=np.array([5,10,20,30,50,80,100,150,200,250,300]),sigma=2):\n self.T=T\n f = 1/T\n dim = [self.timeL.shape[0],T.shape[0]]\n timeDis = np.zeros(dim)\n f = f.reshape([1,-1])\n timeL = self.timeL.reshape([-1,1])\n v = FV(f)\n t = self.dDis/v\n timeDis = 
np.exp(-((timeL-t)/sigma)**2)\n return timeDis\n\ndef getTimeDis(corrL,fvD,T,sigma=2,maxCount=512):\n maxCount0 = maxCount\n x = np.zeros([len(corrL),maxCount,1,1])\n y = np.zeros([len(corrL),maxCount,1,len(T)])\n #maxCount = min(maxCount,corrL[0].xx.shape[0])\n for i in range(len(corrL)):\n maxCount = min(maxCount0,corrL[i].xx.shape[0])\n x[i,:maxCount,0,0] = corrL[i].xx[:maxCount]\n y[i,:maxCount,0,:] = corrL[i].outputTimeDis(fvD[corrL[i].modelFile],\\\n T=T,sigma=sigma)[:maxCount]\n return x,y\n\ndef getSacTimeL(sac):\n return np.arange(len(sac))*sac.stats['delta']+sac.stats['sac']['b']\n\ndef genModel(modelFile = 'prem',N=100,perD = 0.10,depthMul=2):\n modelDir = 'models/'\n if not os.path.exists(modelDir):\n os.mkdir(modelDir)\n #800.0 11.0 6.13 4.46 740.0 312.0\n model0 = np.loadtxt(modelFile)\n for i in range(N):\n model = model0.copy()\n depthLast = 0\n for j in range(model.shape[0]):\n depth0 = model[j,0]\n depth = max(depthLast,depthLast + (depth0-depthLast)*(1+perD*depthMul*(2*np.random.rand()-1)))\n if j ==0:\n depth=0\n depthLast = depth\n model[j,0] = depth\n for k in range(1,model.shape[1]):\n if j ==0:\n d = 0.3\n else:\n d = model0[j,k]- model0[j-1,k]\n if j!=0:\n model[j,k]=model[j-1,k]+(1+perD*(2*np.random.rand()-1))*d\n else:\n model[j,k]=model[j,k]+(0+perD*(2*np.random.rand()-1))*d\n np.savetxt('%s/%s%d'%(modelDir,modelFile,i),model)\n\ndef genFvFile(modelFile,fvFile='',mode='PSV',getMode = 'norm',layerMode ='prem',layerN=20,calMode='fast',\\\n T=np.array([0.5,1,5,10,20,30,50,80,100,150,200,250,300]),isFlat=False,pog='p'):\n if len(fvFile) ==0:\n if not isFlat:\n fvFile='%s_fv'%modelFile\n else:\n fvFile='%s_fv_flat'%modelFile\n fvFile+= '_'+getMode\n fvFile+= '_'+pog\n m = model(modelFile,mode=mode,getMode=getMode,layerMode=layerMode,layerN=layerN,isFlat=isFlat)\n f,v=m.calDispersion(order=0,calMode=calMode,threshold=0.1,T=T,pog=pog)\n f = fv([f,v],'num')\n f.save(fvFile)\n\ndef calFv(iL,originName='models/prem',layerN=20,pog='p',\\\n T=np.array([5,10,20,30,50,80,100,150,200,250,300])):\n for i in iL:\n modelFile = '%s%d'%(originName,i)\n print(i)\n genFvFile(modelFile,fvFile='',mode='PSV',getMode = 'norm',layerMode ='prem',layerN=layerN,calMode='fast',\\\n T=T,isFlat=True,pog=pog)\n\n\ndef corrSac(d,sac0,sac1,name0='',name1='',az=np.array([0,0]),dura=0,M=np.array([0,0,0,0,0,0,0])\\\n ,dis=np.array([0,0]),dep = 10,modelFile='',srcSac=''):\n corr = d.sacXcorr(sac0,sac1)\n corr.az = az\n corr.dura = dura\n corr.M = M\n corr.dis = dis\n corr.dep = dep\n corr.modelFile = modelFile\n corr.name0 = name0\n corr.name1 = name1\n corr.srcSac=srcSac\n return corr\n\ndef corrSacsL(d,sacsL,sacNamesL,dura=0,M=np.array([0,0,0,0,0,0,0])\\\n ,dep = 10,modelFile='',srcSac=''):\n corrL = []\n N = len(sacsL)\n for i in range(N):\n for j in range(i):\n sac0 = sacsL[i][0]\n sac1 = sacsL[j][0]\n name0 = sacNamesL[i][0]\n name1 = sacNamesL[j][0]\n #print(sac0,sac1,sac0.stats['sac']['az'],sac1.stats['sac']['az'])\n az = np.array([sac0.stats['sac']['az'],sac1.stats['sac']['az']])\n dis = np.array([sac0.stats['sac']['dist'],sac1.stats['sac']['dist']])\n #tmp = corrSac(d,sac0,sac1,name0,name1,az,dura,M,dis,dep,modelFile)\n #print(np.imag(tmp.xx))\n corrL.append(corrSac(d,sac0,sac1,name0,name1,az,dura,M,dis,dep,modelFile,srcSac).toMat())\n return corrL\n\ndef singleFk(f,iL,corrLL,index,D,originName,srcSacDir,distance,srcSacNum,delta,layerN):\n for i in iL:\n modelFile = '%s%d'%(originName,i)\n print(modelFile)\n m = model(modelFile=modelFile,mode='PSV',layerN=layerN,layerMode 
='prem',isFlat=True)\n m.covert2Fk(0)\n m.covert2Fk(1)\n dura = np.random.rand()*10+20\n depth= int(np.random.rand()*20+10)+(i%10)\n print('###################################',depth)\n M=np.array([3e25,0,0,0,0,0,0])\n M[1:] = np.random.rand(6)\n srcSacIndex = int(np.random.rand()*srcSacNum*0.999)\n rise = 0.1+0.3*np.random.rand()\n sacsL, sacNamesL= f.test(distance=distance+np.round((np.random.rand(distance.size)-0.5)*80),\\\n modelFile=modelFile,fok='/k',dt=delta,depth=depth,expnt=10,dura=dura,dk=0.1,\\\n azimuth=[0,int(6*(np.random.rand()-0.5))],M=M,rise=rise,srcSac=getSourceSacName(srcSacIndex,delta,\\\n srcSacDir = srcSacDir),isFlat=True)\n corrLL[index] += corrSacsL(D,sacsL,sacNamesL,modelFile=modelFile,\\\n srcSac=getSourceSacName(srcSacIndex,delta,srcSacDir = srcSacDir))\n# 20,d.singleFk,20,D,'models/ak135',srcSacDir,distance,srcSacNum,delta,orignExe=orignExe\ndef multFK(fkN,singleFk,num,D,originName,srcSacDir,distance,srcSacNum,delta,layerN,orignExe):\n fkN = 20 \n fkL = FKL(fkN,orignExe=orignExe)\n pL = []\n manager = Manager()\n corrLL = manager.list()\n for i in range(fkN):\n corrLL. append([])\n for i in range(fkN):\n f = fkL[i]\n pL.append(Process(\\\n target=singleFk,\\\n args=(f,range(i,num,fkN), corrLL,i,D,originName,srcSacDir,distance,srcSacNum,delta,layerN) \n )\\\n )\n pL[-1].start()\n for p in pL:\n p.join()\n print('######',i)\n i+=1\n corrL = []\n for tmp in corrLL:\n corrL += tmp\n corrMat = np.array(corrL)\n return corrMat\n","sub_path":"dispersion.py","file_name":"dispersion.py","file_ext":"py","file_size_in_byte":28954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"18188540","text":"#Takes a url as input from the command line,\r\n#and creates a file with the clarifai output\r\n\r\nfrom clarifai import rest\r\nfrom clarifai.rest import ClarifaiApp\r\nfrom clarifai.rest import Image as ClImage\r\n#Jason's Clarifai Client ID, Client Secret\r\napp = ClarifaiApp(\"wPXX8nSrRj_A25bqQAdAurGdZdbxzhJWELL9aaQ2\",\"SRRlIyrXfV7bJkNE7R3naCBPkxw3J-UwqVgcxJhJ\")\r\n#Model for Apparel Prediction\r\nmodel = app.models.get('e0be3b9d6a454f0493ac3a30784001ff')\r\n\r\n#Takes a URL in text, returns the response as a string\r\n#This function is the main part of this code, use it to format any clarifai API response\r\ndef clarify(urlstr):\r\n if 'http:' in urlstr:\r\n x = ClImage(url = urlstr)\r\n else:\r\n x = app.inputs.create_image_from_filename(urlstr)\r\n \r\n predictions = str(model.predict([x]))\r\n \r\n #Outputing the response string to a file and formatting it\r\n fil = open('output.txt', 'w')\r\n for i in predictions:\r\n if i == '}':\r\n fil.write('}\\n')\r\n else:\r\n fil.write(i)\r\n \r\n #Takes the highest confidence line\r\n fil = open('output.txt','r')\r\n for line in fil:\r\n if (('\\'id\\': \\'ai' in line) == True):\r\n result = line\r\n print(line)\r\n break\r\n\r\n #Takes only the name\r\n result1 = result[(result.find('\\'name\\':')+7):]\r\n result2 = result1[:result1.find(',')]\r\n return result2\r\n\r\nimageurl = str(input(\"Input a url or local file:\"))\r\nprint(clarify(imageurl))\r\n","sub_path":"fabrik/clarify.py","file_name":"clarify.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516081923","text":"import os\nfrom setuptools import setup, find_packages\n\nthis_dir = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_dir, 'README.md'), 'r') as f:\n long_description = f.read()\n\n# 
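multFK in the dispersion.py record fans FK runs out across Process workers and gathers their results through a Manager list. The coordination pattern in isolation; worker here is a trivial stand-in for one FK run:

from multiprocessing import Process, Manager

def worker(idx, out):
    out[idx] = idx * idx   # stand-in for one FK computation

if __name__ == '__main__':
    manager = Manager()
    out = manager.list([0] * 4)       # shared across processes
    ps = [Process(target=worker, args=(i, out)) for i in range(4)]
    for p in ps:
        p.start()
    for p in ps:
        p.join()
    print(list(out))                  # [0, 1, 4, 9]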
More information on properties: https://packaging.python.org/distributing\nsetup(name='requests_auth',\n version=open(\"requests_auth/_version.py\").readlines()[-1].split()[-1].strip(\"\\\"'\"),\n author='Colin Bounouar',\n author_email='colin.bounouar.dev@gmail.com',\n maintainer='Colin Bounouar',\n maintainer_email='colin.bounouar.dev@gmail.com',\n url=\"https://github.com/Colin-b/requests_auth\",\n description=\"Easy Authentication for Requests\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n download_url='https://pypi.org/project/requests-auth/',\n license='MIT',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n keywords=[\n 'authentication',\n 'ntlm',\n 'oauth2',\n 'azure-active-directory',\n 'azure-ad',\n 'okta',\n 'apikey',\n 'multiple',\n ],\n packages=find_packages(exclude=['tests']),\n tests_require=[\n # Used to run tests\n 'nose==1.3.7',\n # Used to generate a JWT token\n 'pyjwt==1.6.4',\n # Used to run test services\n 'flask==1.0.2',\n # Used to mock responses to requests\n 'responses==0.10.3',\n ],\n install_requires=[\n # Used for Base Authentication and to communicate with OAuth2 servers (also used in test cases)\n 'requests==2.21.0',\n ],\n platforms=[\n 'Windows',\n 'Linux',\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289572840","text":"# Use altered pycocotools to save annotated ground-truth images\r\n\r\nfrom pycocotools.coco import COCO\r\nimport numpy as np\r\nimport os\r\nimport skimage.io as io\r\nimport matplotlib.pyplot as plt\r\nimport pylab\r\n\r\npylab.rcParams['figure.figsize'] = (8.0, 10.0)\r\n\r\ndataDir = 'bead_cropped_detection'\r\nsaveDir = '{}/{}'.format(dataDir, '2020JSON_Painted')\r\n\r\n\r\n# Helper function to create a directory if it does not already exists\r\ndef make_dir(path, name = ''):\r\n path = os.path.abspath(os.path.join(path, name))\r\n\r\n if not os.path.exists(path):\r\n try:\r\n os.makedirs(path)\r\n except Exception as e:\r\n # Raise if directory can't be made.\r\n print('Error creating directory')\r\n raise e\r\n\r\ndef run():\r\n make_dir(saveDir)\r\n names = ['train', 'test']\r\n for filename in names:\r\n annFile = '{}/{}.json'.format(dataDir, filename)\r\n # initialize COCO api for instance annotations\r\n coco = COCO(annFile)\r\n catIds = coco.getCatIds(catNms=['beading'])\r\n imgIds = coco.getImgIds(catIds=catIds)\r\n for imgId in imgIds:\r\n img = coco.loadImgs([imgId])[0]\r\n I = io.imread('{}/images/{}'.format(dataDir, img['file_name']))\r\n\r\n # load and display instance annotations\r\n plt.figure()\r\n plt.imshow(I)\r\n plt.axis('off')\r\n annIds = coco.getAnnIds(imgIds=[img['id']], catIds=catIds,\r\n iscrowd=None)\r\n anns = coco.loadAnns(annIds)\r\n coco.showAnns(anns, draw_bbox=True)\r\n plt.savefig('{}/painted-{}'.format(saveDir, img['file_name']),\r\n bbox_inches='tight')\r\n print(\"painting {} ...\".format(img['file_name']))\r\n 
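The setup() call above derives the package version by taking the last line of requests_auth/_version.py and stripping quotes. The same expression unrolled on an assumed example line (the file's real contents are not in the record):

# suppose _version.py ends with a line like: __version__ = "4.0.1"
line = '__version__ = "4.0.1"'          # stand-in for open(...).readlines()[-1]
version = line.split()[-1].strip("\"'")
print(version)  # 4.0.1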
plt.close()\r\n\r\n print(\"complete!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run()","sub_path":"helper_scripts/coco-painter.py","file_name":"coco-painter.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588117474","text":"# - * - coding:utf8 - * - -\n###########################################\n# Author: Tinkle\n# E-mail: shutingnjupt@gmail.com\n# Name: Range Sum Query - Mutable.py\n# Creation Time: 2017/8/30\n###########################################\n'''\nGiven an integer array nums, find the sum of the elements between indices i and j (i ≤ j), inclusive.\n\nThe update(i, val) function modifies nums by updating the element at index i to val.\nExample:\nGiven nums = [1, 3, 5]\n\nsumRange(0, 2) -> 9\nupdate(1, 2)\nsumRange(0, 2) -> 8\n'''\n\n\nclass NumArray(object):\n def __init__(self, nums):\n '''\n :param nums: List[int]\n '''\n self.nums = nums\n self.segTree = [0] * (4 * len(nums) + 10)\n if len(nums):\n self.Build(1, 0, len(nums) - 1)\n\n def Build(self, node, begin, end):\n if begin == end:\n self.segTree[node] = self.nums[begin]\n else:\n mid = (begin + end) / 2\n self.Build(2 * node, begin, mid)\n self.Build(2 * node + 1, mid + 1, end)\n\n self.segTree[node] = self.segTree[2 * node] + self.segTree[2 * node + 1]\n\n def range(self, node, begin, end, i, j):\n if begin >= i and end <= j:\n return self.segTree[node]\n elif end < i or begin > j:\n return 0\n else:\n mid = (begin + end) / 2\n p1 = self.range(2 * node, begin, mid, i, j)\n p2 = self.range(2 * node + 1, mid + 1, end, i, j)\n return p1 + p2\n\n def sumRange(self, i, j):\n '''\n :param i: int\n :param j: int\n :return: int\n '''\n if i < 0 or j < 0 or i >= len(self.nums) or j >= len(self.nums):\n return 0\n return self.range(1, 0, len(self.nums) - 1, i, j)\n\n def update(self, i, val):\n '''\n :param i: int\n :param val: int\n :return: void\n '''\n if i < 0 or i >= len(self.nums):\n return\n self.up(1, 0, len(self.nums) - 1, i, val)\n\n def up(self, node, begin, end, i, val):\n if begin == end:\n self.segTree[node] = val\n else:\n mid = (begin + end) / 2\n if mid >= i:\n self.up(2 * node, begin, mid, i, val)\n else:\n self.up(2 * node + 1, mid + 1, end, i, val)\n self.segTree[node] = self.segTree[2 * node] + self.segTree[2 * node + 1]\n","sub_path":"Segment Tree/307. Range Sum Query - Mutable.py","file_name":"307. 
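coco-painter.py's make_dir guards os.makedirs by hand with an exists check and a try/except. On Python 3.2+ the exist_ok flag collapses the helper to one call (sketch):

import os

def make_dir(path, name=''):
    # equivalent to the helper in the record on Python 3.2+
    os.makedirs(os.path.abspath(os.path.join(path, name)), exist_ok=True)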
Range Sum Query - Mutable.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"363783878","text":"import os, argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 16, 'xtick.labelsize':14, 'ytick.labelsize':14})\n\nfrom model import PENG_model\nfrom utils import integration_utils\nfrom scipy.optimize import newton_krylov, brentq\n\nfrom astropy.cosmology import Planck15 as cosmo, z_at_value\n\nz_init = 10\nz_final = 0\n\ncluster_mass = 13.5 #log10(Mhalo)\nn_clusters = 100\n\noc_flag = True #\noc_eta = 1\n\nlogMh_range = np.arange(9,15,0.1)\nlogMs_range = np.arange(0.5,11.5,0.1)\nMh_range = np.power(10, logMh_range)\nz_range = np.arange(0,10,0.1)\n\nif __name__ == \"__main__\":\n model = PENG_model(None, z_init, z_final)\n \n ##!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#\n \n sSFR = np.array([model.sSFR(logMs,z_range) for logMs in logMs_range ])\n \n fig,ax = plt.subplots(tight_layout=True)\n contourf_ = ax.contourf(z_range, logMs_range, np.log10(sSFR) )#, np.arange(0,14,2), extend='both')\n ax.set_title('Speagle log(sSFR)')\n ax.set_xlabel('Redshift [z]')\n ax.set_ylabel('Stellar Mass [log($M_*/M_\\odot$)]')\n plt.colorbar(contourf_)\n \n fig.savefig('./images/SPEAGLE_sSFR.png', dpi=220)\n \n ##!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#\n \n \n \n # model.sf_masses = 4.5\n # model.setup_evolve(10,0)\n \n # while model.t >= model.t_final and model.condition:\n # model.mass_array = model.integ.RK45(model.mass_array, model.t, model.force)\n \n # if (model.t - model.integ.step) > model.t_final:\n # pass\n # else:\n # model.integ.step = model.t - model.t_final\n # model.force = True\n # model.condition = False\n \n # print(np.log10(model.mass_array))\n \n\n ##!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#\n \n # model.oc_eta = 1\n # tt = np.array([cosmo.lookback_time(z).value - model.t_delay_2(Mh_range, z) for z in z_range])\n \n # fig,ax = plt.subplots()\n # ax.contourf(z_range, logMh_range, tt.T, np.arange(0,14,2), extend='both')\n \n ##!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#\n \n # logMh = np.array([[brentq(model.M_star_inv(logMs,z), 5, 35 ) for z in z_range] for logMs in logMs_range])\n \n # fig,ax = plt.subplots()\n # contourf_ = ax.contourf(z_range, logMs_range, logMh)#, np.arange(0,14,2), extend='both')\n # cbar = fig.colorbar(contourf_, label='Halo Mass')\n # ax.set_xlabel('z')\n # ax.set_ylabel('stellar mass')\n \n ##!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#\n \n fig,ax = plt.subplots()\n for z in range(0,9):\n f_s = model.M_star(Mh_range, z) / Mh_range\n f_s[f_s List[Record]:\n path = Path(path_dicts)\n if path.is_dir():\n for path_dict in path.iterdir():\n for record in parse_dict(path_dict):\n yield record\n else:\n yield parse_dict(path)\n\n\ndef parse_dict(path_dict: Union[str, Path]) -> List[Record]:\n path = Path(path_dict) if str == type(path_dict) else path_dict\n with path.open() as f:\n for line in f.readlines():\n if line.startswith('#'): continue\n yield parse_line(line)\n\n\ndef parse_line(line: str) -> Record:\n \"\"\"\n word, alias\n 军团, [SPECIAL-TOKEN], (0, (均,君)), <工会>\n \"\"\"\n record = line.split(',', 1)\n\n if 1 == len(record):\n return 
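The NumArray segment tree above realizes its docstring example: with [1, 3, 5], sumRange(0, 2) is 9, and after update(1, 2) it becomes 8. A Fenwick (binary indexed) tree, sketched here as an alternative rather than taken from the record, supports the same point-update/range-sum interface in O(log n) with less code:

class Fenwick:
    def __init__(self, nums):
        self.n = len(nums)
        self.vals = list(nums)
        self.tree = [0] * (self.n + 1)
        for i, v in enumerate(nums):
            self._add(i + 1, v)
    def _add(self, i, delta):
        while i <= self.n:
            self.tree[i] += delta
            i += i & (-i)          # climb to the next responsible node
    def _prefix(self, i):
        s = 0
        while i > 0:
            s += self.tree[i]
            i -= i & (-i)          # drop the lowest set bit
        return s
    def update(self, i, val):
        self._add(i + 1, val - self.vals[i])
        self.vals[i] = val
    def sumRange(self, i, j):
        return self._prefix(j + 1) - self._prefix(i)

nums = Fenwick([1, 3, 5])
print(nums.sumRange(0, 2))   # 9
nums.update(1, 2)
print(nums.sumRange(0, 2))   # 8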
Record(record[0].strip())\n \n alias = {}\n for (i, _alias) in re.findall(r'(?:\\(([0-9]{1,}),\\s*\\((.*?)\\)\\))', record[1].strip()):\n alias[int(i)] = _alias.split(',')\n\n special = re.findall(r'(\\[.*\\])', record[1].strip())\n special = special[0] if special else None\n if special: \n try:\n assert(special in spe)\n except:\n logger.debug(special)\n raise ValueError(f'{special} not in special ')\n\n norm = re.findall(r'<(.*)>', record[1].strip())\n norm = norm[0] if norm else None\n \n return Record(record[0].strip(), alias, special, norm)\n\n\ndef trie(path_dicts: str) -> Node:\n root = Node(None, None, None, None, [])\n\n for record in parse_dicts(path_dicts):\n current_node = root\n\n for i, char in enumerate(record.value):\n node = find_in_children(current_node, char, precise=True)\n record_alias = record.alias.get(i,[]) if record.alias is not None else []\n record_leaf = i == len(record.value)-1\n record_special = record.special if record_leaf else None\n record_norm = record.norm if record_leaf else None\n\n if node is not None:\n # update new info\n if (not node.alias and record_alias) or (not node.special and record_special) or (not node.leaf and record_leaf):\n old_node = node\n\n alias = record_alias if record_alias else old_node.alias\n special = record_special if record_special else old_node.special\n norm = record_norm if record_norm else old_node.norm\n leaf = record_leaf if record_leaf else old_node.leaf\n\n node = Node(old_node.value, alias, special, norm, old_node.children, leaf)\n\n current_node.children.remove(old_node)\n del old_node\n current_node.children.append(node)\n else:\n node = Node(char, record_alias, record_special, record_norm, [], record_leaf)\n current_node.children.append(node)\n\n current_node = node\n\n return root\n\n\ndef has(root: Node, text: Union[str, List[str]]) -> bool:\n current_node = root\n for char in text:\n current_node = find_in_children(current_node, char)\n if current_node is None:\n return False\n if current_node.leaf:\n return True\n return False\n\n\ndef find(root: Node, text: Union[str, List[str]]) -> Tuple[str, int]:\n token = None\n idx = -1\n\n match_nodes = []\n current_node = root\n i = 0\n for i, char in enumerate(text):\n node = find_in_children(current_node, char)\n # print(i, char, node.value if node is not None else repr(node))\n if node is None:\n i -= 1\n break\n match_nodes.append(node)\n current_node = node\n if not current_node.children:\n break\n\n if match_nodes and current_node.leaf:\n if match_nodes[-1].special is not None:\n token = match_nodes[-1].special\n elif match_nodes[-1].norm is not None:\n token = match_nodes[-1].norm\n else:\n token = ''.join([n.value for n in match_nodes])\n # print('match', [_.value for _ in match_nodes])\n\n return token, i\n\n\ndef find_in_children(node: Node, char: str, precise: bool=False) -> bool:\n for c in node.children:\n # print(c.value, c.alias, char, c.value==char, char in c.alias)\n if char == c.value or (not precise and char in c.alias):\n return c\n\n\ndef fprint(node: Node):\n return f'value:{node.value}, alias:{repr(node.alias)}, special:{repr(node.special)}, leaf:{node.leaf}'# \\\n # f' ---- id:{id(node)} ---- children:{[id(c) for c in node.children]}'\n\n\ndef tree(node, depth=0):\n if depth == 0:\n print(fprint(node))\n\n if not node.children:\n print('|' if 0 != depth else '')\n return\n \n for c in node.children:\n print(('|' if 0 != depth else '') + ' ' * depth + '|____' + fprint(c))\n tree(c, depth+1)\n \n\nif __name__ == '__main__':\n # 
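editor note - uncomment to dump every parsed Record as a quick smoke test: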
print(list(parse_dicts('./dict')))\n root = trie('./dict')\n # print()\n # print(root)\n # print()\n tree(root)\n","sub_path":"ads/tokenizer/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"480212096","text":"\"\"\"\nAuthor:Jesse Boakye-Donkor\nThis module contains the tests for the methods in the functions modules\n\"\"\"\n\nimport unittest\nimport functions\nfrom DBClientStub import DBClientStub\n\n\nclass BmiTestCase(unittest.TestCase):\n \"\"\"\n A unit test case for calc_bmi\n \"\"\"\n def test_bmi_over(self):\n results = functions.calc_bmi(5, 10, 200)\n error_str = \"output should be (23.8,'normal weight') not {}\".format(results)\n self.assertEqual(results, (28.7, 'overweight'), error_str)\n\n def test_bmi_under(self):\n results = functions.calc_bmi(6, 0, 130)\n error_str = \"output should be (17.6,'underweight') not {}\".format(results)\n self.assertEqual(results, (17.6, 'underweight'), error_str)\n\n def test_bmi_normal(self):\n results = functions.calc_bmi(5, 8, 160)\n error_str = \"output should be (24.3,'normal weight') not {}\".format(results)\n self.assertEqual(results, (24.3, 'normal weight'), error_str)\n\n def test_bmi_obese(self):\n results = functions.calc_bmi(5, 4, 180)\n error_str = \"output should be (30.9,'obese') not {}\".format(results)\n self.assertEqual(results, (30.9, 'obese'), error_str)\n\n\nclass DistanceTestCase(unittest.TestCase):\n \"\"\"\n unit test case for calc_distance\n \"\"\"\n \n def test_calc_distance(self):\n results = functions.calc_distance([2, 1], [3, 4],db_client=DBClientStub())\n error_str = \"distance should be 3.16 not %f \" % (results)\n self.assertEqual(results, 3.16, error_str)\n\n\nclass SplitTipTestCase(unittest.TestCase):\n \"\"\"\n unit test for split_tip\n \"\"\"\n\n def test_split_tip(self):\n results = functions.split_tip(23.64, 5,db_client=DBClientStub())\n expect_output = {\n 'guest1': 5.43,\n 'guest2': 5.44,\n 'guest3': 5.44,\n 'guest4': 5.44,\n 'guest5': 5.44,\n }\n error_str = \"expect bill should be {} not {}\".format(expect_output, results)\n self.assertEqual(results, expect_output, error_str)\n\n\nclass RetirementSavingsTestCase(unittest.TestCase):\n \"\"\"\n unit test for calc_savings\n \"\"\"\n def test_savings_metdeath(self):\n results = functions.calc_savings(21, 80000, 10, 120 * 10 ** 6)\n expect_output = (100, False)\n error_str = \"expected {} not {}\".format(expect_output, results)\n self.assertEqual(results, expect_output, error_str)\n def test_savings_metgoal(self):\n results = functions.calc_savings(21, 120000, 23, 12 * 10 ** 6)\n expect_output = (90, True)\n error_str = \"expected {} not {}\".format(expect_output, results)\n self.assertEqual(results, expect_output, error_str)\n\n def test_savings_metgoal2(self):\n results = functions.calc_savings(21, 120000, 90, 12 * 10 ** 6)\n expect_output = (0, True)\n error_str = \"expected {} not {}\".format(expect_output, results)\n self.assertEqual(results, expect_output, error_str)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318752841","text":"# -*- coding: utf-8 -*- #\n# Copyright 2022 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Recommender API recommender config Update command.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.recommender import base as reco_base\nfrom googlecloudsdk.api_lib.recommender import recommender_config\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.recommender import flags\n\n_DETAILED_HELP = {\n 'DESCRIPTION':\n '{description}',\n 'EXAMPLES':\n \"\"\" \\\n To update a recommender configuration, run:\n\n $ {command} ${RECOMMENDER} --project=${PROJECT} --location=${LOCATION}\n --etag=\\\\\"123\\\\\" --config-file=config.yaml\n \"\"\",\n}\n\n\n@base.ReleaseTracks(base.ReleaseTrack.BETA,\n base.ReleaseTrack.GA)\nclass Update(base.Command):\n r\"\"\"Update a recommender configuration.\n\n Update a recommender configuration based on a given entity (project,\n organization, billing account),\n location, and recommender.\n \"\"\"\n detailed_help = _DETAILED_HELP\n\n @staticmethod\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command.\n\n Args:\n parser: An argparse parser that you can use to add arguments that go on\n the command line after this command.\n \"\"\"\n flags.AddRecommenderFlagsToParser(parser, [\n reco_base.EntityType.PROJECT, reco_base.EntityType.ORGANIZATION,\n reco_base.EntityType.BILLING_ACCOUNT\n ])\n flags.AddConfigFileToParser(parser, 'recommender configuration')\n flags.AddDisplayNameToParser(parser, 'recommender configuration')\n flags.AddValidateOnlyToParser(parser)\n flags.AddEtagToParser(parser, 'recommender configuration')\n flags.AddAnnotationsToParser(parser, 'recommender configuration')\n\n def Run(self, args):\n \"\"\"Run 'gcloud recommender recommender-config update'.\n\n Args:\n args: argparse.Namespace, The arguments that the command was invoked with.\n\n Returns:\n The result recommender configuration to describe.\n \"\"\"\n client = recommender_config.CreateClient(self.ReleaseTrack())\n config_name = flags.GetRecommenderConfigName(args)\n return client.Update(config_name, args)\n","sub_path":"lib/surface/recommender/recommender_config/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639253647","text":"\"\"\"\nSetuptools script for cx_Freeze.\n\nUse one of the following commands to install:\n pip install .\n python setup.py build install\nUse one of the following commands to use the development mode:\n pip install -e .\n python setup.py develop\n\"\"\"\n\nimport glob\nimport os\nfrom shutil import which\nimport subprocess\nimport sys\nfrom sysconfig import get_config_var, get_platform, get_python_version\n\nfrom setuptools import setup, Command, Extension\nimport setuptools.command.build_ext\n\nWIN32 = sys.platform == \"win32\"\nDARWIN = sys.platform == \"darwin\"\n\nif sys.version_info < 
(3, 6, 0):\n sys.exit(\"Python3 versions lower than 3.6.0 are not supported.\")\n\n\nclass build_ext(setuptools.command.build_ext.build_ext):\n def build_extension(self, ext):\n if \"bases\" not in ext.name:\n super().build_extension(ext)\n return\n if WIN32 and self.compiler.compiler_type == \"mingw32\":\n ext.sources.append(\"source/bases/manifest.rc\")\n objects = self.compiler.compile(\n ext.sources,\n output_dir=self.build_temp,\n include_dirs=ext.include_dirs,\n debug=self.debug,\n depends=ext.depends,\n )\n filename = os.path.splitext(self.get_ext_filename(ext.name))[0]\n fullname = os.path.join(self.build_lib, filename)\n library_dirs = ext.library_dirs or []\n libraries = self.get_libraries(ext)\n extra_args = ext.extra_link_args or []\n if WIN32:\n compiler_type = self.compiler.compiler_type\n # support for delay load [windows]\n for arg in extra_args[:]:\n if arg.startswith(\"/DELAYLOAD:\"):\n lib_name = arg[len(\"/DELAYLOAD:\") :]\n extra_args.remove(arg)\n dll_path = self._get_dll_path(lib_name)\n dll_name = os.path.basename(dll_path)\n if compiler_type == \"msvc\":\n extra_args.append(f\"/DELAYLOAD:{dll_name}\")\n if lib_name not in libraries:\n libraries.append(lib_name)\n if \"delayimp\" not in libraries:\n libraries.append(\"delayimp\")\n elif compiler_type == \"mingw32\":\n if lib_name in libraries:\n libraries.remove(lib_name)\n lib_dir, library = self._dlltool_delay_load(lib_name)\n for linker_option in self.compiler.linker_exe:\n if \"clang\" in linker_option:\n extra_args.append(f\"-Wl,-delayload,{dll_name}\")\n break\n if get_platform().startswith(\"mingw_i686\"): # mingw32\n # disable delay load to avoid a Segmentation fault\n libraries.append(lib_name)\n else:\n libraries.append(library)\n library_dirs.append(lib_dir)\n if compiler_type == \"msvc\":\n extra_args.append(\"/MANIFEST\")\n elif compiler_type == \"mingw32\":\n if \"Win32GUI\" in ext.name:\n extra_args.append(\"-mwindows\")\n else:\n extra_args.append(\"-mconsole\")\n extra_args.append(\"-municode\")\n else:\n library_dirs.append(get_config_var(\"LIBPL\"))\n abiflags = get_config_var(\"abiflags\")\n libraries.append(f\"python{get_python_version()}{abiflags}\")\n if get_config_var(\"LINKFORSHARED\") and not DARWIN:\n extra_args.extend(get_config_var(\"LINKFORSHARED\").split())\n if get_config_var(\"LIBS\"):\n extra_args.extend(get_config_var(\"LIBS\").split())\n if get_config_var(\"LIBM\"):\n extra_args.append(get_config_var(\"LIBM\"))\n if get_config_var(\"BASEMODLIBS\"):\n extra_args.extend(get_config_var(\"BASEMODLIBS\").split())\n if get_config_var(\"LOCALMODLIBS\"):\n extra_args.extend(get_config_var(\"LOCALMODLIBS\").split())\n if DARWIN:\n # macOS on Github Actions\n extra_args.append(\"-Wl,-export_dynamic\")\n else:\n if not self.debug:\n extra_args.append(\"-s\")\n extra_args.append(\"-Wl,-rpath,$ORIGIN/lib\")\n extra_args.append(\"-Wl,-rpath,$ORIGIN/../lib\")\n self.compiler.link_executable(\n objects,\n fullname,\n libraries=libraries,\n library_dirs=library_dirs,\n runtime_library_dirs=ext.runtime_library_dirs,\n extra_postargs=extra_args,\n debug=self.debug,\n )\n\n def get_ext_filename(self, ext_name):\n if ext_name.endswith(\"util\"):\n return super().get_ext_filename(ext_name)\n # Examples of returned names:\n # Console-cp37-win32.exe, Console-cp39-win-amd64.exe,\n # Console-cp38-linux-x86_64\n ext_path = ext_name.split(\".\")\n py_version_nodot = get_config_var(\"py_version_nodot\")\n platform_nodot = get_platform().replace(\".\", \"\")\n name_suffix = 
f\"-cp{py_version_nodot}-{platform_nodot}\"\n exe_extension = \".exe\" if WIN32 else \"\"\n return os.path.join(*ext_path) + name_suffix + exe_extension\n\n @staticmethod\n def _get_dll_path(name):\n \"\"\"Find the dll by name, priority by extension.\"\"\"\n paths = [path for path in sys.path if os.path.isdir(path)]\n dll_path = None\n for path in paths:\n for dll_path in glob.glob(os.path.join(path, f\"{name}*.pyd\")):\n return dll_path\n for dll_path in glob.glob(os.path.join(path, f\"{name}*.dll\")):\n return dll_path\n return f\"{name}.dll\"\n\n def _dlltool_delay_load(self, name):\n \"\"\"Get the delay load library to use with mingw32 gcc/clang compiler\"\"\"\n dir_name = f\"libdl.{get_platform()}-{get_python_version()}\"\n library_dir = os.path.join(self.build_temp, dir_name)\n os.makedirs(library_dir, exist_ok=True)\n # Use gendef and dlltool to generate the library (.a and .delay.a)\n dll_path = self._get_dll_path(name)\n def_name = os.path.join(library_dir, f\"{name}.def\")\n gendef_exe = which(\"gendef\")\n def_data = subprocess.check_output([gendef_exe, \"-\", dll_path])\n with open(def_name, \"wb\") as def_file:\n def_file.write(def_data)\n lib_path = os.path.join(library_dir, f\"lib{name}.a\")\n library = f\"{name}.delay\"\n dlb_path = os.path.join(library_dir, f\"lib{library}.a\")\n dlltool_exe = os.path.join(os.path.dirname(gendef_exe), \"dlltool.exe\")\n dlltool = [dlltool_exe, \"-d\", def_name, \"-D\", dll_path, \"-l\", lib_path]\n output_delaylib_args = [\"-y\", dlb_path]\n try:\n # GNU binutils dlltool support --output-delaylib\n subprocess.check_call(dlltool + output_delaylib_args)\n except subprocess.CalledProcessError:\n # LLVM dlltool only supports generating an import library\n subprocess.check_call(dlltool)\n library = name\n return library_dir, library\n\n def run(self):\n self.run_command(\"install_include\")\n super().run()\n\n\nclass install_include(Command):\n def initialize_options(self):\n self.install_dir = None\n self.outfiles = []\n\n def finalize_options(self):\n self.set_undefined_options(\n \"install\",\n (\"install_data\", \"install_dir\"),\n )\n\n def run(self):\n if WIN32:\n target_dir = os.path.join(self.install_dir, \"include\")\n target_file_name = os.path.join(target_dir, \"cx_Logging.h\")\n if os.path.isfile(target_file_name):\n return\n self.mkpath(target_dir)\n self.copy_file(\"source\\\\bases\\\\cx_Logging.h\", target_file_name)\n self.outfiles.append(target_file_name)\n\n\nif __name__ == \"__main__\":\n # build base executables\n depends = [\"source/bases/Common.c\"]\n console = Extension(\n \"cx_Freeze.bases.Console\",\n [\"source/bases/Console.c\"],\n depends=depends,\n )\n extensions = [console]\n if WIN32:\n gui = Extension(\n \"cx_Freeze.bases.Win32GUI\",\n [\"source/bases/Win32GUI.c\"],\n depends=depends,\n libraries=[\"user32\"],\n )\n extensions.append(gui)\n service = Extension(\n \"cx_Freeze.bases.Win32Service\",\n [\"source/bases/Win32Service.c\"],\n depends=depends,\n extra_link_args=[\"/DELAYLOAD:cx_Logging\"],\n libraries=[\"advapi32\"],\n )\n extensions.append(service)\n # build utility module\n util_module = Extension(\n \"cx_Freeze.util\",\n [\"source/util.c\"],\n libraries=[\"imagehlp\", \"shlwapi\"],\n )\n extensions.append(util_module)\n\n # define package data\n package_data = []\n for filename in os.listdir(os.path.join(\"cx_Freeze\", \"initscripts\")):\n name, ext = os.path.splitext(filename)\n if ext != \".py\":\n continue\n package_data.append(f\"initscripts/{filename}\")\n\n setup(\n cmdclass={\"build_ext\": 
build_ext, \"install_include\": install_include},\n options={\"install\": {\"optimize\": 1}},\n ext_modules=extensions,\n packages=[\"cx_Freeze\"],\n package_data={\"cx_Freeze\": package_data},\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":9566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307556229","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n ARKspatial\n A QGIS plugin for Archaeological Recording.\n Part of the Archaeological Recording Kit by L - P : Archaeology\n http://ark.lparchaeology.com\n -------------------\n copyright : 2017 by L - P : Heritage LLP\n email : ark@lparchaeology.com\n copyright : 2017 by John Layt\n email : john@layt.net\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n\nfrom PyQt4.QtGui import QWidget\n\nfrom ArkSpatial.ark.core import Settings\n\nfrom .ui.project_browser_widget_base import Ui_ProjectBrowserWidget\n\n\nclass ProjectBrowserWidget(QWidget, Ui_ProjectBrowserWidget):\n\n def __init__(self, parent=None):\n super(ProjectBrowserWidget, self).__init__(parent)\n self.setupUi(self)\n\n def loadProject(self, plugin):\n self.projectCodeEdit.setText(Settings.projectCode())\n self.siteCodeEdit.setText(Settings.siteCode())\n self.projectNameEdit.setText(Settings.projectName())\n\n def closeProject(self):\n self.projectCodeEdit.setText('')\n self.siteCodeEdit.setText('')\n self.projectNameEdit.setText('')\n","sub_path":"ark/gui/project_browser_widget.py","file_name":"project_browser_widget.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"419581286","text":"def check_ip( str_ip_addr, str_start_ip, subnetmask):\n\n ipaddr = list(str_ip_addr.split(\".\"))\n ipaddr[0] = int(ipaddr[0])\n ipaddr[1] = int(ipaddr[1])\n ipaddr[2] = int(ipaddr[2])\n ipaddr[3] = int(ipaddr[3])\n startip = list(str_start_ip.split(\".\"))\n startip[0] = int(startip[0])\n startip[1] = int(startip[1])\n startip[2] = int(startip[2])\n startip[3] = int(startip[3])\n if startip[0] > 255 or startip[1] > 255 or startip[2] > 255 or startip[3] > 255:\n return False\n if ipaddr[0] > 255 or ipaddr[1] > 255 or ipaddr[2] > 255 or ipaddr[3] > 255:\n return False\n if ipaddr[0] < 0 or ipaddr[1] < 0 or ipaddr[2] < 0 or ipaddr[3] < 0:\n return False\n if startip[0] < 0 or startip[1] < 0 or startip[2] < 0 or startip[3] < 0:\n return False\n print(ipaddr)\n print(startip)\n\n #temp1 = int(ipaddr[0])<<24 + int(ipaddr[1])<<16 + int(ipaddr[2])<<8 + int(ipaddr[3])\n #temp2 = int(startip[0])<<24 + int(startip[1])<<16 + int(startip[2])<<8 + int(startip[3])\n temp1 = int(ipaddr[0])*(2**24) + int(ipaddr[1])*(2**16) + int(ipaddr[2])*(2**8) + int(ipaddr[3])\n temp2 = int(startip[0])*(2**24) + int(startip[1])*(2**16) + int(startip[2])*(2**8) + int(startip[3])\n print(hex(temp1))\n print(hex(temp2))\n\n if int(temp1)>>(32-subnetmask) == int(temp2)>>(32-subnetmask):\n return True\n 
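# editor note: shifting off the host bits leaves the network prefix, so equality here means both addresses sit in the same subnet\n    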
else:\n return False\n","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"266134415","text":"\n\"\"\"Implementation of Graph Data Structure.\n\nFormally, a Graph G consists of set of vertices V and set of directed edges E. \n G = (V, E). \nFor example: \n V = {1, 2, 3, 4} \n E = {(1, 2), (2, 4), (3, 4), (4, 3), (3, 2)}\n\nThese assertions are valid for graph G = (V, E):\n * The \"in-degree\" of vertex 4 is 2 (how many edges coming in).\n * The \"out-degree\" of vertex 4 is 1 (how many edges coming out).\n * Vertex 4's incoming neighbors are 2, 3.\n * Vertex 4's outgoing neighbor is 3.\n\nTypes of Graph:\n---------------\n * Directred / Undirected\n * Cyclic / Acyclic\n * Weighted / Unweighted\n * Connected / Disconnected\n * Some special cases:\n * Directed Acyclic Graph (DAG) - popular type of graph to model relations like casualties,\n hierarchies, temporal dependencies. e.g AirFlow library\n \nGraph Representations:\n----------------------\n * Adjacency List\n * Adjacency matrix\n * Edge Lists\n\n\"\"\"\nclass Node:\n def __init__(self, key, attributes, graph):\n self.key = key\n self.attributes = attributes\n self.graph = graph\n\n def update(self, attributes):\n self.attributes.update(attributes)\n\n @property\n def neighbors(self):\n return [self.graph.get_node(name) for name in self.graph.neighbors[self.key].keys()]\n\n\nclass Graph(object):\n \"\"\"Representation of a Graph data structure.\n Notes: this object is not supported multi-graph model.\n\n Parameters\n ----------\n nodes: (default = None) a list of hashable object:\n edges: (default = None) a list of hashable objects:\n directed: (default = False) - boolean - indicates Graph is\n `directed` or `undirected`.\n\n Example:\n -------\n ```python\n from data_structures.graph import Graph\n\n V = [1, 2, 3, 4] \n E = [(1, 2), (2, 4), (3, 4), (4, 3), (3, 2)]\n G = Graph(V, E)\n ```\n \"\"\"\n\n def __init__(self, nodes=None, edges=None, directed=False, weighted=False):\n self.nodes = {} # A dictinary of Node objects\n self.directed = directed\n self.neighbors = {} # adjacency matrix to represent edges\n self.weighted = weighted\n if directed:\n self.in_neighbors = {}\n self.add_multiple_nodes(nodes)\n self.add_multiple_edges(edges)\n\n def add_node(self, node_key, **node_attributes):\n if node_key in self.nodes.keys():\n self.nodes[node_key].update(node_attributes)\n else:\n self.nodes[node_key] = Node(node_key, node_attributes, self)\n self.neighbors[node_key] = {}\n if self.directed:\n self.in_neighbors[node_key] = {}\n\n def add_multiple_nodes(self, node_list):\n if node_list is None:\n return\n for node in node_list:\n if isinstance(node, tuple):\n self.add_node(node[0], **node[1])\n else:\n self.add_node(node) # no data in node\n\n def add_edge(self, from_node, to_node, **edge_attributes):\n if from_node not in self.nodes:\n self.add_node(from_node)\n\n if self.weighted:\n if 'weight' not in edge_attributes:\n raise ValueError('Weighted graph requires new edge to have'\n 'weight keyword')\n if to_node not in self.nodes:\n self.add_node(to_node)\n\n edge_data = self.neighbors[from_node].get(to_node, {})\n edge_data.update(edge_attributes)\n\n self.neighbors[from_node][to_node] = edge_data\n if not self.directed:\n self.neighbors[to_node][from_node] = edge_data\n else:\n self.in_neighbors[to_node][from_node] = edge_data\n\n def add_multiple_edges(self, edge_list):\n if edge_list is None:\n 
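# editor note: no edge list supplied, nothing to add\n            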
return\n for edge in edge_list:\n if len(edge) == 2:\n self.add_edge(edge[0], edge[1])\n else:\n if isinstance(edge[2], (int, float)):\n edge_property = {'weight': edge[2]}\n elif not isinstance(edge[2], dict):\n raise ValueError('Edge properties should be a dictionary.'\n 'But got {}'.format(edge[2]))\n else:\n edge_property = edge[2]\n self.add_edge(edge[0], edge[1], **edge_property)\n\n def has_node(self, node):\n \"\"\"Return True if graph contains a node. \"\"\"\n return node in self.nodes\n\n def get_node(self, node_key):\n return self.nodes[node_key]\n\n def get_neighbors(self, node):\n return self.neighbors[node]\n\n def degree(self, node):\n return len(self.neighbors[node])\n\n def edges(self, from_node=None, to_node=None):\n if from_node is None and to_node is None:\n return self.neighbors\n elif from_node is None:\n if self.directed: # Get all incoming edges of `to_node`\n return self.in_neighbors[to_node]\n else:\n return self.neighbors[to_node]\n # Get all neighbors (outcoming edges) `from_node`\n elif to_node is None:\n return self.neighbors[from_node]\n else:\n return self.neighbors[from_node][to_node]\n\n def transpose(self):\n if self.directed:\n self.in_neighbors, self.neighbors = self.neighbors, self.in_neighbors\n return self\n","sub_path":"data_structures/graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"37820061","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 22 10:09:21 2015\n\n@author: jordan\n\"\"\"\nfrom Serre2dc import *\nfrom scipy import *\nimport csv\nimport os\nfrom pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog\nfrom numpy.linalg import norm \n\n\nfrom scipy.optimize import bisect, newton\n\ndef copyarraytoC(a):\n n = len(a)\n b = mallocPy(n)\n for i in range(n):\n writetomem(b,i,a[i])\n return b\n \ndef copyarrayfromC(a,n):\n b = [0]*n\n for i in range(n):\n b[i] = readfrommem(a,i)\n \n return b\n \ndef makevar(sx,ex,dx,st,et,dt): \n x = arange(sx, ex, dx)\n t = arange(st, et, dt)\n \n return x,t \n \n\ndef sech(x):\n a = 2./(exp(x) + exp(-x))\n return a\n\ndef sech2 (x):\n a = 2./(exp(x) + exp(-x))\n return a*a\n\ndef soliton (x,t,g,a0,a1):\n c = sqrt(g*(a0 + a1))\n phi = x - c*t;\n k = sqrt(3.0*a1) / (2.0*a0 *sqrt(a0 + a1))\n return a0 + a1*sech2(k*phi)\n \ndef solitoninit(a0,a1,g,x,t0,dx):\n n = len(x)\n h = zeros(n)\n u = zeros(n)\n G = zeros(n)\n w = zeros(n)\n b = zeros(n)\n c = sqrt(g*(a0 + a1))\n for i in range(n):\n h[i] = soliton(x[i],t0,g,a0,a1)\n w[i] = h[i]\n u[i] = c* (1 - a0 / h[i])\n G[i] = 2.0/3*a0*a1*c*k**2*sech(k*(x[i] - c*t0))**4*h[i] + h[i]*u[i] - 4.0/3*a0*a1**2*c*k**2*sech(k*(x[i] - c*t0))**4*tanh(k*(x[i] - c*t0))**2 - 4.0/3*a0*a1*c*k**2*sech(k*(x[i] - c*t0))**2*h[i]*tanh(k*(x[i] - c*t0))**2\n \n return h,u,G,w,b\n\n\ndef SolitonMass(a0,a1,k,xb,xe): \n return a0*(xe - xb) + a1*(tanh(k*xe) - tanh(k*xb)) / k\n \n\ndef SolitonMome(a0,a1,c,k,xb,xe): \n return a1*c*(tanh(k*xe) - tanh(k*xb)) / k\n \ndef SolitonG(a0,a1,c,k,xb,xe):\n return a1*c / (3*k) *( (3 + 2*a0**2*k**2*sech(k*xe)**2 + 2*a0*a1*k**2*sech(k*xe)**4)*tanh(k*xe) \\\n -(3 + 2*a0**2*k**2*sech(k*xb)**2 + 2*a0*a1*k**2*sech(k*xb)**4)*tanh(k*xb) )\n \ndef solitonAveragest0(a0,a1,g,x,dx):\n n = len(x)\n ha = zeros(n)\n Ga = zeros(n)\n c = sqrt(g*(a0 + a1))\n k = sqrt(3.0*a1) / (2.0*a0 *sqrt(a0 + a1))\n idx = 1.0 / dx\n for i in range(n):\n ha[i] = idx*SolitonMass(a0,a1,k,x[i] - 0.5*dx,x[i] + 0.5*dx)\n 
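# editor note: SolitonMass and SolitonG are closed-form integrals over [x - dx/2, x + dx/2], so ha and Ga hold exact cell averages\n        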
Ga[i] = idx*SolitonG(a0,a1,c,k,x[i] - 0.5*dx,x[i] + 0.5*dx)\n \n return ha,Ga\n \n#Soliton Problem \n#wdirb = \"/home/thanksjoe/Documents/newPHD/data/2018/raw/Thesis/SolitonAgn/FDVM2/\"\n#\n#if not os.path.exists(wdirb):\n# os.makedirs(wdirb)\n\nfor ki in range(11,12):\n# wdir = wdirb + str(ki) + \"/\"\n# \n# if not os.path.exists(wdir):\n# os.makedirs(wdir)\n \n a0 = 1.0\n a1 = 0.7\n g = 9.81\n k = sqrt(3.0*a1) / (2.0*a0 *sqrt(a0 + a1))\n c = sqrt(g*(a0 + a1))\n \n \n startx = -250\n endx = 250\n \n startt = 0.0\n endt = 50\n \n \n dx = 100.0/ 2**ki\n Cr = 0.5\n l = 1.0 / (sqrt(g*(a0 + a1)))\n dt = Cr*l*dx\n \n t = startt\n theta = 1.2\n \n x = arange(startx,endx +0.1*dx, dx)\n \n n = len(x)\n nBC = 3\n nBCs = 4\n \n h,u,G,wta,bta = solitoninit(a0,a1,g,x,0,dx)\n h,G = solitonAveragest0(a0,a1,g,x,dx)\n \n u0a = zeros(nBCs)\n u1a = zeros(nBCs) \n h0a = a0*ones(nBCs)\n h1a = a0*ones(nBCs)\n \n h_c = copyarraytoC(h)\n G_c = copyarraytoC(G)\n h0_c = copyarraytoC(h0a)\n h1_c = copyarraytoC(h1a)\n u0_c = copyarraytoC(u0a)\n u1_c = copyarraytoC(u1a)\n u_c = mallocPy(n)\n\n Gerr = []\n herr = []\n \n Mt = SolitonMass(a0,a1,k,startx - 0.5*dx,endx + 0.5*dx)\n Gt = SolitonG(a0,a1,c,k,startx - 0.5*dx,endx + 0.5*dx)\n \n while t < endt : \n evolvewrap(G_c,h_c,h0_c,h1_c,u0_c,u1_c,g,dx,dt,nBC,n,nBCs,theta)\n print (t)\n \n GaC = copyarrayfromC(G_c,n)\n haC = copyarrayfromC(h_c,n) \n GnTc = dx*sum(GaC)\n MnTc = dx*sum(haC)\n\n Gerr.append(abs(GnTc - Gt)/ Gt)\n herr.append(abs(MnTc - Mt)/ Mt)\n \n t = t + dt\n \n getufromG(h_c,G_c,u0a[-1],u1a[0],h0a[-1],h1a[0], dx ,n,u_c)\n uF = copyarrayfromC(u_c,n)\n GF = copyarrayfromC(G_c,n)\n hF = copyarrayfromC(h_c,n) \n \n ht,ut,Gt,wt,bt = solitoninit(a0,a1,g,x,t,dx)\n\n# s = wdir + \"outlast.txt\"\n# with open(s,'w') as file2:\n# writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# \n# writefile2.writerow(['dx' ,'dt','time',\"cell midpoint\" ,'h', 'G' , 'u(m/s)', 'ht','ut' ]) \n# \n# for j in range(n):\n# writefile2.writerow([str(dx),str(dt),str(t),str(x[j]), str(hF[j]) , str(GF[j]) , str(uF[j]),str(ht[j]),str(ut[j])])","sub_path":"CODE/experimentcode/Thesis/Soliton/FDVM2/T1/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"150012228","text":"from numpy import *\n\n\ndef loadSimapData():\n datMat = matrix([[1., 2.1],\n [2., 1.1],\n [1.3, 1.],\n [1., 1.],\n [2., 1.]])\n classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]\n return datMat, classLabels\n\n\ndef stumpClassify(dataMatrix, dimen, threshVal, threshIneq):\n \"\"\"\n 通过阈值比较对数据进行分类\n 所有在阈值一边的分到类别-1, 另一半的+1\n # 数组过滤实现\n\n\n\t 将最小错误率minError设为+∞\n\t 对数据集中的每一个特征(每一层循环):\n\t 对每个步长(第二层循环):\n\t 对每个不等号(第三层循环):\n\t 建立一棵单层决策树并利用加权平均数据集对它进行测试\n\t 如果错误率低于minError,则将当前单层决策树设为最佳决策树\n 返回最佳决策树\n\n :param dataMatrix:\n :param dimen:\n :param threshVal:\n :param threshIneq: 不等于号\n :return:\n \"\"\"\n retArray = ones((shape(dataMatrix)[0], 1))\n if threshIneq == 'lt':\n retArray[dataMatrix[:, dimen] <= threshVal] = -1.0\n else:\n retArray[dataMatrix[:, dimen] > threshVal] = -1.0\n return retArray\n\n\ndef buildStump(dataArr, classLabels, D):\n \"\"\"\n 遍历stumpClassify()函数所有的可能输入值, 并找到数据集上最佳的单层决策树,这里的最佳是基于数据的权重向量D来定义的\n\n :param dataArr:\n :param classLabels:\n :param D:\n :return:\n \"\"\"\n dataMatrix = mat(dataArr)\n labelMat = mat(classLabels).T\n m, n = shape(dataMatrix)\n numSteps = 10.0 # 用于在特征的所有可能值上进行遍历\n bestStump = {} # 存储给定权重向量D时所得到的最佳单层决策树的相关信息,\n bestClassEst = 
mat(zeros((m, 1)))\n minError = inf # init error sum, to +infinity # 一开始就初始化程无穷大,之后用于寻找可能的最小错误率\n count = 0\n for i in range(n):\n # loop ove all dimensions(维度) 在数据集上所有特征遍历\n rangeMin = dataMatrix[:, i].min() # 考虑到数值型的特征,可以通过最大值,最小值来了解需要步长\n rangeMax = dataMatrix[:, i].max()\n stepSize = (rangeMax - rangeMin) / numSteps\n\n for j in range(-1, int(numSteps) + 1):\n # 再在这些值上遍历.甚至将阈值设置为整个取值范围之外也是可以的\n # loop over all range in current dimension\n\n for inequal in ['lt', 'gt']:\n # go over less than and greater than # 在大于和小于之间切换不等式\n threshVal = (rangeMin + float(j) * stepSize)\n predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)\n # call stump classify(分类) with i, j , lessThan\n errArr = mat(ones((m, 1))) # 错误向量\n\n errArr[predictedVals == labelMat] = 0\n # 若果predictedVals中的值不等于labelMat中的真正类别标签值,那么errArr相应位置置为1\n\n # 权重向量D\n # 加权错误率 weightedError# 这就是AdaBoost和分类器交互的地方, 基于权重向量D来评价很猎奇\n weightedError = D.T * errArr # calc total error multiplied by D 计算加权错误率\n print(\"split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f\" % (\n i, threshVal, inequal, weightedError))\n count += 1\n if weightedError < minError:\n # 当前错误率和已有的最小错误率进行比较,如果当前最小,那么就在吃点bestStump中保存该单层决策树.字典/错误率/类别估计都返回给AdaBoost\n minError = weightedError\n bestClassEst = predictedVals.copy()\n bestStump['dim'] = i\n bestStump['thresh'] = threshVal\n bestStump['ineq'] = inequal\n print(count)\n return bestStump, minError, bestClassEst\n\n\ndef adaBoostTrainDS(dataArr, classLabels, numIt=40):\n weakClassArr = []\n m = shape(dataArr)[0]\n D = mat(ones((m, 1)) / m) # init D to all equal\n aggClassEst = mat(zeros((m, 1)))\n for i in range(numIt):\n bestStump, error, classEst = buildStump(dataArr, classLabels, D) # build Stump\n print(\"D: \", D.T)\n alpha = float(0.5 * log(\n (1.0 - error) / max(error, 1e-16))) # calc alpha, throw in max(error, eps) to account for error = 0\n bestStump['alpha'] = alpha\n weakClassArr.append(bestStump) # store Stump Params in Array\n print(\"classEst: \", classEst.T)\n expon = multiply(-1 * alpha * mat(classLabels).T, classEst) # exponent for D calc, getting messy\n D = multiply(D, exp(expon))\n D = D / D.sum()\n\n # calc training error of all classifiers, if this is 0 quit for loop early (use break)\n aggClassEst += alpha * classEst\n print(\"aggClassEst: \", aggClassEst.T)\n aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))\n errorRate = aggErrors.sum() / m\n print(\"total error: \", errorRate)\n if errorRate == 0.0:\n break\n return weakClassArr, aggClassEst\n\n\ndef adaClassify(dataToClass, classifierArr):\n dataMatrix = mat(dataToClass) # do stauff imilar to last aggClassEst in adaBoostTrainDS\n m = shape(dataMatrix)[0]\n aggClassEst = mat(zeros((m, 1)))\n for i in range(len(classifierArr)):\n classEst = stumpClassify(dataMatrix, not classifierArr[i]['dim'],\n classifierArr[i]['thresh'],\n classifierArr[i]['ineq']) # call stump classify\n aggClassEst += classifierArr[i]['alpha'] * classEst\n print(aggClassEst)\n return sign(aggClassEst)\n\n\ndef plotROC(predStrengths, classLabels):\n \"\"\"\n\n :param predStrengths: 分类器的预测强度\n :param classLabels:\n :return:\n \"\"\"\n import matplotlib.pyplot as plt\n cur = (1.0, 1.0) # cursor\n ySum = 0.0 # variable to calculate AUC\n numPosClas = sum(array(classLabels) == 1.0) # calc positive\n yStep = 1 / float(numPosClas) # step nums\n xStep = 1 / float(len(classLabels) - numPosClas) # [0.0, 1.0]\n sortedIndicies = predStrengths.argsort() # get sorted index , it's reverse\n fig = plt.figure()\n fig.clf()\n 
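# editor note: single full-figure axes; each sorted prediction strength contributes one ROC segment in the loop below\n    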
ax = plt.subplot(111)\n # loop through all the values, drawing a line segment at each point\n for index in sortedIndicies.tolist()[0]:\n if classLabels[index] == 1.0:\n delX = 0\n delY = yStep\n else:\n delX = xStep\n delY = 0\n ySum += cur[1]\n # draw line from cur to (cur[0 - delX, cur[1] - delY)\n ax.plot([cur[0], cur[0] - delX], [cur[1], cur[1] - delY], c='b')\n cur = (cur[0] - delX, cur[1] - delY)\n ax.plot([0, 1], [0, 1], 'b--')\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve for AdaBoost horse colic detection system')\n ax.axis([0, 1, 0, 1])\n plt.show()\n print(\"the Area Under the Curve is: \", ySum * xStep)\n\n\n\nif __name__ == '__main__':\n D = mat(ones((5, 1)) / 5)\n datMat, classLabels = loadSimapData()\n # bestStump, minError, bestClassEst = buildStump(datMat, classLabels, D)\n # print(bestStump, \"\\n\", minError, \"\\n\", bestClassEst, )\n print(datMat)\n print(classLabels)\n weakClassArr, aggClassEst = adaBoostTrainDS(datMat, classLabels, 9)\n print(weakClassArr)\n print(aggClassEst)\n","sub_path":"07_AdaBoost/adaboost1.py","file_name":"adaboost1.py","file_ext":"py","file_size_in_byte":7590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"230959979","text":"from peewee import *\n\ndb = SqliteDatabase('students.db')\n\n\nclass Student(Model):\n username = CharField(max_length=255, unique=True)\n points = IntegerField(default=0)\n\n class Meta:\n database = db\n\nstudents = [\n {\"username\": \"dylsharp\",\n \"points\": 48452},\n {\"username\": \"martmey\",\n \"points\": 59423},\n {\"username\": \"soloshni\",\n \"points\": 14854},\n {\"username\": \"stinjus\",\n \"points\": 65297},\n {\"username\": \"malcdawg\",\n \"points\": 25478}\n]\n\ndef add_students():\n for student in students:\n try:\n Student.create(username=student['username'],\n points=student['points'])\n except IntegrityError:\n student_record = Student.get(username=student['username']) #Get the student object that already exists\n student_record.points = student['points'] # Update the points in the student object with the current value (may or may not have changed - one could check for this first)\n student_record.save()\n\ndef top_student():\n student = Student.select().order_by(Student.points.desc()).get() # Get all students and the sort and the just \"get\" the first one.\n return student\n\nif __name__ == '__main__': #If file is run directly and not imported\n db.connect()\n db.create_tables([Student], safe=True)\n add_students()\n print(\"Our top student right now is {0.username}\".format(top_student()))","sub_path":"students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"362498956","text":"import json, config\r\nfrom requests_oauthlib import OAuth1Session\r\n\r\nCK = config.CK\r\nCS = config.CS\r\nAT = config.AT\r\nATS = config.ATS\r\ntwitter = OAuth1Session(CK, CS, AT, ATS)\r\n\r\na = 1\r\n\r\nwhile a ==1:\r\n print(\"What do you want to do:\")\r\n print(\"Sh (show your timeline)\")\r\n print(\"Se (Search words you input)\")\r\n print(\"Tw (Tweet with words)\")\r\n print(\"Ti (Tweet with image and words)\")\r\n print(\"Tv (Tweet with video and words)\")\r\n print(\"If you don't do anything input anything other\")\r\n c = input(\">> \")\r\n if c == \"Sh\" or c == \"sh\":\r\n config.MyTime(twitter)\r\n elif c == \"Se\" or c == \"se\":\r\n config.WordSearch(twitter)\r\n elif c == 
\"Tw\" or c == \"tw\":\r\n config.tweet(twitter)\r\n elif c == \"Ti\" or c == \"ti\":\r\n config.TweetWithImg(twitter)\r\n elif c == \"Tv\" or c == \"tv\":\r\n from asyncUpload import VideoTweet\r\n MEDIA_ENDPOINT_URL = 'https://upload.twitter.com/1.1/media/upload.json'\r\n POST_TWEET_URL = 'https://api.twitter.com/1.1/statuses/update.json'\r\n print(\"ファイルの名前を選択して下さい(mediaディレクトリに入っているmp4ファイルのみ可能)\")\r\n fname = input(\">> \")\r\n print(\"何をつぶやく?\")\r\n mess = input(\">> \")\r\n VIDEO_FILENAME = 'C:\\\\Users\\\\kuwahara\\\\Documents\\\\Programming\\\\python\\\\Scrape\\\\ScrapeTwitter\\\\media\\\\{}'.format(fname)\r\n\r\n videoTweet = VideoTweet(VIDEO_FILENAME)\r\n videoTweet.upload_init()\r\n videoTweet.upload_append()\r\n videoTweet.upload_finalize()\r\n videoTweet.tweet(mess)\r\n else:\r\n a -=1\r\n","sub_path":"ScrapeTwitter/sctwtr.py","file_name":"sctwtr.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559545648","text":"import _variables\nimport urllib.request\nfrom xml.dom import minidom\n\n\ncmd = \"iplookup\"\nurl = \"http://ip-api.com/xml/\"\n\ndef iplookup(client, message):\n if message.content.startswith(_variables.prefix + cmd + ' '):\n user_message = str(message.content).replace(_variables.prefix + cmd + ' ', '')\n\n with urllib.request.urlopen(url + user_message) as xml:\n xml_str = xml.read()\n xmldoc = minidom.parseString(xml_str)\n\n countries = xmldoc.getElementsByTagName('country')\n cities = xmldoc.getElementsByTagName('city')\n\n if len(countries) > 0:\n country = countries[0].firstChild.nodeValue\n city = cities[0].firstChild.nodeValue\n if len(str(message.content)) > len(_variables.prefix + cmd + ' '):\n yield from client.send_message(message.channel, 'Country: ' + country + \" | City: \" + city)\n else:\n yield from client.send_message(message.channel, 'Invalid IP address.')\n\n","sub_path":"commands/iplookup.py","file_name":"iplookup.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560903405","text":"# 1429. First Unique Number\n# 2021/11/25\n# weekly speical\n\n# Runtime: 760 ms, faster than 86.59% of Python3 online submissions for First Unique Number.\n# Memory Usage: 55.7 MB, less than 81.14% of Python3 online submissions for First Unique Number.\n\n\n# design + hash table\n# add: O(1) if this value already in hash table, do not append it, and set the counts to be 2\n# showFirstUniqueL amortized O(1). n-show operations takes n-time.\n\nclass FirstUnique:\n\n def __init__(self, nums: List[int]):\n self.pos = 0\n self.counts = {}\n for num in nums:\n self.counts[num] = self.counts.get(num, 0) + 1\n self.nums = [num for num in nums if self.counts[num] == 1]\n\n def showFirstUnique(self) -> int:\n while self.pos < len(self.nums) and self.counts[self.nums[self.pos]] > 1:\n self.pos += 1\n if self.pos < len(self.nums):\n return self.nums[self.pos]\n return -1\n\n def add(self, value: int) -> None:\n if value in self.counts:\n self.counts[value] = 2\n return\n self.nums.append(value)\n self.counts[value] = 1\n\n# Your FirstUnique object will be instantiated and called as such:\n# obj = FirstUnique(nums)\n# param_1 = obj.showFirstUnique()\n# obj.add(value)","sub_path":"1429. First Unique Number.py","file_name":"1429. 
First Unique Number.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"264211632","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nimport model_utils.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('password', models.CharField(verbose_name='password', max_length=128)),\n ('last_login', models.DateTimeField(verbose_name='last login', default=django.utils.timezone.now)),\n ('is_superuser', models.BooleanField(verbose_name='superuser status', default=False, help_text='Designates that this user has all permissions without explicitly assigning them.')),\n ('created', model_utils.fields.AutoCreatedField(editable=False, verbose_name='created', default=django.utils.timezone.now)),\n ('modified', model_utils.fields.AutoLastModifiedField(editable=False, verbose_name='modified', default=django.utils.timezone.now)),\n ('email', models.CharField(max_length=255, unique=True)),\n ('username', models.CharField(max_length=255, unique=True)),\n ('first_name', models.CharField(max_length=255)),\n ('last_name', models.CharField(max_length=255)),\n ('code', models.IntegerField(default=None, max_length=7, null=True)),\n ('is_staff', models.BooleanField(verbose_name='staff status', default=False, help_text='Designates whether the user can log into this admin site.')),\n ('is_active', models.BooleanField(verbose_name='active', default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')),\n ('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),\n ],\n options={\n 'verbose_name_plural': 'users',\n 'verbose_name': 'user',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='FriendShip',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('source', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='source')),\n ('target', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='target')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TempAccountCode',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('code', models.IntegerField(max_length=7, unique=True)),\n ('owner', models.ForeignKey(null=True, default=None, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='user',\n name='friends',\n field=models.ManyToManyField(through='authentication.FriendShip', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='user',\n name='groups',\n field=models.ManyToManyField(verbose_name='groups', blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of his/her group.', related_name='user_set', related_query_name='user', to='auth.Group'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='user',\n name='user_permissions',\n field=models.ManyToManyField(verbose_name='user permissions', blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission'),\n preserve_default=True,\n ),\n ]\n","sub_path":"webeng2 testing facility/DisTrip/authentication/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"630342421","text":"\"\"\"\n{\n \"author\": \"Yucheng Huang\",\n \"difficulty\": \"easy\",\n \"link\": \"https://leetcode.com/problems/maximum-product-of-three-numbers/description/\",\n \"beats\": 0.5960,\n \"category\": [\"math\"],\n \"tags\": [],\n \"questions\": []\n}\n\"\"\"\n\n\"\"\"\n思路\n\t- 最大的三个正数or有两个最小负数\n\"\"\"\n\nclass Solution:\n \n def bruteForce(self, nums):\n maxMul = float('-inf')\n for i in range(len(nums)-2):\n for j in range(i+1, len(nums)-1):\n for k in range(j+1, len(nums)):\n tmp = nums[i]*nums[j]*nums[k]\n if tmp > maxMul:\n maxMul = tmp\n return maxMul\n \n def maximumProduct(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n a = self.bruteForce(nums[-3:])\n b = self.bruteForce(nums[:2]+nums[-1:])\n return max(a,b)\n \n \n","sub_path":"solutions/628.smart-bruteForce.py","file_name":"628.smart-bruteForce.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"457973042","text":"\"\"\"\nRelief Visualization Toolbox – Visualization Functions\n\nContains core functions for blending.\n\nCredits:\n Žiga Kokalj (ziga.kokalj@zrc-sazu.si)\n Krištof Oštir (kristof.ostir@fgg.uni-lj.si)\n Klemen Zakšek\n Peter Pehani\n Klemen Čotar\n Maja Somrak\n Žiga Maroh\n\nCopyright:\n 2010-2020 Research Centre of the Slovenian Academy of Sciences and Arts\n 2016-2020 University of Ljubljana, Faculty of Civil and Geodetic Engineering\n\"\"\"\n\n# TODO: more testing, find and fix bugs if they exists\n\n# python libraries\nimport matplotlib as mpl\nimport matplotlib.cm\nimport numpy as np\nimport warnings\n\n\ndef gray_scale_to_color_ramp(gray_scale, colormap, alpha=False):\n \"\"\"\n Turns normalized gray scale np.array to rgba (np.array of 4 np.arrays r, g, b, a).\n\n Parameters\n ----------\n gray_scale : np.array (2D)\n Normalized gray_scale img as np.array (0-1)\n colormap : str\n colormap form matplotlib (https://matplotlib.org/3.3.2/tutorials/colors/colormaps.html)\n alpha : bool\n If True outputs 4D array RGBA, if False outputs 3D array RGB\n Returns\n -------\n rgba_out : np.array (3D: red 0-255, green 0-255, blue 0-255)\n If alpha False: np.array (4D: red 0-255, green 0-255, blue 0-255, alpha 0-255)\n \"\"\"\n cm = mpl.cm.get_cmap(colormap)\n rgba_out = cm(gray_scale) # normalized rgb\n rgba_mtpl_out = np.uint8(rgba_out * 255) # 0-1 scale to 0-255 and change type to uint8\n if alpha:\n rgba_out = np.array([rgba_mtpl_out[:, :, 0], rgba_mtpl_out[:, :, 1], rgba_mtpl_out[:, :, 2]])\n else:\n rgba_out = np.array([rgba_mtpl_out[:, :, 0], rgba_mtpl_out[:, :, 1], rgba_mtpl_out[:, :, 2],\n rgba_mtpl_out[:, :, 3]])\n return rgba_out\n\n\ndef normalize_lin(image, minimum, maximum):\n # linear cut off\n image[image > maximum] = maximum\n 
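# editor note: clamp the lower tail too, then stretch the clipped values linearly to [0, 1]\n    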
image[image < minimum] = minimum\n\n # stretch to 0.0 - 1.0 interval\n image = (image - minimum) / (maximum - minimum)\n image[image > 1] = 1\n image[image < 0] = 0\n return image\n\n\ndef lin_cutoff_calc_from_perc(image, minimum, maximum):\n \"\"\"Minimum cutoff in percent, maximum cutoff in percent (0%-100%). Returns min and max values for linear\n stretch (cut-off).\"\"\"\n if minimum < 0 or maximum < 0 or minimum > 100 or maximum > 100:\n raise Exception(\"rvt.blend_funct.lin_cutoff_calc_from_perc: minimum, maximum are percent and have to be in \"\n \"range 0-100!\")\n if minimum + maximum > 100:\n raise Exception(\"rvt.blend_funct.lin_cutoff_calc_from_perc: if minimum + maximum > 100% then there are no\"\n \" values left! You can't cutoff whole image!\")\n distribution = np.nanpercentile(a=image, q=np.array([minimum, 100 - maximum]))\n min_lin = distribution[0]\n max_lin = distribution[1]\n if min_lin == max_lin:\n min_lin = np.nanmin(image)\n max_lin = np.nanmax(image)\n return {\"min_lin\": min_lin, \"max_lin\": max_lin}\n\n\ndef normalize_perc(image, minimum, maximum):\n min_max_lin_dict = lin_cutoff_calc_from_perc(image, minimum, maximum)\n min_lin = min_max_lin_dict[\"min_lin\"]\n max_lin = min_max_lin_dict[\"max_lin\"]\n return normalize_lin(image, min_lin, max_lin)\n\n\ndef advanced_normalization(image, minimum, maximum, normalization):\n equ_image = image\n if minimum == maximum and normalization == \"value\":\n raise Exception(\"rvt.blend_func.advanced_normalization: If normalization == value, min and max cannot be the\"\n \" same!\")\n if minimum > maximum and normalization == \"value\":\n raise Exception(\"rvt.blend_func.advanced_normalization: If normalization == value, max can't be smaller\"\n \" than min!\")\n if normalization.lower() == \"value\":\n equ_image = normalize_lin(image=image, minimum=minimum, maximum=maximum)\n elif normalization.lower() == \"perc\":\n equ_image = normalize_perc(image=image, minimum=minimum, maximum=maximum)\n elif normalization is None:\n equ_image = image\n return equ_image\n\n\ndef image_join_channels(r, g, b):\n if r.shape != g.shape or r.shape != b.shape or g.shape != b.shape:\n raise Exception(\"rvt.blend.image_join_channels: r, g, b must me same dimensions!\")\n return np.array([r, g, b])\n\n\ndef lum(img):\n if len(img.shape) == 3:\n r = img[0]\n g = img[1]\n b = img[2]\n return (0.3 * r) + (0.59 * g) + (0.11 * b)\n else:\n return img\n\n\ndef matrix_eq_min_lt_zero(r, idx_min_lt_zero, lum_c, min_c):\n r[idx_min_lt_zero] = lum_c[idx_min_lt_zero] + (((r[idx_min_lt_zero] - lum_c[idx_min_lt_zero]) *\n lum_c[idx_min_lt_zero])\n / (lum_c[idx_min_lt_zero] - min_c[idx_min_lt_zero]))\n return r\n\n\ndef matrix_eq_max_gt_one(r, idx_max_c_gt_one, lum_c, max_c):\n r[idx_max_c_gt_one] = lum_c[idx_max_c_gt_one] + (((r[idx_max_c_gt_one] - lum_c[idx_max_c_gt_one]) *\n (1.0 - lum_c[idx_max_c_gt_one]))\n / (max_c[idx_max_c_gt_one] - lum_c[idx_max_c_gt_one]))\n return r\n\n\ndef channel_min(r, g, b):\n min_c = r\n idx_min = np.where(g < min_c)\n min_c[idx_min] = g[idx_min]\n idx_min = np.where(b < min_c)\n min_c[idx_min] = b[idx_min]\n return min_c\n\n\ndef channel_max(r, g, b):\n max_c = r\n idx_min = np.where(g > max_c)\n max_c[idx_min] = g[idx_min]\n idx_min = np.where(b > max_c)\n max_c[idx_min] = b[idx_min]\n return max_c\n\n\ndef clip_color(c, min_c=None, max_c=None):\n lum_c = lum(c)\n r = c[0]\n g = c[1]\n b = c[2]\n\n if min_c is None and max_c is None:\n min_c = channel_min(r, g, b)\n max_c = channel_max(r, g, b)\n\n idx_min_lt_zero = 
np.where(min_c < 0)\n r = matrix_eq_min_lt_zero(r, idx_min_lt_zero, lum_c, min_c)\n g = matrix_eq_min_lt_zero(g, idx_min_lt_zero, lum_c, min_c)\n b = matrix_eq_min_lt_zero(b, idx_min_lt_zero, lum_c, min_c)\n\n idx_max_c_gt_one = np.where(max_c > 1)\n r = matrix_eq_max_gt_one(r, idx_max_c_gt_one, lum_c, max_c)\n g = matrix_eq_max_gt_one(g, idx_max_c_gt_one, lum_c, max_c)\n b = matrix_eq_max_gt_one(b, idx_max_c_gt_one, lum_c, max_c)\n\n c[0, :, :] = r\n c[1, :, :] = g\n c[2, :, :] = b\n\n return c\n\n\ndef blend_normal(active, background):\n return active\n\n\ndef blend_screen(active, background):\n return 1 - (1 - active) * (1 - background)\n\n\ndef blend_multiply(active, background):\n return active * background\n\n\ndef blend_overlay(active, background):\n idx1 = np.where(background > 0.5)\n idx2 = np.where(background <= 0.5)\n background[idx1[0], idx1[1]] = (\n 1 - (1 - 2 * (background[idx1[0], idx1[1]] - 0.5)) * (1 - active[idx1[0], idx1[1]]))\n background[idx2[0], idx2[1]] = ((2 * background[idx2[0], idx2[1]]) * active[idx2[0], idx2[1]])\n return background\n\n\ndef blend_luminosity(active, background, min_c=None, max_c=None):\n lum_active = lum(active)\n lum_background = lum(background)\n luminosity = lum_active - lum_background\n if len(background.shape) < 3:\n return lum_active\n\n r = background[0] + luminosity\n g = background[1] + luminosity\n b = background[2] + luminosity\n\n c = np.zeros(background.shape)\n c[0, :, :] = r\n c[1, :, :] = g\n c[2, :, :] = b\n\n clipped_image = clip_color(c, min_c, max_c)\n\n return clipped_image\n\n\ndef equation_blend(blend_mode, active, background):\n if blend_mode.lower() == \"screen\":\n return blend_screen(active, background)\n elif blend_mode.lower() == \"multiply\":\n return blend_multiply(active, background)\n elif blend_mode.lower() == \"overlay\":\n return blend_overlay(active, background)\n\n\ndef blend_multi_dim_images(blend_mode, active, background):\n a_rgb = len(active.shape) == 3 # bool, is active rgb\n b_rgb = len(background.shape) == 3 # bool, is background rgb\n blended_image = None\n if a_rgb and b_rgb:\n blended_image = np.zeros(background.shape)\n for i in range(3):\n blended_image[i, :, :] = equation_blend(blend_mode, active[i, :, :], background[i, :, :])\n if a_rgb and not b_rgb:\n blended_image = np.zeros(active.shape)\n for i in range(3):\n blended_image[i, :, :] = equation_blend(blend_mode, active[i, :, :], background)\n if not a_rgb and b_rgb:\n blended_image = np.zeros(background.shape)\n for i in range(3):\n blended_image[i, :, :] = equation_blend(blend_mode, active, background[i, :, :])\n if not a_rgb and not b_rgb:\n blended_image = equation_blend(blend_mode, active, background)\n\n return blended_image\n\n\ndef blend_images(blend_mode, active, background, min_c=None, max_c=None):\n if blend_mode.lower() == \"multiply\" or blend_mode.lower() == \"overlay\" or blend_mode.lower() == \"screen\":\n return blend_multi_dim_images(blend_mode, active, background)\n elif blend_mode.lower() == \"luminosity\":\n return blend_luminosity(active, background, min_c, max_c)\n else:\n return blend_normal(active, background)\n\n\ndef render_images(active, background, opacity):\n if np.nanmin(active) < 0 or np.nanmax(active) > 1.1:\n active = scale_0_to_1(active)\n if np.nanmin(background) < 0 or np.nanmax(background) > 1.1:\n background = scale_0_to_1(background)\n\n a_rgb = len(active.shape) == 3\n b_rgb = len(background.shape) == 3\n render_image = 0\n if a_rgb and b_rgb:\n render_image = np.zeros(background.shape)\n for i in 
range(3):\n render_image[i, :, :] = apply_opacity(active[i, :, :], background[i, :, :], opacity)\n if a_rgb and not b_rgb:\n render_image = np.zeros(active.shape)\n for i in range(3):\n render_image[i, :, :] = apply_opacity(active[i, :, :], background, opacity)\n if not a_rgb and b_rgb:\n render_image = np.zeros(background.shape)\n for i in range(3):\n render_image[i, :, :] = apply_opacity(active, background[i, :, :], opacity)\n if not a_rgb and not b_rgb:\n render_image = apply_opacity(active, background, opacity)\n return render_image\n\n\ndef scale_within_0_and_1(numeric_value):\n if np.nanmin(numeric_value) >= 0 and np.nanmax(numeric_value) <= 1:\n return numeric_value\n\n numeric_value[np.isnan(numeric_value)] = np.nanmin(numeric_value) # nan change to nanmin\n\n actual_min = np.nanmin(numeric_value)\n norm_min_value = np.nanmax(np.array(0, actual_min))\n\n actual_max = np.nanmax(numeric_value)\n norm_max_value = np.nanmin(np.array(1, actual_max))\n\n # do not scale values where max is between 1 and 255 if the max-min values diffrence is at least 30 and min >0\n # and numeric values are integer type\n if 255 >= actual_max > 1:\n if actual_max - actual_min > 30 and actual_min > 0:\n scaled = numeric_value / 255\n return scaled\n\n scaled = (numeric_value - norm_min_value) / (norm_max_value - norm_min_value)\n\n if np.nanmin(scaled) > -0.01:\n scaled[(0 > scaled) & (scaled > -0.01)] = 0\n\n return scaled\n\n\ndef scale_strict_0_to_1(numeric_value):\n if np.nanmin(numeric_value) == 0 and np.nanmax(numeric_value) == 1:\n return numeric_value\n\n numeric_value[np.isnan(numeric_value)] = 0 # nan change to 0\n\n min_value = np.nanmin(numeric_value)\n max_value = np.nanmax(numeric_value)\n\n scaled = (numeric_value - min_value) / (max_value - min_value)\n\n if np.nanmin(scaled) > -0.01:\n scaled[0 > scaled > -0.01] = 0\n\n return scaled\n\n\ndef scale_0_to_1(numeric_value):\n if 1 >= np.nanmax(numeric_value) > 0.9 and np.nanmin(numeric_value) == 0:\n return numeric_value\n elif np.nanmax(numeric_value) - np.nanmin(numeric_value) > 0.3:\n return scale_within_0_and_1(numeric_value)\n else:\n return scale_strict_0_to_1(numeric_value)\n\n\ndef apply_opacity(active, background, opacity):\n if opacity > 1:\n opacity = opacity / 100\n return active * opacity + background * (1 - opacity)\n\n\ndef normalize_image(visualization, image, min_norm, max_norm, normalization):\n if visualization is None:\n return None\n if normalization == \"percent\":\n normalization = \"perc\"\n\n norm_image = advanced_normalization(image=image, minimum=min_norm, maximum=max_norm, normalization=normalization)\n\n # make sure it scales 0 to 1\n if np.nanmax(norm_image) > 1:\n if visualization.lower() == \"multiple directions hillshade\":\n norm_image = scale_0_to_1(norm_image)\n else:\n norm_image = scale_0_to_1(norm_image)\n warnings.warn(\"rvt.blend.normalize_images_on_layers: unexpected values! max > 1\")\n if np.nanmin(norm_image) < 0:\n warnings.warn(\"rvt.blend.normalize_images_on_layers: unexpected values! min < 0\")\n\n # for slope, invert scale\n # meaning high slopes will be black\n if visualization.lower() == \"slope gradient\":\n norm_image = 1 - norm_image\n return norm_image\n\n\ndef cut_off_normalize(image, mode, min=None, max=None, bool_norm=True):\n \"\"\"\n One band image cut-off or normalization or both. Image is 2D np.ndarray of raster, mode is perc or value\n (min and max units), min and max are minimum value to cutoff and maximum value to cutoff.\n (e.x. 
\n\n\ndef normalize_image(visualization, image, min_norm, max_norm, normalization):\n    if visualization is None:\n        return None\n    if normalization == \"percent\":\n        normalization = \"perc\"\n\n    norm_image = advanced_normalization(image=image, minimum=min_norm, maximum=max_norm, normalization=normalization)\n\n    # make sure it scales 0 to 1\n    if np.nanmax(norm_image) > 1:\n        norm_image = scale_0_to_1(norm_image)\n        if visualization.lower() != \"multiple directions hillshade\":\n            warnings.warn(\"rvt.blend.normalize_images_on_layers: unexpected values! max > 1\")\n    if np.nanmin(norm_image) < 0:\n        warnings.warn(\"rvt.blend.normalize_images_on_layers: unexpected values! min < 0\")\n\n    # for slope, invert scale\n    # meaning high slopes will be black\n    if visualization.lower() == \"slope gradient\":\n        norm_image = 1 - norm_image\n    return norm_image\n\n\ndef cut_off_normalize(image, mode, min=None, max=None, bool_norm=True):\n    \"\"\"\n    One band image cut-off or normalization or both. Image is a 2D np.ndarray of the raster, mode is perc or value\n    (min and max units), min and max are the minimum and maximum cutoff values.\n    (e.g. percent min=2 and max=3 -> cut off the lowest 2% and the highest 3% of values;\n    e.g. value min=10 and max=60 -> cut off below 10 and above 60, image values will be 10-60)\n    \"\"\"\n    if min is not None and max is not None:\n        if min == max and mode == \"value\":\n            raise Exception(\"rvt.blend_func.cut_off_normalize: If normalization == value, min and max cannot be the\"\n                            \" same!\")\n        if min > max and mode == \"value\":\n            raise Exception(\"rvt.blend_func.cut_off_normalize: If normalization == value, max can't be smaller\"\n                            \" than min!\")\n\n    cut_off_arr = image\n    if min is None and mode.lower() == \"value\":\n        min = np.amin(image)\n    if max is None and mode.lower() == \"value\":\n        max = np.amax(image)\n    if min is None and (mode.lower() == \"perc\" or mode.lower() == \"percent\"):\n        min = 0\n    if max is None and (mode.lower() == \"perc\" or mode.lower() == \"percent\"):\n        max = 0\n    if bool_norm:\n        if mode.lower() == \"value\":\n            cut_off_arr = normalize_lin(cut_off_arr, min, max)\n        elif mode.lower() == \"perc\" or mode.lower() == \"percent\":\n            cut_off_arr = normalize_perc(cut_off_arr, min, max)\n    else:\n        if mode.lower() == \"value\":\n            cut_off_arr[cut_off_arr > max] = max\n            cut_off_arr[cut_off_arr < min] = min\n        elif mode.lower() == \"perc\" or mode.lower() == \"percent\":\n            min_max_value_dict = lin_cutoff_calc_from_perc(cut_off_arr, min, max)\n            min_value = min_max_value_dict[\"min_lin\"]\n            max_value = min_max_value_dict[\"max_lin\"]\n            cut_off_arr[cut_off_arr > max_value] = max_value\n            cut_off_arr[cut_off_arr < min_value] = min_value\n    return cut_off_arr\n","sub_path":"rvt/blend_func.py","file_name":"blend_func.py","file_ext":"py","file_size_in_byte":15116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"229590139","text":"# Python standard library:\nimport numpy as np \nimport multiprocessing as mp \nimport sys \nimport logging\nimport time \nimport os \nimport itertools \nimport shutil\n\n# local library: \nimport IO.check_file 
\nimport IO.check_type\nimport IO.reader 
\nimport IO.user_provided 
\n\n# Third-party library: \n\n# define global variables: \n\n# This defines the maximum size to be loaded into memory during initialization\n\nclass load(): \n\n    count_jobs = 0 \n\n    max_wait_time = 300  # maximum amount of time waiting for data (seconds) \n\n    total_file_size_allowed = 1000  # MB \n\n    def __init__(self,ref_address_tple,predict_address_tple,argument_dict): \n\n        load.count_jobs += 1 \n\n        self.logger = logging.getLogger(__name__)\n\n        # load pre-determined file names \n\n        self.loaded_filename() \n\n        self.set_file_address_and_check_status(ref_address_tple,predict_address_tple) \n\n        # parse the objective, cores information: \n\n        self.parse_argument_dict(argument_dict)\n\n        # parse the user-defined input information: \n\n        self.parse_user_defined(argument_dict) \n\n        self.Initialize_force_matching() \n\n        self.Initialize_energy_matching()\n\n        return None \n\n    def loaded_filename(self): \n\n        # modify the following file names if needed\n\n        self.Ref_energy_file = \"Ref.eng\"\n\n        self.Ref_force_file = \"Ref.force\"\n\n        self.predict_energy_file = \"predict.eng\"\n\n        self.predict_force_file = \"predict.force\"\n\n        return None 
\n\n    def set_file_address_and_check_status(self,ref_address_tple,predict_address_tple): \n\n        self.Ref_force_file_lst = [] \n\n        self.predict_force_file_lst = []\n\n        self.Ref_energy_file_lst = [] \n\n        self.predict_energy_file_lst = []\n\n        self.ref_force_lines = [] \n\n        self.ref_eng_lines = [] \n\n        self.predicted_address_lst = []
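\n\n        # Each reference/prediction folder pair is expected to contain the\n        # fixed file names set in loaded_filename(), e.g. (hypothetical layout):\n        #     ref_run_1/Ref.eng, ref_run_1/Ref.force\n        #     pred_run_1/predict.eng, pred_run_1/predict.force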
\n\n        for ref_address,predict_address in zip(ref_address_tple,predict_address_tple): \n\n            # get Reference energy and force address: \n\n            ref_energy_file = os.path.join(ref_address,self.Ref_energy_file) \n\n            ref_force_file = os.path.join(ref_address,self.Ref_force_file)\n\n            predict_energy_file = os.path.join(predict_address,self.predict_energy_file)\n\n            predict_force_file = os.path.join(predict_address,self.predict_force_file)\n\n            self.predicted_address_lst.append(predict_address) \n\n            self.Pre_load_energy_data(ref_energy_file) \n\n            IO.check_file.status_is_ok(ref_energy_file)\n\n            IO.check_file.status_is_ok(ref_force_file) \n\n            num_lines_eng,num_columns = IO.reader.get_lines_columns(ref_energy_file) \n\n            num_lines_force,num_columns = IO.reader.get_lines_columns(ref_force_file) \n\n            self.ref_eng_lines.append(num_lines_eng)\n\n            self.ref_force_lines.append(num_lines_force) \n\n            self.Ref_energy_file_lst.append(ref_energy_file)\n\n            self.Ref_force_file_lst.append(ref_force_file)\n\n            self.predict_energy_file_lst.append(predict_energy_file) \n\n            self.predict_force_file_lst.append(predict_force_file) \n\n        return None 
\n\n    def check_predicted_data_status(self): \n\n        for i,predicted_data in enumerate(self.predict_force_file_lst):\n\n            IO.check_file.status_is_ok(predicted_data) \n\n            predicted_num_lines,column = IO.reader.get_lines_columns(predicted_data)\n\n            if (predicted_num_lines == self.ref_force_lines[i]): \n\n                self.logger.info(\"Predicted force data is ready ... \")\n\n            else: \n\n                # pass the file path (not the line count) so the poller can\n                # re-read the file while waiting\n                self.wait_for_data_to_be_ready(predicted_data,self.ref_force_lines[i]) \n\n        return None 
\n\n    def wait_for_data_to_be_ready(self,predicted_data,ref_force_lines):\n\n        count_time = 0 \n\n        self.logger.info(\"Waiting for predicted force data ... \") \n\n        while True: \n\n            predicted_num_lines,num_columns = IO.reader.get_lines_columns(predicted_data) \n\n            if ( predicted_num_lines == ref_force_lines ):\n\n                # the predicted data is complete; stop polling\n                break \n\n            if ( count_time > load.max_wait_time ):\n\n                self.logger.error(\"Maximum amount of waiting time for predicted force data is reached ( 300s ) ... \\n\")\n                self.logger.error(\"Current number of lines of predicted force data is \"\n                                  \"%d, not equal to that of reference data: %d\\n\"%(predicted_num_lines,ref_force_lines))\n                self.logger.error(\"Check the file address: %s\\n\"%predicted_data)\n                sys.exit(\"Check errors in the log file\")\n\n            time.sleep(5) \n\n            self.logger.info(\"time elapsed: %d ... \\n\"%count_time)\n\n            count_time += 5 \n\n        return None 
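\n\n    # Reference energies are preloaded only when the file comfortably fits in\n    # memory (under load.total_file_size_allowed = 1000 MB); larger files are\n    # simply left on disk.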
\n    def Pre_load_energy_data(self,file_address): \n\n        # set default preload of Ref force and energy as false: \n\n        self.load_ref_eng = False \n\n        # check whether the Reference energy file is too big to preload \n\n        ref_eng_file_size = IO.check_file.get_file_size(file_address,units=\"MB\") \n\n        if ( ref_eng_file_size < load.total_file_size_allowed ): \n\n            self.load_ref_eng = True \n\n        return None 
\n\n#----------------------------------------------------------------------------\n# Parse the input: \n#----------------------------------------------------------------------------\n\n    # parse mandatory user-input: \n    def parse_argument_dict(self,argument): \n        # argument is a tuple\n\n        self.sub_folder = argument[0]\n\n        # convert objective weight into float \n\n        self.obj_weight = float(argument[1]) \n\n        # convert cores for analysis into integer\n\n        self.num_cores = int(argument[3]) \n\n        # equally assign cores for processing predicted and reference data \n        self.num_cores_ref = int(self.num_cores/2) \n\n        self.num_cores_predict = int(self.num_cores/2) \n\n        return None 
\n\n    # parse the user-defined input \n    def parse_user_defined(self,argument): \n\n        # --------------- user defined argument ----------------------\n        # user defined: \"w 1.0 1.0 bf 5000 eng abs virial\"\n        # get the weight between energy and force \n\n        argument_str = argument[-1].split() \n\n        self.parse_weight_arg(argument_str) \n\n        self.parse_buffersize_arg(argument_str) \n\n        self.parse_eng_arg(argument_str) \n\n        return None 
\n\n    def parse_weight_arg(self,argument_str): \n\n        keyword_index = IO.user_provided.keyword_exists(argument_str,\"w\") \n\n        if ( keyword_index < 0 ): \n\n            self.logger.warn(\"WARNING: missing weight 'w' in the force matching argument\\n\" \n                             \"If none, force and energy are assumed to be equally weighted\") \n\n            self.weight_force_eng = np.array([1.0,1.0],dtype=np.float64)\n\n            return None 
\n\n        try: \n\n            self.weight_force_eng = np.array([\n                float(argument_str[keyword_index+1]),\n                float(argument_str[keyword_index+2])]) \n\n        except (ValueError,TypeError): \n\n            self.logger.error(\"ERROR: type or value errors in choosing the weight between force and energy; The format should be 'w float float' \") \n\n            sys.exit(\"Check errors in the log file\") \n\n        return None 
\n\n    def parse_buffersize_arg(self,argument_str): \n\n        keyword_index = IO.user_provided.keyword_exists(argument_str,\"bf\") \n\n        if ( keyword_index < 0 ): \n\n            self.logger.error(\"ERROR: missing buffersize 'bf' in the force matching argument\") \n\n            sys.exit(\"Check errors in the log file\") \n\n        try: \n\n            self.buffersize = int(argument_str[keyword_index+1]) \n\n        except ( ValueError,TypeError):\n\n            self.logger.error(\"ERROR: buffer index argument error; The format is 'bf integer' \") \n\n            sys.exit(\"Check errors in the log file\") \n\n        return None 
\n\n    def parse_eng_arg(self,argument_str):\n\n        keyword_index = IO.user_provided.keyword_exists(argument_str,\"eng\") \n\n        if ( keyword_index < 0 ): \n\n            self.eng_keyword = \"var\" \n\n            self.logger.warn(\"WARNING: missing energy matching 'eng' in the force matching argument\\n\")
\n\n            self.logger.warn(\"if none, 'eng var' is used instead\\n\") \n\n            return None 
\n\n        if ( not IO.check_type.is_string(argument_str[keyword_index+1])): \n\n            self.logger.error(\"ERROR: energy keyword type error; The keyword is a string: 'eng abs' or 'eng var' \")\n\n            sys.exit(\"Check errors in the log file\") \n\n        try: \n\n            self.eng_keyword = argument_str[keyword_index+1] \n\n        except ( ValueError,TypeError): \n\n            self.logger.error(\"ERROR: energy keyword type error; The keyword is a string: 'eng abs' or 'eng var' \") \n\n            sys.exit(\"Check errors in the log file\")\n\n        return None 
\n\n    def parse_virial_arg(self,argument_str): \n\n        keyword_index = IO.user_provided.keyword_exists(argument_str,\"virial\")\n\n        if (keyword_index < 0):\n\n            self.virial_keyword = False \n\n            return None 
\n\n        # the keyword is present; flag it (virial matching itself is not implemented yet) \n        self.virial_keyword = True 
\n\n        return None 
\n\n    def print_objective_info(self):\n\n        self.logger.info(\"Reference data address: %s\\n\"%\", \".join(self.Ref_energy_file_lst))\n        self.logger.info(\"The sub_folder name: %s\\n\"%self.sub_folder) \n        self.logger.info(\"The weight of the objective function: %.3f \\n\"%self.obj_weight) \n        self.logger.info(\"Number of cores for computing the objective: %d\\n\"%self.num_cores) \n\n        return None 
\n\n#----------------------------------------------------------------------------\n# Force Matching : \n#----------------------------------------------------------------------------\n\n    def Initialize_force_matching(self):\n\n        if (self.weight_force_eng[1] == 0.0): \n\n            self.logger.warn(\"WARNING: The weight for force matching is 0; skip the force matching\\n\") \n\n            return None 
\n\n        self.num_configs_lst = [] \n\n        self.num_atoms_lst = []\n\n        self.ref_force_norm_lst = [] \n\n        self.workers = mp.Pool(self.num_cores) \n\n        for i,force_file_name in enumerate(self.Ref_force_file_lst):\n\n            num_lines = self.ref_force_lines[i] \n\n            num_atoms = IO.reader.read_LAMMPS_traj_num_atoms(force_file_name) \n\n            self.num_atoms_lst.append(num_atoms) \n\n            # get the number of configurations: \n            num_configs = IO.reader.get_num_configs_LAMMPS_traj(num_atoms,num_lines) \n\n            self.num_configs_lst.append(num_configs) \n\n            force_ref_jobs = IO.reader.read_LAMMPS_traj_in_parallel(force_file_name,\n                                       self.num_cores,\n                                       num_atoms,\n                                       num_configs,\n                                       first=1,\n                                       buffer_size=self.buffersize,\n                                       workers=self.workers) \n\n            # computing the force normalization :\n\n            self.ref_force_norm_lst.append(self.pre_compute_force_norm(force_ref_jobs,num_configs,num_atoms,num_column=3))\n\n        self.workers.close() \n\n        self.workers.join() \n\n        return None 
\n\n    def pre_compute_force_norm(self,force_job_list,total_configs,num_atoms,num_column):\n\n        sum_refforce = 0 \n\n        sqr_ave = 0 \n\n        # loop over all cores of reading force data \n\n        for output in force_job_list: \n\n            # get reference data from current core \n\n            Reference_data = output.get() \n\n            sum_refforce = sum_refforce + np.sum(Reference_data)\n\n            sqr_ave = sqr_ave + np.sum(Reference_data*Reference_data) \n\n        average_sqr = (sum_refforce/(total_configs*num_atoms*num_column))**2 \n\n        sqr_average = sqr_ave/(total_configs*num_atoms*num_column) \n\n        variances_ref = ( sqr_average - average_sqr )*total_configs*num_atoms*num_column\n\n        return variances_ref 
\n\n    def compute_force_matching_objective(self): \n\n        self.fm_objective_lst = [] \n\n        i = 0 \n\n        for ref_file,predict_file in zip(self.Ref_force_file_lst,\n                                         self.predict_force_file_lst): \n\n            if (self.weight_force_eng[1] != 0.0): 
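\n\n                # Half of the cores stream the reference trajectory while the\n                # other half stream the predicted one, so both files are read\n                # concurrently.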
\n\n                self.ref_workers = mp.Pool(self.num_cores_ref) \n\n                self.predict_workers = mp.Pool(self.num_cores_predict) \n\n                # launch the jobs in parallel \n                # start reading reference force data \n                force_ref_jobs = IO.reader.read_LAMMPS_traj_in_parallel(ref_file,\n                                           self.num_cores_ref,\n                                           self.num_atoms_lst[i],\n                                           self.num_configs_lst[i],\n                                           first=1,\n                                           buffer_size=self.buffersize,\n                                           workers=self.ref_workers) \n\n                # start reading predicted force data\n                force_predict_jobs = IO.reader.read_LAMMPS_traj_in_parallel(predict_file,\n                                           self.num_cores_predict,\n                                           self.num_atoms_lst[i], \n                                           self.num_configs_lst[i], \n                                           first=1,\n                                           buffer_size=self.buffersize,\n                                           workers=self.predict_workers) \n\n                sum_sqr_diff = 0 \n\n                for ref_output,predict_output in zip(force_ref_jobs,force_predict_jobs): \n\n                    sum_sqr_diff += np.sum(np.square(( ref_output.get() - predict_output.get() ))) \n\n                self.fm_objective_lst.append(sum_sqr_diff/self.ref_force_norm_lst[i]) \n\n                # update the counter \n                i += 1 \n\n                self.ref_workers.close() \n\n                self.predict_workers.close() \n\n                self.ref_workers.join() \n\n                self.predict_workers.join() \n\n            else: \n\n                self.fm_objective_lst.append(0) \n\n        return None 
\n\n#----------------------------------------------------------------------------\n# Energy Matching : \n#----------------------------------------------------------------------------\n\n    def Initialize_energy_matching(self): \n\n        # if weight of energy is 0, no need to do energy matching:\n\n        if (self.weight_force_eng[0] == 0.0): \n\n            self.logger.warn(\"WARNING: The weight for energy matching is 0; skip energy matching\\n\") \n\n            return None 
\n\n        self.ref_eng_data_lst = [] \n\n        self.ref_eng_norm_lst = [] \n\n        for i,ref_eng_file in enumerate(self.Ref_energy_file_lst): \n\n            num_lines = self.ref_eng_lines[i] \n\n            ref_energy_data,energy_norm = self.pre_compute_energy_matching_norm(ref_eng_file,num_lines)\n\n            self.ref_eng_data_lst.append(ref_energy_data) \n\n            self.ref_eng_norm_lst.append(energy_norm)\n\n        return None 
\n\n    def pre_compute_energy_matching_norm(self,Ref_eng_file,num_lines_eng): \n\n        if ( not self.load_ref_eng ): \n\n            # streaming evaluation of oversized reference energy files is not\n            # implemented; fail loudly instead of returning unbound values\n            self.logger.error(\"Reference energy file is too large to be preloaded into memory\")\n\n            sys.exit(\"Check errors in the log file\")\n\n        ref_energy_data = IO.reader.loadtxt(Ref_eng_file,\n                            num_lines_eng,\n                            skiprows=0,\n                            return_numpy=True) \n\n        energy_norm = np.var(ref_energy_data)\n\n        return ref_energy_data,energy_norm 
\n\n    def compute_energy_matching_objective(self):\n\n        self.energy_objective_lst = [] \n\n        i = 0 \n\n        for ref_file,predict_file in zip(self.Ref_energy_file_lst,\n                                         self.predict_energy_file_lst):\n\n            if (self.weight_force_eng[0] != 0.0): \n\n                predicted_eng_data = IO.reader.loadtxt(predict_file,\n                                                       self.ref_eng_lines[i]+1,\n                                                       skiprows=1,\n                                                       return_numpy=True)\n\n                if (self.eng_keyword == \"var\"):\n\n                    self.energy_objective_lst.append(self.compute_scaled_var_energy(predicted_eng_data,\n                                                                self.ref_eng_data_lst[i],\n                                                                self.ref_eng_norm_lst[i])) \n\n                elif (self.eng_keyword == \"abs\"): \n\n                    self.energy_objective_lst.append(self.compute_scaled_abs_energy(predicted_eng_data,\n                                                                self.ref_eng_data_lst[i],\n                                                                self.ref_eng_norm_lst[i]))\n\n                else: \n\n                    self.logger.info(\"The energy matching keyword is not recognized: Choose 'var' or 'abs'\")\n                    sys.exit(\"Check errors in the log file !\")\n\n                i += 1 \n\n            else: \n\n                self.energy_objective_lst.append(0) \n\n        return None 
\n\n    def compute_scaled_var_energy(self,predicted_eng,ref_energy,eng_norm): \n\n        diff = predicted_eng - ref_energy 
\n\n        ave_diff = np.average(diff) \n\n        relative_eng = (diff - ave_diff)**2 \n\n        return np.average(relative_eng/eng_norm) \n
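\n    # 'var' matching above compares fluctuations about the mean,\n    #     obj = <(dE - <dE>)**2> / Var(E_ref)  with  dE = E_predict - E_ref,\n    # so a constant energy offset between the two force fields is ignored;\n    # the 'abs' variant below penalizes that offset as well.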
\n    def compute_scaled_abs_energy(self,predicted_eng,ref_energy,eng_norm):\n\n        return np.average((predicted_eng - ref_energy)**2/eng_norm)\n\n\n#----------------------------------------------------------------------------\n# Virial Matching \n#----------------------------------------------------------------------------\n\n\n#----------------------------------------------------------------------------\n# Compute overall objective: \n#----------------------------------------------------------------------------\n\n    def optimize(self): \n\n        # before evaluating objective functions \n        self.check_predicted_data_status()\n\n        eng_weight = self.weight_force_eng[0] \n\n        force_weight = self.weight_force_eng[1]\n\n        scaled_eng_objective = 0 \n\n        scaled_force_objective = 0\n\n        self.compute_force_matching_objective() \n\n        self.compute_energy_matching_objective()\n\n        for e_obj,f_obj in zip(self.energy_objective_lst,self.fm_objective_lst): \n\n            scaled_eng_objective += eng_weight*e_obj 
\n\n            scaled_force_objective += force_weight*f_obj\n\n        #print ( \"scaled energy: \", scaled_eng_objective ,\"scaled force: \", scaled_force_objective )\n        return self.obj_weight*( scaled_eng_objective + scaled_force_objective ) \n\n    # output of predicted force and energy data \n\n    def rename(self,status,output_folder): \n\n        counter = 0 \n\n        for eng_file,force_file in zip(self.predict_energy_file_lst,self.predict_force_file_lst): \n\n            if (status == \"guess\"): \n\n                initial_predicted_force = self.sub_folder+\"_guess\"+\".force\"\n\n                initial_predicted_eng = self.sub_folder+\"_guess\"+\".eng\"\n\n                dest_eng = os.path.join(output_folder,initial_predicted_eng) \n\n                dest_force = os.path.join(output_folder,initial_predicted_force)\n\n                shutil.move(eng_file,dest_eng)\n\n                shutil.move(force_file,dest_force)\n\n            elif (status == \"old\"): \n\n                current_force_file = os.path.join(self.predicted_address_lst[counter],status+\".force\") \n\n                current_eng_file = os.path.join(self.predicted_address_lst[counter],status+\".eng\") \n\n                shutil.copyfile(force_file,current_force_file) \n\n                shutil.copyfile(eng_file,current_eng_file)\n\n            counter += 1 \n\n        return None 
\n\n    def update(self,keyword,output_folder): \n\n        counter = 0 \n\n        for eng_file,force_file in zip(self.predict_energy_file_lst,self.predict_force_file_lst): \n\n            predicted_force = self.sub_folder + \"_best\" + \".force\" \n\n            predicted_eng = self.sub_folder + \"_best\" + \".eng\" \n\n            dest_force = os.path.join(output_folder,predicted_force) \n\n            dest_eng = os.path.join(output_folder,predicted_eng) \n\n            if (keyword == \"new\"): \n\n                shutil.move(eng_file,dest_eng) \n\n                shutil.move(force_file,dest_force) \n\n            elif ( keyword == \"old\"): \n\n                current_force_file = os.path.join(self.predicted_address_lst[counter],keyword+\".force\") \n\n                current_eng_file = os.path.join(self.predicted_address_lst[counter],keyword+\".eng\") \n\n                shutil.move(current_force_file,dest_force)\n\n                shutil.move(current_eng_file,dest_eng)\n\n            # advance to the next prediction folder \n            counter += 1 \n\n        return None 
\n\n","sub_path":"objective/force_matching/force_matching.py","file_name":"force_matching.py","file_ext":"py","file_size_in_byte":21878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"98907823","text":"class Human:\n\n    # Class Attribute:\n    race = \"Aryan\"\n\n\n    def __init__(self, x, y):\n        self.name = x\n        self.gender = y\n\n    def __str__(self):\n        return f\"Name: {self.name} Gender: {self.gender} Race: {self.race}\"\n\n\nperson1 = Human(\"Aadarsha\", \"Male\")\n\n# Instance attribute:\nperson1.race = \"African\"\n\n\nprint(person1)
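\n\n# Instance attributes shadow class attributes on lookup: person1 now reports\n# race \"African\" while Human.race itself is unchanged.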
\n\n\n# This object will not use the instance attribute but the class attribute instead\nperson2 = Human(\"Samikshya\", \"Female\")\nprint(person2)\n\n\n# Changing the class attribute:\nHuman.race = \"Asian\"\n\n# Now, every object that I make will have \"Asian\" as its race\n\nperson3 = Human(\"JPG\", \"Male\")\nprint(person3)\n\nperson4 = Human(\"Shakuntala\", \"Female\")\nprint(person4)","sub_path":"Classes/class_attributes_instance_attributes.py","file_name":"class_attributes_instance_attributes.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"9294083","text":"import sklearn.datasets as sk_data\nimport sklearn.neighbors as sk_knn\n\niris = sk_data.load_iris()\n\n#store feature matrix in X\nx = iris.data\n\n#store response vector in Y (the class labels, not the features)\ny = iris.target\n\n#Instantiate estimator\nknn = sk_knn.KNeighborsClassifier(n_neighbors=5)\n\n#Fit the model (Output will be purely numerical)\nknn.fit(x,y)\n\n#Test with new data\nx_new = [[3,5,4,2],[5,4,3,2]]\n\nprint(knn.predict(x_new))\n\n\n","sub_path":"Iris/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"3500820","text":"from aipollo_omr.detectors.staff_detector import StaffDetector\r\nfrom aipollo_omr.detectors.note_detector import NoteDetector\r\n\r\nobject_detectors = []\r\n\r\n\r\ndef analyze_score(image, models_dir):\r\n    all_score_elements = []\r\n\r\n    print('Start detecting staffs...')\r\n    staffs, staff_height = StaffDetector(models_dir).detect(image)\r\n    all_score_elements.extend(staffs)\r\n    print('Done.')\r\n    print('Start detecting half notes...')\r\n    all_score_elements.extend(\r\n        NoteDetector(models_dir).detect(image, staff_height))\r\n    print('Done.')\r\n\r\n    return all_score_elements","sub_path":"aipollo_omr/score_analyzer.py","file_name":"score_analyzer.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"54777893","text":"from __future__ import division\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\nfrom google.oauth2 import service_account\n\nimport pyaudio\nfrom six.moves import queue\nfrom threading import Thread\n\nimport re\nimport sys\n# from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *  # QIcon\nfrom PyQt5.QtCore import *  # pyqtSlot\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\n\n# Audio recording parameters\nRATE = 16000\nCHUNK = int(RATE / 10)  # 100ms\n\nstri = \"\"\n\n\nclass MicrophoneStream(object):\n    \"\"\"Opens a recording stream as a generator yielding the audio chunks.\"\"\"\n    def __init__(self, rate, chunk):\n        self._rate = rate\n        self._chunk = chunk\n\n        # Create a thread-safe buffer of audio data\n        self._buff = queue.Queue()\n        self.closed = True\n\n    def __enter__(self):\n        self._audio_interface = pyaudio.PyAudio()\n        self._audio_stream = self._audio_interface.open(\n            format=pyaudio.paInt16,\n            # The API currently only supports 1-channel (mono) audio\n            # https://goo.gl/z757pE\n            channels=1, rate=self._rate,\n            input=True, frames_per_buffer=self._chunk,\n            # Run the audio stream asynchronously to fill the buffer object.\n            # This is necessary so that the input device's buffer 
doesn't\n # overflow while the calling thread makes network requests, etc.\n stream_callback=self._fill_buffer,\n )\n\n self.closed = False\n\n return self\n\n def __exit__(self, type, value, traceback):\n self._audio_stream.stop_stream()\n self._audio_stream.close()\n self.closed = True\n # Signal the generator to terminate so that the client's\n # streaming_recognize method will not block the process termination.\n self._buff.put(None)\n self._audio_interface.terminate()\n\n def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n \"\"\"Continuously collect data from the audio stream, into the buffer.\"\"\"\n self._buff.put(in_data)\n return None, pyaudio.paContinue\n\n def generator(self):\n while not self.closed:\n # Use a blocking get() to ensure there's at least one chunk of\n # data, and stop iteration if the chunk is None, indicating the\n # end of the audio stream.\n chunk = self._buff.get()\n if chunk is None:\n return\n data = [chunk]\n\n # Now consume whatever other data's still buffered.\n while True:\n try:\n chunk = self._buff.get(block=False)\n if chunk is None:\n return\n data.append(chunk)\n except queue.Empty:\n break\n\n yield b''.join(data)\n\n\ndef listen_print_loop(responses):\n print(\"listen - called\")\n global ex\n global stri\n num_chars_printed = 0\n for response in responses:\n # print(\"Inside For\")\n if not response.results:\n continue\n\n result = response.results[0]\n if not result.alternatives:\n continue\n\n # Display the transcription of the top alternative.\n pre_transcript0 = result.alternatives[0].transcript\n pre_transcript1 = pre_transcript0.replace(\"hyphen\", \"-\")\n pre_transcript2 = pre_transcript1.replace(\"dash\", \"-\")\n pre_transcript3 = pre_transcript2.replace(\"Dash\", \"-\")\n transcript = pre_transcript3.replace(\"slash\", \"/\")\n\n overwrite_chars = ' ' * (num_chars_printed - len(transcript))\n\n if not result.is_final:\n # print(\"====================\")\n\n # sys.stdout.write(pre_transcript0 + overwrite_chars + '\\n')\n # sys.stdout.write(transcript + overwrite_chars + '\\n')\n # sys.stdout.flush()\n\n num_chars_printed = len(transcript)\n\n ex.focused_box.setText(stri + transcript + overwrite_chars + '\\r')\n ex.focused_box.setFont(ex.myFont)\n\n # ex.focused_box.setText(str(ex.focused_box.text) + transcript + overwrite_chars + '\\r') \n\n else:\n\n ex.focused_box.setText(stri + transcript + overwrite_chars + '\\r')\n ex.focused_box.setFont(ex.myFontNormal)\n stri += transcript\n\n # Exit recognition if any of the transcribed phrases could be\n # one of our keywords.\n if re.search(r'\\b(exit|quit)\\b', transcript, re.I):\n print('Exiting..')\n break\n num_chars_printed = 0\n\ndef main():\n try:\n # language_code = 'en-US' # a BCP-47 language tag te-IN en-IN\n language_code = 'en-IN' # a BCP-47 language tag te-IN en-IN\n credentials = service_account.Credentials. 
from_service_account_file('googleKeys.json')\n client = speech.SpeechClient(credentials=credentials)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=RATE,\n language_code=language_code)\n streaming_config = types.StreamingRecognitionConfig(\n config=config,\n interim_results=True)\n\n with MicrophoneStream(RATE, CHUNK) as stream:\n print(\"inside stream\")\n audio_generator = stream.generator()\n requests = (types.StreamingRecognizeRequest(audio_content=content)\n for content in audio_generator)\n\n responses = client.streaming_recognize(streaming_config, requests)\n\n # Now, put the transcription responses to use.\n listen_print_loop(responses)\n except Exception as e: \n print(str(e))\n main()\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n\n\nclass App(QMainWindow):\n c1fnx = 15 # column 1 FieldName x - coordinates\n c2fnx = 375 # column 2 FieldName x - coordinates\n c3fnx = 735 # column 3 FieldName x - coordinates\n c4fnx = 1110 # column 4 FieldName x - coordinates\n\n c1tbx = 160 # column 1 TextBox x - coordinates\n c2tbx = 520 # column 2 TextBox x - coordinates\n c3tbx = 900 # column 3 TextBox x - coordinates\n c4tbx = 1250 # column 3 TextBox x - coordinates\n\n tbx = 175 # TextBox x/width\n tby = 30 # TextBox y/height\n\n def __init__(self):\n super().__init__()\n self.title = 'Bank Form | Speech - Text integration'\n self.left = 10\n self.top = 10\n self.width = 1500\n self.height = 800\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle(self.title)\n # self.setGeometry(self.left, self.top)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.myFont = QtGui.QFont()\n self.myFont.setBold(True)\n self.myFontNormal = QtGui.QFont()\n self.myFontNormal.setBold(False)\n self.onlyInt = QIntValidator()\n # self.LineEdit.setValidator(self.onlyInt)\n QtWidgets.QShortcut(QtGui.QKeySequence('Ctrl+s'), self, self.on_click)\n QtWidgets.QShortcut(QtGui.QKeySequence('Ctrl+d'), self, self.on_clear)\n\n\n # Coulmn 1 \n\n # self.c1PushText = QLabel(self)\n # # self.c1FieldName.setText('Scheme Code')\n # # self.c1PushText.move(App.c1fnx, 20)\n # self.c1PushText.setGeometry(QtCore.QRect(App.c1fnx, 20, 320, 60)) # (x, y, width, height)\n # self.c1PushText.setWordWrap(True)\n\n # self.button = QPushButton('Push text', self)\n # self.button.move(App.c1fnx, 100)\n # # self.button.move(App.c1fnx, 20)\n # # connect button to function on_click\n # self.button.clicked.connect(self.on_click)\n\n # self.button = QPushButton('Clear', self)\n # self.button.move(App.c1fnx + 100, 100)\n # # self.button.move(App.c1fnx, 20)\n # # connect button to function on_click\n # self.button.clicked.connect(self.on_clear)\n\n\n self.c1FieldName = QLabel(self)\n self.c1FieldName.setText('Scheme Code')\n self.c1FieldName.move(App.c1fnx, 20)\n\n self.c1TextBox = QLineEdit(self)\n self.c1TextBox.installEventFilter(self)\n self.c1TextBox.move(App.c1tbx, 20)\n self.c1TextBox.resize(App.tbx, App.tby)\n\n self.c1FieldName1 = QLabel(self)\n self.c1FieldName1.setText('Customer ID')\n self.c1FieldName1.move(App.c1fnx, 60)\n\n self.c1TextBox1 = QLineEdit(self)\n self.c1TextBox1.installEventFilter(self)\n self.c1TextBox1.move(App.c1tbx, 60)\n self.c1TextBox1.resize(App.tbx, App.tby)\n # self.c1TextBox1s = \"newn\"\n # self.c1TextBox1.setText(self.c1TextBox1s)\n\n self.c1FieldName2 = QLabel(self)\n self.c1FieldName2.setText('Account No.')\n self.c1FieldName2.move(App.c1fnx, 100)\n\n self.c1TextBox2 = QLineEdit(self)\n 
self.c1TextBox2.installEventFilter(self)\n        self.c1TextBox2.move(App.c1tbx, 100)\n        self.c1TextBox2.resize(App.tbx, App.tby)\n\n        self.c1FieldName3 = QLabel(self)\n        self.c1FieldName3.setText('Date')\n        self.c1FieldName3.move(App.c1fnx, 140)\n\n        self.c1TextBox3 = QLineEdit(self)\n        self.c1TextBox3.setValidator(QIntValidator())\n        self.c1TextBox3.setMaxLength(10)\n        self.c1TextBox3.installEventFilter(self)\n        self.c1TextBox3.move(App.c1tbx, 140)\n        self.c1TextBox3.resize(App.tbx, App.tby)\n\n        self.c1FieldName4 = QLabel(self)\n        self.c1FieldName4.setText('Label Code')\n        self.c1FieldName4.move(App.c1fnx, 180)\n\n        self.c1TextBox4 = QLineEdit(self)\n        self.c1TextBox4.installEventFilter(self)\n        self.c1TextBox4.move(App.c1tbx, 180)\n        self.c1TextBox4.resize(App.tbx, App.tby)\n\n        self.c1FieldName5 = QLabel(self)\n        self.c1FieldName5.setText('Telex Code')\n        self.c1FieldName5.move(App.c1fnx, 220)\n\n        self.c1TextBox5 = QLineEdit(self)\n        self.c1TextBox5.installEventFilter(self)\n        self.c1TextBox5.move(App.c1tbx, 220)\n        self.c1TextBox5.resize(App.tbx, App.tby)\n\n        self.c1FieldName6 = QLabel(self)\n        self.c1FieldName6.setText('Name & No. of BC')\n        # self.c1FieldName6.move(App.c1fnx, 260)\n        self.c1FieldName6.setGeometry(QtCore.QRect(App.c1fnx, 260, 120, 30))  # (x, y, width, height)\n\n        self.c1TextBox6 = QLineEdit(self)\n        self.c1TextBox6.installEventFilter(self)\n        self.c1TextBox6.move(App.c1tbx, 260)\n        self.c1TextBox6.resize(App.tbx, App.tby)\n\n        self.c1FieldName7 = QLabel(self)\n        self.c1FieldName7.setText('Name of the Branch')\n        # self.c1FieldName7.move(App.c1fnx, 300)\n        self.c1FieldName7.setGeometry(QtCore.QRect(App.c1fnx, 300, 120, 30))  # (x, y, width, height)\n\n        self.c1TextBox7 = QLineEdit(self)\n        self.c1TextBox7.installEventFilter(self)\n        self.c1TextBox7.move(App.c1tbx, 300)\n        self.c1TextBox7.resize(App.tbx, App.tby)\n\n        self.c1FieldName8 = QLabel(self)\n        self.c1FieldName8.setText('Village / Town')\n        self.c1FieldName8.move(App.c1fnx, 340)\n\n        self.c1TextBox8 = QLineEdit(self)\n        self.c1TextBox8.installEventFilter(self)\n        self.c1TextBox8.move(App.c1tbx, 340)\n        self.c1TextBox8.resize(App.tbx, App.tby)\n\n        self.c1FieldName9 = QLabel(self)\n        self.c1FieldName9.setText('Sub District / Block')\n        # self.c1FieldName9.move(App.c1fnx, 380)\n        self.c1FieldName9.setGeometry(QtCore.QRect(App.c1fnx, 380, 120, 30))  # (x, y, width, height)\n\n        self.c1TextBox9 = QLineEdit(self)\n        self.c1TextBox9.installEventFilter(self)\n        self.c1TextBox9.move(App.c1tbx, 380)\n        self.c1TextBox9.resize(App.tbx, App.tby)\n\n        self.c1FieldName10 = QLabel(self)\n        self.c1FieldName10.setText('District')\n        self.c1FieldName10.move(App.c1fnx, 420)\n\n        self.c1TextBox10 = QLineEdit(self)\n        self.c1TextBox10.installEventFilter(self)\n        self.c1TextBox10.move(App.c1tbx, 420)\n        self.c1TextBox10.resize(App.tbx, App.tby)\n        \n        self.c1FieldName11 = QLabel(self)\n        self.c1FieldName11.setText('State')\n        self.c1FieldName11.move(App.c1fnx, 460)\n\n        self.c1TextBox11 = QLineEdit(self)\n        self.c1TextBox11.installEventFilter(self)\n        self.c1TextBox11.move(App.c1tbx, 460)\n        self.c1TextBox11.resize(App.tbx, App.tby)\n\n        self.c1FieldName12 = QLabel(self)\n        self.c1FieldName12.setText('SSA Code / Ward No.')\n        # self.c1FieldName12.move(App.c1fnx, 500)\n        self.c1FieldName12.setGeometry(QtCore.QRect(App.c1fnx, 500, 130, 30))  # (x, y, width, height)\n\n        self.c1TextBox12 = QLineEdit(self)\n        self.c1TextBox12.installEventFilter(self)\n        self.c1TextBox12.move(App.c1tbx, 500)\n        
self.c1TextBox12.resize(App.tbx, App.tby)\n\n self.c1FieldName13 = QLabel(self)\n self.c1FieldName13.setText('Area Code')\n self.c1FieldName13.move(App.c1fnx, 540)\n\n self.c1TextBox13 = QLineEdit(self)\n self.c1TextBox13.installEventFilter(self)\n self.c1TextBox13.move(App.c1tbx, 540)\n self.c1TextBox13.resize(App.tbx, App.tby)\n\n self.c1FieldName14 = QLabel(self)\n self.c1FieldName14.setText('Name of Applicant')\n # self.c1FieldName14.move(App.c1fnx, 580)\n self.c1FieldName14.setGeometry(QtCore.QRect(App.c1fnx, 580, 120, 30)) # (x, y, width, height)\n\n self.c1TextBox14 = QLineEdit(self)\n self.c1TextBox14.installEventFilter(self)\n self.c1TextBox14.move(App.c1tbx, 580)\n self.c1TextBox14.resize(App.tbx, App.tby)\n\n self.c1FieldName15 = QLabel(self)\n self.c1FieldName15.setText('Father / Husband')\n # self.c1FieldName15.move(App.c1fnx, 620)\n self.c1FieldName15.setGeometry(QtCore.QRect(App.c1fnx, 620, 150, 30)) # (x, y, width, height)\n\n self.c1TextBox15 = QLineEdit(self)\n self.c1TextBox15.installEventFilter(self)\n self.c1TextBox15.move(App.c1tbx, 620)\n self.c1TextBox15.resize(App.tbx, App.tby)\n\n self.c1FieldName16 = QLabel(self)\n self.c1FieldName16.setText(\"Mother's maiden Name\")\n # self.c1FieldName16.move(App.c1fnx, 660)\n self.c1FieldName16.setGeometry(QtCore.QRect(App.c1fnx, 660, 150, 30)) # (x, y, width, height)\n\n self.c1TextBox16 = QLineEdit(self)\n self.c1TextBox16.installEventFilter(self)\n self.c1TextBox16.move(App.c1tbx, 660)\n self.c1TextBox16.resize(App.tbx, App.tby)\n\n self.c1FieldName17 = QLabel(self)\n self.c1FieldName17.setText('Gender')\n self.c1FieldName17.move(App.c1fnx, 700)\n\n self.c1TextBox17 = QLineEdit(self)\n self.c1TextBox17.installEventFilter(self)\n self.c1TextBox17.move(App.c1tbx, 700)\n self.c1TextBox17.resize(App.tbx, App.tby)\n\n self.c1FieldName18 = QLabel(self)\n self.c1FieldName18.setText('Marital Status')\n self.c1FieldName18.move(App.c1fnx, 740)\n\n self.c1TextBox18 = QLineEdit(self)\n self.c1TextBox18.installEventFilter(self)\n self.c1TextBox18.move(App.c1tbx, 740)\n self.c1TextBox18.resize(App.tbx, App.tby)\n\n # Column 2\n\n self.c2FieldName = QLabel(self)\n self.c2FieldName.setText('Date of Birth')\n self.c2FieldName.move(App.c2fnx, 20)\n\n self.c2TextBox = QLineEdit(self)\n self.c2TextBox.installEventFilter(self)\n self.c2TextBox.move(App.c2tbx, 20)\n self.c2TextBox.resize(App.tbx, App.tby)\n self.c2TextBoxs = \"\"\n\n self.c2FieldName1 = QLabel(self)\n self.c2FieldName1.setText('Religion')\n self.c2FieldName1.move(App.c2fnx, 60)\n\n self.c2TextBox1 = QLineEdit(self)\n self.c2TextBox1.installEventFilter(self)\n self.c2TextBox1.move(App.c2tbx, 60)\n self.c2TextBox1.resize(App.tbx, App.tby)\n # self.c2TextBox1s = \"newn\"\n # self.c2TextBox1.setText(self.c2TextBox1s)\n\n self.c2FieldName2 = QLabel(self)\n self.c2FieldName2.setText('Caste')\n self.c2FieldName2.move(App.c2fnx, 100)\n\n self.c2TextBox2 = QLineEdit(self)\n self.c2TextBox2.installEventFilter(self)\n self.c2TextBox2.move(App.c2tbx, 100)\n self.c2TextBox2.resize(App.tbx, App.tby)\n self.c2TextBox2s = \"\"\n\n self.c2FieldName3 = QLabel(self)\n self.c2FieldName3.setText('Address:')\n self.c2FieldName3.move(App.c2fnx - 10, 140)\n self.c2FieldName3.setFont(self.myFont)\n\n # self.c2TextBox3 = QLineEdit(self)\n # self.c2TextBox3.installEventFilter(self)\n # self.c2TextBox3.move(App.c2tbx, 140)\n # self.c2TextBox3.resize(App.tbx, App.tby)\n # self.c2TextBox3s = \"\"\n\n self.c2FieldName4 = QLabel(self)\n self.c2FieldName4.setText('Building')\n self.c2FieldName4.move(App.c2fnx, 
180)\n\n self.c2TextBox4 = QLineEdit(self)\n self.c2TextBox4.installEventFilter(self)\n self.c2TextBox4.move(App.c2tbx, 180)\n self.c2TextBox4.resize(App.tbx, App.tby)\n\n self.c2FieldName5 = QLabel(self)\n self.c2FieldName5.setText('Street No. / Name')\n # self.c2FieldName5.move(App.c2fnx, 220)\n self.c2FieldName5.setGeometry(QtCore.QRect(App.c2fnx, 220, 120, 30)) # (x, y, width, height)\n\n self.c2TextBox5 = QLineEdit(self)\n self.c2TextBox5.installEventFilter(self)\n self.c2TextBox5.move(App.c2tbx, 220)\n self.c2TextBox5.resize(App.tbx, App.tby)\n\n self.c2FieldName6 = QLabel(self)\n self.c2FieldName6.setText('Locality')\n self.c2FieldName6.move(App.c2fnx, 260)\n\n self.c2TextBox6 = QLineEdit(self)\n self.c2TextBox6.installEventFilter(self)\n self.c2TextBox6.move(App.c2tbx, 260)\n self.c2TextBox6.resize(App.tbx, App.tby)\n\n self.c2FieldName7 = QLabel(self)\n self.c2FieldName7.setText('Landmark')\n self.c2FieldName7.move(App.c2fnx, 300)\n\n self.c2TextBox7 = QLineEdit(self)\n self.c2TextBox7.installEventFilter(self)\n self.c2TextBox7.move(App.c2tbx, 300)\n self.c2TextBox7.resize(App.tbx, App.tby)\n\n self.c2FieldName8 = QLabel(self)\n self.c2FieldName8.setText('Village / City')\n self.c2FieldName8.move(App.c2fnx, 340)\n\n self.c2TextBox8 = QLineEdit(self)\n self.c2TextBox8.installEventFilter(self)\n self.c2TextBox8.move(App.c2tbx, 340)\n self.c2TextBox8.resize(App.tbx, App.tby)\n\n self.c2FieldName9 = QLabel(self)\n self.c2FieldName9.setText('District')\n self.c2FieldName9.move(App.c2fnx, 380)\n\n self.c2TextBox9 = QLineEdit(self)\n self.c2TextBox9.installEventFilter(self)\n self.c2TextBox9.move(App.c2tbx, 380)\n self.c2TextBox9.resize(App.tbx, App.tby)\n\n self.c2FieldName10 = QLabel(self)\n self.c2FieldName10.setText('State')\n self.c2FieldName10.move(App.c2fnx, 420)\n\n self.c2TextBox10 = QLineEdit(self)\n self.c2TextBox10.installEventFilter(self)\n self.c2TextBox10.move(App.c2tbx, 420)\n self.c2TextBox10.resize(App.tbx, App.tby)\n \n self.c2FieldName11 = QLabel(self)\n self.c2FieldName11.setText('Pincode')\n self.c2FieldName11.move(App.c2fnx, 460)\n\n self.c2TextBox11 = QLineEdit(self)\n self.c2TextBox11.installEventFilter(self)\n self.c2TextBox11.move(App.c2tbx, 460)\n self.c2TextBox11.resize(App.tbx, App.tby)\n\n self.c2FieldName12 = QLabel(self)\n self.c2FieldName12.setText('Mobile No.')\n self.c2FieldName12.move(App.c2fnx, 500)\n\n self.c2TextBox12 = QLineEdit(self)\n self.c2TextBox12.installEventFilter(self)\n self.c2TextBox12.move(App.c2tbx, 500)\n self.c2TextBox12.resize(App.tbx, App.tby)\n\n self.c2FieldName13 = QLabel(self)\n self.c2FieldName13.setText('Landline No.')\n self.c2FieldName13.move(App.c2fnx, 540)\n\n self.c2TextBox13 = QLineEdit(self)\n self.c2TextBox13.installEventFilter(self)\n self.c2TextBox13.move(App.c2tbx, 540)\n self.c2TextBox13.resize(App.tbx, App.tby)\n\n self.c2FieldName14 = QLabel(self)\n self.c2FieldName14.setText('E-mail ID')\n self.c2FieldName14.move(App.c2fnx, 580)\n\n self.c2TextBox14 = QLineEdit(self)\n self.c2TextBox14.installEventFilter(self)\n self.c2TextBox14.move(App.c2tbx, 580)\n self.c2TextBox14.resize(App.tbx, App.tby)\n\n self.c2FieldName15 = QLabel(self)\n self.c2FieldName15.setText('PAN No.')\n self.c2FieldName15.move(App.c2fnx, 620)\n\n self.c2TextBox15 = QLineEdit(self)\n self.c2TextBox15.installEventFilter(self)\n self.c2TextBox15.move(App.c2tbx, 620)\n self.c2TextBox15.resize(App.tbx, App.tby)\n\n self.c2FieldName16 = QLabel(self)\n self.c2FieldName16.setText('Aadhar No. 
/ EID No.')\n # self.c2FieldName16.move(App.c2fnx, 660)\n self.c2FieldName16.setGeometry(QtCore.QRect(App.c2fnx, 660, 150, 30)) # (x, y, width, height)\n\n self.c2TextBox16 = QLineEdit(self)\n self.c2TextBox16.installEventFilter(self)\n self.c2TextBox16.move(App.c2tbx, 660)\n self.c2TextBox16.resize(App.tbx, App.tby)\n\n self.c2FieldName17 = QLabel(self)\n self.c2FieldName17.setText('Link Aadhar')\n self.c2FieldName17.move(App.c2fnx, 700)\n\n self.c2TextBox17 = QLineEdit(self)\n self.c2TextBox17.installEventFilter(self)\n self.c2TextBox17.move(App.c2tbx, 700)\n self.c2TextBox17.resize(App.tbx, App.tby)\n\n self.c2FieldName18 = QLabel(self)\n self.c2FieldName18.setText('MNREGA Job Card No.')\n self.c2FieldName18.move(App.c2fnx, 740)\n self.c2FieldName18.setGeometry(QtCore.QRect(App.c2fnx, 740, 150, 30)) # (x, y, width, height)\n\n self.c2TextBox18 = QLineEdit(self)\n self.c2TextBox18.installEventFilter(self)\n self.c2TextBox18.move(App.c2tbx, 740)\n self.c2TextBox18.resize(App.tbx, App.tby)\n\n # Column 3\n\n self.c3FieldName = QLabel(self)\n self.c3FieldName.setText('Occupation')\n self.c3FieldName.move(App.c3fnx, 20)\n\n self.c3TextBox = QLineEdit(self)\n self.c3TextBox.installEventFilter(self)\n self.c3TextBox.move(App.c3tbx, 20)\n self.c3TextBox.resize(App.tbx, App.tby)\n self.c3TextBoxs = \"\"\n\n self.c3FieldName1 = QLabel(self)\n self.c3FieldName1.setText('Annual Income')\n self.c3FieldName1.move(App.c3fnx, 60)\n\n self.c3TextBox1 = QLineEdit(self)\n self.c3TextBox1.installEventFilter(self)\n self.c3TextBox1.move(App.c3tbx, 60)\n self.c3TextBox1.resize(App.tbx, App.tby)\n # self.c3TextBox1s = \"newn\"\n # self.c3TextBox1.setText(self.c3TextBox1s)\n\n self.c3FieldName2 = QLabel(self)\n self.c3FieldName2.setText('No. of Dependents')\n self.c3FieldName2.move(App.c3fnx, 100)\n\n self.c3TextBox2 = QLineEdit(self)\n self.c3TextBox2.installEventFilter(self)\n self.c3TextBox2.move(App.c3tbx, 100)\n self.c3TextBox2.resize(App.tbx, App.tby)\n self.c3TextBox2s = \"\"\n\n self.c3FieldName3 = QLabel(self)\n self.c3FieldName3.setText('Owning house')\n self.c3FieldName3.move(App.c3fnx, 140)\n\n self.c3TextBox3 = QLineEdit(self)\n self.c3TextBox3.installEventFilter(self)\n self.c3TextBox3.move(App.c3tbx, 140)\n self.c3TextBox3.resize(App.tbx, App.tby)\n self.c3TextBox3s = \"\"\n\n self.c3FieldName4 = QLabel(self)\n self.c3FieldName4.setText('Owning Farm')\n self.c3FieldName4.move(App.c3fnx, 180)\n\n self.c3TextBox4 = QLineEdit(self)\n self.c3TextBox4.installEventFilter(self)\n self.c3TextBox4.move(App.c3tbx, 180)\n self.c3TextBox4.resize(App.tbx, App.tby)\n\n self.c3FieldName5 = QLabel(self)\n self.c3FieldName5.setText('No. 
of Animals')\n        self.c3FieldName5.move(App.c3fnx, 220)\n\n        self.c3TextBox5 = QLineEdit(self)\n        self.c3TextBox5.installEventFilter(self)\n        self.c3TextBox5.move(App.c3tbx, 220)\n        self.c3TextBox5.resize(App.tbx, App.tby)\n\n        self.c3FieldName6 = QLabel(self)\n        self.c3FieldName6.setText('Existing Bank A/c.')\n        # self.c3FieldName6.move(App.c3fnx, 260)\n        self.c3FieldName6.setGeometry(QtCore.QRect(App.c3fnx, 260, 120, 30))  # (x, y, width, height)\n\n        self.c3TextBox6a = QLineEdit(self)\n        self.c3TextBox6a.installEventFilter(self)\n        self.c3TextBox6a.move(App.c3tbx, 260)\n        self.c3TextBox6a.resize(App.tbx - 135, App.tby)\n\n        self.c3TextBox6b = QLineEdit(self)\n        self.c3TextBox6b.installEventFilter(self)\n        self.c3TextBox6b.move(App.c3tbx + 50, 260)\n        self.c3TextBox6b.resize(App.tbx - 50, App.tby)\n\n        self.c3FieldName7 = QLabel(self)\n        self.c3FieldName7.setText('Kisan CC')\n        # self.c3FieldName7.move(App.c3fnx, 300)\n        self.c3FieldName7.setGeometry(QtCore.QRect(App.c3fnx, 300, 120, 30))  # (x, y, width, height)\n\n        self.c3TextBox7 = QLineEdit(self)\n        self.c3TextBox7.installEventFilter(self)\n        self.c3TextBox7.move(App.c3tbx, 300)\n        self.c3TextBox7.resize(App.tbx, App.tby)\n\n        self.c3FieldName8 = QLabel(self)\n        self.c3FieldName8.setText('RuPay Card')\n        self.c3FieldName8.move(App.c3fnx, 340)\n\n        self.c3TextBox8 = QLineEdit(self)\n        self.c3TextBox8.installEventFilter(self)\n        self.c3TextBox8.move(App.c3tbx, 340)\n        self.c3TextBox8.resize(App.tbx, App.tby)\n\n        self.c3FieldName9 = QLabel(self)\n        self.c3FieldName9.setText(\"Guardian's Name\")\n        # self.c3FieldName9.move(App.c3fnx, 380)\n        self.c3FieldName9.setGeometry(QtCore.QRect(App.c3fnx, 380, 150, 30))  # (x, y, width, height)\n\n        self.c3TextBox9 = QLineEdit(self)\n        self.c3TextBox9.installEventFilter(self)\n        self.c3TextBox9.move(App.c3tbx, 380)\n        self.c3TextBox9.resize(App.tbx, App.tby)\n\n        self.c3FieldName10 = QLabel(self)\n        self.c3FieldName10.setText('DOB Minor')\n        self.c3FieldName10.move(App.c3fnx, 420)\n\n        self.c3TextBox10 = QLineEdit(self)\n        self.c3TextBox10.installEventFilter(self)\n        self.c3TextBox10.move(App.c3tbx, 420)\n        self.c3TextBox10.resize(App.tbx, App.tby)\n        \n        self.c3FieldName11 = QLabel(self)\n        self.c3FieldName11.setText('DOB Guardian')\n        self.c3FieldName11.move(App.c3fnx, 460)\n\n        self.c3TextBox11 = QLineEdit(self)\n        self.c3TextBox11.installEventFilter(self)\n        self.c3TextBox11.move(App.c3tbx, 460)\n        self.c3TextBox11.resize(App.tbx, App.tby)\n\n        self.c3FieldName12 = QLabel(self)\n        self.c3FieldName12.setText('Relationship')\n        self.c3FieldName12.move(App.c3fnx, 500)\n\n        self.c3TextBox12 = QLineEdit(self)\n        self.c3TextBox12.installEventFilter(self)\n        self.c3TextBox12.move(App.c3tbx, 500)\n        self.c3TextBox12.resize(App.tbx, App.tby)\n\n        self.c3FieldName13 = QLabel(self)\n        self.c3FieldName13.setText('Instruction for Operation')\n        # self.c3FieldName13.move(App.c3fnx, 540)\n        self.c3FieldName13.setGeometry(QtCore.QRect(App.c3fnx, 540, 150, 30))  # (x, y, width, height)\n\n        self.c3TextBox13 = QLineEdit(self)\n        self.c3TextBox13.installEventFilter(self)\n        self.c3TextBox13.move(App.c3tbx, 540)\n        self.c3TextBox13.resize(App.tbx, App.tby)\n\n        self.c3FieldName14 = QLabel(self)\n        self.c3FieldName14.setText('Channel Services')\n        # self.c3FieldName14.move(App.c3fnx, 580)\n        self.c3FieldName14.setGeometry(QtCore.QRect(App.c3fnx, 580, 120, 30))  # (x, y, width, height)\n\n        self.c3TextBox14 = QLineEdit(self)\n        self.c3TextBox14.installEventFilter(self)\n        self.c3TextBox14.move(App.c3tbx, 580)\n        self.c3TextBox14.resize(App.tbx, App.tby)\n\n        self.c3FieldName15 = QLabel(self)\n        
self.c3FieldName15.setText('Primary Card')\n # self.c3FieldName15.move(App.c3fnx, 620)\n self.c3FieldName15.setGeometry(QtCore.QRect(App.c3fnx, 620, 150, 30)) # (x, y, width, height)\n\n self.c3TextBox15 = QLineEdit(self)\n self.c3TextBox15.installEventFilter(self)\n self.c3TextBox15.move(App.c3tbx, 620)\n self.c3TextBox15.resize(App.tbx, App.tby)\n\n self.c3FieldName16 = QLabel(self)\n self.c3FieldName16.setText('Chequebook')\n self.c3FieldName16.move(App.c3fnx, 660)\n\n self.c3TextBox16 = QLineEdit(self)\n self.c3TextBox16.installEventFilter(self)\n self.c3TextBox16.move(App.c3tbx, 660)\n self.c3TextBox16.resize(App.tbx, App.tby)\n\n self.c3FieldName17 = QLabel(self)\n self.c3FieldName17.setText('Passbook')\n self.c3FieldName17.move(App.c3fnx, 700)\n\n self.c3TextBox17 = QLineEdit(self)\n self.c3TextBox17.installEventFilter(self)\n self.c3TextBox17.move(App.c3tbx, 700)\n self.c3TextBox17.resize(App.tbx, App.tby)\n\n self.c3FieldName18 = QLabel(self)\n self.c3FieldName18.setText('Statement')\n self.c3FieldName18.move(App.c3fnx, 740)\n\n self.c3TextBox18 = QLineEdit(self)\n self.c3TextBox18.installEventFilter(self)\n self.c3TextBox18.move(App.c3tbx, 740)\n self.c3TextBox18.resize(App.tbx, App.tby)\n\n # Column 4\n\n self.c4FieldName = QLabel(self)\n self.c4FieldName.setText('Want to Nominate')\n # self.c4FieldName.move(App.c4fnx, 20)\n self.c4FieldName.setGeometry(QtCore.QRect(App.c4fnx, 20, 120, 30)) # (x, y, width, height)\n\n self.c4TextBox = QLineEdit(self)\n self.c4TextBox.installEventFilter(self)\n self.c4TextBox.move(App.c4tbx, 20)\n self.c4TextBox.resize(App.tbx, App.tby)\n self.c4TextBoxs = \"\"\n\n self.c4FieldName1 = QLabel(self)\n self.c4FieldName1.setText('Name of the Nominee')\n self.c4FieldName1.move(App.c4fnx, 60)\n self.c4FieldName1.setGeometry(QtCore.QRect(App.c4fnx, 60, 150, 30)) # (x, y, width, height)\n\n self.c4TextBox1 = QLineEdit(self)\n self.c4TextBox1.installEventFilter(self)\n self.c4TextBox1.move(App.c4tbx, 60)\n self.c4TextBox1.resize(App.tbx, App.tby)\n # self.c4TextBox1s = \"newn\"\n # self.c4TextBox1.setText(self.c4TextBox1s)\n\n self.c4FieldName2 = QLabel(self)\n self.c4FieldName2.setText('Relationship')\n self.c4FieldName2.move(App.c4fnx, 100)\n\n self.c4TextBox2 = QLineEdit(self)\n self.c4TextBox2.installEventFilter(self)\n self.c4TextBox2.move(App.c4tbx, 100)\n self.c4TextBox2.resize(App.tbx, App.tby)\n self.c4TextBox2s = \"\"\n\n self.c4FieldName3 = QLabel(self)\n self.c4FieldName3.setText('Age')\n self.c4FieldName3.move(App.c4fnx, 140)\n\n self.c4TextBox3 = QLineEdit(self)\n self.c4TextBox3.installEventFilter(self)\n self.c4TextBox3.move(App.c4tbx, 140)\n self.c4TextBox3.resize(App.tbx, App.tby)\n self.c4TextBox3s = \"\"\n\n self.c4FieldName4 = QLabel(self)\n self.c4FieldName4.setText('DOB Minor')\n self.c4FieldName4.move(App.c4fnx, 180)\n\n self.c4TextBox4 = QLineEdit(self)\n self.c4TextBox4.installEventFilter(self)\n self.c4TextBox4.move(App.c4tbx, 180)\n self.c4TextBox4.resize(App.tbx, App.tby)\n\n self.c4FieldName5 = QLabel(self)\n self.c4FieldName5.setText('Authorised Name')\n # self.c4FieldName5.move(App.c4fnx, 220)\n self.c4FieldName5.setGeometry(QtCore.QRect(App.c4fnx, 220, 120, 30)) # (x, y, width, height)\n\n self.c4TextBox5 = QLineEdit(self)\n self.c4TextBox5.installEventFilter(self)\n self.c4TextBox5.move(App.c4tbx, 220)\n self.c4TextBox5.resize(App.tbx, App.tby)\n\n self.c4FieldName6 = QLabel(self)\n self.c4FieldName6.setText('Tax Purposes')\n # self.c4FieldName6.move(App.c4fnx, 260)\n self.c4FieldName6.setGeometry(QtCore.QRect(App.c4fnx, 
260, 120, 30)) # (x, y, width, height)\n\n self.c4TextBox6 = QLineEdit(self)\n self.c4TextBox6.installEventFilter(self)\n self.c4TextBox6.move(App.c4tbx, 260)\n self.c4TextBox6.resize(App.tbx, App.tby)\n\n self.c4FieldName7 = QLabel(self)\n self.c4FieldName7.setText('Place')\n self.c4FieldName7.move(App.c4fnx, 300)\n\n self.c4TextBox7 = QLineEdit(self)\n self.c4TextBox7.installEventFilter(self)\n self.c4TextBox7.move(App.c4tbx, 300)\n self.c4TextBox7.resize(App.tbx, App.tby)\n\n self.c4FieldName8 = QLabel(self)\n self.c4FieldName8.setText('Date')\n self.c4FieldName8.move(App.c4fnx, 340)\n\n self.c4TextBox8 = QLineEdit(self)\n self.c4TextBox8.installEventFilter(self)\n self.c4TextBox8.move(App.c4tbx, 340)\n self.c4TextBox8.resize(App.tbx, App.tby)\n\n self.c4FieldName9 = QLabel(self)\n self.c4FieldName9.setText('Acting Branch head')\n # self.c4FieldName9.move(App.c4fnx, 380)\n self.c4FieldName9.setGeometry(QtCore.QRect(App.c4fnx, 380, 120, 30)) # (x, y, width, height)\n\n self.c4TextBox9 = QLineEdit(self)\n self.c4TextBox9.installEventFilter(self)\n self.c4TextBox9.move(App.c4tbx, 380)\n self.c4TextBox9.resize(App.tbx, App.tby)\n\n self.c4FieldName10 = QLabel(self)\n self.c4FieldName10.setText('Risk Level Code')\n # self.c4FieldName10.move(App.c4fnx, 420)\n self.c4FieldName10.setGeometry(QtCore.QRect(App.c4fnx, 420, 150, 30)) # (x, y, width, height)\n\n self.c4TextBox10 = QLineEdit(self)\n self.c4TextBox10.installEventFilter(self)\n self.c4TextBox10.move(App.c4tbx, 420)\n self.c4TextBox10.resize(App.tbx, App.tby)\n \n self.c4FieldName11 = QLabel(self)\n self.c4FieldName11.setText('Name of BC / BF')\n # self.c4FieldName11.move(App.c4fnx, 460)\n self.c4FieldName11.setGeometry(QtCore.QRect(App.c4fnx, 460, 150, 30)) # (x, y, width, height)\n\n self.c4TextBox11 = QLineEdit(self)\n self.c4TextBox11.installEventFilter(self)\n self.c4TextBox11.move(App.c4tbx, 460)\n self.c4TextBox11.resize(App.tbx, App.tby)\n\n self.c4FieldName12 = QLabel(self)\n self.c4FieldName12.setText('No. 
of BC / BF')\n # self.c4FieldName12.move(App.c4fnx, 500)\n self.c4FieldName12.setGeometry(QtCore.QRect(App.c4fnx, 500, 130, 30)) # (x, y, width, height)\n\n self.c4TextBox12 = QLineEdit(self)\n self.c4TextBox12.installEventFilter(self)\n self.c4TextBox12.move(App.c4tbx, 500)\n self.c4TextBox12.resize(App.tbx, App.tby)\n\n self.c4FieldName13 = QLabel(self)\n self.c4FieldName13.setText('Name of Official')\n self.c4FieldName13.move(App.c4fnx, 540)\n\n self.c4TextBox13 = QLineEdit(self)\n self.c4TextBox13.installEventFilter(self)\n self.c4TextBox13.move(App.c4tbx, 540)\n self.c4TextBox13.resize(App.tbx, App.tby)\n\n self.c4FieldName14 = QLabel(self)\n self.c4FieldName14.setText('Employee Code')\n # self.c4FieldName14.move(App.c4fnx, 580)\n self.c4FieldName14.setGeometry(QtCore.QRect(App.c4fnx, 580, 120, 30)) # (x, y, width, height)\n\n self.c4TextBox14 = QLineEdit(self)\n self.c4TextBox14.installEventFilter(self)\n self.c4TextBox14.move(App.c4tbx, 580)\n self.c4TextBox14.resize(App.tbx, App.tby)\n\n self.c4FieldName15 = QLabel(self)\n self.c4FieldName15.setText('Name of Branch')\n # self.c4FieldName15.move(App.c4fnx, 620)\n self.c4FieldName15.setGeometry(QtCore.QRect(App.c4fnx, 620, 150, 30)) # (x, y, width, height)\n\n self.c4TextBox15 = QLineEdit(self)\n self.c4TextBox15.installEventFilter(self)\n self.c4TextBox15.move(App.c4tbx, 620)\n self.c4TextBox15.resize(App.tbx, App.tby)\n\n self.c4FieldName16 = QLabel(self)\n self.c4FieldName16.setText('Code of Branch')\n # self.c4FieldName16.move(App.c4fnx, 660)\n self.c4FieldName16.setGeometry(QtCore.QRect(App.c4fnx, 660, 150, 30)) # (x, y, width, height)\n\n self.c4TextBox16 = QLineEdit(self)\n self.c4TextBox16.installEventFilter(self)\n self.c4TextBox16.move(App.c4tbx, 660)\n self.c4TextBox16.resize(App.tbx, App.tby)\n\n self.c4FieldName17 = QLabel(self)\n self.c4FieldName17.setText('Customer ID')\n self.c4FieldName17.move(App.c4fnx, 700)\n\n self.c4TextBox17 = QLineEdit(self)\n self.c4TextBox17.installEventFilter(self)\n self.c4TextBox17.move(App.c4tbx, 700)\n self.c4TextBox17.resize(App.tbx, App.tby)\n\n self.c4FieldName18 = QLabel(self)\n self.c4FieldName18.setText('Account No.')\n self.c4FieldName18.move(App.c4fnx, 740)\n\n self.c4TextBox18 = QLineEdit(self)\n self.c4TextBox18.installEventFilter(self)\n self.c4TextBox18.move(App.c4tbx, 740)\n self.c4TextBox18.resize(App.tbx, App.tby)\n\n # # self.c1TextBox3.mousePressEvent(self.press)\n # # Create a button in the window\n # self.button = QPushButton('Show text', self)\n # self.button.move(500, 800)\n # # self.button.move(App.c1fnx, 20)\n # # connect button to function on_click\n # self.button.clicked.connect(self.on_click)\n\n self.show()\n\n def eventFilter(self, obj, event):\n global stri\n if event.type() == QEvent.FocusIn:\n self.focused_box = obj\n # print(obj.text())\n stri = obj.text()\n if event.type() == QEvent.FocusOut:\n stri = \"\"\n print(stri)\n\n return super(App, self).eventFilter(obj, event)\n\n @pyqtSlot()\n def on_click(self):\n # c1TextBoxValue = self.c1TextBox.text()\n # QMessageBox.question(self, 'Message - pythonspot.com', \"You typed: \" + c1TextBoxValue, QMessageBox.Ok, QMessageBox.Ok)\n # self.c1TextBox.setText(\"\")\n # self.press()\n global stri\n ex.focused_box.setText(ex.focused_box.text() + stri)\n ex.focused_box.repaint()\n stri = \"\"\n # ex.c1PushText.setText(stri)\n # ex.c1PushText.repaint()\n\n def on_clear(self):\n global stri\n stri = \"\"\n # ex.c1PushText.setText(stri)\n # ex.c1PushText.repaint()\n ex.focused_box.setText(stri)\n 
ex.focused_box.repaint()\n\n    def tess(self):\n        print(\"Test Passed\")\n\n\n\n# def on_cle(self):\n    \n#     stri = \"\"\n#     ex.c1PushText.setText(stri)\n\n\n\n\n\napp = QApplication(sys.argv)\nex = App()\n# ex.focused_box = None\n# ex.focused_box = ex.c1TextBox\nex.focused_box = ex.c1TextBox3\n# ex.c1PushText = ''\n# stri = ex.c1TextBoxs\n\nThread(target=main).start()\nsys.exit(app.exec_())","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":38777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"276587193","text":"\n\nfrom xai.brain.wordbase.nouns._peninsula import _PENINSULA\n\n# class header\nclass _PENINSULAS(_PENINSULA, ):\n\tdef __init__(self,): \n\t\t_PENINSULA.__init__(self)\n\t\tself.name = \"PENINSULAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"peninsula\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_peninsulas.py","file_name":"_peninsulas.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"128478046","text":"\"\"\"\n    MERGE SORT\n\"\"\"\n\ndef merge(a, start, middle, end):\n    # temporary arrays to copy the elements of each subarray\n    size_of_temp1 = (middle-start)+1\n    size_of_temp2 = (end-middle)\n\n    temp1 = [0]*size_of_temp1\n    temp2 = [0]*size_of_temp2\n\n    for i in range(0, size_of_temp1):\n        temp1[i] = a[start+i]\n\n    for i in range(0, size_of_temp2):\n        temp2[i] = a[middle+1+i]\n\n    i = 0\n    j = 0\n    k = start\n\n    while (i < size_of_temp1 and j < size_of_temp2):\n        if (temp1[i] < temp2[j]):\n            # filling the main array with the smaller element\n            a[k] = temp1[i]\n            i = i+1\n        else:\n            # filling the main array with the smaller element\n            a[k] = temp2[j]\n            j = j+1\n        k = k+1\n\n    # copying leftovers\n    while (i < size_of_temp1):\n        a[k] = temp1[i]\n        i = i+1\n        k = k+1\n\n    while (j < size_of_temp2):\n        a[k] = temp2[j]\n        j = j+1\n        k = k+1\n\n\nimport random\nimport itertools\n\nfrom tqdm import tqdm\n\n# Note, Scale and Chord are assumed to come from this script's music-theory\n# package, and cnxn / cursor / get_id() from its database setup.\n\ndef compare(a, opp, b):\n    if opp == '>':\n        case1 = (a.letter.idx > b.letter.idx) and (a.octave == b.octave)\n        case2 = a.octave > b.octave\n        return case1 or case2\n    elif opp == '<':\n        case1 = (a.letter.idx < b.letter.idx) and (a.octave == b.octave)\n        case2 = a.octave < b.octave\n        return case1 or case2\n\npossible_keys = ['A', 'C', 'D', 'F', 'G', 'Bb', 'Eb']\npossible_tonalities = ['major', \n                       'natural_minor',\n                       'harmonic_minor',\n                       'major_pentatonic']\n\nkey = Note(random.choice(possible_keys))\ntonality = random.choice(possible_tonalities)\n\ntune_chars = list(itertools.product(possible_keys, possible_tonalities))\n\nmelodies = []\ngood_melodies = []\n\nfor k, t in tune_chars:\n    k_note = Note(k + '3')\n    possible_notes = [*Scale(k_note, t)][:14]\n    for melody in list(itertools.permutations(possible_notes, 4)):\n        melodies.append((melody, k, t))\n\nlowest = Note('F4')\nhighest = Note('A#5')\nfor melody, k, t in tqdm(melodies):\n    good = True\n    for note in melody:\n        if compare(note, '<', lowest):\n            good = False\n        elif compare(note, '>', highest):\n            good = False\n        elif '##' in str(note) or 'bb' in str(note):\n            good = False\n        else:\n            pass\n\n    if good and len(melody) == 4:\n        good_melodies.append((melody, k, t))\n\n# print('Started with %i melodies, ended with %i.' % (len(melodies), len(good_melodies)))
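\n\n\n# Each harmony pattern is a tuple of (beat, duration) pairs that together\n# fill the four-beat bar, e.g. ((1, 2), (3, 2)) is two half-note chords.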
% (len(melodies), len(good_melodies)))\n\n\npossible_harmony_beats = [((1, 1), (2, 1), (3, 1), (4, 1)),\n                          ((1, 2), (3, 1), (4, 1)),\n                          ((1, 1), (2, 2), (4, 1)),\n                          ((1, 2), (3, 2))]\n\npossible_harmony_qualities = ['maj', 'min']\n\nlowest = Note('B3')\nhighest = Note('A4')\n\ntunes = []\nfor melody, k, t in good_melodies:\n    tune = {'key': k,\n            'tonality': t,\n            'melody': melody}\n\n    harmony_beats = random.choice(possible_harmony_beats)\n    beats = {}\n    for beat, dur in harmony_beats:\n        harmony_quality = random.choice(possible_harmony_qualities)\n        root = melody[beat - 1]\n        chord = Chord(root, harmony_quality)\n        notes = random.sample(chord.notes, len(chord.notes))\n        \n        key_notes = Scale(k, t).notes  # spell chord tones in this tune's own tonality\n        \n        for note in notes:\n            for kn in key_notes:\n                if note.letter == kn.letter:\n                    note.accidental = kn.accidental\n\n        good_notes = []\n        for note in notes:\n            note.octave = 4\n\n            if compare(note, '>', lowest) and compare(note, '<', highest):\n                if '##' in str(note) or 'bb' in str(note):\n                    pass\n                else:\n                    good_notes.append(note)\n\n        if len(good_notes) >= 2:\n            beats.update({beat:(dur, harmony_quality, good_notes)})\n            break\n\n    tune.update({'beats': beats})\n    tunes.append(tune)\n\nsample = random.sample(tunes, 2000)\nexceptions = []\n\nstart_id = 2\ntune_id = start_id\nfor tune in tqdm(sample):\n    try:\n        tune_vals = (tune_id, tune['key'], tune['tonality'])\n        tune_sql = 'INSERT INTO tunes(tune_id, tune_key, tonality) VALUES (%i, \"%s\", \"%s\")' % tune_vals\n        \n        cursor.execute(tune_sql)\n        \n        for mel_beat, mel_note in enumerate(tune['melody']):\n            note_id = get_id(mel_note)\n            \n            melody_vals = (tune_id, note_id, mel_beat + 1)\n            melody_sql = ('INSERT INTO melody_notes(tune_id, note_id, '\n                          'beat_number, duration) '\n                          'VALUES (%i, %i, %i, 1);' % melody_vals)\n            \n            cursor.execute(melody_sql)\n        \n        for harm_beat, (harm_dur, harm_qual, harm_notes) in tune['beats'].items():\n            for harm_note in harm_notes:\n                \n                \n                note_id = get_id(harm_note)\n                harmony_vals = (tune_id, note_id, harm_beat, harm_dur)\n                harmony_sql = ('INSERT INTO harmony_notes(tune_id, note_id, '\n                               'beat_number, duration) '\n                               'VALUES (%i, %i, %i, %i);' % harmony_vals)\n                \n                cursor.execute(harmony_sql)\n            \n            harm_details_sql = ('INSERT INTO harmony_details(tune_id, '\n                                'beat_number, tonality) '\n                                'VALUES (%i, %i, \"%s\")' % (tune_id, harm_beat, harm_qual))\n            \n            cursor.execute(harm_details_sql)\n    except Exception as err:\n        tune_sql = 'INSERT INTO tune_fails(tune_id, error_message) VALUES (%i, \"%s\")' % (tune_id, err)\n        cursor.execute(tune_sql)  # log the failed tune so it can be inspected later\n        \n    tune_id += 1\n    \ncnxn.commit()\ncnxn.close()\n","sub_path":"python/create_tunes.py","file_name":"create_tunes.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}\n{"seq_id":"382544936","text":"# coding: utf-8\nimport filecmp\nimport os\n\nimport re\n\nfrom corehq.apps.userreports.management.commands.build_report_builder_v2_diffs import (\n    get_diff_filename,\n    get_diff,\n    VERSIONED_DIRS,\n)\n\nfrom django.test import SimpleTestCase\n\nRELATIVE_DIFF_STORAGE_TEMPLATES = 'data/report_builder_v2_diffs'\n\n\nCREATION_FAILURE_MSG = \"\"\"\n\n\n****************************************\n\nAre you editing REPORT BUILDER V2?\n\nYou probably forgot to run ./manage.py build_report_builder_v2_diffs\n\n****************************************\n\n\n\n\"\"\"\n\nDIFF_FAILURE_MSG = \"\"\"\n\n\n*************************************\n\nREPORT BUILDER V2 Diff Failure\n\nAn edit made to a V1 Report Builder file \"{}\" does not match the stored diff\nof its V2 
counterpart.\n\n**Please make the edits to the V2 file so that it gets the changes from V1.**\n\nOnce you have done this, run ./manage.py build_report_builder_v2_diffs\nto rebuild the broken diffs.\n\nThese files are located in\n{}\n{}\n\n**************************************\n\n\n\n\"\"\"\n\n\nclass TestReportBuilderV2Diffs(SimpleTestCase):\n\n def setUp(self):\n this_dir = os.path.dirname(os.path.realpath(__file__))\n self.diff_dir = os.path.join(this_dir, RELATIVE_DIFF_STORAGE_TEMPLATES)\n self.base_dir = os.path.join(this_dir, \"..\")\n self.common_files = []\n for dir in VERSIONED_DIRS:\n v1_dir = os.path.join(self.base_dir, dir)\n v2_dir = os.path.join(self.base_dir, re.sub(\"v1/$\", \"\", dir))\n self.common_files.extend([\n (v1_dir, v2_dir, f) for f in filecmp.dircmp(v1_dir, v2_dir).common_files\n if not f.endswith(\".pyc\")\n ])\n\n def test_diffs_exist(self):\n for v1_dir, v2_dir, f in self.common_files:\n self.assertTrue(\n os.path.exists(os.path.join(\n self.diff_dir,\n get_diff_filename(v2_dir.replace(self.base_dir, \"\"), f)\n )), CREATION_FAILURE_MSG\n )\n\n def test_diffs(self):\n\n for v1_dir, v2_dir, f in self.common_files:\n v2_dir_relative = v2_dir.replace(self.base_dir, \"\")\n diff_filename = os.path.join(\n self.diff_dir,\n get_diff_filename(v2_dir_relative, f)\n )\n filename_v1 = os.path.join(v1_dir, f)\n filename_v2 = os.path.join(v2_dir, f)\n\n try:\n with open(diff_filename, 'r') as diff_file:\n existing_diff_lines = diff_file.readlines()\n current_diff = get_diff(filename_v1, filename_v2)\n self.assertEqual(\n \"\".join(existing_diff_lines),\n \"\".join(current_diff),\n DIFF_FAILURE_MSG.format(\n f, filename_v1, filename_v2\n )\n )\n except IOError:\n raise Exception(\n \"Issue opening diff file. \"\n \"You may need to manually create it using ./manage.py build_report_builder_v2_diffs.\\n\"\n \"File path is {}\".format(diff_filename)\n )\n","sub_path":"corehq/apps/userreports/tests/test_report_builder_v2_diffs.py","file_name":"test_report_builder_v2_diffs.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"286946745","text":"# Copyright (c) 2013, System Engineering Software Society\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the System Engineering Software Society nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED.\n# IN NO EVENT SHALL SYSTEM ENGINEERING SOFTWARE SOCIETY BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 
AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Sympathy record type.\"\"\"\nfrom itertools import izip\n\nfrom . import sybase\nfrom . import exception as exc\n\n\ndef get_field(fields, field):\n    \"\"\"Returns fields[field], raises AttributeError on KeyError.\"\"\"\n    try:\n        return fields[field]\n    except KeyError:\n        raise AttributeError(\n            \"'syrecord' object has no attribute '{0}'\".format(field))\n\n\ndef get_fields(self):\n    \"\"\"\n    Returns the fields element or an empty dictionary when the attribute does\n    not exist.\n    Used internally, for __getattr__ and __setattr__ only.\n    \"\"\"\n    try:\n        fields = self.__getattribute__('_fields')\n    except AttributeError:\n        fields = {}\n    return fields\n\n\nclass syrecord(sybase.sygroup):\n    \"\"\"A type representing a record.\"\"\"\n    def __init__(self, container_type, datasource=sybase.NULL_SOURCE):\n        \"\"\"Init.\"\"\"\n        keys = container_type.keys()\n        super(syrecord, self).__init__(container_type,\n                                       datasource or sybase.NULL_SOURCE)\n        self.content_types = {}\n        self._content_types = {}\n        for key in keys:\n            content_type = container_type[key]\n            self.content_types[key] = content_type\n            try:\n                while True:\n                    content_type = content_type.get()\n            except AttributeError:\n                self._content_types[key] = content_type\n\n        self._fields = keys\n        self._cache = dict.fromkeys(keys)\n\n    def __getattr__(self, field):\n        \"\"\"Get attribute.\"\"\"\n        if field not in get_fields(self):\n            return super(syrecord, self).__getattribute__(field)\n\n        try:\n            value = self._cache[field]\n            if value is None:\n                # Value is not cached, read it from datasource.\n                content_type = self.content_types[field]\n                try:\n                    # Read from datasource.\n                    source = self._datasource.read_with_type(\n                        field, self._content_types[field])\n                except KeyError:\n                    # Create content without datasource.\n                    value = self._factory.from_type(content_type)\n                else:\n                    # Create content from datasource.\n                    source = source or sybase.NullSource\n                    value = self._factory.from_datasource(\n                        source,\n                        content_type)\n                self._cache[field] = value\n            return value\n        except KeyError:\n            raise AttributeError()\n\n    def __setattr__(self, field, value):\n        \"\"\"Set attribute.\"\"\"\n        if field in get_fields(self):\n            content_type = get_field(self.content_types, field)\n            sybase.assert_type(\n                self, value.container_type, content_type)\n            self._cache[field] = value\n        else:\n            super(syrecord, self).__setattr__(field, value)\n\n    def __repr__(self):\n        return str(self._cache)\n\n    def keys(self):\n        \"\"\"Returns a list of all record keys.\"\"\"\n        return list(self._fields)\n\n    def values(self):\n        \"\"\"Returns a list of all values.\"\"\"\n        return [getattr(self, field) for field in self._fields]\n\n    def items(self):\n        \"\"\"Return generator iterator over key, value pairs.\"\"\"\n        return izip(self.keys(), self.values())\n\n    def update(self, other):\n        \"\"\"\n        Updates current record with items from 'other record', replacing values\n        with the same key; update requires the item types to match.\n        \"\"\"\n        for other_key, other_value in other.items():\n            setattr(self, other_key, other_value)\n\n    def source(self, other):\n        self.update(other.__deepcopy__())\n\n    def __copy__(self):\n        obj = super(syrecord, self).__copy__()\n        obj.content_types = self.content_types\n        obj._content_types = self._content_types\n        obj._fields = self._fields\n        obj._cache = dict(self._cache)\n        return obj\n\n    def 
__deepcopy__(self, memo=None):\n        obj = self.__copy__()\n        obj._cache = {k: None if v is None else v.__deepcopy__()\n                      for k, v in self._cache.iteritems()}\n        return obj\n\n    def visit(self, group_visitor):\n        \"\"\"Accept group visitor.\"\"\"\n        group_visitor.visit_record(self)\n\n    def writeback(self):\n        super(syrecord, self).writeback()\n\n    def _writeback(self, datasource, link=None):\n        origin = self._datasource\n        target = datasource\n        exc.assert_exc(target.can_write, exc=exc.WritebackReadOnlyError)\n        shares_origin = target.shares_origin(origin)\n\n        if link:\n            return False\n\n        for key, value in (self._cache.items() if shares_origin\n                           else zip(self._fields, self.values())):\n            if value is not None:\n                if not value._writeback(target, key):\n                    new_target = target.write_with_type(\n                        key, value, self._content_types[key])\n                    value._writeback(new_target)\n\n        return True\n","sub_path":"Sympathy_For_Data/sympathy/types/syrecord.py","file_name":"syrecord.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}\n{"seq_id":"609078725","text":"\"\"\"Integration tests for ConversionServerClient\"\"\"\n\nimport os\nimport unittest\nfrom mediafire.media.conversion_server_client import (ConversionServerClient,\n                                                     ConversionServerError)\n\n# Getting Started With MediaFire.pdf\nDOCUMENT_QUICKKEY = 'm1qlyt9ywc5qdwt'\nDOCUMENT_HASH = '810bbfecf8f2a087ef94d52dbf6eaa2d3153e2bb'\n\n# Sample - Butterfly.jpg\nIMAGE_QUICKKEY = '1zzovvyw7x24ws9'\nIMAGE_HASH = '38cd11c3663e0714a2e879bd4b924a253336b8c0'\n\n\n@unittest.skipIf('CI' not in os.environ, \"Running outside CI environment\")\nclass ConversionServerClientSmokeTest(unittest.TestCase):\n    \"\"\"Basic Conversion Server Smoke Test\"\"\"\n\n    def setUp(self):\n        self.conv = ConversionServerClient()\n\n    def test_notype_error(self):\n        \"\"\"Test that missing type causes bad request error\"\"\"\n        with self.assertRaises(Exception):\n            self.conv.request(IMAGE_HASH, IMAGE_QUICKKEY, None)\n\n    def test_image_resize(self):\n        \"\"\"Test that image resize works\"\"\"\n        result = self.conv.request(IMAGE_HASH, IMAGE_QUICKKEY,\n                                   'i', size_id='1')\n        self.assertEqual(result.headers['content-type'], 'image/jpeg')\n\n    def test_image_nosize_error(self):\n        \"\"\"Test that supplying no size causes error\"\"\"\n        with self.assertRaises(ConversionServerError):\n            self.conv.request(IMAGE_HASH, IMAGE_QUICKKEY, 'i')\n\n    # No document tests since the API seems to be broken ATM\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/media/test_compression_server_smoke.py","file_name":"test_compression_server_smoke.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}\n{"seq_id":"496228028","text":"import threading\nimport time\n\ndef run():\n    rlock.acquire()\n    print('123')\n    time.sleep(1)\n    rlock.release()\n\nif __name__ == '__main__':\n    rlock = threading.RLock()\n    for i in range(20):\n        t = threading.Timer(5.0, run)\n        t.start()\n        t.join()\n    print('Over')","sub_path":"python-面向对象/线程进程/信号_Timer.py","file_name":"信号_Timer.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}\n{"seq_id":"390245230","text":"import re\nimport warnings\nfrom collections import OrderedDict\n\n# Regular expression definitions\n# Block elements\nblock_rules = OrderedDict()\nblock_rules['ulLists'] = re.compile(r\"\"\"\n    \\s*[\\*\\+\\-]\\s+\n\"\"\", re.VERBOSE)\nblock_rules['olLists'] = re.compile(r\"\"\"\n    
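# an ordered-list marker: optional leading indent, one or more digits,\n    # then a literal dot followed by whitespace (re.VERBOSE allows comments here)\n    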
\\s*[0-9]+\\.\\s+\n\"\"\", re.VERBOSE)\nblock_rules['CodeBlock'] = re.compile(r\"\"\"\n    \\s{4,}\\w*\n\"\"\", re.VERBOSE)\nblock_rules['Table'] = re.compile(r'''\n    ^(.*) \n    \\| #symbol that tells this block is a table \n    (.*)\n''', re.VERBOSE)\nblock_rules['TaggedBlock'] = re.compile(r\"\"\"\n    \\s*(\\< \\s* p .*? \\>) #start tag\n    (.*)\n\"\"\", re.VERBOSE)\nblock_rules['TaggedBlockEnd'] = re.compile(r\"\"\"\n    (.*)\n    (\\< \\s* \\/ \\s* p \\s* \\>) #end tag\n\"\"\", re.VERBOSE)\n\n\n\n# Inline elements\ninline_rules = OrderedDict()\ninline_rules['LineBreak'] = re.compile(r'\\s{2,}$')\ninline_rules['BoldFont'] = re.compile(r\"\"\"\n    (\\*\\*|\\_\\_)(.*?)\\1\n\"\"\", re.VERBOSE)\ninline_rules['EmphasizedFont'] = re.compile(r\"\"\"\n    (\\*|\\_)(.*?)\\1\n\"\"\", re.VERBOSE)\ninline_rules['DeletedFont'] = re.compile(r\"\"\"\n    \\~\\~(.*?)\\~\\~\n\"\"\", re.VERBOSE)\ninline_rules['InlineCode'] = re.compile(r\"\"\"\n    (`{1,})(.*?)\\1\n\"\"\", re.VERBOSE)\ninline_rules['Links'] = re.compile(r\"\"\"\n    (\\[)\n    ([^\\[]+)\n    (\\])\n    ([\\[\\(])\n    (.*?)\n    ([\\]\\)])\n\"\"\", re.VERBOSE)\ninline_rules['Images'] = re.compile(r\"\"\"\n    (!\\[)\n    ([^\\[]+)\n    (\\])\n    ([\\[\\(])\n    (.*?)\n    ([\\]\\)])\n\"\"\", re.VERBOSE)\n\n# Other standalone rules\nblank_line = re.compile(r\"\"\"\n    \\s*$\n\"\"\", re.VERBOSE)\nheader_line_h1 = re.compile(r\"\"\"\n    \\={3,}\n\"\"\", re.VERBOSE)\nheader_line_h2 = re.compile(r\"\"\"\n    \\-{3,}\n\"\"\", re.VERBOSE)\ntagged_line = re.compile(r\"\"\"\n    \\s*(\\< \\s* p .*? \\>) # start tag\n    (.*)\n    (\\< \\s* \\/ \\s* p \\s* \\>) # end tag\n\"\"\", re.VERBOSE)\nheader_block = re.compile(r\"\"\"\n    \\s*\n    (\\#+) #header symbol\n    (.*)\n\"\"\", re.VERBOSE)\nhorizontal_rule = re.compile(r\"\"\"\n    ^((\\-\\s?){3,}|\n    (\\*\\s?){3,}|\n    (\\_\\s?){3,})\n    \\s*$\n\"\"\", re.VERBOSE)\nblock_quote = re.compile(r\"\"\"\n    (>|\\>) #symbol that tells this line is a start of blockquote\n    (.*)\n\"\"\", re.VERBOSE)\ndefinition_block = re.compile(r\"\"\"\n    (\\[)\n    ([^\\[]+)\n    (\\])\n    \\:\n    (.*)\n\"\"\", re.VERBOSE)\n\n\nclass MarkdownParser:\n    def __init__(self):\n        self.rootobject = root([])\n        return\n\n    def parseFile(self, filepath):\n        \"\"\"\n        This function will read a markdown file and parse it.\n        Only a file encoded with UTF-8 is applicable.\n        \"\"\"\n        # Split the file into lines and store them as a list\n        print(\"reading file...\")\n        with open(filepath, 'rt', encoding='utf-8') as f:\n            data = f.read()\n        splitted_data = []\n        for line in data.split('\\n'):\n            splitted_data.append(line)\n        # Append a blank line at the end; the parse() logic relies on it.\n        splitted_data.append('')\n        self.rootobject.rawdata = splitted_data\n        self.rootobject.parse()\n\n    def parseText(self, textdata):\n        data = textdata\n        splitted_data = []\n        for line in data.split('\\n'):\n            splitted_data.append(line)\n        # Append a blank line at the end; the parse() logic relies on it.\n        splitted_data.append('')\n        self.rootobject.rawdata = splitted_data\n        self.rootobject.parse()\n    \n    def exportHTML(self, filename=None):\n        print(\"exporting...\")\n        expanded_data = self.rootobject.expandToHTML()\n        print(expanded_data)\n        if filename is None:\n            filename = 'export.html'\n        with open(filename, 'wt', encoding='utf-8') as f:\n            f.write(expanded_data)\n    \n\n# Dictionary that stores the id information of URL links\nlink_id_info = {}\n\nclass blockObject:\n    def __init__(self, listed_data):\n        self.rawdata = listed_data\n        self.parsed_data = []\n        self.inline_reg = [\n            {'rule':inline_rules['LineBreak']      , 'class':lineBreak      },\n            {'rule':inline_rules['BoldFont']       , 'class':boldFont       },\n            {'rule':inline_rules['EmphasizedFont'] , 'class':emphasizedFont },\n            {'rule':inline_rules['DeletedFont']    , 'class':deletedFont    },\n            {'rule':inline_rules['InlineCode']     , 'class':inlineCode     },\n            
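# 'Images' is checked before 'Links' on purpose: the link pattern would\n            # otherwise also match the [alt](src) portion of an image tag\n            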
{'rule':inline_rules['Images']         , 'class':images         },\n            {'rule':inline_rules['Links']          , 'class':links          },\n        ]\n        self.reset()\n    \n    def reset(self):\n        self.index = 0\n        self.text_buffer = []\n        self.start_tag = ''\n        self.end_tag = ''\n\n    def parse(self):\n        if len(self.rawdata) == 0:\n            return\n\n        previous_line_type = 'Blank'\n        while self.index < len(self.rawdata):\n            print(\"parsing line: {0}, previous line type: {1}\".format(self.index, previous_line_type))\n            if previous_line_type == 'Blank':\n                self.text_buffer = []\n                previous_line_type = self.parseFirstTime(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'Normal':\n                previous_line_type = self.parseNormalBlock(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'Table':\n                previous_line_type = self.parseTableBlock(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'BlockQuote':\n                previous_line_type = self.parseBlockQuote(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'TaggedBlock':\n                previous_line_type = self.parseTaggedBlock(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'CodeBlock':\n                previous_line_type = self.parseCodeBlock(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'ulLists':\n                previous_line_type = self.parseUlLists(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            if previous_line_type == 'olLists':\n                previous_line_type = self.parseOlLists(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n            # When the line cannot be handled as a block element, treat it as normal text.\n            if previous_line_type == -1:\n                previous_line_type = self.parseNormalBlock(self.rawdata[self.index])\n                self.index += 1\n                continue\n\n        \n        # Handle anything still left in text_buffer\n        if len(self.text_buffer) > 0:\n            if previous_line_type == 'Normal':\n                self.parseNormalBlock('')\n            if previous_line_type == 'Table':\n                self.parseTableBlock('')\n            if previous_line_type == 'BlockQuote':\n                self.parseBlockQuote('')\n            if previous_line_type == 'CodeBlock':\n                self.parseCodeBlock('')\n            if previous_line_type == 'ulLists':\n                # Pass a string that is neither blank nor a list marker\n                self.parseUlLists('a')\n            if previous_line_type == 'olLists':\n                self.parseOlLists('a')\n        del self.rawdata\n        return\n\n    #####################################################\n    # Below are the parse functions for block elements. #\n    # They return the type of the parsed line           #\n    # (with some exceptions).                           #\n    # After creating an object inside one of these      #\n    # functions, be sure to call its parse() method.    #\n    #####################################################\n\n    def parseFirstTime(self, text):\n        \n        # Check whether the line matches one of the block element rules.\n        if tagged_line.match(text):\n            print(\"creating an object : {}\".format(\"Tagged block\"))\n            instance = taggedBlock([text])\n            instance.parse()\n            self.parsed_data.append(instance)\n            return 'Blank'\n        for rule in block_rules.keys():\n            if block_rules[rule].match(text):\n                self.text_buffer.append(text)\n                # Save the current line type for processing the next line\n                return rule\n        if block_quote.match(text):\n            # Replace the leftmost '>' and trim the leading whitespace.\n            stripped_text = re.sub(r'\\s*(\\>\\s|\\>)', '', text, 1)\n            self.text_buffer.append(stripped_text)\n            return 'BlockQuote'\n        if header_block.match(text):\n            print(\"creating an object : {}\".format(\"header\"))\n            instance = headers(text)\n            instance.parse()\n            self.parsed_data.append(instance)\n            return 'Blank'\n        if horizontal_rule.match(text):\n            self.parsed_data.append(horizontalRule([]))\n            return 'Blank'\n        if definition_block.match(text):\n            self.parseDefinitionBlock(text)\n            return 'Blank'\n        # When none of the block element rules matched\n        if not blank_line.match(text):\n            self.text_buffer.append(text)\n            return 'Normal'\n        else:\n            return 
'Blank'\n    \n\n    def parseInlineElements(self, text):\n        \n        parsed_text = []\n        for dit in self.inline_reg:\n            if dit['rule'].search(text):\n                element = dit['rule'].search(text).group()\n                print(\"creating an object : {}\".format(\"Inline object\"))\n                instance = dit['class'](element)\n                instance.shapeData()\n                # (the shaping of the element is implemented in each class)\n                devided_text = re.sub(dit['rule'], '_splitter_', text, 1).split('_splitter_')\n                #before_element and after_element should be list.\n                before_element = self.parseInlineElements(devided_text[0])\n                after_element = self.parseInlineElements(devided_text[1])\n                for item in before_element:\n                    if item != '':\n                        parsed_text.append(item)\n                parsed_text.append(instance)\n                for item in after_element:\n                    if item != '':\n                        parsed_text.append(item)\n                return parsed_text\n        #if all inline rules didn't match with text\n        return [text]\n\n    def parseNormalBlock(self, text):\n        # If the next line is '===' or the like, the previous element becomes an h1 header\n        if header_line_h1.match(text):\n            print(\"creating an object : {}\".format(\"header\"))\n            instance = headers(self.text_buffer[0], level=1)\n            instance.parse()\n            self.parsed_data.append(instance)\n            # Return 'Blank' so that processing of the next line starts over\n            return 'Blank'\n        if header_line_h2.match(text):\n            print(\"creating an object : {}\".format(\"header\"))\n            instance = headers(self.text_buffer[0], level=2)\n            instance.parse()\n            self.parsed_data.append(instance)\n            # Return 'Blank' so that processing of the next line starts over\n            return 'Blank'\n        if blank_line.match(text) or self.index >= len(self.rawdata) - 1:\n            self.text_buffer.append(text)\n            for line in self.text_buffer:\n                parsed_line = self.parseInlineElements(line)\n                for item in parsed_line:\n                    self.parsed_data.append(item)\n            return 'Blank'\n        else:\n            self.text_buffer.append(text)\n            return 'Normal'\n\n    def parseTableBlock(self, text):\n        if block_rules['Table'].match(text):\n            self.text_buffer.append(text)\n            return 'Table'\n        elif blank_line.match(text) or self.index >= len(self.rawdata) - 1:\n            self.text_buffer.append(text)\n            print(\"creating an object : {}\".format(\"table\"))\n            instance = table(self.text_buffer)\n            instance.parse()\n            self.parsed_data.append(instance)\n            return 'Blank'\n        else:\n            return -1\n\n    def parseBlockQuote(self, text):\n        if block_quote.match(text):\n            # Replace the leftmost '>' and trim the leading whitespace.\n            stripped_text = re.sub(r'\\s*(\\>\\s|\\>)', '', text, 1)\n            self.text_buffer.append(stripped_text)\n            return 'BlockQuote'\n        if blank_line.match(text) or self.index >= len(self.rawdata) - 1:\n            # A blank line marks the end of the block\n            stripped_text = text.replace('>', '', 1)\n            self.text_buffer.append(stripped_text)\n            print(\"creating an object : {}\".format(\"block quote\"))\n            instance = blockQuote(self.text_buffer)\n            instance.parse()\n            self.parsed_data.append(instance)\n            return 'Blank'\n        else:\n            self.text_buffer.append(text)\n            return 'BlockQuote'\n\n    def parseTaggedBlock(self, text):\n        if block_rules['TaggedBlockEnd'].match(text):\n            print(\"creating an object : {}\".format(\"tagged block\"))\n            instance = taggedBlock(self.text_buffer)\n            instance.parse()\n            self.parsed_data.append(instance)\n            return 'Blank'\n        else:\n            self.text_buffer.append(text)\n            return 'TaggedBlock'\n\n    def parseCodeBlock(self, text):\n        # First, strip the leading whitespace from the first element of text_buffer,\n        # which parseFirstTime() did not process.\n        self.text_buffer[0] = self.text_buffer[0].lstrip()\n        if block_rules['CodeBlock'].match(text):\n            stripped_text = text.lstrip()\n            self.text_buffer.append(stripped_text)\n            return 'CodeBlock'\n        elif blank_line.match(text) or self.index >= len(self.rawdata) - 1:\n            stripped_text = text.lstrip()\n            self.text_buffer.append(stripped_text)\n            print(\"creating an object : 
{}\".format(\"code block\"))\n instance = codeBlock(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n return 'Blank'\n else:\n self.text_buffer.append(text)\n return 'CodeBlock'\n\n def parseUlLists(self, text):\n #現在のリストの基準となるインデントを元に、各行が入れ子なのか否かを判断する。\n base_indent = self.countIndent(self.text_buffer[0])\n current_line_indent = self.countIndent(text)\n if block_rules['ulLists'].match(text):\n self.text_buffer.append(text)\n return 'ulLists'\n elif block_rules['olLists'].match(text):\n #入れ子のときのみ、現在の行をulListsに含める。\n if current_line_indent >= base_indent + 2:\n self.text_buffer.append(text)\n return 'ulLists'\n else:\n print(\"creating an object : {}\".format(\"ul lists\"))\n instance = ulLists(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n #現在の行を処理し直す\n self.index -= 1\n return 'Blank'\n elif blank_line.match(text):\n self.text_buffer.append(text)\n return 'ulLists'\n elif self.index >= len(self.rawdata) - 1:\n print(\"creating an object : {}\".format(\"ul lists\"))\n instance = ulLists(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n else:\n if current_line_indent >= base_indent + 2:\n #インデントがある場合、前のアイテムの続きだと考える\n self.text_buffer.append(text)\n return 'ulLists'\n else:\n print(\"creating an object : {}\".format(\"ul lists\"))\n instance = ulLists(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n #現在の行を処理し直す\n self.index -= 1\n return 'Blank'\n \n\n def parseOlLists(self, text):\n #現在のリストの基準となるインデントを元に、各行が入れ子なのか否かを判断する。\n base_indent = self.countIndent(self.text_buffer[0])\n current_line_indent = self.countIndent(text)\n if block_rules['olLists'].match(text):\n self.text_buffer.append(text)\n return 'olLists'\n elif block_rules['ulLists'].match(text):\n #入れ子のときのみ、現在の行をulListsに含める。\n if current_line_indent >= base_indent + 2:\n self.text_buffer.append(text)\n return 'olLists'\n else:\n print(\"creating an object : {}\".format(\"ol lists\"))\n instance = olLists(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n #現在の行を処理し直す\n self.index -= 1\n return 'Blank'\n elif blank_line.match(text):\n self.text_buffer.append(text)\n return 'olLists'\n elif self.index >= len(self.rawdata) - 1:\n print(\"creating an object : {}\".format(\"ol lists\"))\n instance = olLists(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n else:\n #\n if current_line_indent >= base_indent + 2:\n #インデントがある場合、前のアイテムの続きだと考える\n self.text_buffer.append(text)\n return 'olLists'\n else:\n print(\"creating an object : {}\".format(\"ol lists\"))\n instance = olLists(self.text_buffer)\n instance.parse()\n self.parsed_data.append(instance)\n #現在の行を処理し直す\n self.index -= 1\n return 'Blank'\n\n def expandToHTML(self):\n expanded_text = \"\"\n for element in self.parsed_data:\n if isinstance(element, defined_classes):\n expanded_text += element.expandToHTML() + '\\n'\n else:\n expanded_text += element + '\\n'\n return self.start_tag + '\\n' + expanded_text + '\\n' + self.end_tag + '\\n'\n\n\n @staticmethod\n def countIndent(text):\n blank = re.compile(r'\\s')\n count = 0\n while blank.match(text):\n count += 1\n text = text.replace(' ', '', 1)\n return count\n\n @staticmethod\n def parseDefinitionBlock(text):\n rule_id = re.compile(r\"(\\[)(?P[^\\[]+)(\\]:)\")\n rule_option = re.compile(r'([\\\"\\'\\(])(?P