diff --git "a/4870.jsonl" "b/4870.jsonl" new file mode 100644--- /dev/null +++ "b/4870.jsonl" @@ -0,0 +1,624 @@ +{"seq_id":"467646986","text":"import platform\nimport sys\n\nosDic = {\"Darwin\": \"MacOS\",\n \"Linux\": \"Linux64\",\n \"Windows\":(\"Win32\",\"Win64\")}\nif platform.system() != \"Windows\":\n sys.path.append(\"PLUX-API-Python3/{}/plux.so\".format(osDic[platform.system()]))\nelse:\n if platform.architecture()[0] == '64bit':\n sys.path.append(\"PLUX-API-Python3/Win64\")\n else:\n sys.path.append(\"PLUX-API-Python3/Win32\")\nimport plux\n\n\nclass NewDevice(plux.SignalsDev):\n\n def __init__(self, address):\n plux.MemoryDev.__init__(address)\n self.time = 0\n self.frequency = 0\n\n def onRawFrame(self, nSeq, data): # onRawFrame takes three arguments\n if nSeq % 2000 == 0:\n print(nSeq)\n if nSeq/self.frequency > self.time:\n return True\n return False\n\n# example routines\n\n\ndef exampleAcquisition(address, time, freq, code): # time acquisition for each frequency\n \"\"\"\n Example acquisition.\n\n Supported channel number codes:\n {1 channel - 0x01, 2 channels - 0x03, 3 channels - 0x07\n 4 channels - 0x0F, 5 channels - 0x1F, 6 channels - 0x3F\n 7 channels - 0x7F, 8 channels - 0xFF}\n\n Maximum acquisition frequencies for number of channels:\n 1 channel - 8000, 2 channels - 5000, 3 channels - 4000\n 4 channels - 3000, 5 channels - 3000, 6 channels - 2000\n 7 channels - 2000, 8 channels - 2000\n \"\"\"\n device = NewDevice(address)\n device.time = time # interval of acquisition\n device.frequency = freq\n device.start(device.frequency, code, 16)\n device.loop() # calls device.onRawFrame until it returns True\n device.stop()\n device.close()\n\n\nexampleAcquisition(\"BTH00:07:80:4D:2E:76\", 20, 1000, 0x01)\n","sub_path":"OneDeviceAcquisitionExample.py","file_name":"OneDeviceAcquisitionExample.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"436448060","text":"import os\nimport yaml\nfrom rpgdiscordhelper.modules.path import Path\nfrom rpgdiscordhelper.models.ServerSetting import ServerSetting\nfrom rpgdiscordhelper.modules.settingname import SettingName\n\n\nclass SettingManager():\n def __init__(self, database_manager):\n self.settings = {}\n self.database_manager = database_manager\n\n self.settings_names = {\n SettingName.ADMIN_ROLE_ID.value: '',\n SettingName.CATEGORY_FOR_LOOKING_PLAYERS.value: '',\n SettingName.CATEGORY_FOR_STATS.value: '',\n SettingName.CHARACTERS_CHANNEL_ID.value: '',\n SettingName.LOGS_CHANNEL_ID.value: '',\n SettingName.MESSAGE_FOR_INACTIVE_PLAYERS.value: '',\n SettingName.MESSAGE_FOR_PLAYERS_WITHOUT_CHARACTER.value: '',\n SettingName.OFFTOPIC_CATEGORY.value: '',\n SettingName.PLAYER_WITH_CHARACTER_ROLE_ID.value: '',\n SettingName.PLAYER_WITHOUT_CHARACTER_ROLE_ID.value: '',\n SettingName.IGNORED_CHANNELS_FOR_STATS.value: '',\n 'checkedInactiveUsers': {},\n 'checkedUsersWithoutAccept': {}\n }\n pass\n\n def update_global_settings(self, data):\n with open(Path.SETTINGS.value, 'w', encoding='utf-8') as setting_file:\n yaml.dump(data, setting_file, allow_unicode=True)\n\n def create_global_settings(self):\n default_yaml_settings = {\n SettingName.DISCORD_TOKEN.value: '',\n SettingName.DATABASE_URL.value: '',\n }\n self.update_global_settings(default_yaml_settings)\n\n def load_global_settings(self):\n if os.path.exists(Path.SETTINGS.value):\n with open(Path.SETTINGS.value, 'r') as yaml_setting_file:\n return yaml.load(yaml_setting_file, 
Loader=yaml.FullLoader)\n else:\n self.create_global_settings()\n self.load_global_settings()\n\n def reload_settings(self, server_id):\n self.settings[server_id] = {}\n session = self.database_manager.create_session()\n settings = session.query(ServerSetting).filter(ServerSetting.name.in_(\n self.settings_names), ServerSetting.server_id == server_id).all()\n for setting in settings:\n if setting.name in self.settings_names:\n if setting.name in self.settings[server_id]:\n self.settings[server_id][setting.name].append(\n setting.value)\n else:\n self.settings[server_id][setting.name] = [setting.value]\n\n def load_settings(self, server_id):\n if server_id in self.settings:\n return self.settings[server_id]\n else:\n self.reload_settings(server_id)\n return self.load_settings(server_id)\n","sub_path":"rpgdiscordhelper/modules/settingmanager.py","file_name":"settingmanager.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514371861","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef after(text, string):\n # Find and validate first part.\n pos_str = text.rfind(string)\n if pos_str == -1: return \"\"\n # Returns chars after the found string.\n adjusted_pos_str = pos_str + len(string)\n if adjusted_pos_str >= len(text): return \"\"\n return text[adjusted_pos_str:]\n\ndef delete_nan_column(data, max_number_of_nas):\n data = data.loc[:, (data.isnull().sum(axis=0) <= max_number_of_nas)]\n return data\n\ndef import_data(init_path, years):\n total_data=[]\n errors =[]\n for year in years:\n try:\n path = init_path+year+'.csv'\n data = pd.read_csv(path)\n total_data.append(data)\n except Exception as err:\n errors.append(err)\n bound = int(after(str(err), 'Expected')[1:3])\n path = init_path+year+'.csv'\n data = pd.read_csv(path, usecols=[i for i in range(bound)], encoding = 'unicode_escape')\n total_data.append(data)\n \n for i in range(len(total_data)):\n total_data[i] = delete_nan_column(total_data[i], 100)\n \n return total_data\n\ndef min_odd(row_data, bookmaker):\n \n if bookmaker == 'Bet365':\n odds_to_take = ['B365H', 'B365D', 'B365A']\n \n elif bookmaker == 'Bet&Win':\n odds_to_take = ['BWH', 'BWD', 'BWA']\n \n subdata = row_data[odds_to_take]\n np_subdata = subdata.to_numpy()\n array_subdata = np.reshape(np_subdata, (-1, 1))\n idx_min = np.argmin(array_subdata)\n if idx_min == 0:\n odd_min = 'H'\n elif idx_min == 1:\n odd_min = 'D'\n else:\n odd_min = 'A'\n \n return odd_min\n\ndef bookmaker_accuracy(subdata, bookmaker):\n counts = []\n for i in range(len(subdata)):\n if min_odd(subdata.iloc[i], bookmaker) == subdata['FTR'].iloc[i]:\n #subdata['FTR'].iloc[i] == 'H' and odds_to_prob(subdata['B365H'].iloc[i]) > 0.5 or subdata['FTR'].iloc[i] == 'D' and odds_to_prob(subdata['B365D'].iloc[i])>0.5 or subdata['FTR'].iloc[i] == 'A' and odds_to_prob(subdata['B365A'].iloc[i]) > 0.5:\n count = 1\n else:\n count = 0\n counts.append(count)\n accuracy = np.sum(counts)/len(subdata)\n return accuracy\n\ndef show_odds(data, bookmaker):\n if bookmaker == 'Bet365':\n odds = ['B365H', 'B365D', 'B365A']\n elif bookmaker == 'Bet&Win':\n odds = ['BWH', 'BWD', 'BWA']\n return data[odds]\n\ndef odds_to_prob(odd):\n return 1/odd\n\ndef choose_bet(data, date, bookmaker, min_odd):\n chosen_data = data[data['Date']==date]\n if bookmaker == 'Bet365':\n home = 'B365H'\n away = 'B365A'\n elif bookmaker == 'Bet&Win':\n home = 'BWH'\n away = 'BWA'\n opt_bets = []\n for i in 
range(len(chosen_data)):\n home_odd = chosen_data[home].iloc[i]\n away_odd = chosen_data[away].iloc[i]\n if home_odd <= min_odd:\n opt_odd = home\n bet = chosen_data[['Date','HomeTeam', 'AwayTeam', 'FTR', opt_odd]].iloc[i]\n opt_bets.append(bet)\n elif away_odd <= min_odd:\n opt_odd = away\n bet = chosen_data[['Date','HomeTeam', 'AwayTeam', 'FTR', opt_odd]].iloc[i]\n opt_bets.append(bet)\n return opt_bets\n\ndef count_result(data):\n n_games = 380\n HomeWins = data['FTR'].str.count('H')\n n_HomeWins = np.sum(HomeWins)\n AwayWins = data['FTR'].str.count('A')\n n_AwayWins = np.sum(AwayWins)\n Draws = data['FTR'].str.count('D')\n n_Draws = np.sum(Draws)\n prob_HomeWins = n_HomeWins/n_games\n prob_AwayWins = n_AwayWins/n_games\n prob_Draw = n_Draws/n_games\n print('ratio of Home Wins: ' + str(round(prob_HomeWins*100)), '%, ratio of Away Wins: ' + str(round(prob_AwayWins*100)), '%, ratio of Draws: ' + str(round(prob_Draw*100)),'%')\n return prob_HomeWins, prob_AwayWins, prob_Draw\n\ndef bar_chart(data, time_horizon):\n fig= plt.figure(figsize=(12,7))\n width = 0.4\n plt.bar(time_horizon, data, width)\n \ndef find_games(data, HomeTeam, AwayTeam):\n return data.loc[(data['HomeTeam'] == HomeTeam) & (data['AwayTeam'] == AwayTeam)]\n \n\n","sub_path":"Project/FootballProjectPartII-master/DATA_SCIENCE_PROJECT_ALLAN/CODE/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148596302","text":"import os, sys, shutil\nimport os.path as osp\nimport re\nimport requests\n\nif __name__ == '__main__':\n\n\n pdf_pattern = re.compile(r'http:.*?\\.pdf')\n name_pattern = re.compile(r'[0-9a-zA-Z\\s-]+')\n\n pdf_dir = 'cmu_computer_vision'\n if osp.exists(pdf_dir):\n shutil.rmtree(pdf_dir)\n os.mkdir(pdf_dir)\n\n html_file = 'page.html'\n lecture_id = 0\n with open(html_file, 'r') as in_f:\n for line in in_f:\n urls = pdf_pattern.findall(line)\n if len(urls)>0:\n file_name = name_pattern.findall(line)[0]\n file_name = file_name.replace('','').replace('','')\n for i, url in enumerate(urls):\n lec_prefix = '{}_{}'.format(lecture_id, \\\n '{}_'.format(i) if len(urls)>1 else '')\n pdf_name = '{}{}.pdf'.format(lec_prefix, '_'.join(file_name.split()))\n pdf_file_path = osp.join(pdf_dir, pdf_name)\n with open(pdf_file_path, 'wb') as out_f:\n response = requests.get(url)\n out_f.write(response.content)\n print('we have complete', pdf_name)\n lecture_id += 1\n","sub_path":"download_pdf.py","file_name":"download_pdf.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"72059585","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n import sys\n suma = 0\n if len(sys.argv) == 1:\n print('0')\n else:\n for x in range(len(sys.argv)):\n if x > 0:\n suma = suma + int(sys.argv[x])\n\n print('{}'.format(suma))\n","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"336844276","text":"'''\n# Sample code to perform I/O:\n\nname = input() # Reading input from STDIN\nprint('Hi, %s.' 
% name) # Writing output to STDOUT\n\n# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail\n'''\n\n\n# Write your code here\ndef chooseX(nxt, prev, curr):\n i = 0\n while int(curr) - i >= int(nxt) and int(curr) - i > int(prev):\n i += 1\n return i\n\n\ndef checkIfStrictlyInc(array):\n array = [int(x) for x in array]\n idx = 0\n for idx in range(len(array) - 1):\n if array[idx] < array[idx + 1]:\n continue\n else:\n if idx == 0:\n x = chooseX(float(\"-inf\"), array[idx + 1], array[idx])\n array[idx] = array[idx] - x\n else:\n x = chooseX(array[idx - 1], array[idx + 1], array[idx])\n array[idx] = array[idx] - x\n\n for i in range(len(array) - 1):\n if array[i] > array[i + 1]:\n print(\"No\")\n return\n print(\"Yes\")\n\nif __name__ == '__main__':\n n = int(input())\n for i in range(n):\n input()\n arr = input()\n print(arr)\n checkIfStrictlyInc(arr.split(' '))\n","sub_path":"Medium/StrictlyIncSeq.py","file_name":"StrictlyIncSeq.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"393731325","text":"import os\nimport re\nimport sys\n\ndef load_uuid(_file):\n '''\n load uuid from a file\n '''\n fin = open(_file)\n fin.readline().strip() # skip the header\n \n uuid = set()\n for line in fin:\n _id = line.split(',')[0].strip()\n uuid.add(_id)\n fin.close()\n \n return uuid\n\ndef dump_uuid(_file, uuids):\n '''\n dump uuid to a file\n '''\n fout = open(_file, 'w')\n for uuid in uuids:\n fout.write(\"%s\\n\" % str(uuid))\n fout.close()\n\ndef main():\n '''\n '''\n _input1 = ''\n _input2 = ''\n _output = ''\n\n omnibus_uuid = load_uuid(_input1) # the file contains omni uuid\n global_uuid = load_uuid(_input2) # the file contains global uuid\n \n # dump uuids to files\n dump_uuid(omnibus_uuid.union(global_uuid), _output)\n \n\nif __name__ == '__main__':\n '''\n '''\n main()\n\n","sub_path":"python/merge_uuid.py","file_name":"merge_uuid.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"323917098","text":"from tastypie.api import Api\n\nfrom django.conf.urls import include, patterns, url\n\nfrom .api import (\n UserResource, TaskResource, CompletedTaskResource,\n ActivityLogResource\n)\nfrom .views import home, Status\n\napi = Api(api_name='v1')\napi.register(UserResource())\napi.register(TaskResource())\napi.register(CompletedTaskResource())\napi.register(ActivityLogResource())\n\n\nurlpatterns = patterns('inthe_am.taskmanager.views',\n url('^api/', include(api.urls)),\n url('^status/', Status.as_view()),\n url('^', home, name='home'),\n)\n","sub_path":"inthe_am/taskmanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"604437297","text":"import logging\n\nlogger = logging.getLogger()\n\n\ndef run(config, event_json, good_indicators):\n logger.debug('Running the Morning Please detection module.')\n\n tags = []\n detections = []\n extra = []\n\n # Identify the module's name.\n module_name = __name__.split('.')[-1]\n\n # Get the list of e-mail addresses from the config.\n mp_emails = config.get(module_name, 'emails').split(',')\n\n # Try to identify Morning Please by the e-mail body.\n for email in event_json['emails']:\n\n # Make sure there is a Word document attachment.\n if any('.doc' in attach['name'].lower() for attach in 
email['attachments']):\n\n # These are the possible Morning Please string combinations.\n string_combos = []\n string_combos.append(['Morning,', 'Attached'])\n string_combos.append(['Morning,', 'Please see attached.'])\n string_combos.append(['Morning,', 'Please see attached and confirm.'])\n\n for ss in string_combos:\n if all(s in email['body'] for s in ss):\n detections.append('Detected a Morning Please phish by Word document attachment and the e-mail body: {}'.format(ss))\n tags.append('morningplease')\n elif all(s in email['html'] for s in ss):\n detections.append('Detected a Morning Please phish by Word document attachment and the e-mail body: {}'.format(ss))\n tags.append('morningplease')\n\n for whois in event_json['whois']:\n for mp_email in mp_emails:\n if mp_email.lower() in whois['raw'].lower():\n extra.append(whois['raw'])\n detections.append('Detected a Morning Please domain \"{}\" by WHOIS e-mail: {}'.format(whois['domain'], mp_email))\n tags.append('morningplease')\n\n return tags, detections, extra\n","sub_path":"lib/modules/detections/morningplease.py","file_name":"morningplease.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54319183","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport urllib\nimport regex\n\nfrom orbis.libs.entity_type_lib import normalize_entity_type\nfrom orbis.libs.entity_type_lib import get_dbpedia_type\nfrom orbis.libs.entity_type_lib import get_sparql_redirect\nfrom orbis.libs.entity_type_lib import get_regex_patterns\nfrom orbis.config import regex_patterns\n\n\ndef test_regex():\n\n organization_pattern, person_pattern, location_pattern = get_regex_patterns()\n\n person_pattern = regex.compile(regex_patterns.base_pattern + \"(\" + \"|\".join(regex_patterns.person_pattern) + \")[0-9]*\")\n result = person_pattern.match(\"http://schema.org/Person\")\n\n assert result\n\n\ndef test_url_quoted():\n uri = \"http://dbpedia.org/resource/Stephen_Hawking\"\n uri_quoted = urllib.parse.quote(uri).encode(\"utf8\")\n\n assert uri_quoted == b'http%3A//dbpedia.org/resource/Stephen_Hawking'\n\n\ndef test_get_sparql_redirect():\n endpoint_url = \"http://dbpedia.org/sparql\"\n\n uri = \"http://dbpedia.org/resource/Steven_Hawking\"\n\n result = get_sparql_redirect(endpoint_url, uri)\n assert result == 'http://dbpedia.org/resource/Steven_Hawking'\n\n\ndef test_normalize_entity_type():\n \"\"\" \"\"\"\n\n assert normalize_entity_type(\"Random\") == \"Random\"\n assert normalize_entity_type(\"random\") == \"Random\"\n\n assert normalize_entity_type(\"location\") == \"Place\"\n assert normalize_entity_type(\"Location\") == \"Place\"\n\n assert normalize_entity_type(\"http://some.thing/location\") == \"Place\"\n assert normalize_entity_type(\"http://some.thing/location/\") == \"Place\"\n\n\ndef test_get_dbpedia_type():\n \"\"\" \"\"\"\n uri = \"http://dbpedia.org/resource/Stephen_Hawking\"\n\n result = get_dbpedia_type(uri, check_redirect=True)\n assert result == \"Person\"\n\n\ndef main():\n test_regex()\n test_url_quoted()\n test_get_sparql_redirect()\n test_normalize_entity_type()\n test_get_dbpedia_type()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/libs/test_entity_type_lib.py","file_name":"test_entity_type_lib.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104230916","text":"#!/usr/bin/env python\n\nimport os\nimport 
subprocess\n\nTHISDIR = os.path.dirname(os.path.abspath(__file__))\n\nargs = [\"git\", \"add\", \".\"]\nreturncode = subprocess.call(args, cwd = THISDIR)\nif returncode != 0:\n raw_input(\"Press ENTER to continue...\")\n ","sub_path":"_git_add.py","file_name":"_git_add.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60675730","text":"import os\nimport re\n\n\nclass ValidatorException(Exception):\n pass\n\n\nclass Validator:\n \"\"\"This guys job is to find the data, validate it, and put it into data structures\"\"\"\n def __init__(self, folderNames):\n self.folderNames = folderNames\n\n def validate(self, dataframefile):\n self.validate_folders()\n self.validate_files()\n dfs = self.validate_data(dataframefile)\n return dfs # to an Analyst instance\n\n def validate_folders(self):\n with os.scandir(path='./data') as it: # try ./data\n # store name attribute of each os.DirEntry in iterator provided by scandir()\n currentFolders = [x.name for x in it]\n for folder in self.folderNames:\n if folder not in currentFolders:\n raise ValidatorException(f\"Required folder not present: {folder}\")\n it.close() # Explicitly close the iterator to free memory\n print(\"Folders Validated\")\n\n def validate_files(self):\n for folder in self.folderNames:\n with os.scandir(path='./data/'+folder) as it:\n currentFiles = [x.name for x in it if x.name != \".gitignore\"] # store name attributes of all files in a folder\n for fileName in currentFiles:\n if not fileName.endswith(\".csv\"): # validate filetype is a csv\n raise ValidatorException(f\"File Type is not csv: {fileName}\")\n if not re.match(r\"\\d{4}\", fileName[:4]): # validate that first four digits of file name is a year\n raise ValidatorException(f\"File name must start with YYYY: {fileName}\")\n it.close() # Explicity close the iterator to free memory\n print(f\"All files validated within {folder}\")\n print(\"Files validated\")\n\n def validate_data(self, dataframefile):\n # dfs is a dictionary of dataframes\n dfs = {}\n for folder in self.folderNames:\n with os.scandir(path='./data/'+folder) as it:\n currentFiles = [x.name for x in it if x.name != \".gitignore\"] # store name attributes of all files in a folders\n for fileName in currentFiles:\n df = dataframefile.read(os.path.join('./data/', folder, fileName))\n # check the column titles\n for col in df.columns:\n if type(col) is not str: # ensure column names are string types\n raise ValidatorException(f\"File {fileName} needs to be formatted correctly: {col}\")\n # if column names are valid, then we can safely store the dataframe to our master dictionary\n dfs[fileName[:4] + folder] = df\n # i.e. 
dfs['2016carbon_data']\n it.close()\n print(f\"All data validated within {folder}\")\n print(\"Data validated\")\n return dfs\n","sub_path":"ffequity/processors/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"45942230","text":"import os\nimport logging\n\nimport pandas as pd\nimport numpy as np\n\nfrom drain import util, aggregate, data\nfrom drain.aggregate import Aggregate, Count, aggregate_counts, days\nfrom drain.aggregation import SpacetimeAggregation\nfrom drain.data import FromSQL\nfrom drain.util import list_filter_none, union\n\nclass EnrollAggregation(SpacetimeAggregation):\n def __init__(self, spacedeltas, dates, **kwargs):\n SpacetimeAggregation.__init__(self,\n spacedeltas = spacedeltas,\n dates = dates,\n prefix = 'wicenroll',\n date_column = 'register_d', **kwargs)\n\n if not self.parallel:\n self.inputs = [FromSQL(query=\"\"\"\nwith enroll as (\nSELECT kid_id, p.* \nFROM cornerstone.partenrl p join aux.kid_wics using (part_id_i)\nUNION ALL\nSELECT kid_id, p.*\nFROM cornerstone.partenrl p join aux.kid_mothers on p.part_id_i = mothr_id_i)\n\nselect *, \narray_remove(array[lang_1_c, lang_2_c, lang_3_c], null) as language,\narray_remove(array[pa_cde1_c, pa_cde2_c, pa_cde3_c, pa_cde4_c, pa_cde5_c], null) as assistance\nfrom enroll \n\"\"\", tables=['aux.kid_wics', 'aux.kid_mothers'], parse_dates=['register_d', 'last_upd_d'], target=True)]\n\n def get_aggregates(self, date, delta):\n \n aggregates = [\n Aggregate(lambda e: e.med_risk_f == 'Y', 'any', \n 'medical_risk', fname=False),\n Aggregate('emplymnt_c', lambda e: set(list_filter_none(e)), \n 'employment_status', fname=False),\n Aggregate('occptn_c', lambda o: set(list_filter_none(o)), \n 'occupation', fname=False),\n Aggregate(['hsehld_n', 'hse_inc_a'], 'median', \n ['household_size', 'household_income']),\n Aggregate('language', lambda ls: union(set(l) for l in ls),\n fname=False),\n Aggregate('assistance', lambda ls: union(set(l) for l in ls),\n fname=False),\n Aggregate('clinicid_i', lambda c: set(c), 'clinic', fname=False)\n ]\n\n return aggregates\n\n\nclass BirthAggregation(SpacetimeAggregation):\n def __init__(self, spacedeltas, dates, **kwargs):\n SpacetimeAggregation.__init__(self,\n spacedeltas = spacedeltas,\n dates = dates,\n prefix = 'wicbirth',\n date_column = 'date_of_birth', **kwargs)\n\n if not self.parallel:\n self.inputs = [FromSQL(target=True, query=\"\"\"\nSELECT *, \napgar_n::int as apgar,\nnullif(lgt_inch_n, 0) as length,\nnullif(wgt_grm_n, 0) as weight,\nnullif(headcirc_n, 0) as head_circumference,\narray_remove(array[\n inf_cmp1_c, inf_cmp2_c, inf_cmp3_c, inf_cmp4_c, inf_cmp5_c\n], null) as complication\nFROM aux.kids\nJOIN aux.kid_mothers USING (kid_id)\nJOIN cornerstone.birth USING (part_id_i, mothr_id_i)\n\"\"\", tables=['aux.kids', 'aux.kid_mothers'], parse_dates=['date_of_birth'])\n ]\n\n def get_aggregates(self, date, delta):\n \n aggregates = [\n Aggregate('length', 'max', fname=False),\n Aggregate('weight', 'max', fname=False),\n Aggregate('head_circumference', 'max', fname=False),\n Aggregate('apgar', 'max', 'apgar_score', fname=False),\n Aggregate('brth_typ_c', lambda b: set(b), 'place_type', fname=False),\n Aggregate('inf_disp_c',lambda i: set(i), 'disposition', fname=False),\n Aggregate('complication', lambda cs: union(set(c) for c in cs), fname=False),\n Aggregate(lambda b: b.apors_f == 'Y', 'any', 'apors', fname=False),\n Aggregate(lambda b: b.icu_f 
== 'Y', 'any', 'icu', fname=False),\n Aggregate('clinicid_i', lambda c: set(c), 'clinic', fname=False)\n ]\n\n return aggregates\n\nclass PrenatalAggregation(SpacetimeAggregation):\n def __init__(self, spacedeltas, dates, **kwargs):\n SpacetimeAggregation.__init__(self,\n spacedeltas = spacedeltas,\n dates = dates,\n prefix = 'wicprenatal',\n date_column = 'visit_d', **kwargs)\n\n if not self.parallel:\n self.inputs = [FromSQL(target=True, query=\"\"\"\nSELECT kid_id, date_of_birth, p.*\nFROM aux.kids\nJOIN aux.kid_mothers USING (kid_id)\nJOIN cornerstone.birth b USING (part_id_i, mothr_id_i)\nJOIN cornerstone.prenatl p ON b.mothr_id_i = p.part_id_i\nwhere date_of_birth - visit_d between -365 and 365\n\"\"\", tables=['aux.kids', 'aux.kid_mothers'], parse_dates=['date_of_birth', 'visit_d'])\n ]\n\n def get_aggregates(self, date, delta):\n\n aggregates = [\n Count(),\n Aggregate(days('visit_d', 'date_of_birth'), ['min', 'max'], 'visit'),\n Aggregate('serv_typ_c', lambda s: set(s), 'service', fname=False),\n Aggregate('preg_nbr_n', 'max', 'previous_pregnancies', fname=False),\n Aggregate('lv_brth_n', 'max', 'previous_births', fname=False),\n Aggregate('othr_trm_n', 'max', 'previous_terminations', fname=False),\n Aggregate(lambda p: p.smk3_mth_f == 'Y', 'any', 'smoked_3mo', fname=False),\n Aggregate('cig3_day_n', 'max', 'cigarettes_per_day', fname=False),\n Aggregate(lambda p: p.drk3_mth_f == 'Y', 'any', 'drank_3mo', fname=False),\n Aggregate('dr_dy_wk_n', 'max', 'days_drank_per_week', fname=False),\n Aggregate('drnk_day_n', 'max', 'drinks_per_day', fname=False),\n Aggregate('clinicid_i', lambda c: set(c), 'clinic', fname=False)\n ]\n\n return aggregates\n","sub_path":"output/wic.py","file_name":"wic.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"218000937","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport errno\n\nPY2 = sys.version_info[0] == 2\nPY3K = sys.version_info[0] >= 3\nPY33 = sys.version_info >= (3, 3)\n\nif PY2:\n import imp\n\n long = long\n unicode = unicode\n basestring = basestring\n\n reduce = reduce\n\n from urllib import quote_plus, unquote_plus, quote, unquote\n from urlparse import parse_qsl\n\n def load_module(module, path):\n with open(path, 'rb') as fh:\n mod = imp.load_source(module, path, fh)\n\n return mod\nelse:\n long = int\n unicode = str\n basestring = str\n\n from functools import reduce\n\n from urllib.parse import (quote_plus, unquote_plus,\n parse_qsl, quote, unquote)\n\n if PY33:\n from importlib import machinery\n\n def load_module(module, path):\n return machinery.SourceFileLoader(\n module, path\n ).load_module(module)\n else:\n import imp\n\n def load_module(module, path):\n with open(path, 'rb') as fh:\n mod = imp.load_source(module, path, fh)\n\n return mod\n\n\nclass Null(object):\n\n def __bool__(self):\n return False\n\n def __eq__(self, other):\n return other is None\n\n\ndef decode(string, encodings=None):\n if not PY2 and not isinstance(string, bytes):\n return string\n\n if encodings is None:\n encodings = ['utf-8', 'latin1', 'ascii']\n\n for encoding in encodings:\n try:\n return string.decode(encoding)\n except UnicodeDecodeError:\n pass\n\n return string.decode(encodings[0], errors='ignore')\n\n\ndef encode(string, encodings=None):\n if not PY2 and isinstance(string, bytes):\n return string\n\n if PY2 and isinstance(string, unicode):\n return string\n\n if encodings is None:\n encodings = ['utf-8', 'latin1', 'ascii']\n\n for 
encoding in encodings:\n try:\n return string.encode(encoding)\n except UnicodeDecodeError:\n pass\n\n return string.encode(encodings[0], errors='ignore')\n\n\ndef mkdir_p(path, mode=0o777):\n try:\n os.makedirs(path, mode)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef value(val):\n if callable(val):\n return val()\n\n return val\n\n\ndef data_get(target, key, default=None):\n \"\"\"\n Get an item from a list, a dict or an object using \"dot\" notation.\n\n :param target: The target element\n :type target: list or dict or object\n\n :param key: The key to get\n :type key: string or list\n\n :param default: The default value\n :type default: mixed\n\n :rtype: mixed\n \"\"\"\n from ..support import Collection\n\n if key is None:\n return target\n\n if not isinstance(key, list):\n key = key.split('.')\n\n for segment in key:\n if isinstance(target, (list, tuple)):\n try:\n target = target[segment]\n except IndexError:\n return value(default)\n elif isinstance(target, dict):\n try:\n target = target[segment]\n except IndexError:\n return value(default)\n elif isinstance(target, Collection):\n try:\n target = target[segment]\n except IndexError:\n return value(default)\n else:\n try:\n target = getattr(target, segment)\n except AttributeError:\n return value(default)\n\n return target\n","sub_path":"orator/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"198904897","text":"import os\r\nimport cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n _, frame = cap.read()\r\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n acik_sari= np.array([25, 146, 190])\r\n koyu_sari= np.array([100, 190, 250])\r\n sari_mask=cv2.inRange(hsv_frame, acik_sari,koyu_sari)\r\n sari = cv2.bitwise_and(frame, frame, mask=sari_mask)\r\n\r\n acik_kirmizi = np.array([161, 155, 84])\r\n koyu_kirmizi = np.array([179, 255, 255])\r\n kirmizi_mask = cv2.inRange(hsv_frame, acik_kirmizi, koyu_kirmizi)\r\n kirmizi = cv2.bitwise_and(frame, frame, mask=kirmizi_mask)\r\n \r\n acik_mavi = np.array([94, 80, 2])\r\n koyu_mavi = np.array([126, 255, 255])\r\n mavi_mask = cv2.inRange(hsv_frame, acik_mavi, koyu_mavi)\r\n mavi = cv2.bitwise_and(frame, frame, mask=mavi_mask)\r\n\r\n \r\n acik_yesil = np.array([25, 52, 72])\r\n koyu_yesil = np.array([102, 255, 255])\r\n yesil_mask = cv2.inRange(hsv_frame, acik_yesil, koyu_yesil)\r\n yesil = cv2.bitwise_and(frame, frame, mask=yesil_mask)\r\n\r\n \r\n acik = np.array([0, 42, 0])\r\n koyu = np.array([179, 255, 255])\r\n mask = cv2.inRange(hsv_frame, acik, koyu)\r\n sonuc = cv2.bitwise_and(frame, frame, mask=mask)\r\n cv2.imshow(\"Kirmizi\", kirmizi)\r\n cv2.imshow(\"Mavi\", mavi)\r\n cv2.imshow(\"Yeşil\", yesil)\r\n cv2.imshow(\"Sari\",sari)\r\n cv2.imshow(\"Diger Renkler\", sonuc)\r\n\r\n font = cv2.FONT_HERSHEY_COMPLEX\r\n\r\n if int(cv2.__version__[0]) > 3:\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n else:\r\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for cnt in contours:\r\n area = cv2.contourArea(cnt)\r\n approx = cv2.approxPolyDP(cnt, 0.02*cv2.arcLength(cnt, True), True)\r\n x = approx.ravel()[0]\r\n y = approx.ravel()[1]\r\n\r\n if area > 400:\r\n cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)\r\n\r\n if len(approx) == 3:\r\n cv2.putText(frame, \"Ucgen\", 
(x, y), font, 1, (0, 0, 0))\r\n elif len(approx) == 4:\r\n cv2.putText(frame, \"Dikdortgen\", (x, y), font, 1, (0, 0, 0))\r\n\r\n\r\n cv2.imshow(\"Sekilleri Algila\", frame)\r\n\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n","sub_path":"color recognition/color recognition/Renk_Tanima.py","file_name":"Renk_Tanima.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"161788134","text":"import logging\nimport unittest\n\nimport networkx as nx\n\nfrom pybel.cx import to_cx_json, from_cx_json, hash_tuple\nfrom pybel.io import from_path\nfrom tests.constants import test_bel_thorough, BelReconstitutionMixin, test_bel_simple\n\nlog = logging.getLogger(__name__)\n\n\nclass TestCx(BelReconstitutionMixin, unittest.TestCase):\n def test_cx_simple(self):\n graph = from_path(test_bel_simple)\n self.bel_simple_reconstituted(graph)\n\n node_mapping = dict(enumerate(sorted(graph.nodes_iter(), key=hash_tuple)))\n\n cx = to_cx_json(graph)\n reconstituted = from_cx_json(cx)\n\n nx.relabel.relabel_nodes(reconstituted, node_mapping, copy=False)\n\n self.bel_simple_reconstituted(reconstituted, check_metadata=False)\n\n def test_cx_thorough(self):\n graph = from_path(test_bel_thorough, allow_nested=True)\n self.bel_thorough_reconstituted(graph)\n\n node_mapping = dict(enumerate(sorted(graph.nodes_iter(), key=hash_tuple)))\n\n cx = to_cx_json(graph)\n reconstituted = from_cx_json(cx)\n\n nx.relabel.relabel_nodes(reconstituted, node_mapping, copy=False)\n\n self.bel_thorough_reconstituted(\n reconstituted,\n check_metadata=False,\n check_warnings=False,\n check_provenance=False\n )\n","sub_path":"tests/test_cx.py","file_name":"test_cx.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339472503","text":"# 1. 
Найти сумму и произведение цифр трехзначного числа, которое\n# вводит пользователь.\n\nNUM = int(input('Enter number \\n'))\n\nNUM_A = NUM // 100\nNUM_B = (NUM // 10) % 10\nNUM_C = NUM % 10\nprint(f\"Произведение чисел: {NUM_A}, {NUM_B}, {NUM_C} равно {NUM_A * NUM_B * NUM_C}\")\nprint(f\"Сумма чисел: {NUM_A}, {NUM_B}, {NUM_C} равно {NUM_A + NUM_B + NUM_C}\")\n","sub_path":"Lesson_1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"382935181","text":"\"\"\"Build and publish a docker image.\n\nThis is a work-in-progress script, it is not yet ready for prime-time!\n\"\"\"\nimport pathlib\nimport subprocess\n\nimport getconfig\nfrom labm8.py import app\nfrom labm8.py import dockerutil\n\nFLAGS = app.FLAGS\n\napp.DEFINE_string(\"target\", None, \"The bazel image target.\")\napp.DEFINE_string(\"tag\", None, \"The docker image tag to export.\")\napp.DEFINE_boolean(\n \"push\", False, \"Run `docker push` on the final tagged image.\"\n)\n\nPHD_BUILD = dockerutil.BazelPy3Image(\"tools/docker/phd_build/phd_build\")\n\n\ndef BuildAndLoad(target: str) -> str:\n assert target.startswith(\"//\")\n target_without_prefix = target[len(\"//\") :]\n\n target_components = target.split(\":\")\n assert len(target_components) == 2\n path = target_components[0] + \"/\" + target_components[1]\n tar_target = f\"{target}.tar\"\n\n app.Log(1, \"Building %s image\", tar_target)\n with PHD_BUILD.RunContext() as ctx:\n ctx.CheckCall([\"bazel\", \"build\", tar_target], timeout=600)\n\n phd_root = getconfig.GetGlobalConfig().paths.repo_root\n tar_path = pathlib.Path(f\"{phd_root}/bazel-bin/{path}.tar\")\n assert tar_path.is_file()\n\n # Load the tarfile build\n import_tag = f\"bazel/{target_without_prefix}\"\n app.Log(1, \"Loading docker image %s\", import_tag)\n subprocess.check_call(\n [\"timeout\", \"-s9\", str(300), \"docker\", \"load\", \"-i\", tar_path]\n )\n return import_tag\n\n\ndef RenameTag(src_tag: str, dst_tag: str) -> None:\n app.Log(1, \"Tagging %s\", dst_tag)\n subprocess.check_call(\n [\"timeout\", \"-s9\", str(60), \"docker\", \"tag\", src_tag, dst_tag]\n )\n subprocess.check_call([\"timeout\", \"-s9\", str(60), \"docker\", \"rmi\", src_tag])\n\n\ndef PushTag(tag: str):\n app.Log(1, \"Pushing docker image %s\", tag)\n subprocess.check_call([\"timeout\", \"-s9\", str(360), \"docker\", \"push\", tag])\n\n\ndef main():\n \"\"\"Main entry point.\"\"\"\n loaded_tag = BuildAndLoad(FLAGS.target)\n if FLAGS.tag:\n RenameTag(loaded_tag, FLAGS.tag)\n if FLAGS.push:\n PushTag(FLAGS.tag)\n\n\nif __name__ == \"__main__\":\n app.Run(main)\n","sub_path":"tools/docker/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"208071023","text":"from tkinter import *\nfrom tkinter import ttk\n\nfrom calculator import *\nfrom calculator import WIDTHBTN, HEIGHTBTN\n\nclass MainApp(Tk):\n def __init__(self):\n Tk.__init__(self)\n self.title('Calculadora')\n self.geometry(\"{}x{}\".format(WIDTHBTN*4, HEIGHTBTN*6))\n\n c = Calculator(self)\n c.pack()\n \n\n def start(self):\n self.mainloop()\n\nif __name__ == '__main__':\n app = MainApp()\n app.start()\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315771033","text":"import pymysql\nfrom 
settings import config\n\n\ndef fetch(sql, params):\n print(sql)\n print(params)\n try:\n conn = pymysql.connect(**config)\n cur = conn.cursor()\n try:\n cur.execute(sql, params)\n desc = cur.description\n rows = cur.fetchall()\n fields = []\n for f in desc:\n fields.append(f[0])\n data = [tuple(fields), ]\n for d in rows:\n data.append(d)\n result = (tuple(data), None)\n except pymysql.Error as e:\n print(e)\n result = (((),), e)\n cur.close()\n conn.close()\n except pymysql.Error as e:\n print(e)\n result = (((),), e)\n return result\n\n\ndef affect(sql, params):\n print(sql)\n print(params)\n try:\n db = pymysql.connect(**config)\n cur = db.cursor()\n try:\n cur.execute(sql, params)\n db.commit()\n result = (cur.rowcount, '')\n except pymysql.Error as e:\n print(e)\n result = (cur.rowcount, e)\n db.rollback()\n cur.close()\n db.close()\n except pymysql.Error as e:\n print(e)\n result = (0, e)\n return result\n\n\ndef get_artists(name, surname, option, year_from, year_to):\n if option == 'singer':\n x = ' JOIN singer_prod ON ar_taut = tragoudistis '\n elif option == 'songwriter':\n x = ' JOIN tragoudi ON ar_taut = sinthetis '\n elif option == 'composer':\n x = ' JOIN tragoudi ON ar_taut = stixourgos '\n else:\n x = ' '\n\n sql = \"SELECT distinct ar_taut AS 'National_ID', \" \\\n \"onoma AS 'Name', epitheto AS 'Surname', \" \\\n \"etos_gen AS 'Birth_Year' \" \\\n \"FROM kalitexnis\" + x + \\\n \"WHERE etos_gen \" \\\n \"BETWEEN %s AND %s \" \\\n \"AND (onoma LIKE %s OR onoma IS NULL) \" \\\n \"AND epitheto LIKE %s \" \\\n \"ORDER BY kalitexnis.onoma ASC, kalitexnis.epitheto ASC\"\n return fetch(sql, [year_from, year_to, '%' + name + '%', '%' + surname + '%'])\n\n\ndef update_artist(id, name, surname, year):\n name = name if name != '' else None\n sql = \"\"\"UPDATE `kalitexnis` SET `onoma`= %s , `epitheto`= %s, `etos_gen`= %s WHERE (`ar_taut`= %s)\"\"\"\n return affect(sql, [name, surname, year, id])\n\n\ndef delete_artist(id):\n sql = \"\"\"DELETE FROM `kalitexnis` WHERE (`ar_taut`= %s)\"\"\"\n return affect(sql, [id, ])\n\n\ndef insert_artist(id, name, surname, year):\n sql = \"\"\"INSERT INTO `kalitexnis` (`ar_taut`, `onoma`, `epitheto`, `etos_gen`) VALUES (%s, %s, %s, %s)\"\"\"\n return affect(sql, [id, name, surname, year])\n\n\ndef get_songs(title, year, company):\n params = ['%' + title + '%']\n if company != '':\n sp1 = \" ,cp.etos AS `CD PRODUCTION`, cp.etaireia AS `CD COMPANY` \"\n sp2 = \" LEFT JOIN singer_prod sp ON sp.title = t.titlos LEFT JOIN cd_production cp ON sp.cd = cp.code_cd \"\n sp3 = \" AND cp.etaireia LIKE %s\"\n params.append('%' + company + '%')\n else:\n sp1 = \" \"\n sp2 = \" \"\n sp3 = \" \"\n\n if year != '':\n y = \" AND t.etos_par = %s\"\n params.append(year)\n else:\n y = \" \"\n\n sql = \"SELECT t.titlos AS TITLE, k1.onoma AS `COMPOSER NAME`, k1.epitheto AS `COMPOSER SURNAME`, \" \\\n \"t.etos_par AS `SONG PRODUCTION`, k2.onoma AS `WRITER NAME`, k2.epitheto AS `WRITER SURNAME`\" + sp1 + \\\n \"FROM tragoudi t LEFT JOIN kalitexnis k1 ON k1.ar_taut = t.sinthetis LEFT JOIN kalitexnis k2 \" \\\n \"ON k2.ar_taut = t.stixourgos\" + sp2 + \"WHERE t.titlos LIKE %s \" + sp3 + y\n return fetch(sql, params)\n\n\ndef insert_song(title, year, cd, singer, composer, songwriter):\n sql1 = \"\"\"INSERT INTO `tragoudi` (`titlos`, `sinthetis`, `etos_par`, `stixourgos`) VALUES (%s, %s, %s, %s)\"\"\"\n sql2 = \"\"\"INSERT INTO `singer_prod` (`cd`, `tragoudistis`, `title`) VALUES (%s, %s, %s)\"\"\"\n return affect(sql1, [title, composer, year, songwriter]), affect(sql2, [cd, 
singer, title])\n\n\ndef get_artists_list():\n sql = \"\"\"SELECT * FROM `kalitexnis` ORDER BY kalitexnis.onoma ASC, kalitexnis.epitheto ASC\"\"\"\n return fetch(sql, [])\n\n\ndef get_productions_list():\n sql = \"\"\"SELECT * FROM cd_production ORDER BY cd_production.etaireia ASC\"\"\"\n return fetch(sql, [])\n\n\ndef get_artist_range_year():\n sql = \"\"\"SELECT MIN(etos_gen) AS Min, MAX(etos_gen) AS Max FROM kalitexnis\"\"\"\n return fetch(sql, [])\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35160342","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'ToDoApp'\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('/', views.DetailView.as_view(), name='detail'),\n path('delete_todo_modal//', views.DeleteToDoView.as_view(), name='delete_todo_modal'),\n path('delete_todo//', views.delete_todo, name='delete_todo'),\n path('create_todo_modal/', views.CreateToDoModalView.as_view(), name='create_todo_modal'),\n path('create_todo/', views.create_todo, name='create_todo'),\n path('change_status//', views.change_status, name='change_status'),\n]","sub_path":"ToDoProject/ToDoApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"958358","text":"import matplotlib.pyplot as plt\n\nX = []\nY = []\n\ninFile = open(\"projectileCoriolisData.txt\", \"r\")\nfor line in inFile:\n t, x, y, z, v_x, v_y, v_z, a_x, a_y, a_z = line.split(\" \")\n X.append(float(x))\n Y.append(float(y))\ninFile.close()\n\nplt.xlabel(\"$x$ (m)\")\nplt.ylabel(\"$y$ (m)\")\nplt.plot(X,Y)\nplt.show()","sub_path":"ProjectileXYplotCoriolis.py","file_name":"ProjectileXYplotCoriolis.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288794325","text":"import os\nimport re\nimport subprocess\nimport time\nfrom influxdb import InfluxDBClient\n\ndef send_influxdb(network_data):\n client = InfluxDBClient('influxdb', 8086, 'admin', 'admin', 'network_monitoring')\n\n data = [{\n \"measurement\": \"speed\",\n \"fields\": network_data\n }]\n client.write_points(data)\n\n\ndef get_network_info():\n print(\"Collectin speed test\")\n response = subprocess.Popen('speedtest-cli --simple', shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')\n print(response)\n ping = re.findall('Ping:\\s(.*?)\\s', response, re.MULTILINE)\n download = re.findall('Download:\\s(.*?)\\s', response, re.MULTILINE)\n upload = re.findall('Upload:\\s(.*?)\\s', response, re.MULTILINE)\n\n ping = ping[0].replace(',', '.')\n download = download[0].replace(',', '.')\n upload = upload[0].replace(',', '.')\n\n speed_data = {\n \"download\": float(download),\n \"upload\": float(upload),\n \"ping\": float(ping)\n }\n\n\n\n try:\n f = open('tmp/speedtest.csv', 'a+')\n if os.stat('tmp/speedtest.csv').st_size == 0:\n f.write('Date,Time,Ping (ms),Download (Mbit/s),Upload (Mbit/s)\\r\\n')\n f.write('{},{},{},{},{}\\r\\n'.format(time.strftime('%m/%d/%y'), time.strftime('%H:%M'), ping, download, upload))\n\n except:\n print(\"fail save csv\")\n\n 
send_influxdb(speed_data)\n\n\nget_network_info()\n","sub_path":"sppedtest.py","file_name":"sppedtest.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240549785","text":"from flask import Flask,jsonify, request\n\nfrom flasgger import Swagger\n\nfrom sklearn.externals import joblib\n\nimport numpy as np\n\nfrom flask_cors import CORS\n\nimport pandas as pd\n\napp = Flask(__name__)\nSwagger(app)\nCORS(app)\n\n@app.route('/input/task', methods=['POST'])\ndef predict():\n \"\"\"\n\n Ini Adalah Endpoint Untuk Memprediksi Makanan\n\n ---\n\n tags:\n\n - Rest Controller\n\n parameters:\n\n - name: body\n\n in: body\n\n required: true\n\n schema:\n\n id: Calories\n\n required:\n\n - Calories\n\n - Fat\n\n - Cholesterol\n\n - Sugars\n\n - Protein\n\n properties:\n\n Calories:\n\n type: int\n\n description: Please input with valid Calories.\n\n default: 0\n\n Fat:\n\n type: int\n\n description: Please input with valid Fat.\n\n default: 0\n\n Cholesterol:\n\n type: int\n\n description: Please input with valid Cholesterol.\n\n default: 0\n\n Sugars:\n\n type: int\n\n description: Please input with valid Sugars.\n\n default: 0\n\n Protein:\n\n type: int\n\n description: Please input with valid Protein.\n\n default: 0\n\n responses:\n\n 200:\n\n description: Success Input\n\n \"\"\"\n new_task = request.get_json()\n\n calories = new_task['Calories']\n fat = new_task['Fat']\n cholesterol = new_task['Cholesterol']\n sugars = new_task['Sugars']\n protein = new_task['Protein']\n\n X_New = np.array([[calories,fat,cholesterol,sugars,protein]])\n\n clf = joblib.load('Caloria_new.pkl')\n\n resultPredict = clf[0].predict(X_New)\n\n return jsonify({'message' : str(resultPredict)})\n # return jsonify({'message' : format(clf[1].target_names[resultPredict])})\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352274437","text":"import os\r\nfrom forms import PhotoUpload\r\nfrom flask import Flask, render_template, redirect, url_for\r\n\r\n\r\nIMG_DIR = \"static/img/\"\r\n\r\n\r\nclass Image:\r\n def __init__(self, index, filename):\r\n self.index = index\r\n self.filename = filename\r\n\r\n\r\ndef image_generator(images):\r\n for index, filename in enumerate(images):\r\n yield Image(index, filename)\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route(\"/gallery\", methods=[\"GET\", \"POST\"])\r\ndef gallery():\r\n form = PhotoUpload()\r\n if form.validate_on_submit():\r\n form.photo.data.save(IMG_DIR + form.photo.data.filename)\r\n params = {\r\n \"title\": \"Красная планета\",\r\n \"images\": list(image_generator(os.listdir(IMG_DIR))),\r\n \"form\": form,\r\n }\r\n return render_template(\"gallery.html\", **params)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.config['SECRET_KEY'] = \"SECRET_KEY\"\r\n app.run(port=8080, host=\"127.0.0.1\")\r\n","sub_path":"Галерея с загрузкой/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"625665153","text":"\n\ndef write_file(path, src_list):\n with open(path, 'w', encoding='UTF-8') as fw:\n for line in src_list: fw.write(line)\n\n\n\nnot_matching = []\nmatching = []\nwith open(\"C:/Users/iwin1203/Desktop/enzh정제/ner.enzh/ner.enzh.final_again_re.out\", encoding='UTF-8') as fo:\n k = 0\n for line_idx, line in 
enumerate(fo,1):\n if len(line.split(\"\\t\")[0].split()) != len(line.split(\"\\t\")[1].split()):\n k+=1\n print(line)\n not_matching.append(line)\n else:\n matching.append(line)\n\n print(k)\n\n\n\nnot_p = r\"C:/Users/iwin1203/Desktop/enzh정제/ner.enzh/ner.enzh.not_matching.out\"\np = r\"C:/Users/iwin1203/Desktop/enzh정제/ner.enzh/ner.enzh.matching.out\"\nwrite_file(not_p, not_matching)\nwrite_file(p, matching)","sub_path":"한중음차정제/enzh_not_matching_checker.py","file_name":"enzh_not_matching_checker.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"50074178","text":"\n# write a personal assistant that helps you out with a task\n\n# You must:\n# use a loop to store information\n# use a dictionary\n\n# I've got you started below:\n\nuser_input = ''\n\nwhile user_input != 'stop' or user_input != 'exit':\n user_input = input('Hello, how may I help you?')\n print(user_input) # this line is just for testing\n\n# Ideas for personal assistant:\n# keep a shopping list\n# add contacts to a phone book (hint hint: dictionaries would help here)\n# keep a to do list\n\n\n\n\n","sub_path":"code_camp_2018/challenges/day_four/challenges.py","file_name":"challenges.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103192164","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.lines as Lines\r\nfrom matplotlib.patches import Circle\r\nfrom pathlib import Path\r\nfrom matplotlib.backend_tools import ToolBase\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module=\"matplotlib\")\r\nplt.rcParams['toolbar'] = 'toolmanager'\r\n\r\n#%%\r\ncolor_list=[\"r\",\"c\",\"orange\",\"g\",\"purple\",\"saddlebrown\",\"deeppink\",\"lime\",\"gray\"]\r\n\r\n\r\nclass _draggable_lines:\r\n\tdef __init__(self,axes,position,color,orientation,linestyle):\r\n\t\tself.orientation=orientation\r\n\t\tself.axes=axes\r\n\t\tself.canvas=axes[0].figure.canvas\r\n\t\tself.position=position\r\n\t\tif orientation=='vertical':\r\n\t\t\tself.lines=[Lines.Line2D([position,position],list(ax.get_ylim()),picker=True,pickradius=4,c=color,linestyle=linestyle) for ax in self.axes]\r\n\t\tif orientation=='horizontal':\r\n\t\t\tself.lines=[Lines.Line2D(list(ax.get_xlim()),[position,position],picker=True,pickradius=4,c=color,linestyle=linestyle) for ax in self.axes]\r\n\t\t\t\r\n\t\tself.line_artists=[self.axes[i].add_line(self.lines[i]) for i in range(len(self.axes))]\r\n\t\tself.canvas.draw_idle()\r\n\r\n\r\n\tdef start_event(self, event):\r\n\t\t[line_artist.set_visible(False) for line_artist in self.line_artists]\r\n\t\tself.canvas.draw()\r\n\t\tself.backgrounds=[self.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]\r\n\t\t[line_artist.set_visible(True) for line_artist in self.line_artists]\r\n\t\tself.canvas.draw()\r\n\t\tself.follower = self.canvas.mpl_connect(\"motion_notify_event\", self.followmouse)\r\n\t\tself.releaser = self.canvas.mpl_connect(\"button_press_event\", self.releaseonclick)\r\n\r\n\tdef followmouse(self, event):\r\n\t\tif event.xdata and event.ydata:\r\n\t\t\t[self.canvas.restore_region(background) for background in self.backgrounds]\r\n\t\t\tif self.orientation=='vertical':\r\n\t\t\t\t[line.set_xdata([event.xdata, event.xdata]) for line in self.lines]\r\n\t\t\tif self.orientation=='horizontal':\r\n\t\t\t\t[line.set_ydata([event.ydata, event.ydata]) for line in 
self.lines]\r\n\t\t\t[self.axes[i].draw_artist(self.lines[i]) for i in range(len(self.axes))]\r\n\t\t\t[self.canvas.blit(ax.bbox) for ax in self.axes]\r\n\r\n\tdef releaseonclick(self, event):\r\n\t\t[self.canvas.blit(ax.bbox) for ax in self.axes]\r\n\t\tif self.orientation=='vertical':\r\n\t\t\tself.position=self.lines[0].get_xdata()[0]\r\n\t\tif self.orientation=='horizontal':\r\n\t\t\tself.position=self.lines[0].get_ydata()[0]\r\n\r\n\t\tself.canvas.mpl_disconnect(self.releaser)\r\n\t\tself.canvas.mpl_disconnect(self.follower)\r\n\r\n\tdef clear(self):\r\n\t\t[line.remove() for line in self.lines]\r\n\t\tself.canvas.draw()\r\n\t\treturn self.position\r\n\r\nclass _draggable_circles:\r\n\tdef __init__(self,ax,position,radius,color,linestyle):\r\n\t\tself.ax=ax\r\n\t\tself.canvas=ax.figure.canvas\r\n\t\tself.position=position\r\n\t\tself.radius=radius\r\n\t\tself.circle=Circle(position,radius,color=color,linestyle=linestyle,fill=False)\r\n\t\t\r\n\t\tdelta=min([self.ax.get_xlim()[1]-self.ax.get_xlim()[0],self.ax.get_ylim()[1]-self.ax.get_ylim()[0]])\r\n\t\tself.currently_selected=False\r\n\r\n\t\t\r\n\t\tself.center_dot=Circle(position,delta/200,color=color)\r\n\t\tself.circle_artist=self.ax.add_artist(self.circle)\r\n\t\tself.center_dot_artist=self.ax.add_artist(self.center_dot)\r\n\t\tself.center_dot_artist.set_visible(False)\r\n\r\n\t\tself.canvas.draw_idle()\r\n\r\n\t\r\n\tdef circle_picker(self,mouseevent):\r\n\t\tif (mouseevent.xdata is None) or (mouseevent.ydata is None):\r\n\t\t\treturn False, dict()\r\n\t\tcenter_xdata,center_ydata = self.circle.get_center()\r\n\t\tradius=self.circle.get_radius()\r\n\t\ttolerance = 0.05\r\n\t\td = np.sqrt(\r\n\t\t\t(center_xdata - mouseevent.xdata)**2 + (center_ydata - mouseevent.ydata)**2)\r\n\r\n\t\tif d>=radius*(1-tolerance) and d<=radius*(1+tolerance):\r\n\t\t\tpickx = center_xdata\r\n\t\t\tpicky = center_ydata\r\n\t\t\tprops = dict(pickx=pickx, picky=picky)\r\n\t\t\treturn True,props\r\n\t\telse:\r\n\t\t\treturn False, dict()\r\n\t\t\r\n\r\n\tdef click_position_finder(self,event):\r\n\t\tself.initial_click_position=(event.xdata,event.ydata)\r\n\r\n\tdef drag_circle(self,event):\r\n\t\tif event.xdata and event.ydata:\r\n\t\t\tself.canvas.restore_region(self.background)\r\n\t\t\tcentervector=(self.position[0]-self.initial_click_position[0],self.position[1]-self.initial_click_position[1])\r\n\t\t\tnewcenter=(centervector[0]+event.xdata,centervector[1]+event.ydata)\r\n\t\t\tself.center_dot.set_center(newcenter)\r\n\t\t\tself.circle.set_center(newcenter)\r\n\t\t\tself.ax.draw_artist(self.circle_artist)\r\n\t\t\tself.ax.draw_artist(self.center_dot_artist)\r\n\t\t\tself.canvas.blit(self.ax.bbox)\r\n\r\n\r\n\t\r\n\tdef change_circle_size(self, event):\r\n\t\tif event.xdata and event.ydata:\r\n\t\t\tself.canvas.restore_region(self.background)\r\n\t\t\tnewradius=((self.position[0]-event.xdata)**2+(self.position[1]-event.ydata)**2)**0.5\r\n\t\t\tself.circle.set_radius(newradius)\r\n\t\t\tself.ax.draw_artist(self.circle_artist)\r\n\t\t\tself.ax.draw_artist(self.center_dot_artist)\r\n\t\t\tself.canvas.blit(self.ax.bbox)\r\n\r\n\r\n\tdef start_event(self, event):\r\n\t\tif self.currently_selected:\r\n\t\t\treturn\r\n\t\t\r\n\t\tself.currently_selected=True\r\n\t\tself.center_dot_artist.set_visible(False)\r\n\t\tself.circle_artist.set_visible(False)\r\n\t\tself.canvas.draw()\r\n\t\tself.background=self.canvas.copy_from_bbox(self.ax.bbox)\r\n\t\tself.circle_artist.set_visible(True)\r\n\t\tself.releaser = self.canvas.mpl_connect(\"button_press_event\", 
self.releaseonclick)\r\n\r\n\t\tif event.button==1:\r\n\t\t\tself.canvas.draw_idle()\r\n\t\t\tself.follower = self.canvas.mpl_connect(\"motion_notify_event\", self.change_circle_size)\r\n\r\n\t\tif event.button==3:\r\n\t\t\tself.click_position_finder(event)\r\n\t\t\tself.center_dot_artist.set_visible(True)\r\n\t\t\tself.canvas.draw_idle()\r\n\t\t\tself.follower = self.canvas.mpl_connect(\"motion_notify_event\", self.drag_circle)\r\n\r\n\tdef releaseonclick(self, event):\r\n\t\tself.radius = self.circle.get_radius()\r\n\t\tself.position=self.circle.get_center()\r\n\t\tself.center_dot_artist.set_visible(False)\r\n\t\tself.canvas.mpl_disconnect(self.follower)\r\n\t\tself.canvas.mpl_disconnect(self.releaser)\r\n\t\tself.canvas.draw_idle()\r\n\t\tself.currently_selected=False\r\n\r\n\tdef clear(self):\r\n\t\tself.circle.remove()\r\n\t\tself.canvas.draw()\r\n\t\treturn self.radius\r\n\r\nclass toolbarbutton(ToolBase):\r\n\tdef __init__(self,*args,**kwargs):\r\n\t\tself.image=kwargs.pop('image')\r\n\t\tself.func=kwargs.pop('func')\r\n\t\tself.description=kwargs.pop('description')\r\n\t\tToolBase.__init__(self, *args, **kwargs)\r\n\t\tself.toggle(True)\r\n\r\n\tdef toggle(self,active):\r\n\t\tself._active=active\r\n\t\t\r\n\tdef trigger(self, *args, **kwargs):\r\n\t\tif self._active:\r\n\t\t\tself.func()\r\n\r\n\r\n\r\nclass _circles_tool_class:\r\n\tdef __init__(self,ax,marker_group_size,linestyle,clear):\r\n\t\tself.marker_group_size=marker_group_size\r\n\t\tself.canvas=ax.figure.canvas\r\n\t\tself.markers=[]\r\n\t\tself.linestyle=linestyle\r\n\t\tself.clear=clear\r\n\t\tself.ax=ax\r\n\t\tself.tm = self.canvas.manager.toolmanager\r\n\t\tself.tb=self.canvas.manager.toolbar\r\n\r\n\r\n\t\tself.add_tool=self.tm.add_tool('add',\r\n\t\t\t\t\ttoolbarbutton,\r\n\t\t\t\t\timage=str(Path(__file__).parent / \"icons/\" / 'add_{}_circles_icon.png'.format(marker_group_size)),\r\n\t\t\t\t\tfunc=self.add_f,\r\n\t\t\t\t\tdescription='Add circles',\r\n\t\t\t\t\t)\r\n\t\t\r\n\t\tself.remove_tool=self.tm.add_tool('remove',\r\n\t\t\t\t\ttoolbarbutton,\r\n\t\t\t\t\timage=str(Path(__file__).parent / \"icons/\" / 'remove_{}_circles_icon.png'.format(marker_group_size)),\r\n\t\t\t\t\tfunc=self.delete_f,\r\n\t\t\t\t\tdescription='Remove circles',\r\n\t\t\t\t\t)\r\n\t\t\r\n\t\tself.tb.add_tool(self.add_tool, \"foo\",0)\r\n\t\tself.tb.add_tool(self.remove_tool, \"foo\",1)\r\n\t\t\r\n\t\tself.check_marker_count()\r\n\t\tself.sid = self.canvas.mpl_connect('button_press_event', self._circle_selector)\r\n\t\t\r\n\tdef _circle_selector(self,event):\r\n\t\tcontains=np.array([marker.circle_picker(event)[0] for marker in self.markers])\r\n\t\tif (contains==True).any():\r\n\t\t\tself.markers[np.where(contains==True)[0][0]].start_event(event)\r\n\t\r\n\tdef add_f(self):\r\n\t\txlimits=self.ax.get_xlim()\r\n\t\tylimits=self.ax.get_ylim()\r\n\t\tdelta=min([xlimits[1]-xlimits[0],ylimits[1]-ylimits[0]])\r\n\t\ttargeted_circle_spawning_radius=delta/5\r\n\r\n\t\ttargeted_circle_spawning_center=((xlimits[1]-xlimits[0])/2,(ylimits[1]-ylimits[0])/2)\r\n\t\tselected_color=color_list[int(len(self.markers)/self.marker_group_size)]\r\n\t\t\r\n\t\tself.markers.extend((_draggable_circles(self.ax,targeted_circle_spawning_center,targeted_circle_spawning_radius,selected_color,self.linestyle) for marker in range(self.marker_group_size)))\r\n\t\tself.check_marker_count()\r\n\t\t\r\n\tdef delete_f(self):\r\n\t\t[marker.clear() for marker in self.markers[-self.marker_group_size:]]\r\n\t\tdel 
self.markers[-self.marker_group_size:]\r\n\t\tself.check_marker_count()\r\n\t\t\r\n\tdef check_marker_count(self):\r\n\t\tif len(self.markers)/self.marker_group_size==len(color_list):\r\n\t\t\tself.add_tool.toggle(False)\r\n\t\telif len(self.markers)/self.marker_group_size<len(color_list) and len(self.markers)>0:\r\n\t\t\tself.add_tool.toggle(True)\r\n\t\t\tself.remove_tool.toggle(True)\r\n\t\telif len(self.markers)==0:\r\n\t\t\tself.remove_tool.toggle(False)\r\n\t\t\t\r\n\tdef sort_positions(self, unsorted):\r\n\t\tsorted_array=np.empty([0,self.marker_group_size])\r\n\t\tfor i in range(0,len(unsorted), self.marker_group_size):\r\n\t\t\tgroup=np.sort(unsorted[np.arange(i,i+self.marker_group_size)])\r\n\t\t\tsorted_array=np.vstack((sorted_array,group))\r\n\t\treturn sorted_array\r\n\t\t\r\n\tdef returnpositions(self):\r\n\t\tif self.clear:\r\n\t\t\tunsorted=np.array([marker.clear() for marker in self.markers])\r\n\t\t\tself.canvas.draw_idle()\r\n\t\tif not self.clear:\r\n\t\t\tunsorted=np.array([marker.position for marker in self.markers])\r\n\r\n\t\treturn self.sort_positions(unsorted)\r\n\r\n\t\r\n\tdef handle_close(self,event):\r\n\t\tself.canvas.stop_event_loop()\r\n\r\n\r\nclass _lines_tool_class:\r\n\tdef __init__(self,canvas,marker_group_size,linestyle,axes,clear):\r\n\t\tself.marker_group_size=marker_group_size\r\n\t\tself.canvas=canvas\r\n\t\tself.v_markers,self.h_markers=[],[]\r\n\t\tself.linestyle=linestyle\r\n\t\tself.clear=clear\r\n\t\t\r\n\t\tif axes==None:\r\n\t\t\tself.axes=self.canvas.figure.get_axes()\r\n\t\telse:\r\n\t\t\tself.axes=axes\r\n\t\t\r\n\t\tself.tm = self.canvas.manager.toolmanager\r\n\t\tself.tb=self.canvas.manager.toolbar\r\n\r\n\r\n\t\tself.add_v_tool=self.tm.add_tool('add_v',\r\n\t\t\t\t\ttoolbarbutton,\r\n\t\t\t\t\timage=str(Path(__file__).parent / "icons/" / 'add_{}_vbar_icon.png'.format(marker_group_size)),\r\n\t\t\t\t\tfunc=(lambda: self.add_f(self.v_markers,'vertical')),\r\n\t\t\t\t\tdescription='Add vertical lines',\r\n\t\t\t\t\t)\r\n\t\t\r\n\t\tself.remove_v_tool=self.tm.add_tool('remove_v',\r\n\t\t\t\t\ttoolbarbutton,\r\n\t\t\t\t\timage=str(Path(__file__).parent / "icons/" / 'remove_{}_vbar_icon.png'.format(marker_group_size)),\r\n\t\t\t\t\tfunc=(lambda: self.delete_f(self.v_markers,'vertical')),\r\n\t\t\t\t\tdescription='Remove vertical lines',\r\n\t\t\t\t\t)\r\n\t\tself.add_h_tool=self.tm.add_tool('add_h',\r\n\t\t\t\t\ttoolbarbutton,\r\n\t\t\t\t\timage=str(Path(__file__).parent / "icons/" / 'add_{}_hbar_icon.png'.format(marker_group_size)),\r\n\t\t\t\t\tfunc=(lambda: self.add_f(self.h_markers,'horizontal')),\r\n\t\t\t\t\tdescription='Add horizontal lines',\r\n\t\t\t\t\t)\r\n\t\t\r\n\t\tself.remove_h_tool=self.tm.add_tool('remove_h',\r\n\t\t\t\t\ttoolbarbutton,\r\n\t\t\t\t\timage=str(Path(__file__).parent / "icons/" / 'remove_{}_hbar_icon.png'.format(marker_group_size)),\r\n\t\t\t\t\tfunc=(lambda: self.delete_f(self.h_markers,'horizontal')),\r\n\t\t\t\t\tdescription='Remove horizontal lines',\r\n\t\t\t\t\t)\r\n\t\t\r\n\t\tself.tb.add_tool(self.add_v_tool, "foo",0)\r\n\t\tself.tb.add_tool(self.remove_v_tool, "foo",1)\r\n\t\tself.tb.add_tool(self.add_h_tool, "foo",2)\r\n\t\tself.tb.add_tool(self.remove_h_tool, "foo",3)\r\n\t\t\r\n\t\t\r\n\t\tself.check_marker_count()\r\n\t\tself.sid = self.canvas.mpl_connect('button_press_event', self._line_selector)\r\n\t\t\r\n\tdef _line_selector(self,event):\r\n\t\tif event.button==1:\r\n\t\t\tvertical_contains=np.empty(0)\r\n\t\t\tfor marker in self.v_markers:\r\n\t\t\t\tany_has_been_clicked_on=np.array([line.contains(event)[0] for 
line in marker.lines]).any()\r\n\t\t\t\tvertical_contains=np.append(vertical_contains, any_has_been_clicked_on)\r\n\t\t\t\r\n\t\t\thorizontal_contains=np.empty(0)\r\n\t\t\tfor marker in self.h_markers:\r\n\t\t\t\tany_has_been_clicked_on=np.array([line.contains(event)[0] for line in marker.lines]).any()\r\n\t\t\t\thorizontal_contains=np.append(horizontal_contains, any_has_been_clicked_on)\r\n\r\n\t\t\tif (vertical_contains==True).any():\r\n\t\t\t\tself.v_markers[np.where(vertical_contains==True)[0][0]].start_event(event)\r\n\t\t\telif (horizontal_contains==True).any():\r\n\t\t\t\tself.h_markers[np.where(horizontal_contains==True)[0][0]].start_event(event)\r\n\t\t\r\n\tdef add_f(self,markers_list,orientation):\r\n\t\tselected_color=color_list[int(len(markers_list)/self.marker_group_size)]\r\n\t\tif orientation=='vertical':\r\n\t\t\tax_min,ax_max=self.canvas.figure.get_axes()[0].get_xlim()\r\n\t\tif orientation=="horizontal":\r\n\t\t\tax_min,ax_max=self.canvas.figure.get_axes()[0].get_ylim()\r\n\t\tstarting_position=ax_min+(ax_max-ax_min)/15\r\n\t\tmarkers_list.extend((_draggable_lines(self.axes,starting_position,selected_color,orientation,self.linestyle) for marker in range(self.marker_group_size)))\r\n\t\tself.check_marker_count()\r\n\t\t\r\n\tdef delete_f(self,markers_list,orientation):\r\n\t\t[marker.clear() for marker in markers_list[-self.marker_group_size:]]\r\n\t\tdel markers_list[-self.marker_group_size:]\r\n\t\tself.check_marker_count()\r\n\t\t\r\n\tdef check_marker_count(self):\r\n\t\tif len(self.v_markers)/self.marker_group_size==len(color_list):\r\n\t\t\tself.add_v_tool.toggle(False)\r\n\t\telif len(self.v_markers)/self.marker_group_size<len(color_list) and len(self.v_markers)>0:\r\n\t\t\tself.add_v_tool.toggle(True)\r\n\t\t\tself.remove_v_tool.toggle(True)\r\n\t\telif len(self.v_markers)==0:\r\n\t\t\tself.remove_v_tool.toggle(False)\r\n\t\t\r\n\t\tif len(self.h_markers)/self.marker_group_size==len(color_list):\r\n\t\t\tself.add_h_tool.toggle(False)\r\n\t\telif len(self.h_markers)/self.marker_group_size<len(color_list) and len(self.h_markers)>0:\r\n\t\t\tself.add_h_tool.toggle(True)\r\n\t\t\tself.remove_h_tool.toggle(True)\r\n\t\telif len(self.h_markers)==0:\r\n\t\t\tself.remove_h_tool.toggle(False)\r\n\r\n\t\tself.canvas.draw_idle()\r\n\r\n\tdef sort_positions(self, unsorted):\r\n\t\tsorted_array=np.empty([0,self.marker_group_size])\r\n\t\tfor i in range(0,len(unsorted), self.marker_group_size):\r\n\t\t\tgroup=np.sort(unsorted[np.arange(i,i+self.marker_group_size)])\r\n\t\t\tsorted_array=np.vstack((sorted_array,group))\r\n\t\treturn sorted_array\r\n\t\t\r\n\t\t\r\n\tdef returnpositions(self):\r\n\t\tif self.clear:\r\n\t\t\tv_unsorted=np.array([marker.clear() for marker in self.v_markers])\r\n\t\t\th_unsorted=np.array([marker.clear() for marker in self.h_markers])\r\n\t\t\tself.canvas.draw_idle()\r\n\t\tif not self.clear:\r\n\t\t\tv_unsorted=np.array([marker.position for marker in self.v_markers])\r\n\t\t\th_unsorted=np.array([marker.position for marker in self.h_markers])\r\n\r\n\t\treturn self.sort_positions(v_unsorted), self.sort_positions(h_unsorted)\r\n\r\n\tdef handle_close(self,event):\r\n\t\tself.canvas.stop_event_loop()\r\n\r\ndef lines_tool(figure,markergroupsize:int=1,linestyle='solid',axes=None,clear=True):\r\n\t"""\r\n\tAdds four buttons on the figure that allow you to add lines on the plot. Click on the green ones to add a line group of corresponding orientation (vertical or horizontal).\r\n\tThe red ones remove the last group of said orientation. 
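Lines can be repositioned after creation: left-click a line to grab it, move the mouse, and click again to drop it; the final positions are collected when the figure window is closed.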
\r\n\r\n\r\n\tParameters\r\n\t----------\r\n\tfigure : plt.figure() object\r\n\t\t\r\n\tmarkergroupsize : int\r\n\t\tHow many lines you want in a group. All the lines in said group will be the same color and their positions will be in the same\r\n\t\tsub-list in the returned list. The default is 1.\r\n\tlinestyle : string, optional\r\n\t\tThe default is 'solid'. This is the usual linestyle argument, anything that lines2D will accept works.\r\n\taxes : list of plt.add_subplot() objects, optional\r\n\t\tWich axes you want the lines to appear in. The default is 'All of them'.\r\n\tclear : bool, optional\r\n\t\tRemove all lines from the figure after it is closed. Useful if you still want to do something with it, like saving it, or use more tools.\r\n\t\tIf you want to have the markers stay, set to False. The default is True.\r\n\r\n\tRaises\r\n\t------\r\n\tdraggable_markersError\r\n\t\t\r\n\r\n\tReturns\r\n\t-------\r\n\tnumpy array\r\n\t\tarrays of the positions of all lines.The first one is vertical lines and the second horizontal. Each row of the array is one marker group. Each group sub-list is sorted\r\n\r\n\t\"\"\"\r\n\tif markergroupsize>3 or markergroupsize<1:\r\n\t\traise draggable_markersError(\"Only supports marker groups sizes in the interval [1,3]\")\r\n\tif plt.get_backend()!='Qt5Agg':\r\n\t\traise draggable_markersError(\"Requires interactive backend. Switch to Qt5Agg by using plt.switch_backend('Qt5Agg'). This closes all current figures\")\r\n\r\n\tlines_tool_obj=_lines_tool_class(figure.canvas,markergroupsize,linestyle,axes,clear)\r\n\tfigure.canvas.mpl_connect('close_event', lines_tool_obj.handle_close)\r\n\tplt.get_current_fig_manager().window.showMaximized()\r\n\tplt.show()\r\n\t\r\n\tfigure.canvas.start_event_loop()\r\n\treturn lines_tool_obj.returnpositions()\r\n\r\ndef circles_tool(ax,markergroupsize:int=1,linestyle='solid',clear=True):\r\n\t\"\"\"\r\n\tAdds two buttons on the figure that allow you to add circles on the plot. Click on the green one to add a circle group.\r\n\tThe red one removes the last group.\r\n\tClick on the edge of a circle to select it and change its radius. Right click after having selected a circle to drag it. Left click again to lock selected circle\r\n\r\n\tParameters\r\n\t----------\r\n\tax : figure ax\r\n\t\tfigure.add_suplot() object\r\n\tmarkergroupsize : int\r\n\t\tHow many circles you want in a group. All the circles in said group will be the same color and their radius will be in the same\r\n\t\tsub-list in the returned list. The default is 1.\r\n\tlinestyle : TYPE, optional\r\n\t\tCircle linestyle. The default is 'solid'.\r\n\tclear : bool, optional\r\n\t\tRemove all circles from the figure after it is closed. Useful if you still want to do something with it, like saving it.\r\n\t\tIf you want to have the markers stay, set to False. The default is True.\r\n\r\n\tRaises\r\n\t------\r\n\tdraggable_markersError\r\n\t\t\r\n\r\n\tReturns\r\n\t-------\r\n\tlist\r\n\t\tlist of the radii of all circles. Has the form [[group1],[group2],[group3]]. Each group sub-list is sorted\r\n\r\n\t\"\"\"\r\n\r\n\tif markergroupsize>3 or markergroupsize<1:\r\n\t\traise draggable_markersError(\"Only supports marker groups sizes in the interval [1,3]\")\r\n\tif plt.get_backend()!='Qt5Agg':\r\n\t\traise draggable_markersError(\"Requires interactive backend. Switch to Qt5Agg by using plt.switch_backend('Qt5Agg'). 
This closes all current figures\")\r\n\r\n\tplt.get_current_fig_manager().window.showMaximized()\r\n\r\n\tcircles_tool_obj=_circles_tool_class(ax,markergroupsize,linestyle,clear)\r\n\tplt.show()\r\n\r\n\tax.figure.canvas.mpl_connect('close_event', circles_tool_obj.handle_close)\r\n\t\r\n\tax.figure.canvas.start_event_loop()\r\n\t\r\n\treturn circles_tool_obj.returnpositions()\r\n\r\nclass draggable_markersError(Exception):\r\n\tpass\r\n\r\n\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n\t#testing figure\r\n\tfig=plt.figure()\r\n\t\r\n\ta=np.arange(20)\r\n\tb=np.arange(20)\r\n\tax0 = fig.add_subplot(211)\r\n\tax0.plot(a,b)\r\n\tax0.set_ylabel('b')\r\n\tax0.set_title('ab')\r\n\tax0.get_xaxis().set_visible(False)\r\n\t\r\n\ta=np.arange(20)\r\n\tb=np.arange(20)\r\n\tax1 = fig.add_subplot(212)\r\n\tax1.plot(a,b)\r\n\tax1.set_xlabel('a')\r\n\tax1.set_ylabel('b')\r\n\t\r\n\tpos=lines_tool(fig,2,axes=[ax1])\r\n\r\n\tfig=plt.figure()\r\n\t\r\n\ta=np.arange(20)\r\n\tb=np.arange(20)\r\n\tax0 = fig.add_subplot(111)\r\n\tax0.plot(a,b)\r\n\tax0.set_ylabel('b')\r\n\tax0.set_title('ab')\r\n\tax0.set_xlabel('a')\r\n\t\r\n\trad=circles_tool(ax0,3,clear=True)\r\n","sub_path":"miat/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":18793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"12186641","text":"from summit.multiview_platform.monoview_classifiers.additions.SVCClassifier import \\\n SVCClassifier\nfrom ..monoview.monoview_utils import BaseMonoviewClassifier\nfrom summit.multiview_platform.utils.hyper_parameter_search import CustomUniform\n\n# Author-Info\n__author__ = \"Baptiste Bauvin\"\n__status__ = \"Prototype\" # Production, Development, Prototype\n\nclassifier_class_name = \"SVMLinear\"\n\n\nclass SVMLinear(SVCClassifier, BaseMonoviewClassifier):\n \"\"\"\n This class is an adaptation of scikit-learn's `SVC `_\n\n Here, it is the linear kernel version\n \"\"\"\n\n def __init__(self, random_state=None, C=1.0, **kwargs):\n SVCClassifier.__init__(self,\n C=C,\n kernel='linear',\n random_state=random_state\n )\n self.param_names = [\"C\", \"random_state\"]\n self.distribs = [CustomUniform(loc=0, state=1), [random_state]]\n","sub_path":"summit/multiview_platform/monoview_classifiers/svm_linear.py","file_name":"svm_linear.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"399286035","text":"def myLog(x, b):\r\n '''\r\n x: a positive integer\r\n b: a positive integer; b >= 2\r\n\r\n returns: log_b(x), or, the logarithm of x relative to a base b.\r\n '''\r\n # Your Code Here\r\n \r\n assert x > 0\r\n \r\n if ( x < b):\r\n return 0\r\n c = 0\r\n n = b\r\n while ( n <= x):\r\n n *= b\r\n c += 1\r\n \r\n return c\r\n\r\nx = 1\r\nb = 2\r\nprint (\"%d to the power %d is %d\" % ( b, myLog(x,b), x))\r\n ","sub_path":"Python/CS Course/mylog.py","file_name":"mylog.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"534531604","text":"# --------------------------------------------------------------------------\r\n# Name : Sandeep Tripathi\r\n# Branch: Mechanical Engineering\r\n# Cubic Interpolation Method\r\n# --------------------------------------------------------------------------\r\n\r\n# Define various modules required \r\nfrom __future__ import division\r\nimport math\r\nfrom sympy import *\r\nimport pylab\r\n\r\nprint(\"------------------Cubic 
Interpolation Method------------------\\n\")\r\n\r\n\"\"\"\r\nCalculate the value of function at given value of x\r\nx : Value at which function is to be computed\r\n\"\"\"\r\ndef evalfun(x):\r\n # Change the function as required\r\n return float(x**5 - (5 * x**3) - (20 * x) + 5)\r\n\r\n\"\"\"\r\nEvaluate the derivative of the given function at given value of x\r\nx : Value at which function derivative is to be computed\r\n\"\"\"\r\ndef evalder(y): \r\n x = Symbol(\"x\")\r\n # Change the function as required\r\n funct = x**5 - (5 * x**3) - (20 * x) + 5\r\n functderiv = funct.diff(x)\r\n return functderiv.evalf(subs={x:y})\r\n\r\n\"\"\"\r\nReturn the value of B\r\nA: Initial assumption\r\nt0: Initial step size\r\n\"\"\"\r\ndef returnB(A, t0):\r\n if evalder(A) < 0:\r\n B = t0\r\n while evalder(B) < 0:\r\n B = 2 * B\r\n else:\r\n B = t0\r\n while evalder(B) >= 0:\r\n B = 2 * B\r\n return B\r\n\r\n\"\"\"\r\nReturn the minimum of h(x) by substituting required values\r\nwhere h(x) = a + bx + cx2+ dx3\r\n\"\"\"\r\ndef returnlam(A, fA, dfA, B, fB, dfB): \r\n Z = ((3 * (fA - fB)) / (B - A)) + dfA + dfB\r\n Q = math.sqrt(Z**2 - (dfA * dfB))\r\n lam1 = A + (((dfA + Z + Q) / (dfA + dfB + (2 * Z))) * (B - A))\r\n lam2 = A + (((dfA + Z - Q) / (dfA + dfB + (2 * Z))) * (B - A))\r\n if lam1 >= 0:\r\n return round(lam1, 8)\r\n else:\r\n return round(lam2, 8)\r\n\r\n\"\"\"\r\nReturn the value of h(x)\r\n\"\"\"\r\ndef returnhlam(A, B, dfA, dfB, lam):\r\n Z = ((3 * (fA - fB)) / (B - A)) + dfA + dfB\r\n b = ((B**2 * dfA) + (A**2 * dfB) + (2 * A * B * Z)) / (A - B)**2\r\n c = - ((((A + B) * Z) + (B * dfA) + (A * dfB)) / (A - B)**2)\r\n d = ((2 * Z) + dfA + dfB) / (3 * (A - B)**2)\r\n a = (fA - (b * A) - (c * A**2) - (d * A**3))\r\n return (a + (b * lam) + (c * lam**2) + (d * lam**3))\r\n\r\n# Accept input from the user for initial step size and desired accuracy\r\nt0 = float(raw_input(\"Enter the initial step size: \"))\r\nacc = float(raw_input(\"Enter the desired accuracy (%): \"))\r\n\r\n# Evaluate all the required values\r\nA = 0; B = returnB(A, t0)\r\nfA = evalfun(A); dfA = evalder(A); fB = evalfun(B); dfB = evalder(B)\r\nlam = returnlam(A, fA, dfA, B, fB, dfB)\r\nh = returnhlam(A, B, dfA, dfB, lam); f = evalfun(lam)\r\n\r\n# Initialize number of refit\r\nn = 1\r\n# Compute untill desired accuracy is reached\r\nwhile abs((h - f) / f) > (acc / 100):\r\n if evalder(lam) < 0:\r\n A = lam\r\n else:\r\n B = lam\r\n\r\n # Evaluate all the required values\r\n fA = evalfun(A); dfA = evalder(A); fB = evalfun(B); dfB = evalder(B)\r\n lam = returnlam(A, fA, dfA, B, fB, dfB)\r\n h = returnhlam(A, B, dfA, dfB, lam); f = evalfun(lam)\r\n n += 1\r\n\r\n# Printing the results\r\nprint(\"The optimum point is \" + str(lam) + \".\")\r\nprint(\"The value of function at optimum point is \" + str(evalfun(lam)) + \".\")\r\nprint(\"The number of refit required to achieve desired accuracy is \" + str(n) + \".\")\r\n\r\n# Storing the value of x and f(x) at intermediate points\r\n# Storing the value of low1 and low2 such that optimized point lie within it\r\nlow1 = 0; high1 = 5; Xaxis = []; Yaxis = []\r\nwhile low1 <= high1:\r\n Xaxis.append(low1)\r\n Yaxis.append(evalfun(low1))\r\n low1 += 0.001\r\n \r\n# Plotting the function and its optimized value\r\npylab.plot(Xaxis, Yaxis,)\r\npylab.scatter(lam, evalfun(lam),s = 20, c = 'r')\r\npylab.title('Cubic Interpolation Method', fontsize = 15)\r\npylab.xlabel('x 
------>')\r\npylab.ylabel('f(x)------>')\r\npylab.grid(True)\r\npylab.show()\r\n","sub_path":"Cubic_Interpolation_Method.py","file_name":"Cubic_Interpolation_Method.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"544264598","text":"from review.models import *\nfrom accounts.models import UserProfile\n\nfrom django.forms import ModelForm, Form\nfrom django.forms import Textarea, HiddenInput, ChoiceField, CharField\n\nclass CommentForm(ModelForm):\n    class Meta:\n        model = Comment\n        fields = ('text', 'start', 'end', 'chunk', 'similar_comment')\n        widgets = {\n            'text': Textarea(attrs={'id': 'hidden-textarea'}), \n            'start': HiddenInput(),\n            'end': HiddenInput(),\n            'chunk': HiddenInput(),\n            'similar_comment': HiddenInput(attrs={'id': 'hidden-similar-comment'}),\n        }\n\nclass ReplyForm(ModelForm):\n    class Meta:\n        model = Comment\n        fields = ('text', 'parent', 'similar_comment')\n        widgets = {\n            'text': Textarea(attrs={'id': 'hidden-textarea'}), \n            'parent': HiddenInput(),\n            'similar_comment': HiddenInput(attrs={'id': 'hidden-similar-comment'}),\n        }\n\nclass EditCommentForm(Form):\n    text = CharField(widget=Textarea(attrs={'id': 'hidden-textarea'}))\n    comment_id = CharField(widget=HiddenInput())\n    similar_comment = CharField(widget=HiddenInput(attrs={'id': 'hidden-similar-comment'}))","sub_path":"review/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"563090530","text":"#!/usr/bin/python3\n# https://stackoverflow.com/questions/31844713/python-convert-xml-to-csv-file\n\nimport sys\nfrom xml.etree import ElementTree\ntree = ElementTree.parse('curriculo.xml')\ndef recode(var):\n    # Py2: return var.decode('iso-8859-1').encode('utf8')\n    return bytes(var, 'iso-8859-1').decode('utf8')\nroot = tree.getroot()\nid_lattes = root.get('NUMERO-IDENTIFICADOR')\ndt_upd = root.get('DATA-ATUALIZACAO')\nhr_upd = root.get('HORA-ATUALIZACAO')\n\noutput = ''\nline = [id_lattes]\ngr_curso = ''\nme_ = 0 # number of master's degrees\ndr_ = 0 # number of doctorates\npd_ = 0 # number of post-docs\n\nfor attrib in root:\n    #print(attrib)\n    if attrib.tag == 'DADOS-GERAIS':\n        nome = attrib.get('NOME-COMPLETO')\n        line += [nome, dt_upd, hr_upd]\n        for part in attrib:\n            '''\n            print(part.tag)\n            continue\n\n            RESUMO-CV\n            OUTRAS-INFORMACOES-RELEVANTES\n            ENDERECO\n            FORMACAO-ACADEMICA-TITULACAO\n            ATUACOES-PROFISSIONAIS\n            AREAS-DE-ATUACAO\n            IDIOMAS\n            PREMIOS-TITULOS\n            '''\n            for detail in part:\n                '''\n                print('detail.tag=='+detail.tag)\n                continue\n                '''\n                tag = detail.tag\n                if tag == 'ENDERECO-PROFISSIONAL':\n                    empresa = detail.get('NOME-INSTITUICAO-EMPRESA')\n                    orgao = detail.get('NOME-ORGAO')\n                    unidade = detail.get('NOME-UNIDADE')\n                    cidade = detail.get('CIDADE')\n                    uf = detail.get('UF')\n                    pais = detail.get('PAIS')\n                    line += [empresa, orgao, unidade, cidade, uf, pais]\n                if tag == 'GRADUACAO' and gr_curso == '': # consider only the first undergraduate degree\n                    gr_curso = detail.get('NOME-CURSO')\n                    gr_ies = detail.get('NOME-INSTITUICAO')\n                    gr_concl = detail.get('ANO-DE-CONCLUSAO')\n                    line += [gr_curso, gr_ies,gr_concl]\n                if tag == 'MESTRADO':\n                    me_ += 1\n                    me_curso = detail.get('NOME-CURSO')\n                    me_ies = detail.get('NOME-INSTITUICAO')\n                    me_ini = detail.get('ANO-DE-INICIO')\n                    me_concl = detail.get('ANO-DE-CONCLUSAO')\n                    me_per = me_ini+'-'+me_concl\n                    me_titulo = detail.get('TITULO-DA-DISSERTACAO-TESE')\n                    me_orient = detail.get('NOME-COMPLETO-DO-ORIENTADOR')\n            
me_id_or = detail.get('NUMERO-ID-ORIENTADOR')\n                    if me_ > 1 and me_concl == '': # skip a new master's still in progress\n                        continue\n                    else:\n                        line += [str(me_), me_curso, me_ies, me_per, \n                                 me_titulo, me_orient, me_id_or]\n                if tag == 'DOUTORADO':\n                    dr_ += 1\n                    dr_curso = detail.get('NOME-CURSO')\n                    dr_ies = detail.get('NOME-INSTITUICAO')\n                    dr_ini = detail.get('ANO-DE-INICIO')\n                    dr_concl = detail.get('ANO-DE-CONCLUSAO')\n                    dr_per = dr_ini+'-'+dr_concl\n                    dr_titulo = detail.get('TITULO-DA-DISSERTACAO-TESE')\n                    dr_orient = detail.get('NOME-COMPLETO-DO-ORIENTADOR')\n                    dr_id_or = detail.get('NUMERO-ID-ORIENTADOR')\n                    if dr_ > 1 and dr_concl == '': # skip a new doctorate still in progress\n                        continue\n                    else:\n                        line += [str(dr_), dr_curso, dr_ies, dr_per, \n                                 dr_titulo, dr_orient, dr_id_or]\n                if tag == 'POS-DOUTORADO':\n                    pd_ += 1\n                    pd_ies = detail.get('NOME-INSTITUICAO')\n                    pd_ini = detail.get('ANO-DE-INICIO')\n                    pd_concl = detail.get('ANO-DE-CONCLUSAO')\n                    pd_per = pd_ini+'-'+pd_concl\n                    pd_titulo = detail.get('TITULO-DO-TRABALHO')\n                    #pd_orient = detail.get('NOME-COMPLETO-DO-ORIENTADOR')\n                    pd_id_or = detail.get('NUMERO-ID-ORIENTADOR')\n                    line += [str(pd_), pd_ies, pd_per, \n                             pd_titulo, pd_id_or]\n\ni = 0\nfor field in line:\n    #print(repr(field))\n    line[i] = field.replace('\\n',' ').replace('\\r','')\n    i += 1\n#line = '\", \"'.join(line) never use spaces after delimiting commas!\nline = '\",\"'.join(line)\nline = '\"' + line + '\"\\n'\noutput += line\nfn = id_lattes+'_alu.csv'\noutfile = open(fn, 'w')\noutfile.write(output)\noutfile.close()\nprint('File {} ({}) written'.format(fn,nome))\n\n","sub_path":"lattesalumn.py","file_name":"lattesalumn.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"212743065","text":"from invoke import Collection\n\nfrom .loader import get_apps\n\n\ndef invoker(apps=None, envs=None, context_processors=None):\n    root_collection = Collection()\n\n    # setup the collection for each environment\n    env_collections = {}\n    for env in envs or []:\n        env_collections[env] = Collection(env)\n        env_collections[env].configure({'env': env})\n\n    for col in env_collections.values():\n        root_collection.add_collection(col)\n\n    # add each app to the root collection or each relevant env collection\n    for app in get_apps(apps, context_processors):\n        if (app.envs is None and envs) or app.envs:\n            for env in app.envs or envs or []:\n                env_collections[env].add_collection(app.get_collection(env=env))\n        else:\n            root_collection.add_collection(app.get_collection())\n\n    return root_collection\n","sub_path":"invoker/invoker.py","file_name":"invoker.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"554223705","text":"from flask import Flask, request, render_template, jsonify, session\nfrom boggle import Boggle\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"abcdefg\"\n\nboggle_game = Boggle()\n\n@app.route(\"/\")\ndef homepage():\n    \"\"\"Show board\"\"\"\n\n    board = boggle_game.make_board()\n    session['board'] = board\n    highscore = session.get(\"highscore\", 0)\n    numplays = session.get(\"numplays\", 0)\n\n    return render_template(\"index.html\", board=board, highscore=highscore,\n                           numplays=numplays)\n\n\n@app.route(\"/check-word\")\ndef check_word():\n\n    word = request.args[\"word\"]\n    board = session[\"board\"]\n    response = boggle_game.check_valid_word(board, word)\n\n    return jsonify({'result': 
response})\n\n@app.route(\"/post-score\", methods=[\"POST\"])\ndef post_score():\n    \"\"\"Receive score, update numplays, highscore\"\"\"\n\n    score = request.json[\"score\"]\n    highscore = session.get(\"highscore\", 0)\n    numplays = session.get(\"numplays\", 0)\n\n    session['numplays'] = numplays + 1\n    session['highscore'] = max(score, highscore)\n\n    return jsonify(brokeRecord=score > highscore)\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424476595","text":"import lib.bulkloaddata as bulkloaddata\nimport sys\n\n# add data to an entity from start date to today\n# example: addentity.py M15 2017-08-15T12:51:22Z\n\n#name of the script = sys.argv[0]\ngranularity = sys.argv[1]\nstart = sys.argv[2]\n#granularity = 'M15'\n#start = '2017-08-15T12:51:22Z'\nbulkloaddata.bulkloadlivedatabytime('DE30_EUR',granularity,start)\nbulkloaddata.bulkloadlivedatabytime('EUR_JPY',granularity,start)\nbulkloaddata.bulkloadlivedatabytime('EUR_USD',granularity,start)\n","sub_path":"pong-backup/web/flask/src/addentity.py","file_name":"addentity.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66684737","text":"from flask import Flask, request, jsonify\nimport requests\nimport base64\nimport pytesseract\nfrom PIL import Image\nimport re\n\napp = Flask(__name__)\n\n@app.route('/process-request', methods=['POST', 'GET'])\ndef process_request():\n    img_str = \"\"\n    if request.method == 'POST':\n        print(\"hello\")\n        img_str = request.json[\"image\"]\n        img_data = base64.b64decode(img_str)\n        img_jpg = 'img.jpg'\n        with open(img_jpg, 'wb') as f:\n            f.write(img_data)\n        text = pytesseract.image_to_string(Image.open('img.jpg'))\n        text = re.sub(\"\\n\",r\" \",text)\n        text = text.split(',')\n        samplefile = open ('sampleFile.txt', 'r')\n        temp = samplefile.read().splitlines()\n        mylist = []\n        for line in temp:\n            reg = re.compile(line)\n            for item in text:\n                m = re.search(reg, item.lower())\n                if m:\n                    mylist.append(m.group().capitalize())\n        \n        for m in mylist:\n            if m == \"Almonds\":\n                print(\"almonds\")\n            response = requests.get(\"http://128.189.211.78:8080/emissions?category=Nuts/Seeds&product=\"+ m)\n            print(response.text)\n            return jsonify(response.text)\n\n    return \"Error!\" \n\nif __name__ == \"__main__\":\n    app.run(debug=True, port=5000)\n\n    #ngrok http 5000","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"188613948","text":"import os\n\nfrom behave import *\nfrom mutagen.easyid3 import EasyID3\n\n\nuse_step_matcher(\"re\")\n\n\ndef find_mp3_file_in_source(context):\n    for root, dirs, file_names in os.walk(context.source_path):\n        for f in file_names:\n            if os.path.splitext(f)[1] == \".mp3\":\n                return os.path.join(root, f)\n    return \"\"\n\n\n@given('There is a file with a \\\"(.*)\\\" genre exists in source')\ndef step_impl(context, genre):\n    context.file_path = find_mp3_file_in_source(context)\n    audio = EasyID3(context.file_path)\n    audio[\"genre\"] = genre\n    audio.save()\n\n\n@then(\"file should not exists in target\")\ndef step_impl(context):\n    assert not 
os.path.exists(context.file_path)\n","sub_path":"features/steps/metadata_filters_steps.py","file_name":"metadata_filters_steps.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"387362524","text":"import datetime\nimport serial\n\nser = serial.Serial('/dev/ttyACM0', 9600)\ndata = []\nwhile True:\n    try:\n        line = ser.readline().decode()\n        print(line)\n        row = [split.split(\":\") for split in line.split(\" \")]\n        data.append(row)\n    except ValueError as e:\n        print(e)\n    \n","sub_path":"DHT/readsensor.py","file_name":"readsensor.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"283517150","text":"import math\nimport os\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport cv2\n\ndef resizeImages(files, indir, outdir):\n    for file in files:\n        img = cv2.imread(indir + file)\n        h, w = img.shape[:2]\n        if (w > h):\n            dim = (int(w / (h / 40)), 40)\n        else:\n            dim = (int(h / (w / 40)), 40)\n\n        img = cv2.resize(img, dim)\n        h, w = img.shape[:2]\n\n        cv2.imwrite(outdir + file, img)\n\ndef createFiles(files, indir, outdir):\n\n    rotationDegrees = [-2, -3, -4, -5, -6, -7, -8, -9, -10, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n    for file in files:\n        img = cv2.imread(indir+file)\n        horizontal_img = img.copy()\n        vertical_img = img.copy()\n        horizontal_img = cv2.flip(img, 0)\n        vertical_img = cv2.flip(img, 1)\n\n        fileCount = (len([name for name in os.listdir(outdir) if os.path.isfile(os.path.join(outdir, name))]))\n\n        cv2.imwrite(outdir + file + str(fileCount) + '_h.jpg', horizontal_img)\n        cv2.imwrite(outdir + file + str(fileCount) + '_v.jpg', vertical_img)\n\n        rows, cols = img.shape[:2]\n\n        for rotation in rotationDegrees:\n            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotation, 1)\n            dst = cv2.warpAffine(img, M, (cols, rows))\n            cv2.imwrite(outdir + file + '_'+str(rotation) + '_' + str(fileCount) + '.jpg', dst)\n\n#xDir = 'data/X/'\n#oDir = 'data/O/'\nemptyDir = 'data/train/X/'\ncellsDir = 'data/cells/'\n#xFiles = [name for name in os.listdir(xDir)]\n#xFiles = [name for name in os.listdir(xDir)]\nemptyFiles = [name for name in os.listdir(cellsDir)]\n\n#createFiles(xFiles, xDir, 'data/X/')\n#createFiles(oFiles, oDir, 'data/O/')\ncreateFiles(emptyFiles, cellsDir, emptyDir)\n\n\n","sub_path":"chatbot/prepImages.py","file_name":"prepImages.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232192926","text":"import os\nimport shutil\n\n\ndef main(path, out):\n    for files in os.listdir(path):\n        name = os.path.join(path, files)\n        back_name = os.path.join(out, files)\n        if os.path.isfile(name):\n            if os.path.isfile(back_name):\n                shutil.copy(name, back_name)\n            else:\n                shutil.copy(name, back_name)\n        else:\n            if not os.path.isdir(back_name):\n                os.makedirs(back_name)\n            main(name, back_name)\n\n\nif __name__ == '__main__':\n    path_a = \"script\"\n    path_b = \"script1\"\n    main(path_a, path_b)\n","sub_path":"3.python_copytree/copyf.py","file_name":"copyf.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"240746474","text":"# Importing Packages\nfrom __future__ import print_function\nimport sys\nimport numpy as np\nimport pandas as pd\nimport string, os \nimport random\nimport 
warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Importing keras\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\n\n# ***********************************************************************\n# Importing the dataset\nfile=pd.read_csv(\"C:/Users/RISHABH/Documents/GitHub/Mini/Copy CatBot/mann_ki_baat.csv\",encoding=\"unicode_escape\")\n\n# Dropping of columns like month and year\nfile.drop('month', axis=1, inplace=True)\nfile.drop('year', axis=1, inplace=True)\n\n\n# ***********************************************************************\n# Merging of all the rows to generate raw text\n#raw_text=list()\nraw_text=''\nfor i in range(47):\n    raw_text=raw_text+file.iloc[i,0]\n    #raw_text.append(file.iloc[i,0])\n\n# Lower casing all the words.\nraw_text = raw_text.lower()\nprint('text length', len(raw_text))\nprint(raw_text[:300])\n\n\n# ***********************************************************************\n\n# create mapping of unique chars to integers, and a reverse mapping\n# Since we are training on character level, therefore we have to relate \n# each unique character to a number\nchars = sorted(list(set(raw_text)))\nprint('total chars: ', len(chars))\n\ncharacter_to_integer = dict((c, i) for i, c in enumerate(chars))\ninteger_to_character = dict((i, c) for i, c in enumerate(chars))\n\n# ***********************************************************************\n\n# Split up into subsequences\n# Creates an array, \"sentences\" made up of characters up to \"maxlen\" (40) \n## characters chunked in steps of 3 characters from our corpus \"raw_text\".\n# Create an array, \"next_character\" of a single character.\n\nmaxlen = 40\nstep = 3\nsentences = []\nnext_character = []\nfor i in range(0, len(raw_text) - maxlen, step):\n    sentences.append(raw_text[i: i + maxlen])\n    next_character.append(raw_text[i + maxlen])\nprint('nb sequences:', len(sentences))\n\nprint(sentences[:10], \"\\n\")\nprint(next_character[:10])\n\n# ***********************************************************************\n\n# reshape our data in a format we can pass to the Keras LSTM \n# The shape looks like [samples, time steps, features]\n\n# Create sparse boolean tensors x & y.\nx = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\n\n# Encoding character level features from \"sentences\" and \"next_character\".\nfor i, sentence in enumerate(sentences):\n    for t, char in enumerate(sentence):\n        x[i, t, character_to_integer[char]] = 1\n    y[i, character_to_integer[next_character[i]]] = 1\n    \n# ***********************************************************************\n\n# Define the LSTM model\n \n# Sequential modeling( used to define a linear stack of network layer )\nmodel = Sequential()\n\n# Defining Single Hidden LSTM layer with,\n# units -> Dimensionality O/P space\n# Input_shape -> shape of the I/P\nmodel.add(LSTM(units = 128, input_shape=(maxlen, len(chars))))\n\n# Adding Dropout Layer\nmodel.add(Dropout(0.2))\n\n# Adding O/P layer which is Dense & activation function is softmax\nmodel.add(Dense(len(chars), activation = 'softmax'))\n\n\n# Compile the network with the loss function and optimizer function.\n# This will allow our network to change weights and minimize the loss. 
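Categorical cross-entropy is the standard loss for one-hot character targets.\n# A hypothetical variation (not in the original script): the RMSprop optimizer\n# imported above could be used in place of 'adam', e.g.\n# model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=0.01))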
\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\nprint(model.summary())\n# ***********************************************************************\n\n# Samples an index from a probability array with some temperature.\n\n# Temperature is the scaling factor applied to our O/P of our dense layer before\n## softmax function is applied.\n\n# In short, it defines how \"conservative/creative\" our model's guesses can be\n## for our next character in the sequence.\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n# ***********************************************************************\n\n# Callback function to print predicted text generated by our LSTM\n# Diversity is for values of Temperature.\n \ndef on_epoch_end(epoch, logs):\n # Function invoked at end of each epoch. Prints generated text.\n print()\n print('----- Generating text after Epoch: %d' % epoch)\n\n start_index = random.randint(0, len(raw_text) - maxlen - 1)\n for diversity in [0.2, 0.5, 1.0, 1.2]:\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = raw_text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(400):\n x_pred = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, character_to_integer[char]] = 1.\n\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = integer_to_character[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\nprint_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\n# ***********************************************************************\n\n# Here, We will use model checkpointing to record all of the network weights to \n## a file each time an improvement in loss is observed at the end of the epoch.\n\n\nfrom keras.callbacks import ModelCheckpoint\n\nfilepath = \"weights.hdf5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='loss',\n verbose=1, save_best_only=True,\n mode='min')\n\n\n# ***********************************************************************\n\n# Defining Callbacks\nfrom keras.callbacks import ReduceLROnPlateau\nreduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2,\n patience=1, min_lr=0.001)\n\n\ncallbacks = [print_callback, checkpoint, reduce_lr]\n\n# ***********************************************************************\n\n# \nmodel.fit(x, y, batch_size=128, epochs=15, callbacks=callbacks)\n\n# ***********************************************************************\n\n# Generate new text\n\ndef generate_text(length, diversity):\n # Get random starting text\n start_index = random.randint(0, len(raw_text) - maxlen - 1)\n generated = ''\n sentence = raw_text[start_index: start_index + maxlen]\n generated += sentence\n for i in range(length):\n x_pred = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, character_to_integer[char]] = 1.\n\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = integer_to_character[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n return 
generated\n\n\n\nprint(generate_text(500, 0.2))\n\n# ***********************************************************************\n# ***********************************************************************\n# ***********************************************************************\n\n# IDEAS for future steps :-\n\n# 1) Add more LSTM Layers, Use more LSTM cells.\n# 2) Experiment with Temperature, batch size, & no. of epochs.\n# 3) Change the maxlen size.\n\n    \n    \n    \n    \n    \n    \n    \n    \n    ","sub_path":"Copy CatBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106186777","text":"# Author : Milan Zanussi\n# Date : January 2017\n# Project : Sierpinski Shape generator\n# Description : Some toy code I wrote during HackMT 2017. Planned on trying to implement a command in our\n# project which would allow players in a game to arrange their characters they control into a Sierpinski-like shape.\n\n\n# REQUIREMENTS TO RUN : This test code was originally written in Python 2.7 and uses matplotlib\n\nimport matplotlib.pyplot as plt\nimport random\nimport math\n\ndef SierpinskiShape(verts, n):\n\n    # Generate vertices of polygon for shape generation\n    vertices = []\n    for i in range(1, verts + 1):\n        vertices.append((math.cos(i*((2*math.pi)/verts) + math.pi/2), math.sin(i*((2*math.pi)/verts) + math.pi/2)))\n\n    x_vals = [0]\n    y_vals = [0]\n    for i in range(1, n):\n\n        # Get previous point (current position, so to speak)\n        last_x = x_vals[i-1]\n        last_y = y_vals[i-1]\n\n        # Randomly select pivot point\n        select = random.randint(1,verts)\n        \n        pivot = vertices[select-1]\n\n        # Append new point to data lists\n        x_vals.append((last_x + pivot[0])/(verts-1))\n        y_vals.append((last_y + pivot[1])/(verts-1)) \n\n    plt.scatter(x_vals, y_vals)\n    plt.show()\n\n\n\n","sub_path":"Python/Toy Code/sierpinski.py","file_name":"sierpinski.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439884309","text":"# -*- coding: utf-8 -*-\nfrom .discretization import Discretization\nimport numpy as np\n\nclass EFD(Discretization):\n    \"\"\"docstring for EFD.\"\"\"\n    def __init__(self, data, n_tracks):\n        super(EFD, self).__init__()\n        self.data = data\n        self.n_tracks = n_tracks\n        self.n_elements = len(self.data)\n        self.n_attrs = len(self.data[0])\n        self.edges = []\n\n\n    def discretize(self, labels):\n        \"\"\"Starts the discretization process.\"\"\"\n        discrete_data = []\n\n        for attr_index in range(self.n_attrs):\n            edges = self.calc_edges(attr_index)\n            self.edges.append(edges)\n            discrete_data.append(super().fit(self.data, self.edges, attr_index, edges))\n\n        # Store the discretized data\n        self.discrete_data = np.transpose(np.array(discrete_data))\n\n        self.discrete_clusters = super().make_discrete_clusters(self.discrete_data, labels)\n\n\n    def calc_edges(self, attr_index):\n        \"\"\"Compute the cut points. 
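Cut points are taken from the sorted unique values of the column, so each interval covers roughly the same number of distinct values. 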
Receives the index of the column to be\n        discretized.\n        attr_index is the index of the column (attribute)\n        \"\"\"\n        col = self.data[:,attr_index]\n        unique_values = np.array(list(set(col)))\n        unique_values.sort()\n\n        maximo = max(col)\n        minimo = min(col)\n        largura = int(len(unique_values) / self.n_tracks)\n\n        edges = [minimo]\n        for cut_point in range(1, self.n_tracks):\n            edges.append(unique_values[cut_point * largura])\n\n        edges.append(maximo)\n\n        # Return a list containing the cut points of the attribute's\n        # value range\n        return edges\n\n\n    @property\n    def discrete_data_(self):\n        \"\"\"Return the discretized data.\"\"\"\n        return self.discrete_data\n\n    @property\n    def edges_(self):\n        \"\"\"Return the attributes' cut points.\"\"\"\n        return self.edges\n\n    @property\n    def discrete_clusters_(self):\n        \"\"\"Return the clusters with discrete values.\"\"\"\n        return self.discrete_clusters\n","sub_path":"discretizers/efd.py","file_name":"efd.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"607159090","text":"\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras import regularizers\n\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\n\nfrom sklearn import preprocessing, metrics\nfrom sklearn.cross_validation import train_test_split\n\n\ndata = read_csv('drivers/edges_5000.csv')\nXX = data[['Edge', 'TimeStartEdge', 'Distance']]\ny = data['Accidents']\nX = preprocessing.scale(XX)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=11)\n\n\ndef create_model():\n    model = Sequential()\n    model.add(Dense(12, input_dim=X.shape[1], activation='relu'))\n    model.add(Dense(6, activation='relu'))\n    model.add(Dense(1, activation='sigmoid', kernel_regularizer=regularizers.l2(0.001), activity_regularizer=regularizers.l1(0.001)))\n    model.compile(loss='binary_crossentropy', optimizer='adam',\n                  metrics=['mae'])\n    return model\n\ndef fit_model():\n    model = KerasClassifier(build_fn=create_model, epochs=50, batch_size=10, verbose=0)\n    # isotonic = CalibratedClassifierCV\n    model.fit(X_train, y_train)\n    prob_pos = model.predict_proba(X_test)[:,-1]\n    model_score = metrics.brier_score_loss(y_test, prob_pos, pos_label=y.max())\n\n    return model_score\n\nif __name__=='__main__':\n    print('Brier Score: ' + str(fit_model()))\n","sub_path":"fit_to_koef.py","file_name":"fit_to_koef.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"125049858","text":"from django.db import models\nimport lxml\nfrom lxml.html import fromstring\nimport requests\n\n\n# Create your models here.\nclass expandedurl(models.Model):\n    destination_url = models.URLField(default='')\n    short_url = models.URLField(default='http://')\n    http_status_code = models.IntegerField(default=0)\n    page_title = models.CharField(max_length=100)\n    screen_capture = models.URLField(default='http://')\n\n    def publish(self):\n        if str(self.short_url).startswith('http://'):\n            response = requests.get(self.short_url)\n        else:\n            response = requests.get('http://' + self.short_url)\n        self.destination_url = response.url\n        self.http_status_code = response.status_code\n        siteTree = fromstring(response.content)\n        self.page_title = siteTree.findtext('.//title')\n        self.save()\n\n    def __str__(self):\n        return 
self.page_title","sub_path":"urlexpander/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"68388097","text":"import os\nimport sys\nfrom collections import defaultdict\nimport multiprocessing\nfrom os import listdir\nfrom os.path import isfile, join\n\n\nmymap = open('mapfile', 'r')\n\n\n\ncounter = 0\nmapfilenum =0 \nfor line in mymap:\n\tthing = line.strip()\n\n\tif counter == 0:\n\t\tout = open('mapfile' + str(mapfilenum), 'w')\n\t\tout.write(thing + '\\n')\n\t\tcounter += 1\n\t\tmapfilenum += 1\n\telif counter > 0 and counter < 100000:\n\t\tout.write(thing + '\\n')\n\t\tcounter += 1\n\telse:\n\t\tout.write(thing + '\\n')\n\t\tout.close()\n\t\tcounter = 0\n\n\t\n#\tout = open('mapfile' + str(counter), 'w')\n#\tthedir = [f for f in os.listdir('.') if os.path.isfile(f)]\n\n#\tfor thing in thedir:\n#\t\tif line.strip() in thing and 'genefam' in thing:\n\n\n\n#\t\t\tout.write('_'.join(thing.split('_')[0:2])+'\\n')\n\n\n#\tout.write(line)\n#\tout.close()\n\n#\tcounter += 1\n","sub_path":"stats/6clustering/4distance_stats_phylo.old/makemapfile.py","file_name":"makemapfile.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"367052649","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import optimize\r\nmonth = np.arange(12)\r\nmax = np.array([17,19,21,28,33,38,37,37,31,23,19,18])\r\nmin = np.array([-62,-59,-56,-46,-32,-18,-9,-13,-25,-46,-52,-58])\r\n\r\ndef test_func(t, a, b, c):\r\n    return a * np.cos((t + b) * 2 * np.pi / 12) + c  # judged to be a cosine from the shape of the data; the unit is one year, so the period is 12 months\r\n\r\nparams_max, params_max_covariance = optimize.curve_fit(test_func, month, max) #optimize.curve_fit(function, x values, y values [i.e. the data values])\r\nparams_min, params_min_covariance = optimize.curve_fit(test_func, month, min)\r\ndays = np.linspace(0, 12, num=365)\r\n\r\nyHat_max = test_func(days, params_max[0], params_max[1], params_max[2])  #params_max = [a,b,c]\r\nyHat_min = test_func(days, params_min[0], params_min[1], params_min[2])\r\n\r\nplt.plot(days, yHat_max, \"b\", label = \"max\")\r\nplt.plot(days, yHat_min, \"r\", label = \"min\")\r\nplt.scatter(month, max, color = \"blue\")\r\nplt.scatter(month, min, color = \"red\")\r\nplt.xlabel('month')\r\nplt.ylabel('min and max temperature')\r\nplt.legend()\r\nplt.show()","sub_path":"Statistical_Computing/12140244_HW2(Fitting)/12140244_HW2(Fitting).py","file_name":"12140244_HW2(Fitting).py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"383422377","text":"import oauth2 as oauth\nimport json\nimport codecs\n\n# Import the necessary methods from \"twitter\" library\nfrom twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream\n\nCONSUMER_KEY = \"QykfpJa9UUvp9tP44BK3UaF5f\"\nCONSUMER_SECRET = \"Yl9sqzRKs0fLG1q8sgczjbt4r44Xu2UFGu9Otg6oTSGM6fHrH6\"\n\nACCESS_KEY = \t\"17414102-MXCD5KJaxJwchcGbqxN1ADHEvgL8Piou007GXoM4Z\"\nACCESS_SECRET = \t\"Bn4gzk0YTWMkX5wdW4sga16dWHpmihJ5nkWGRoJEgA6iA\"\n\nconsumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\naccess_token = oauth.Token(key=ACCESS_KEY, secret=ACCESS_SECRET)\n\noauth = OAuth(ACCESS_KEY, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET) \n\n# Initiate the connection to Twitter Streaming API\ntwitter_stream = TwitterStream(auth=oauth)\n\n# Get a sample of the public data following through 
Twitter\niterator = twitter_stream.statuses.sample()\n\n# Print each tweet in the stream to the screen \n# Here we set it to stop after getting 1000 tweets. \n# You don't have to set it to stop, but can continue running \n# the Twitter API to collect data for days or even longer. \ntweet_count = 1000\nfor tweet in iterator:\n tweet_count -= 1\n # Twitter Python Tool wraps the data returned by Twitter \n # as a TwitterDictResponse object.\n # We convert it back to the JSON format to print/score\n #print (json.dumps(tweet) )\n \n # The command below will do pretty printing for JSON data, try it out\n # print json.dumps(tweet, indent=4)\n \n if tweet_count <= 0:\n break","sub_path":"TwitterOauthAuthentication.py","file_name":"TwitterOauthAuthentication.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452207211","text":"@staticmethod\ndef pacific_now():\n now = datetime.now()\n today = now.date()\n keyy = \"utc_pacific_offset_\" + str(today.month) + \"_\" + str(today.day) + \"_\" + str(today.year)\n\n val = memcache.get(keyy)\n\n hours_behind = 0\n\n if val is None:\n\n time_zone_api_success = False\n #ask for DST status from timezonedb.com\n urll = \"https://api.timezonedb.com/?zone=America/Los_Angeles&key=ZJK8HOHCPJV1&format=json\"\n\n try:\n result = urlfetch.fetch(url=urll,\n deadline=30)\n\n if result.status_code == 200:\n #check that it's actually JSON\n header_val = str(result.header_msg.getheaders(\"Content-Type\")[0])\n\n if not header_val == \"\":\n\n if header_val.lower() == \"application/json\":\n\n json_data = json.loads(result.content)\n\n if str(json_data[\"status\"]) == \"OK\":\n\n if str(json_data[\"dst\"]) == \"1\" or str(json_data[\"dst\"]).lower() == \"true\":\n hours_behind = 7\n else:\n hours_behind = 8\n\n time_zone_api_success = True\n\n else:\n time_zone_api_success = False\n\n except:\n time_zone_api_success = False\n\n google_maps_api_success = False\n if not time_zone_api_success:\n logging.info(\"heretimezone\")\n #try for the google maps timezone api\n result2 = urlfetch.fetch(\"https://maps.googleapis.com/maps/api/timezone/json?location=34.0801026,-117.75010780000001×tamp=\" + str(time.time()))\n\n if result2.status_code == 200:\n logging.info(\"here3googlemaps\")\n #check that it's actually JSON\n\n try:\n json_data2 = json.loads(result2.content)\n\n if int(json_data2[\"dstOffset\"]) > 0:\n hours_behind = 8 - int(int(json_data2[\"dstOffset\"]) / 60 / 60)\n else:\n hours_behind = 7\n\n google_maps_api_success = True\n\n except:\n logging.info(\"here7\")\n google_maps_api_success = False\n else:\n logging.info(\"here8\")\n google_maps_api_success = False\n\n if (not time_zone_api_success) and (not google_maps_api_success):\n\n #assume march and november are the daylight savings hours\n hours_behind = 8\n\n if today.month > 3 and today.month < 11:\n hours_behind = 7\n\n memcache.set(key=keyy, value=str(hours_behind), time=60 * 60 * 24 * 2)\n\n else:\n hours_behind = int(val)\n\n return now + timedelta(hours=hours_behind * -1)\n\n","sub_path":"classes/Helpers_/pacific_now.py","file_name":"pacific_now.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233360812","text":"import os\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nimport matplotlib as mlp \nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nmlp.use('TkAgg')\nimport 
matplotlib.pyplot as plt\nfrom functools import partial\nfrom tensorflow.layers import \\\n    Conv2D, \\\n    BatchNormalization, \\\n    Dense, \\\n    Conv2DTranspose\n\n\ndef load_image(file_name, size):\n    img = Image.open(file_name).convert('RGB').resize(size)\n    return np.array(img) / 255\n\n\n\n\nclass Dataset(object):\n    def __init__(self, data_dir, size=(64, 64)):\n        self.data_dir = data_dir\n        self.data_files = [f for f in os.listdir(self.data_dir) \\\n                if os.path.isfile(os.path.join(self.data_dir, f))]\n        self.size = size\n\n    def load_img(self, file_name):\n        img = Image.open(file_name).convert('RGB').resize(self.size)\n        return np.array(img) / 255\n\n    def batch(self, batch_size):\n        if batch_size > len(self.data_files):\n            batch_size = len(self.data_files)\n        perm = np.random.permutation(self.data_files)\n        f = partial(load_image, size=self.size)\n        with Pool() as p:\n            res = p.map(f, [os.path.join(self.data_dir, perm[i]) \\\n                for i in range(batch_size)])\n        # res = [self.load_img(os.path.join(self.data_dir, perm[i]))\\\n        #     for i in range(batch_size)]\n        return np.array(res)\n\n\ndef normalize_img(img):\n    return (img - img.min())/(img.max() - img.min())\n\n\nclass Generator(object):\n    def __init__(self):\n        kwargs = {\n            \"kernel_size\":5,\n            \"strides\":2,\n            \"padding\":'same',\n            \"activation\":tf.nn.relu\n        }\n        self.fc = Dense(units=4*4*1024, activation=tf.nn.relu)\n        self.bn = tf.layers.batch_normalization\n        self.conv1 = Conv2DTranspose(512, **kwargs)\n        self.conv2 = Conv2DTranspose(256, **kwargs)\n        self.conv3 = Conv2DTranspose(128, **kwargs)\n        kwargs['activation'] = tf.tanh\n        self.conv4 = Conv2DTranspose(3, **kwargs)\n\n    def __call__(self, x, train):\n        with tf.variable_scope('Gen', reuse=tf.AUTO_REUSE):\n            x = self.bn(self.fc(x), training=train)\n            x = tf.reshape(x, [-1, 4, 4, 1024])\n            x = self.bn(self.conv1(x), training=train)\n            x = self.bn(self.conv2(x), training=train)\n            x = self.bn(self.conv3(x), training=train)\n            x = self.conv4(x)\n            return x\n\nclass Discriminator(object):\n    def __init__(self):\n        kwargs = {\n            \"kernel_size\":5,\n            \"strides\":2,\n            \"padding\":'same',\n            \"activation\":tf.nn.leaky_relu\n        }\n        self.conv1 = Conv2D(128, **kwargs)\n        self.conv2 = Conv2D(256, **kwargs)\n        self.conv3 = Conv2D(512, **kwargs)\n        self.conv4 = Conv2D(1024, **kwargs)\n        self.fc = Dense(128)\n        self.bn = tf.layers.batch_normalization\n\n    def __call__(self, x, train):\n        with tf.variable_scope('Dis', reuse=tf.AUTO_REUSE):\n            x = self.conv1(x)\n            x = self.bn(self.conv2(x), training=train)\n            x = self.bn(self.conv3(x), training=train)\n            x = self.bn(self.conv4(x), training=train)\n            x = tf.layers.flatten(x)\n            x = self.bn(self.fc(x), training=train)\n            return x\n\n\ndef main():\n    epochs = 10\n    alpha = 5e-5\n    c = 0.01\n    m = 64\n    n_critic = 5\n\n    dset = Dataset('../data/pokemon')\n\n    tf.reset_default_graph()\n    z_in = tf.placeholder(tf.float32, [None, 100])\n    i_in = tf.placeholder(tf.float32, [None, 64, 64, 3])\n\n    gen = Generator()\n    dis = Discriminator()\n\n    generate = gen(z_in, False)\n\n    # critic loss: E[D(fake)] - E[D(real)], minimized so the critic scores real images higher\n    w_loss = tf.reduce_mean(dis(gen(z_in, False), True)) \\\n        - tf.reduce_mean(dis(i_in, True))\n\n\n    w_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Dis')\n    w_optim = tf.train.RMSPropOptimizer(alpha)\n    w_train_op = w_optim.minimize(w_loss, var_list=w_vars)\n\n    clipped = [tf.assign(var, tf.clip_by_value(var, -c, c))\\\n        for var in w_vars]\n    with tf.control_dependencies([w_train_op]):\n        w_train_op = tf.tuple(clipped)\n\n    \n\n    theta_loss = -tf.reduce_mean(dis(gen(z_in, True), False))\n    theta_vars = 
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Gen')\n theta_optim = tf.train.RMSPropOptimizer(alpha)\n theta_train_op = theta_optim.minimize(theta_loss, var_list=theta_vars)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in tqdm(range(epochs)):\n for t in tqdm(range(n_critic)):\n img_in = dset.batch(m)\n z = np.random.normal(size=[m, 100])\n sess.run(w_train_op, feed_dict={z_in:z, i_in:img_in})\n z = np.random.normal(size=[m, 100])\n sess.run(theta_train_op, feed_dict={z_in:z})\n\n print()\n z = np.random.normal(size=[1, 100])\n img = sess.run(generate, feed_dict={z_in:z})[0]\n img = normalize_img(img)\n plt.imshow(img)\n plt.show()\n\n\nif __name__ == '__main__':\n main()","sub_path":"wgan/wgan_tf.py","file_name":"wgan_tf.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213696117","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 7 18:00:41 2017\n\n@author: davlars\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport cv2\n \ndef get_centroid(points): \n x = [p[0] for p in points]\n y = [p[1] for p in points]\n return np.array((sum(x) / len(points), sum(y) / len(points)))\n \ndef adjust_center(cord):\n centroid = get_centroid(cord)\n cord[:,0] = cord[:,0] - centroid[0]\n cord[:,1] = cord[:,1] - centroid[1]\n return cord\n \ndef get_scale(cord):\n scale_x = np.max(cord[:,0]) - np.min(cord[:,0])\n scale_y = np.max(cord[:,1]) - np.min(cord[:,1])\n return (scale_x, scale_y)\n \ndef get_translation(image, src, dst, knn, resolution, est_rot):\n dst_size = np.array(image.shape)\n tran_itr = np.int(dst_size[0]/resolution) # has to be even number (number of iteration excluding the center)\n print(image)\n if tran_itr%2 == 1:\n tran_itr = tran_itr+1\n delta_tran = dst_size/tran_itr\n dsts, dists, dists_sum = [],[],[]\n \n for i in range(np.int(-tran_itr/2),np.int(tran_itr/2)+1):\n cur_tran_x = delta_tran[0] * i\n for j in range(np.int(-tran_itr/2),np.int(tran_itr/2)+1):\n cur_tran_y = delta_tran[1] * j\n Tr = np.array([[np.cos(est_rot), -np.sin(est_rot), cur_tran_x],\n [np.sin(est_rot), np.cos(est_rot) , cur_tran_y],\n [0 , 0 , 1]])\n dst_temp = cv2.transform(dst,Tr[0:2])\n ret, results, neighbours, dist = knn.findNearest(dst_temp[0], 1)\n dsts.append(dst_temp)\n dists.append(dist)\n #plt.scatter(dst_temp[0][:,0],dst_temp[0][:,1],marker='.')\n \n for i in range(len(dists)):\n dists_sum.append(np.sum(dists[i],axis=0))\n idx = np.where(dists_sum == min(i for i in dists_sum))[0].astype('int32')\n idx_x = -int(idx/(tran_itr+1))\n idx_y = -int(idx%(tran_itr+1))\n shift = np.array([idx_x*delta_tran[0]+dst_size[0]/2,\n idx_y*delta_tran[1]+dst_size[1]/2])\n \n return shift\n \ndef get_rotation(src,dst,knn,rot_itr):\n \n # Rearranging the center \n src[0] = adjust_center(src[0])\n dst[0] = adjust_center(dst[0])\n \n delta_rot = 2*np.pi/rot_itr\n dsts, dists, dists_sum = [],[],[]\n \n for i in range(rot_itr):\n cur_rot = delta_rot * i\n Tr = np.array([[np.cos(cur_rot), -np.sin(cur_rot), 0],\n [np.sin(cur_rot), np.cos(cur_rot) , 0],\n [0 , 0 , 1]])\n dst_temp = cv2.transform(dst,Tr[0:2])\n ret, results, neighbours, dist = knn.findNearest(dst_temp[0], 1)\n dsts.append(dst_temp)\n dists.append(dist)\n \n for i in range(len(dists)):\n dists_sum.append(np.sum(dists[i],axis=0))\n idx = np.where(dists_sum == min(i for i in dists_sum))[0].astype('int32')\n \n return idx/rot_itr*360\n \ndef 
run(image,image1, image2, sur_threshold=0.1,rot_itr=180,resolution=2):\n # Getting contour\n # image1: phantom; image2: standard phantom\n ret_phtm,thresh_phtm = cv2.threshold(image1,sur_threshold,255,0)\n ret_std_phtm,thresh_std_phtm = cv2.threshold(image2,sur_threshold,255,0)\n image_phtm, contours_phtm, hierarchy_phtm = cv2.findContours(thresh_phtm.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n image_std_phtm, contours_std_phtm, hierarchy_std_phtm = cv2.findContours(thresh_std_phtm.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n \n # Converting to point clouds and getting the same number of points\n num_sample_ratio=0.8\n contours_phtm_ = []\n contours_std_phtm_ = []\n # Considering all points\n for i in range(len(contours_phtm)):\n contours_phtm_.extend(contours_phtm[i])\n for i in range(len(contours_std_phtm)):\n contours_std_phtm_.extend(contours_std_phtm[i])\n num_sample = int(num_sample_ratio * min(len(contours_phtm_),len(contours_std_phtm_)))\n Points_phtm = np.ravel(random.sample(list(contours_phtm_), num_sample)).reshape((-1,2)).T\n Points_std_phtm = np.ravel(random.sample(list(contours_std_phtm_), num_sample)).reshape((-1,2)).T\n '''\n f2, sub_f2 = plt.subplots(2, 2,figsize=(6, 6))\n sub_f2[0,0].imshow(image_std_phtm, cmap='bone', origin='lower',extent=[0,300, 0,300])\n sub_f2[0,0].set_title('Threshold')\n sub_f2[0,1].plot(Points_std_phtm[0], Points_std_phtm[1],'.')\n sub_f2[0,1].axis([0, 300, 0, 300])\n sub_f2[0,1].set_title('Contour')\n sub_f2[1,0].imshow(image_phtm, cmap='bone', origin='lower',extent=[0,300, 0,300])\n sub_f2[1,0].set_title('Threshold')\n sub_f2[1,1].plot(Points_phtm[0], Points_phtm[1],'.')\n sub_f2[1,1].axis([0, 300, 0, 300])\n sub_f2[1,1].set_title('Contour')\n f2.subplots_adjust(hspace=0.3)\n plt.show()\n '''\n \n src = np.array([Points_std_phtm.T], copy=True).astype(np.float32)\n dst = np.array([Points_phtm.T], copy=True).astype(np.float32)\n \n # Create knn to measure the distances\n knn = cv2.ml.KNearest_create()\n responses = np.array(range(len(src[0]))).astype(np.float32)\n knn.train(src[0],cv2.ml.ROW_SAMPLE, responses)\n \n est_rot = get_rotation(src,dst,knn,rot_itr)\n print(est_rot)\n est_tran = get_translation(image,src,dst,knn,resolution,0)\n print(est_tran)\n #return [[est_tran],est_rot]","sub_path":"Test Files/headnhat_debug.py","file_name":"headnhat_debug.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"59777177","text":"from fabric.api import *\nfrom fabric.contrib.files import *\n\"\"\" This script create trust between machines\n\"\"\"\ndef ssh_keygen():\n if not exists('~/.ssh/id_rsa.pub'):\n run('ssh-keygen')\n\ndef get_key():\n local('rm -f ./id_ras.pub')\n get('~/.ssh/id_rsa.pub', './id_rsa.pub')\n\ndef put_key():\n put('./id_rsa.pub', '/tmp/')\n run('cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys2')\n run('rm -f /tmp/id_rsa.pub')\n\ndef create_relation(from_host, remote_hosts):\n execute(ssh_keygen, hosts=[from_host])\n execute(get_key, hosts=[from_host])\n execute(put_key, hosts=remote_hosts)\n local('rm -f ./id_ras.pub')\n\ndef auto_ssh(*args):\n \"\"\"Create ssh trust connection beween host1, host2, ..., hostn\n fab -f create_trust.py auto_ssh:host1,host2,host3,hostN\n \"\"\"\n if len(args) < 2:\n abort('argument >= 2')\n\n for host in args:\n execute(ssh_keygen, hosts=[host])\n\n for (i, from_host) in enumerate(args):\n remote_hosts = list(args[0:i])\n remote_hosts.extend(args[i+1:])\n create_relation(from_host, 
remote_hosts)\n\n","sub_path":"create_trust.py","file_name":"create_trust.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"464719356","text":"\n# Now, input and information are both number arrays\ndef multi_input_neural_network(input_information, weights):\n assert( len(input_information) == len(weights))\n\n calories_burned = 0\n for index in range(len(weights)):\n calories_burned += input_information[index] * weights[index]\n\n return calories_burned\n\n\n#DEMO\n\n# Inputs is an array, ordered as: minutes jogging --> jogging speed ---> mass of the runner\n# Minutes jogging: 30\n# Jogging speed: 3 m/s\n# Runner's mass: 80kg\ninputs = [30, 3, 80]\n\n# The weights array follows the same order\ncalculated_weights = [6.2, 8.1, 0.31]\n\ncalories_burned = multi_input_neural_network(inputs, calculated_weights )\n\nprint(\"According to my neural network, I burned {} calories\".format(calories_burned))","sub_path":"DeepLearningBasics/NeuralNetwork_2-Estimation/multiple_input_nn.py","file_name":"multiple_input_nn.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272132568","text":"\"\"\"Dicstring.\"\"\"\nimport unittest\nimport platform\nimport doctest\nimport sys\nimport os\nfrom tempita import Template\nsys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))\n\n\nclass TestTempita(unittest.TestCase):\n \"\"\"Dicstring.\"\"\"\n def test_doctest_template(self):\n if platform.python_implementation == 'CPython':\n doctest.testfile('test_template{}.txt'.format(sys.version))\n else:\n doctest.testfile('test_template.pypytxt')\n\n def test_doctest_docs_index(self):\n doctest.testfile('../docs/index.txt')\n\n def test_read_template_from_file_with_encoding(self):\n filename = '/tests/test_basetemplate.txt'\n namespace = dict(name=\"Arthur Dent\")\n t = Template.from_filename(sys.path[0] + filename, namespace=namespace, encoding=\"latin-1\")\n print(t)\n\n def test_read_template_from_file_without_encoding(self):\n filename = '/tests/test_basetemplate.txt'\n namespace = dict(name=\"Arthur Dent\")\n t = Template.from_filename(sys.path[0] + filename, namespace=namespace, encoding=None)\n print(t)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_template.py","file_name":"test_template.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443998050","text":"from .sqlalchemy_service import BaseDatabaseService\nfrom ..model.sqlalchemy import Review\nimport requests\nimport json\n\n\nclass ReviewService(BaseDatabaseService):\n \"\"\"\n 리뷰와 관련된 기능을 제공하는 서비스\n - 리뷰 검색\n - 리뷰 가져오기\n - 리뷰 삭제, 추가 등\n - 리뷰에 답글 달기\n - 리뷰 분석\n\n \"\"\"\n \n \n def get_review(self, id):\n return self.query(Review).filter_by(id=id).first()\n\n def get_review_list(self, channel_id=\"\", count=10, index=None, sort='updated_date', order='desc', filter=None):\n q = self.query(Review)\n \n if filter == \"replied\":\n q = q.filter_by(is_replied=True)\n elif filter == \"unreplied\":\n q = q.filter_by(is_replied=False)\n\n if sort in ('updated_date', 'created_date', 'rating') and hasattr(Review, sort):\n column = getattr(Review, sort)\n if order == \"desc\":\n q = q.order_by(column.desc())\n else:\n q = q.order_by(column.asc())\n\n if count:\n q = q.limit(count)\n if index:\n q = q.offset(index)\n \n return q.all()\n\n def 
reply_review(self, id, reply):\n r = self.get_review(id)\n if r:\n r.is_replied = True\n r.reply = reply\n self.commit()\n\n if r.rid:\n self.app.services.gcp.reply_review(r.rid, reply)\n \n return True\n else:\n return False\n\n","sub_path":"api/service/review_service.py","file_name":"review_service.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331845983","text":"# -*- coding: utf-8 -*-\r\nimport sys\r\nsys.path.extend(['../'])\r\nimport numpy as np\r\nfrom collections import Counter\r\nimport tensorflow as tf\r\nimport math\r\nimport codecs\r\nimport random\r\nimport functools\r\n\r\n\r\nclass FilterData(object):\r\n def __init__(self):\r\n self.pos_filter = {'X', 'NUM', 'PRON', 'SYM', 'SPACE', 'DET', 'PUNCT'}\r\n self.pos_function = {'PART', 'ADP', 'CCONJ'}\r\n self.pos_content = {'VERB', 'ADJ', 'NOUN'}\r\n # 这个部分那不一定注释,其中包括了副词和表示情感的词语\r\n self.pos_other = {'INTJ', 'ADV', 'PROPN', }\r\n\r\n with open('stop_word.txt') as file:\r\n eng_stop_word = file.readlines()\r\n self.eng_stop_word = set([word.strip() for word in eng_stop_word])\r\n # print(self.eng_stop_word)\r\n pass\r\n\r\n def filter_pos_other(self, word):\r\n return word[1] not in self.pos_filter\r\n\r\n def filter_stop_word(self, word):\r\n return word[0] not in self.eng_stop_word\r\n\r\n def filter_isalpha(self, word):\r\n return word[0].encode('UTF-8').isalpha()\r\n\r\n def filter_pos_function(self, word):\r\n return word[1] not in self.pos_function\r\n\r\n def filter_pos_not_function_or_content(self, word):\r\n return word[1] not in self.pos_other\r\n\r\n @staticmethod\r\n def filter_now(cause_clause, effect_clause, filter_kind):\r\n cause_clause = filter(filter_kind, cause_clause)\r\n effect_clause = filter(filter_kind, effect_clause)\r\n cause_clause, effect_clause = list(cause_clause), list(effect_clause)\r\n return cause_clause, effect_clause\r\n\r\n @staticmethod\r\n def cmp(x, y):\r\n if x[3] > y[3]:\r\n return 1\r\n if x[3] < y[3]:\r\n return -1\r\n else:\r\n return 0\r\n\r\n\r\nclass BaseData(object):\r\n def __init__(self, sample_neg_randomly, num_samples=None):\r\n # 这个参数用于决定生成正样本还是进行max采样\r\n self.sample_neg_randomly = sample_neg_randomly\r\n self.vocab_left, self.vocab_rev_left, self.vocab_left_size = [], {}, 0\r\n self.vocab_right, self.vocab_rev_right, self.vocab_right_size = [], {}, 0\r\n self.c2e_test, self.e2c_test = [], []\r\n self.x_left, self.x_right, self.x_target = None, None, None\r\n self.max_length = 0\r\n self.test_left, self.test_right, self.test_pairs = None, None, None\r\n self.num_samples = num_samples\r\n self.labels = []\r\n\r\n @staticmethod\r\n def load_samples(data_file_path):\r\n \"\"\"\r\n samples: 地震导致多人死亡----地震----多人 死亡\r\n \"\"\"\r\n input_left, input_right = [], []\r\n count = 0\r\n with codecs.open(data_file_path, 'r', 'utf-8') as fin:\r\n while True:\r\n line = fin.readline()\r\n count += 1\r\n # print(len(line), line)\r\n if not line:\r\n break\r\n if len(line) > 1:\r\n items = line.strip().split('----')\r\n s1 = items[0].split(' ')\r\n s2 = items[1].split(' ')\r\n if '' in s1 or '' in s2:\r\n continue\r\n # 其中s1 和s2都是包含不止一个单词\r\n if len(s1) > 0 and len(s2) > 0:\r\n if len(s1) > 50 or len(s2) > 50:\r\n pass\r\n else:\r\n input_left.append(s1)\r\n input_right.append(s2)\r\n return input_left, input_right\r\n\r\n @staticmethod\r\n def load_samples_from_numpy(\r\n data_file_path, filter_pos_other_flag=1, filter_stop_word_flag=1, filter_isalpha_flag=1,\r\n 
filter_pos_not_function_or_content_flag=1, filter_pos_function_flag=1):\r\n \"\"\"\r\n samples: 从原始获得的numpy语料中抽取出对应的数据,首先过滤数据,只保留内容词的部分\r\n \"\"\"\r\n\r\n filter_data = FilterData()\r\n input_left, input_right = [], []\r\n for data_file in data_file_path:\r\n cause_effect_list = np.load(data_file, allow_pickle=True)\r\n for cause_effect_pair in cause_effect_list:\r\n cause_clause, effect_clause = cause_effect_pair\r\n cause_clause = sorted(cause_clause, key=functools.cmp_to_key(FilterData.cmp))\r\n effect_clause = sorted(effect_clause, key=functools.cmp_to_key(FilterData.cmp))\r\n\r\n # 过滤其他类型\r\n if filter_pos_other_flag:\r\n cause_clause, effect_clause = filter_data.filter_now(\r\n cause_clause, effect_clause, filter_data.filter_pos_other)\r\n # 过滤停用词\r\n if filter_stop_word_flag:\r\n cause_clause, effect_clause = filter_data.filter_now(\r\n cause_clause, effect_clause, filter_data.filter_stop_word)\r\n #  过滤含有其他字符的词\r\n if filter_isalpha_flag:\r\n cause_clause, effect_clause = filter_data.filter_now(\r\n cause_clause, effect_clause, filter_data.filter_isalpha)\r\n # 过滤第三类成分词\r\n if filter_pos_not_function_or_content_flag:\r\n cause_clause, effect_clause = filter_data.filter_now(\r\n cause_clause, effect_clause, filter_data.filter_pos_not_function_or_content)\r\n # 过滤功能词\r\n if filter_pos_function_flag:\r\n cause_clause, effect_clause = filter_data.filter_now(\r\n cause_clause, effect_clause, filter_data.filter_pos_function)\r\n\r\n if len(cause_clause) > 0 and len(effect_clause) > 0:\r\n if len(cause_clause) > 50 or len(effect_clause) > 50:\r\n pass\r\n else:\r\n # print(' '.join([word[0] for word in sorted(cause_effect_pair[0], key=functools.cmp_to_key(FilterData.cmp))]))\r\n # print(' '.join([word[0] for word in sorted(cause_effect_pair[1], key=functools.cmp_to_key(FilterData.cmp))]))\r\n # print(' '.join([word[0] for word in cause_clause]))\r\n # print(' '.join([word[0] for word in effect_clause]))\r\n # print('-------')\r\n input_left.append([word[0] for word in cause_clause])\r\n input_right.append([word[0] for word in effect_clause])\r\n # for word in cause_clause:\r\n # input_left.append(word[0])\r\n # for word in effect_clause:\r\n # input_right.append(word[0])\r\n print(len(input_left), len(input_right))\r\n return input_left, input_right\r\n\r\n def load_dev(self, labeled_path):\r\n # cause_dev = ['侵扰', '事故', '爆炸', '台风', '冲突', '矛盾', '地震', '农药', '违章', '腐蚀',\r\n # '感染', '病毒', '暴雨', '疲劳', '真菌', '贫血', '感冒', '战乱', '失调', '摩擦']\r\n # effect_dev = ['污染', '愤怒', '困境', '损失', '不适', '疾病', '失事', '悲剧', '危害', '感染',\r\n # '故障', '死亡', '痛苦', '失败', '矛盾', '疲劳', '病害', '塌陷', '洪灾']\r\n cause_dev = ['claim', 'mercy', 'law', 'accident', 'explosion', 'earthquake', 'virus', 'storm', 'war']\r\n effect_dev = ['happy', 'heard', 'surprised', 'pollution', 'death', 'loss', 'failure', 'disease', 'illness', 'flood']\r\n \"\"\"\r\n load test word to show similarity\r\n \"\"\"\r\n for w in cause_dev:\r\n try:\r\n self.c2e_test.append(self.vocab_rev_left[w])\r\n except KeyError as e:\r\n print('{} is not existed in cause vocab!'.format(e))\r\n for w in effect_dev:\r\n try:\r\n self.e2c_test.append(self.vocab_rev_right[w])\r\n except KeyError as e:\r\n print('{} is not existed in effect vocab!'.format(e))\r\n # self.load_labeled_data(labeled_path)\r\n\r\n def load_labeled_data_from_numpy(self, data_path):\r\n lefts, rights, pairs = [], [], []\r\n with codecs.open(data_path, 'r', 'utf-8') as f:\r\n line = f.readline()\r\n while line:\r\n if line.strip() == '':\r\n continue\r\n result = line.strip().split('##')\r\n 
pair = result[1].split(' ')\r\n left = result[0].split('----')[1].split(' ')\r\n right = result[0].split('----')[2].split(' ')\r\n try:\r\n pair_left, pair_right = self.vocab_rev_left[pair[0]], self.vocab_rev_right[pair[1]]\r\n temp_left, temp_right = [], []\r\n for l in left:\r\n try:\r\n temp_left.append(self.vocab_rev_left[l])\r\n except KeyError as e:\r\n pass\r\n for w in right:\r\n try:\r\n temp_right.append(self.vocab_rev_right[w])\r\n except KeyError as e:\r\n pass\r\n if pair_left in temp_left and pair_right in temp_right:\r\n lefts.append(temp_left)\r\n rights.append(temp_right)\r\n pairs.append([pair_left, pair_right])\r\n except KeyError:\r\n c = 0\r\n line = f.readline()\r\n self.test_left, self.test_right, self.test_pairs = lefts, rights, pairs\r\n print('num of valid labelled data is {}.'.format(len(self.test_pairs)))\r\n\r\n def load_labeled_data(self, data_path):\r\n \"\"\"\r\n 注意:这些样本中,只有正例的句子\r\n :param data_path: 输入数据中包含信息: 原始语料, cause,effect cause中核心词,effect中的核心词\r\n :return:\r\n \"\"\"\r\n # 58冬季放水不当引起的故障----冬季 放水 不当----故障##不当 故障\r\n lefts, rights, pairs = [], [], []\r\n with codecs.open(data_path, 'r', 'utf-8') as f:\r\n line = f.readline()\r\n while line:\r\n if line.strip() == '':\r\n continue\r\n result = line.strip().split('##')\r\n pair = result[1].split(' ')\r\n left = result[0].split('----')[1].split(' ')\r\n right = result[0].split('----')[2].split(' ')\r\n try:\r\n pair_left, pair_right = self.vocab_rev_left[pair[0]], self.vocab_rev_right[pair[1]]\r\n temp_left, temp_right = [], []\r\n for l in left:\r\n try:\r\n temp_left.append(self.vocab_rev_left[l])\r\n except KeyError as e:\r\n pass\r\n for w in right:\r\n try:\r\n temp_right.append(self.vocab_rev_right[w])\r\n except KeyError as e:\r\n pass\r\n if pair_left in temp_left and pair_right in temp_right:\r\n lefts.append(temp_left)\r\n rights.append(temp_right)\r\n pairs.append([pair_left, pair_right])\r\n except KeyError:\r\n c = 0\r\n line = f.readline()\r\n self.test_left, self.test_right, self.test_pairs = lefts, rights, pairs\r\n print('num of valid labelled data is {}.'.format(len(self.test_pairs)))\r\n\r\n def build_vocab(self, new_x_left, new_x_right, min_count):\r\n \"\"\"\r\n 1. build vocab, most frequent words while have low index in vocab\r\n 2. 
the word will be insert into vocab at head position in nce node because of padding\r\n \"\"\"\r\n # assert mode in ['pad', 'not_pad']\r\n data_left = [word for x in new_x_left for word in x]\r\n data_right = [word for x in new_x_right for word in x]\r\n # data_left = new_x_left\r\n # data_right = new_x_right\r\n\r\n number = 0\r\n c = Counter(data_left)\r\n for key in c:\r\n if c[key] < min_count:\r\n number += 1\r\n counter = c.most_common(len(c) - number)\r\n self.vocab_left = [counter[i][0] for i in range(len(counter))]\r\n del c, counter\r\n\r\n number = 0\r\n c = Counter(data_right)\r\n for key in c:\r\n if c[key] < min_count:\r\n number += 1\r\n counter = c.most_common(len(c) - number)\r\n self.vocab_right = [counter[i][0] for i in range(len(counter))]\r\n del c, counter\r\n # 保存所有的词\r\n self.vocab_left.insert(0, '')\r\n self.vocab_right.insert(0, '')\r\n # 保存所有的词对应id\r\n self.vocab_rev_left = {x: i for i, x in enumerate(self.vocab_left)}\r\n self.vocab_rev_right = {x: i for i, x in enumerate(self.vocab_right)}\r\n # 保存id_str和str_id\r\n\r\n # print(self.vocab_rev_left)\r\n # print(self.vocab_rev_right)\r\n\r\n self.vocab_left_size = len(self.vocab_left)\r\n self.vocab_right_size = len(self.vocab_right)\r\n del data_left, data_right\r\n\r\n def convert2one_hot(self, input_left, input_right):\r\n \"\"\"\r\n 1. convert to ont hot representation\r\n 2. get max_length of left and right\r\n \"\"\"\r\n one_hot_left, one_hot_right = list(), list()\r\n max_length = 0\r\n count1, count2 = 0, 0\r\n for i in range(len(input_left)):\r\n cause, effect = list(), list()\r\n left = input_left[i]\r\n right = input_right[i]\r\n for w_l in left:\r\n try:\r\n cause.append(self.vocab_rev_left[w_l])\r\n except KeyError as e:\r\n count1 += 1\r\n for w_r in right:\r\n try:\r\n effect.append(self.vocab_rev_right[w_r])\r\n except KeyError as e:\r\n count2 += 1\r\n if cause and effect:\r\n one_hot_left.append(cause)\r\n one_hot_right.append(effect)\r\n # 更新最大句子长度\r\n local_max_length = max(len(cause), len(effect))\r\n if max_length < local_max_length:\r\n max_length = local_max_length\r\n return one_hot_left, one_hot_right, max_length\r\n\r\n def padding_data(self, one_hot_left, one_hot_right):\r\n # 将全部的单词加上padding\r\n padding_word = 0\r\n new_x_left = list()\r\n new_x_right = list()\r\n for i in range(len(one_hot_left)):\r\n x_left = one_hot_left[i]\r\n num_padding = self.max_length - len(x_left)\r\n new_x = x_left + [padding_word] * num_padding\r\n new_x_left.append(new_x)\r\n x_right = one_hot_right[i]\r\n num_padding = self.max_length - len(x_right)\r\n new_y = x_right + [padding_word] * num_padding\r\n new_x_right.append(new_y)\r\n return new_x_left, new_x_right\r\n\r\n def neg_sent(self, batch_size):\r\n \"\"\"\r\n get negative samples from sentence\r\n \"\"\"\r\n neg_left, neg_right = [], []\r\n L = len(self.x_left) - 1\r\n for i in range(self.num_samples * batch_size):\r\n k = random.randint(0, L)\r\n neg_left.append(self.x_left[k])\r\n j = random.randint(0, L)\r\n neg_right.append(self.x_right[j])\r\n return neg_left, neg_right\r\n\r\n @staticmethod\r\n def get_len(one_hot_left, one_hot_right):\r\n left_len, right_len = [], []\r\n for i in range(len(one_hot_right)):\r\n left_len.append(len(one_hot_left[i]))\r\n right_len.append(len(one_hot_right[i]))\r\n return left_len, right_len\r\n\r\n def load_data(self, data_path, min_count):\r\n if self.sample_neg_randomly:\r\n # 获取样本,构建词典\r\n samples_left, samples_right = self.load_samples(data_path['pos_path'])\r\n # samples_left, samples_right = 
self.load_samples_from_numpy(data_path['pos_path'])\r\n self.build_vocab(samples_left, samples_right, min_count)\r\n # 将单词进行one-hot编码\r\n onehot_left, onehot_right, max_len = self.convert2one_hot(samples_left, samples_right)\r\n self.x_left, self.x_right, self.max_length = onehot_left, onehot_right, max_len\r\n else:\r\n pos_left, pos_right = self.load_samples(data_path['pos_path'])\r\n neg_left, neg_right = self.load_samples(data_path['neg_path'])\r\n self.build_vocab(pos_left, pos_right, min_count)\r\n pos_onehot_left, pos_onehot_right, pos_max_len = self.convert2one_hot(pos_left, pos_right)\r\n pos_target = [1.0 for _ in range(len(pos_onehot_left))]\r\n neg_onehot_left, neg_onehot_right, neg_max_len = self.convert2one_hot(neg_left, neg_right)\r\n neg_target = [0.0 for _ in range(len(neg_onehot_left))]\r\n self.x_left = pos_onehot_left + neg_onehot_left\r\n self.x_right = pos_onehot_right + neg_onehot_right\r\n self.x_target = pos_target + neg_target\r\n self.max_length = max(pos_max_len, neg_max_len)\r\n self.load_dev(data_path['labeled_path'])\r\n\r\n\r\nclass BaseModel(object):\r\n\r\n def __init__(self, embedding_size, batch_size, num_epochs, num_samples, learning_rate, data_loader):\r\n self.dataLoader = data_loader\r\n self.sample_neg_randomly = data_loader.sample_neg_randomly\r\n self.max_len, self.num_samples, self.num_epochs = 0, num_samples, num_epochs\r\n self.embedding_size, self.batch_size = embedding_size, batch_size\r\n # used for showing similarity of cause->effect and effect->cause\r\n self.cause_word_id, self.effect_word_id = None, None\r\n self.cause_normed_embed, self.c2e_similar = None, None\r\n self.effect_normed_embed, self.e2c_similar = None, None\r\n\r\n self.input_left, self.input_right = None, None\r\n self.left_len, self.right_len, self.targets = None, None, None\r\n self.input_left_embed, self.input_right_embed = None, None\r\n # embedding of cause and effect vocabs\r\n self.cause_embed_dict, self.effect_embed_dict = None, None\r\n self.vocab_left_size, self.vocab_right_size = 0, 0\r\n\r\n self.train_op, self.loss, self.global_steps, self.init = None, None, None, None\r\n self.learning_rate, self.average_loss = learning_rate, 0.0\r\n self.graph, self.sess = tf.Graph(), None\r\n\r\n def load_data(self, data_path, min_count):\r\n self.dataLoader.load_data(data_path, min_count)\r\n self.max_len = self.dataLoader.max_length\r\n self.vocab_left_size = self.dataLoader.vocab_left_size\r\n self.vocab_right_size = self.dataLoader.vocab_right_size\r\n if '' in self.dataLoader.vocab_left or '' in self.dataLoader.vocab_right:\r\n print('error')\r\n exit(1)\r\n print('max length of phrase: {}!'.format(self.max_len))\r\n print('length of left vocab: {}'.format(self.dataLoader.vocab_left_size))\r\n print('length of right vocab: {}'.format(self.dataLoader.vocab_right_size))\r\n\r\n def construct_graph(self):\r\n pass\r\n\r\n def train_stage(self, cause_output_path, effect_output_path):\r\n pass\r\n\r\n @staticmethod\r\n def shuffle_pos_neg(pos_data, neg_data):\r\n data = np.array(pos_data + neg_data)\r\n shuffle_indices = np.random.permutation(np.arange(len(data)))\r\n return data[shuffle_indices]\r\n\r\n def show_loss(self, feed_dict):\r\n _, global_step, loss_val = self.sess.run([self.train_op, self.global_steps, self.loss],\r\n feed_dict=feed_dict)\r\n self.average_loss += loss_val\r\n current_step = tf.train.global_step(self.sess, self.global_steps)\r\n # print(current_step)\r\n if current_step % 1000 == 0:\r\n self.average_loss /= 1000\r\n print('Average loss at step ', 
current_step, ': ', self.average_loss)\r\n self.average_loss = 0.0\r\n if current_step % 10000 == 0:\r\n self.show_similar()\r\n\r\n def init_embedding(self):\r\n self.cause_embed_dict = tf.Variable(tf.truncated_normal(\r\n [self.vocab_left_size, self.embedding_size],\r\n stddev=0.01 / math.sqrt(self.embedding_size), dtype=tf.float32))\r\n\r\n self.effect_embed_dict = tf.Variable(tf.truncated_normal(\r\n [self.vocab_right_size, self.embedding_size],\r\n stddev=0.01 / math.sqrt(self.embedding_size), dtype=tf.float32))\r\n\r\n def calculate_similar(self):\r\n \"\"\"\r\n 分别对原因的embedding和结果的embedding做归一化,然后再计算成绩\r\n 获取test中的部分词汇的id ,然后分别计算这些词在cause空间和effect空间的相似性\r\n :return:\r\n \"\"\"\r\n self.cause_word_id = tf.constant(self.dataLoader.c2e_test, dtype=tf.int32)\r\n cause_norm = tf.sqrt(tf.reduce_sum(tf.square(self.cause_embed_dict), 1, keep_dims=True))\r\n self.cause_normed_embed = self.cause_embed_dict / cause_norm\r\n c_test_embed = tf.nn.embedding_lookup(self.cause_normed_embed, self.cause_word_id)\r\n\r\n self.effect_word_id = tf.constant(self.dataLoader.e2c_test, dtype=tf.int32)\r\n effect_norm = tf.sqrt(tf.reduce_sum(tf.square(self.effect_embed_dict), 1, keep_dims=True))\r\n self.effect_normed_embed = self.effect_embed_dict / effect_norm\r\n e_test_embed = tf.nn.embedding_lookup(self.effect_normed_embed, self.effect_word_id)\r\n\r\n self.c2e_similar = tf.matmul(c_test_embed, tf.transpose(self.effect_normed_embed))\r\n self.e2c_similar = tf.matmul(e_test_embed, tf.transpose(self.cause_normed_embed))\r\n\r\n def show_similar(self):\r\n \"\"\"\r\n 获取原空间中的单词在对应的空间中排序最靠前的15个单词,然后打印观察相似性,是否能判断具有相似性\r\n :return:\r\n \"\"\"\r\n sim = self.c2e_similar.eval()\r\n for i in range(len(self.dataLoader.c2e_test)):\r\n valid_word = self.dataLoader.vocab_left[self.dataLoader.c2e_test[i]]\r\n top_k = 15\r\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\r\n log_str = 'Nearest effect words to %s:' % valid_word\r\n for k in range(top_k):\r\n close_word = self.dataLoader.vocab_right[nearest[k]]\r\n log_str = '%s %s,' % (log_str, close_word)\r\n print(log_str)\r\n print('\\n\\n')\r\n sim = self.e2c_similar.eval()\r\n for i in range(len(self.dataLoader.e2c_test)):\r\n valid_word = self.dataLoader.vocab_right[self.dataLoader.e2c_test[i]]\r\n top_k = 15\r\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\r\n log_str = 'Nearest cause words to %s:' % valid_word\r\n for k in range(top_k):\r\n close_word = self.dataLoader.vocab_left[nearest[k]]\r\n log_str = '%s %s,' % (log_str, close_word)\r\n print(log_str)\r\n\r\n def accuracy(self):\r\n \"\"\"\r\n self.dataLoader.test_left, self.dataLoader.test_right 中保存的都是单词的id\r\n\r\n :return:\r\n \"\"\"\r\n left_words, right_words = self.dataLoader.test_left, self.dataLoader.test_right\r\n labelled_pairs = self.dataLoader.test_pairs\r\n causeVec, effectVec = self.cause_embed_dict.eval(), self.effect_embed_dict.eval()\r\n\r\n def predict(left, right):\r\n \"\"\"\r\n 计算两个词向量之间的内积,然后做排序\r\n # 分别计算了两个词在句子中的排序和\r\n :param left: 原因句\r\n :param right: 结果句\r\n :return:\r\n \"\"\"\r\n d = {}\r\n for l in left:\r\n for r in right:\r\n l_vec, r_vec = causeVec[l], effectVec[r]\r\n\r\n d[' '.join([str(l), str(r)])] = l_vec.dot(r_vec.T)\r\n result = sorted(d.items(), key=lambda item: item[1], reverse=True)\r\n return result\r\n\r\n count, mrr = 0, []\r\n for i in range(len(left_words)):\r\n res = predict(left_words[i], right_words[i])\r\n s = ' '.join([str(labelled_pairs[i][0]), str(labelled_pairs[i][1])])\r\n if res[0][0] == s:\r\n count += 1\r\n for index, [k, _] in enumerate(res):\r\n 
if k == s:\r\n mrr.append(1.0 / float(index + 1))\r\n\r\n return round(count / float(len(labelled_pairs)), 4), round(sum(mrr) / float(len(mrr)), 4)\r\n\r\n @staticmethod\r\n def generate_batches(data, batch_size, shuffle=True):\r\n data = np.array(data)\r\n data_size = len(data)\r\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\r\n # Shuffle the data at each epoch\r\n if shuffle:\r\n shuffle_indices = np.random.permutation(np.arange(data_size))\r\n shuffled_data = data[shuffle_indices]\r\n else:\r\n shuffled_data = data\r\n for batch_num in range(num_batches_per_epoch):\r\n start_index = batch_num * batch_size\r\n end_index = min((batch_num + 1) * batch_size, data_size)\r\n yield shuffled_data[start_index:end_index]\r\n\r\n def write_embedding(self, cause_output_path, effect_output_path, step):\r\n tail = '_' + step + '.txt'\r\n cause_path, effect_path = cause_output_path + tail, effect_output_path + tail\r\n with codecs.open(cause_path, 'w', 'utf-8') as fcause, codecs.open(effect_path, 'w', 'utf-8') as feffect:\r\n with self.sess.as_default():\r\n cause_embeddings, effect_embeddings = self.cause_embed_dict.eval(), self.effect_embed_dict.eval()\r\n if '' in self.dataLoader.vocab_left:\r\n length = self.dataLoader.vocab_left_size - 1\r\n else:\r\n length = self.dataLoader.vocab_left_size\r\n fcause.write(str(length) + ' ' + str(self.embedding_size) + '\\n')\r\n if '' in self.dataLoader.vocab_right:\r\n length = self.dataLoader.vocab_right_size - 1\r\n else:\r\n length = self.dataLoader.vocab_right_size\r\n feffect.write(str(length) + ' ' + str(self.embedding_size) + '\\n')\r\n for i in range(self.dataLoader.vocab_left_size):\r\n s = self.dataLoader.vocab_left[i]\r\n if s != '':\r\n for j in range(self.embedding_size):\r\n s += ' ' + str(cause_embeddings[i][j])\r\n fcause.write(s + '\\n')\r\n for i in range(self.dataLoader.vocab_right_size):\r\n s = self.dataLoader.vocab_right[i]\r\n if s != '':\r\n for j in range(self.embedding_size):\r\n s += ' ' + str(effect_embeddings[i][j])\r\n feffect.write(s + '\\n')\r\n print('word embedding are stored in {} and {} respectively!'.format(cause_path, effect_path))\r\n\r\n @staticmethod\r\n def mask_softmax(match_matrix, mask_matrix):\r\n \"\"\"\r\n :param match_matrix: (batch, max_len, max_len)\r\n :param mask_matrix: (batch, max_len, max_len)\r\n :return:\r\n \"\"\"\r\n match_matrix_masked = match_matrix * mask_matrix\r\n\r\n match_matrix_shifted_1 = mask_matrix * tf.exp(\r\n match_matrix_masked - tf.reduce_max(match_matrix_masked, axis=1, keep_dims=True))\r\n match_matrix_shifted_2 = mask_matrix * tf.exp(\r\n match_matrix_masked - tf.reduce_max(match_matrix_masked, axis=2, keep_dims=True))\r\n\r\n Z1 = tf.reduce_sum(match_matrix_shifted_1, axis=1, keep_dims=True)\r\n Z2 = tf.reduce_sum(match_matrix_shifted_2, axis=2, keep_dims=True)\r\n softmax_1 = match_matrix_shifted_1 / (Z1 + 1e-12) # weight of left words\r\n softmax_2 = match_matrix_shifted_2 / (Z2 + 1e-12) # weight of right words\r\n return softmax_1, softmax_2\r\n\r\n @staticmethod\r\n def make_attention(input_left_embed, input_right_embed):\r\n return tf.matmul(input_left_embed, tf.transpose(input_right_embed, perm=[0, 2, 1]))\r\n","sub_path":"code/reference/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":28412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241868781","text":"from htm.enums import PredictionType\n\nrun = dict(\n pattern_length = 8, #32\n pattern_iterations = 200,\n 
data_path='basic_training.csv',\n prediction_type = PredictionType.STEPS,\n prediction_steps = 8,\n prediction_threshold = 0\n)\n\nrun['train_episodes'] = run['pattern_length'] * run['pattern_iterations']\n","sub_path":"htm_latest/htm/config/htm_run_base.py","file_name":"htm_run_base.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"648497844","text":"import os\nimport shutil\nimport subprocess\n\ndef freebsd_xdev_test():\n xdev_arch = os.environ['TARGET_ARCH']\n xdev_arch_map = {\n 'arm*': 'arm',\n 'mips*': 'mips',\n 'pc98': 'i386',\n 'powerpc*': 'powerpc',\n }\n\n xdev_arch_matched = False\n for pattern, xdev in xdev_arch_map.items():\n if fnmatch.fnmatch(xdev_arch, pattern):\n xdev_arch_matched = True\n break\n\n if not xdev_arch_matched:\n xdev = xdev_arch\n\n freebsd_xdev_prefix = f\"/usr/{xdev_arch}-freebsd/usr/bin/\"\n cc_path = shutil.which('cc', path=freebsd_xdev_prefix)\n\n if cc_path is None:\n print(\"Can't find appropriate FreeBSD xdev tools.\")\n print(f\"Tested: {cc_path}\")\n print(\"If you have FreeBSD-CURRENT sources in /usr/src, you can build these with the following command:\")\n print(\"\")\n print(f\"cd /usr/src && sudo make XDEV={xdev} XDEV_ARCH={xdev_arch} WITH_GCC=1 WITH_GCC_BOOTSTRAP=1 WITHOUT_CLANG=1 WITHOUT_CLANG_BOOTSTRAP=1 WITHOUT_CLANG_IS_CC=1 WITHOUT_TESTS=1 xdev\")\n print(\"\")\n print(\"Run this script again after you have the xdev tools installed.\")\n exit(1)\n\n try:\n include_dir = subprocess.check_output([cc_path, '-print-file-name=include'], text=True).strip()\n if not os.path.exists(os.path.join(include_dir, 'stdarg.h')):\n print(\"FreeBSD xdev tools are broken.\")\n print(\"The following command should print the full path to a directory\")\n print(\"containing stdarg.h and other basic headers suitable for this target:\")\n print(f\" $ {cc_path} -print-file-name=include\")\n print(\"Please install a newer version of the xdev tools.\")\n exit(1)\n print(f\"Found FreeBSD xdev tools for {xdev_arch}\")\n except subprocess.CalledProcessError:\n print(\"Error while checking xdev tools.\")\n exit(1)\n\n# Call the function\nfreebsd_xdev_test()","sub_path":"src/libs/freebsd_xdev.py","file_name":"freebsd_xdev.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"440637815","text":"import turtle # python needs this to use all the turtle functions\nturtle.shape('turtle') # changes the shape to a turtle\nfinn = turtle.clone() # creates new turtle and saves it in finn\nfinn.goto(100,0)\nfinn.goto(100,100)\nfinn.goto(0,100)\nfinn.goto(0,0)\n\ncharlie= finn.clone()\ncharlie.shape(\"triangle\")\ncharlie.penup()\ncharlie.goto(-50,0)\ncharlie.pendown()\ncharlie.goto(-100,0)\ncharlie.left(90)\ncharlie.goto(-100,100)\ncharlie.right(30)\ncharlie.goto(-50,0)\ncharlie.penup\ncharlie.goto(200,0)\ncharlie.pendown()\ncharlie.stamp()\ncharlie.goto(200,100)\n\nturtle.mainloop()\n","sub_path":"funturtle.py","file_name":"funturtle.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"635207413","text":"import sys\nimport time\nfrom PacketCreator import Packet\nfrom PacketParser import PacketParser\nimport ChecksumHandler\nimport codecs\n\n\ndef readFile(file, MSS):\n\ttry:\n\t file_bytes = []\n\t #test_file = open(my_test_file, 'rb')\n\t with codecs.open(file, \"rb\") as f:\n\t while True:\n\t 
file_segment = f.read(MSS) # Read the file MSS bytes each time Foo\n\t if file_segment:\n\t file_bytes.append(file_segment)\n\t else:\n\t break\n\texcept IOError:\n\t sys.exit(\"Failed to open file!\")\n\treturn file_bytes\n\ndef convert_file_into_packets(src_port,dst_port,file_content):\n\tpkts_to_send = []\n\tfor item in file_content: # Every MSS bytes should be packaged into segment Foo\n\t\tfile_packet = Packet( (src_port,dst_port) )\n\t\tpacket_checksum = ChecksumHandler.checksumCalculator(PacketParser(file_packet.getPacketBytes()))\n\t\tfile_packet.setPacketPayLoad(item)\n\t\tfile_packet.setPacketHeader(options=len(file_content),checksum=packet_checksum)\n\t\tpkts_to_send.append(file_packet)\n\treturn pkts_to_send\n\t#your code here\n\ndef sendFile(socket, file_packets):\n\tfor packet in file_packets:\n\t\tprint('sending packet')\n\t\ttime.sleep(0.3)\n\t\tsocket.sendPacket(packet)\n\n","sub_path":"FileUtility/FileUtility.py","file_name":"FileUtility.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"361538691","text":"#!/usr/bin/python3\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\n\nTESTDIR = os.getcwd() + \"/tests/\"\nOUTDIR = TESTDIR + \"out/\"\nERRDIR = TESTDIR + \"err/\"\nPARSER_EXE = \"./parser\"\n\nCORRECT = 0\nWHITESPACE = 1\nERROR = 2\n\ndef colorize(color_code, text):\n return \"\\033[%dm%s\\033[0m\" % (color_code, text)\n\ndef red(text):\n return colorize(31, text)\ndef green(text):\n return colorize(32, text)\ndef yellow(text):\n return colorize(33, text)\n\ndef line_diff(a, b):\n a = a.splitlines()\n b = b.splitlines()\n ws = False\n if len(a) != len(b):\n return ERROR\n \n for i in range(len(a)):\n if a[i] != b[i]:\n if a[i].strip() == b[i].strip():\n ws = True\n else:\n return ERROR\n\n if ws:\n return WHITESPACE\n else:\n return CORRECT\n \n\ndef getCommand(filename):\n return [PARSER_EXE, filename]\n\ndef getFileContents(filename):\n s = \"\"\n with open(filename) as f:\n s = f.read()\n return s\n\ndef maybe_atoi(c):\n return int(c) if c.isdigit() else c\n\ndef natural_keys(word):\n return [ maybe_atoi(c) for c in re.split('(\\d+)', word) ]\n\ndef getFilesOnly(path):\n for f in sorted(os.listdir(path), key=natural_keys):\n if os.path.isfile(os.path.join(path, f)):\n yield f\n\ndef main():\n num_tests = 0\n incorrect = []\n ws_wrong = []\n syntax_only = False\n\n if len(sys.argv) == 2 and sys.argv[1] == 'syntax':\n syntax_only = True\n\n # Ensure the program has been built\n out = subprocess.call(\"make\")\n\n nl = True\n start = time.time()\n files = getFilesOnly(TESTDIR)\n for filename in files:\n sys.stdout.flush()\n num_tests += 1\n command = getCommand(TESTDIR + filename)\n\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out,err = process.communicate()\n\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n # Correct output\n correct_out = getFileContents(OUTDIR + filename)\n correct_err = getFileContents(ERRDIR + filename)\n\n # If the user only wants to check their grammar, just look for \"syntax error\"\n if syntax_only:\n # If the program should throw a syntax error, check for syntax error in the user's output\n if correct_err == \"syntax error\":\n if err == \"syntax error\":\n print(green(\".\"), end=\"\")\n else:\n print(red(\".\"), end=\"\")\n incorrect.append(filename)\n # If the user said there was a syntax error, but there SHOULDN'T be, they failed the test\n elif err == \"syntax 
error\":\n print(red(\".\"), end=\"\")\n incorrect.append(filename)\n # No syntax error and the user didn't say so either; correct\n else:\n print(green(\".\"), end=\"\")\n # Standard testing procedure, check for correct output and error\n else:\n out_diff = line_diff(out, correct_out)\n err_diff = line_diff(err, correct_err)\n if process.returncode > 1:\n print(red(\".\"), end=\"\")\n incorrect.append(filename)\n elif out_diff == CORRECT and err_diff == CORRECT:\n print(green(\".\"), end=\"\")\n elif out_diff != ERROR and err_diff != ERROR:\n print(yellow(\".\"), end=\"\")\n ws_wrong.append(filename)\n else:\n print(red(\".\"), end=\"\")\n incorrect.append(filename)\n \"\"\"\n if out == correct_out and err == correct_err:\n print(green(\".\"), end=\"\")\n elif out.strip() == correct_out and err.strip() == correct_err:\n print(yellow(\".\"), end=\"\")\n ws_wrong.append(filename)\n else:\n print(red(\".\"), end=\"\")\n incorrect.append(filename)\n \"\"\"\n\n if num_tests % 25 == 0:\n print(\" {}\".format(num_tests))\n #end for\n end = time.time()\n elapsed = end - start\n\n print()\n if len(ws_wrong) > 0:\n print(\"You had whitespace issues in the following test cases:\")\n ws_wrong.sort(key=natural_keys)\n print(\", \".join(ws_wrong))\n print()\n if len(incorrect) > 0:\n print(\"Failed {} tests.\".format(len(incorrect)))\n incorrect.sort(key=natural_keys)\n print(\", \".join(incorrect))\n \n if len(ws_wrong) == 0 and len(incorrect) == 0:\n print(\"All tests passed.\")\n print(\"Ran {} tests in {:.2f} seconds.\".format(num_tests, elapsed))\n\nmain()\n","sub_path":"cs352/part4/harness.py","file_name":"harness.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"13198682","text":"import csv\nimport os\nimport tempfile\nfrom os.path import dirname\nfrom typing import Dict\nfrom unittest import TestCase\n\nfrom data.keybindings import Keybindings\nfrom events.events_base import BasicEvents\n\n# Ensure that working directory is sixth_corp\nos.chdir(dirname(dirname(dirname(dirname(os.path.abspath(__file__))))))\n\n\nclass KeybindingsTest(TestCase):\n\n def default_binding(self) -> Dict[str, BasicEvents]:\n return {'y': BasicEvents.SETTINGS,\n 'z': BasicEvents.SETTINGS}\n\n def load_bindings(self, bindings: Dict[str, BasicEvents]) -> None:\n self.preference_file = tempfile.NamedTemporaryFile(mode='w')\n self.keybindings = Keybindings()\n self.keybindings.preference_file = self.preference_file.name\n\n BINDING = 'binding'\n KEY = 'key'\n with open(self.preference_file.name, 'w') as fake_csv:\n writer = csv.DictWriter(fake_csv, fieldnames=[BINDING, KEY])\n writer.writeheader()\n for key, binding in bindings.items():\n writer.writerow({BINDING: binding, KEY: key})\n\n self.keybindings.load()\n\n def test_load_settings(self) -> None:\n self.load_bindings(self.default_binding())\n\n self.assertEqual(\n self.keybindings.event_for_key('y'),\n BasicEvents.SETTINGS)\n\n def test_save_settings(self) -> None:\n self.load_bindings(self.default_binding())\n new_prefs_file = tempfile.NamedTemporaryFile(mode='w')\n self.keybindings.preference_file = new_prefs_file.name\n\n self.keybindings.save()\n\n # load from new file\n self.keybindings = Keybindings()\n self.keybindings.preference_file = new_prefs_file.name\n self.keybindings.load()\n self.assertEqual(self.keybindings.event_for_key('y'),\n BasicEvents.SETTINGS)\n\n def test_update_settings(self) -> None:\n self.load_bindings(self.default_binding())\n 
self.keybindings.update_binding('y', BasicEvents.NONE)\n\n self.assertEqual(self.keybindings.event_for_key('y'), BasicEvents.NONE)\n\n def test_inverse_binding(self) -> None:\n bindings = self.default_binding()\n event = BasicEvents.SETTINGS\n self.load_bindings(bindings)\n actual = tuple(sorted((self.keybindings.keys_for_event(event))))\n expected = tuple(sorted(k for k, v in bindings.items() if v == event))\n assert actual == expected\n\n def test_inverse_binding_no_keys(self) -> None:\n bindings = self.default_binding()\n event = BasicEvents.DEBUG\n self.load_bindings(bindings)\n actual = tuple(sorted((self.keybindings.keys_for_event(event))))\n assert actual == ()\n expected = tuple(sorted(k for k, v in bindings.items() if v == event))\n assert actual == expected\n\n def test_update_settings_are_saved(self) -> None:\n self.load_bindings(self.default_binding())\n self.keybindings.update_binding('y', BasicEvents.NONE)\n\n # ensure key change persists through saving\n self.keybindings = Keybindings()\n self.keybindings.preference_file = self.preference_file.name\n self.keybindings.load()\n self.assertEqual(self.keybindings.event_for_key('y'), BasicEvents.NONE)\n","sub_path":"src/data/tests/keybindings_test.py","file_name":"keybindings_test.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156528332","text":"from ignite.exceptions import NotComputableError\nfrom ignite.metrics import Recall\nimport pytest\nimport torch\n\n\ndef test_no_update():\n recall = Recall()\n with pytest.raises(NotComputableError):\n recall.compute()\n\n\ndef test_compute():\n recall = Recall()\n\n y_pred = torch.eye(4)\n y = torch.ones(4).type(torch.LongTensor)\n recall.update((y_pred, y))\n\n result = list(recall.compute())\n\n assert result[0] == 0.0\n assert result[1] == 0.25\n assert result[2] == 0.0\n assert result[3] == 0.0\n\n recall.reset()\n y_pred = torch.eye(2)\n y = torch.ones(2).type(torch.LongTensor)\n recall.update((y_pred, y))\n y = torch.zeros(2).type(torch.LongTensor)\n recall.update((y_pred, y))\n\n result = list(recall.compute())\n\n assert result[0] == 0.5\n assert result[1] == 0.5\n\n\ndef test_compute_average():\n recall = Recall(average=True)\n\n y_pred = torch.eye(4)\n y = torch.ones(4).type(torch.LongTensor)\n recall.update((y_pred, y))\n\n assert isinstance(recall.compute(), float)\n assert recall.compute() == 0.0625\n\n\ndef test_compute_all_wrong():\n recall = Recall()\n\n y_pred = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])\n y = torch.ones(2).type(torch.LongTensor)\n recall.update((y_pred, y))\n\n result = list(recall.compute())\n\n assert result[0] == 0.0\n assert result[1] == 0.0\n","sub_path":"tests/ignite/metrics/test_recall.py","file_name":"test_recall.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"455104466","text":"ids = open('input.txt').read().splitlines()\n\ndub = trip = 0\nfor id in ids:\n tempDub = tempTrip = 0\n\n for letter in id:\n if id.count(letter) == 2:\n tempDub += 1\n if id.count(letter) == 3:\n tempTrip += 1\n \n if tempDub > 0:\n dub += 1\n if tempTrip > 0:\n trip += 1\n\nprint('Part 1:', dub * trip)\n\nfor i, _ in enumerate(ids):\n for j in range(i, len(ids)):\n diff = 0\n diffIndex = 0\n for k, pair in enumerate(zip(ids[i], ids[j])):\n if pair[0] != pair[1]:\n diff += 1\n diffIndex = k\n if diff == 1:\n diffID = ids[j][:diffIndex] + ids[j][diffIndex+1:]\n 
print('Part 2:', diffID)\n","sub_path":"2018/day2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"8091832","text":"#!/usr/bin/python\n# encoding:utf-8\n\nimport time\nfrom DBtools import OPMysql\n\n\n# 获取时间\ndef get_time():\n time_str = time.strftime(\"%Y{}%m{}%d{} %X\")\n return time_str.format(\"年\", \"月\", \"日\")\n\n\n# 测试数据表是否写入信息(主要用作测试使用)\ndef test():\n mysqlDB = OPMysql()\n sql = \"select * from details\"\n res = mysqlDB.query(sql)\n return res[0]\n\n\n# 获取全国疫情的关键信息,按照更新的时间节点的最新日期从表中查取数据包括累计确诊、治愈、死亡等相关数据\ndef get_c1_data():\n mysqlDB = OPMysql()\n sql = \"select sum(confirm),\" \\\n \"(select suspect from history order by ds desc limit 1),\" \\\n \"sum(heal),sum(dead) from details \" \\\n \"where update_time=(select update_time from details order by update_time desc limit 1) \"\n res = mysqlDB.query(sql)\n return res[0]\n\n\n# 获取各省份以及各省份对应的累计确诊人数,做可视化疫情地图做数据支撑\ndef get_c2_data():\n mysqlDB = OPMysql()\n sql = \"select province,sum(confirm) from details \" \\\n \"where update_time=(select update_time from details \" \\\n \"order by update_time desc limit 1) \" \\\n \"group by province\"\n res = mysqlDB.query(sql)\n return res\n\n\n# 通过查找history库获取累计确认折线图的数据支撑\ndef get_l1_data():\n mysqlDB = OPMysql()\n sql = \"select ds,confirm,suspect,heal,dead from history\"\n res = mysqlDB.query(sql)\n return res\n\n\n# 通过查找history库获取累计确诊折线图的数据支撑\ndef get_l2_data():\n mysqlDB = OPMysql()\n sql = \"select ds,confirm_add,suspect_add from history\"\n res = mysqlDB.query(sql)\n return res\n\n\n# 通过查库获取非武汉地区的全国top5城市地区的确诊数据\ndef get_r1_data():\n mysqlDB = OPMysql()\n sql = 'select city,confirm from ' \\\n '(select city,confirm from details ' \\\n 'where update_time=(select update_time from details order by update_time desc limit 1) ' \\\n 'and province not in (\"湖北\",\"北京\",\"上海\",\"天津\",\"重庆\",\"香港\") ' \\\n 'union all ' \\\n 'select province as city,sum(confirm) as confirm from details ' \\\n 'where update_time=(select update_time from details order by update_time desc limit 1) ' \\\n 'and province in (\"北京\",\"上海\",\"天津\",\"重庆\",\"香港\") group by province) as a ' \\\n 'order by confirm desc limit 5'\n res = mysqlDB.query(sql)\n return res\n\n\n# 通过查找hotsearch表获取经过统计的词频和热搜词为构建词云图做数据支撑\ndef get_r2_data():\n mysqlDB = OPMysql()\n sql = \"select content from hotsearch order by id desc limit 20\"\n res = mysqlDB.query(sql)\n return res\n\n","sub_path":"0608/monitor4cov/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"639041288","text":"#!/usr/bin/python\n\nimport numpy as np\nimport operator\n\n#test_k = np.array([0, 2, 7, 0])\n#test_v = 5\n#test_v2 = 4\n\ndef calc(mem):\n already_visited = {}\n #already_visited = []\n it = 0\n current = str(mem)\n l = len(mem)\n while current not in already_visited.values():\n #while current not in already_visited:\n already_visited[it] = current\n #already_visited.append(current)\n m_i, m_v = max(enumerate(mem), key = operator.itemgetter(1))\n mem[m_i] = 0\n mem += int(m_v / l)\n r = m_v % l\n e = m_i + r\n if e < l:\n mem[range(m_i + 1, e + 1)] += 1\n else:\n mem[range(m_i - l + 1, (e + 1) % l)] += 1\n current = str(mem)\n it += 1\n loop_size = it - [k for k in already_visited.keys() if already_visited[k] == current][0]\n #loop_size = it - already_visited.index(current)\n return it, loop_size\n\nif __name__ == 
'__main__':\n #print('%s | %d,%d | %s' % (test_k, test_v, test_v2, calc(test_k.copy())))\n #assert (test_v, test_v2) == calc(test_k.copy())\n with open('input', 'r') as f:\n mem = np.array(list(map(int, f.read().strip().split('\\t'))))\n a1, a2 = calc(mem.copy())\n print('answer 1: %d' % a1)\n print('answer 2: %d' % a2)\n","sub_path":"2017/d6/d6.py","file_name":"d6.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"590050399","text":"import sys # sys нужен для передачи argv в QApplication\nfrom PyQt5 import QtWidgets, QtGui\nimport button # Это наш преобразованный файл дизайна\nclass ExampleApp(QtWidgets.QMainWindow, button.Ui_MainWindow):\n def __init__(self):\n # Это здесь нужно для доступа к переменным, методам\n # и т.д. в файле design.py\n super().__init__()\n self.setupUi(self)# Это для инициализации дизайна\n\n text = self.label.text()\n #self.label_2.setText('
<b>'+text+'</b>
')\n #self.label.setPixmap((QtGui.Qpixmap('1.png')))\n\n #self.pushButton.clicked.connect(self.edit)\n #self.pushButton.pressed.connect(self.edit)\n #\n self.pushButton.released.connect(self.edit)\n\n def edit(self):\n # Обработчик\n self.label_2.setText('На кнопку нажали')\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n # Новый экземплярQApplication\n window = ExampleApp()\n # Создаём объект класса ExampleApp\n window.show() # Показываем окно\n app.exec_() # и запускаем приложение\n\nif __name__ == '__main__':\n# Если мы запускаем файл напрямую,а не импортируем\n main() # то запускаем функцию main()\n","sub_path":"btn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588336526","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nfrom sys import argv\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser(description='Получить список интерфейс => IP, MCASK')\n\nparser.add_argument('file_name', action=\"store\", help=\"конфигурационный файл\")\n\nargs = parser.parse_args()\n\ndef parse_cfg(file_name):\n '''\n Задание 15.3a\n\n Переделать функцию parse_cfg из задания 15.3 таким образом, чтобы она возвращала словарь:\n * ключ: имя интерфейса\n * значение: кортеж с двумя строками:\n * IP-адрес\n * маска\n\n Например (взяты произвольные адреса):\n {'FastEthernet0/1':('10.0.1.1', '255.255.255.0'),\n 'FastEthernet0/2':('10.0.2.1', '255.255.255.0')}\n\n Для получения такого результата, используйте регулярные выражения.\n\n Проверить работу функции на примере файла config_r1.txt.\n\n Обратите внимание, что в данном случае, можно не проверять корректность IP-адреса,\n диапазоны адресов и так далее, так как обрабатывается вывод команды, а не ввод пользователя.\n\n '''\n\n regex_intf = '^interface\\s(\\S+)'\n regex_ip = '((?:\\d+\\.){3}(?:\\d+)) ((?:\\d+\\.){3}(?:\\d+))'\n\n \n intf = False\n dict_ip_mask = dict()\n\n with open(file_name, \"r\") as f:\n for line in f:\n if line.startswith(\"!\"):\n intf = False;\n continue;\n elif not intf and re.search(regex_intf, line):\n intf = re.search(regex_intf, line).group(1)\n elif intf and re.search(regex_ip, line):\n dict_ip_mask.setdefault(intf, list())\n dict_ip_mask[intf].append(re.search(regex_ip, line).groups())\n\n\n return dict_ip_mask\n\nprint(parse_cfg(args.file_name))\n","sub_path":"secondary_chapter/task_15_3a.py","file_name":"task_15_3a.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99065834","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for converting long-form PPMI data into wide-form PPMI data\n\"\"\"\nimport pandas as pd\n\n\ndef _pivot_data(data, return_date=False):\n \"\"\"\n Helper function for pivoting data\n\n Parameters\n ----------\n data : pandas.DataFrame\n Long-format DataFrame that has columns ['TEST', 'SCORE', 'PARTICIPANT']\n and is pivot-ready\n return_data : bool, optional\n Whether to also return DataFrame containing visit date of `data`. If\n True, provided `data` must have 'VISIT_DATE', column. 
Default: False\n\n Returns\n -------\n wide : (N, G) pandas.DataFrame\n Wide-format data where `N` is samples and `G` is features\n \"\"\"\n\n wide = pd.pivot_table(data,\n columns='TEST',\n values='SCORE',\n index='PARTICIPANT')\n\n if return_date:\n age = data[data.PARTICIPANT.isin(wide.index)]\n age = age[['PARTICIPANT', 'VISIT_DATE']].drop_duplicates('PARTICIPANT')\n return wide, age.set_index('PARTICIPANT')\n\n return wide\n\n\ndef pivot_datscan(data, visit='SC', return_date=False):\n \"\"\"\n Extracts DAT scan data from `data`\n\n Parameters\n ----------\n data : pandas.DataFrame\n Long-format DataFrame as obtained by `ppmi.data.get_all()`\n visit : str, optional\n Visit for which to extract DAT scan data. Default: 'SC'\n return_date : bool, optional\n Whether to also return dataframe containing visit date. Default: False\n\n Returns\n -------\n data : (N, G) pandas.DataFrame\n Wide-format DAT data where `N` is participants and `G` is variables\n \"\"\"\n\n # query appropriate data from input\n all_data = data.query(f'PAG_NAME == \"DATSCAN\" & VISIT == \"{visit}\"')\n\n return _pivot_data(all_data, return_date=return_date)\n\n\ndef pivot_biospecimen(data, visit='BL', return_date=False):\n \"\"\"\n Extracts biospecimen data from `data`\n\n Parameters\n ----------\n data : pandas.DataFrame\n Long-format DataFrame as obtained by `ppmi.data.get_all()`\n visit : str, optional\n Visit for which to extract biospecimen data. Default: 'BL'\n return_date : bool, optional\n Whether to also return dataframe containing visit date. Default: False\n\n Returns\n -------\n data : (N, G) pandas.DataFrame\n Wide-format biospecimen data where N is participants and G is variables\n \"\"\"\n\n # query appropriate data from input\n all_data = data.query(f'PAG_NAME == \"BIOSPEC\" & VISIT == \"{visit}\"')\n\n return _pivot_data(all_data, return_date=return_date)\n\n\ndef pivot_behavior(data, return_date=False):\n \"\"\"\n Extracts behavioral-clinical data from `data`\n\n Parameters\n ----------\n data : pandas.DataFrame\n Long-format DataFrame as obtained by `ppmi.data.get_all()`\n return_date : bool, optional\n Whether to also return dataframe containing visit date. 
Default: False\n\n Returns\n -------\n data : (N, G) pandas.DataFrame\n Wide-format behav data where N is participants and G is variables\n \"\"\"\n\n # query appropriate data from input\n # we want to drop non-behavioral data and the unmedicated UPDRS III score\n # also, we need MOCA from the SC visit as it was not administered at BL\n all_data = data.query(\n f'PAG_NAME not in [\"DATSCAN\", \"BIOSPEC\", \"NUPDRS3A\"] & '\n '(VISIT == \"BL\" & TEST != \"MOCA\" | (VISIT == \"SC\" & TEST == \"MOCA\"))'\n )\n\n return _pivot_data(all_data, return_date=return_date)\n","sub_path":"ppmi/pivot.py","file_name":"pivot.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116780039","text":"nomes = ['Guilherme', 'Marcelo', 'João', 'Júlia']\n\n'''for nome in nomes: # Para \"nome em \"nomes\"\n # \"nome\" percorre cada ítem da lista \"nomes\"\n print(nome)'''\n\n# for x in range(0, 5): # Percorre o intervalo de 0 até o quinto ítem\n# print(x)\n\n# for x in range(5): # Percorre 5 vezes sem especificar o início\n# print(x)\n\n# for x in range(0, 100, 2): # De 0 até o Centésimo ítem passo 2\n# print(x)\n\n# for x in range(4): # Outra forma de percorrer itens de uma lista\n# print(nomes[x])\n\n# for x in range(len(nomes)): # Forma mais segura de usar \"for\" com indices\n# print(nomes[x])\n\n# For com str\n# palavra = 'Guilherme Junqueira'\n\n# for letra in palavra: # Percorreu cada caractere da str\n# print(letra)\n\n# While\n'''i = 0\n\nwhile i < 10: # Enquanto a expressão for verdadeira ficacará dentro do laço while\n print('i ainda é menor que 10: ', i)\n i += 1'''\n\n# Quebrando laços\nn = 0\nwhile True: # Loop infinito\n print(n)\n if n == 20:\n break # Comando que para um laço\n n += 1\n","sub_path":"aula-05-estruturas-de-laco-while-e-for.py","file_name":"aula-05-estruturas-de-laco-while-e-for.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214360767","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/robertoalotufo/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/ia636/iapercentile.py\n# Compiled at: 2014-08-21 22:30:04\n\n\ndef iapercentile(f, p=1):\n import numpy as np\n k = (f.size - 1) * p / 100.0\n dw = np.floor(k).astype(int)\n up = np.ceil(k).astype(int)\n g = np.sort(f.ravel())\n d = g[dw]\n d0 = d * (up - k)\n d1 = g[up] * (k - dw)\n return np.where(dw == up, d, d0 + d1)","sub_path":"pycfiles/ia636-0.11.8.macosx-10.6-i386.tar/iapercentile.py","file_name":"iapercentile.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237573110","text":"import pytest\n\nfrom app.fib import Fibonacci, ReversedFibonacci\n\n\n@pytest.mark.parametrize('expected, iterator', [\n ([1, 2, 3, 5, 8, 13, 21, 34, 55], Fibonacci),\n ([21, 13, 8, 5, 3, 2, 1, 1, 0], ReversedFibonacci),\n])\ndef test_iterable(expected, iterator):\n collection = iterator(10)\n result = [_ for _ in collection]\n assert expected == result[1:]\n","sub_path":"tests/test_iterator.py","file_name":"test_iterator.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"557427297","text":"'''\nGiven a sorted array nums, remove 
the duplicates in-place such that each element appear only once and return the new length.\n\nDo not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.\n'''\nclass Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n i = 0\n for j in range(len(nums)):\n if j+1 < len(nums) and nums[j] != nums[j+1]:\n nums[i+1] = nums[j+1]\n i += 1\n return i+1\n","sub_path":"removedup.py","file_name":"removedup.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452103704","text":"import argparse\r\nimport torch\r\nfrom torch.utils import data as data\r\nfrom torch.autograd import Variable\r\nfrom datasets import dataset\r\nfrom model import Model\r\nfrom vis_tool import Visualizer\r\nimport numpy as np\r\nimport os\r\nfrom tqdm import *\r\n\r\n# parser\r\nclass train_options():\r\n def __init__(self):\r\n self.parser = argparse.ArgumentParser(description='training options of CADN')\r\n\r\n # data related\r\n self.parser.add_argument('--data_roots', type=str, default='OCTdata')\r\n self.parser.add_argument('--nThreads', type=int, default=4)\r\n self.parser.add_argument('--img_channels', type=int, default=1)\r\n self.parser.add_argument('--scale_factor', type=int, default=2)\r\n\r\n # network related\r\n self.parser.add_argument('--num_features', type=int, default=128)\r\n self.parser.add_argument('--num_blocks', type=int, default=3)\r\n\r\n # training related\r\n self.parser.add_argument('--num_epochs', type=int, default=1000)\r\n self.parser.add_argument('--batch_size', type=int, default=2)\r\n self.parser.add_argument('--lr', type=float, default=1e-4)\r\n self.parser.add_argument('--train_interval', type=int, default=10)\r\n self.parser.add_argument('--num_critics', type=int, default=3)\r\n self.parser.add_argument('--step', type=int, default=100)\r\n\r\n # resume train related\r\n self.parser.add_argument('--resume', type=bool, default=False)\r\n self.parser.add_argument('--start_epoch', type=int, default=1)\r\n self.parser.add_argument('--G_AB_checkpoint', type=str, default='')\r\n self.parser.add_argument('--G_BA_checkpoint', type=str, default='')\r\n self.parser.add_argument('--D_A_checkpoint', type=str, default='')\r\n self.parser.add_argument('--D_B_checkpoint', type=str, default='')\r\n\r\n def parse(self):\r\n self.opt = self.parser.parse_args()\r\n args = vars(self.opt)\r\n print('\\n--- load options ---')\r\n for name, value in sorted(args.items()):\r\n print('%s: %s' % (str(name), str(value)))\r\n return self.opt\r\n\r\n\r\n# train\r\nclass trainer():\r\n def __init__(self, ):\r\n super(trainer, self).__init__()\r\n # parse option\r\n self.train_parser = train_options()\r\n self.opts = self.train_parser.parse()\r\n # train data\r\n print('loading training data...')\r\n self.datasets = dataset(self.opts.data_roots, self.opts.scale_factor)\r\n self.train_dataloader = data.DataLoader(self.datasets, self.opts.batch_size, shuffle=True,\r\n num_workers=self.opts.nThreads)\r\n # device\r\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n # model\r\n self.model = Model(self.opts, self.device)\r\n # visualizer\r\n self.train_vis = Visualizer(env='Training')\r\n\r\n # adjustable learning rate\r\n def adjust_learning_rate(self, epoch):\r\n lr = self.opts.lr * (0.1 ** (epoch // self.opts.step))\r\n if lr < 1e-7:\r\n lr = 1e-7\r\n return lr\r\n\r\n def 
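The step-decay schedule above multiplies the base rate by 0.1 every step epochs and floors it at 1e-7; the same value is then written into all four optimizers by four identical loops below. A small helper keeps that update in one place; a minimal sketch (set_lr is a hypothetical name, not part of the original script):

def set_lr(optimizers, lr):
    # Write one learning rate into every param group of every optimizer.
    for optim in optimizers:
        for param_group in optim.param_groups:
            param_group["lr"] = lr

# inside the epoch loop, assuming the four optimizers defined above:
# set_lr([model.G_AB_optim, model.G_BA_optim, model.D_A_optim, model.D_B_optim], lr)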
train_process(self, model, start_epoch):\r\n for epoch in range(start_epoch, self.opts.num_epochs):\r\n lr = self.adjust_learning_rate(epoch - 1)\r\n for param_group in self.model.G_AB_optim.param_groups:\r\n param_group[\"lr\"] = lr\r\n print(\"epoch =\", epoch, \"lr =\", self.model.G_AB_optim.param_groups[0][\"lr\"])\r\n for param_group in self.model.G_BA_optim.param_groups:\r\n param_group[\"lr\"] = lr\r\n print(\"epoch =\", epoch, \"lr =\", self.model.G_BA_optim.param_groups[0][\"lr\"])\r\n for param_group in self.model.D_A_optim.param_groups:\r\n param_group[\"lr\"] = lr\r\n print(\"epoch =\", epoch, \"lr =\", self.model.D_A_optim.param_groups[0][\"lr\"])\r\n for param_group in self.model.D_B_optim.param_groups:\r\n param_group[\"lr\"] = lr\r\n print(\"epoch =\", epoch, \"lr =\", self.model.D_B_optim.param_groups[0][\"lr\"])\r\n\r\n for i, (imageX, imageY, imageZ) in enumerate(self.train_dataloader):\r\n imageX, imageY, imageZ = Variable(imageX), Variable(imageY), Variable(imageZ)\r\n imageX, imageY, imageZ = imageX.to(self.device), imageY.to(self.device), imageZ.to(self.device)\r\n # print(imageZ)\r\n\r\n model.G_AB_optim.zero_grad()\r\n model.G_BA_optim.zero_grad()\r\n model.D_A_optim.zero_grad()\r\n model.D_B_optim.zero_grad()\r\n\r\n\r\n # clearX: the clear vision of noisy imageX\r\n # noisyY: the noisy vision of clear imageY\r\n clearX = model.G_AB(imageX)\r\n noisyY = model.G_BA(imageY)\r\n\r\n # adversarial loss\r\n dis_noisy_X = model.D_A(imageX)\r\n dis_noisy_Y = model.D_A(noisyY)\r\n real = Variable(torch.ones(dis_noisy_Y.size())).to(self.device)\r\n fake = Variable(torch.zeros(dis_noisy_X.size())).to(self.device)\r\n L_dis_noisy = 0.5 * model.criterionL2(dis_noisy_X, real) + 0.5 * model.criterionL2(dis_noisy_Y,\r\n fake) + model.criterionL2(\r\n dis_noisy_Y, real)\r\n\r\n dis_clear_X = model.D_B(clearX)\r\n dis_clear_Y = model.D_B(imageY)\r\n real = Variable(torch.ones(dis_clear_Y.size())).to(self.device)\r\n fake = Variable(torch.zeros(dis_clear_X.size())).to(self.device)\r\n L_dis_clear = 0.5 * model.criterionL2(dis_clear_Y, real) + 0.5 * model.criterionL2(dis_clear_X,\r\n fake) + model.criterionL2(\r\n dis_clear_X, real)\r\n\r\n L_adv = L_dis_noisy + L_dis_clear\r\n\r\n # Cyclic loss\r\n noisyX = model.G_BA(clearX)\r\n clearY = model.G_AB(noisyY)\r\n\r\n L_cyclic = model.criterion_cyclic(imageX, noisyX) + model.criterion_cyclic(imageY, clearY)\r\n\r\n # identity loss\r\n L_identity = model.criterion_identity(model.G_AB(imageY), imageY) + \\\r\n model.criterion_identity(model.G_BA(imageX), imageX)\r\n\r\n loss = L_adv + 10 * L_cyclic + 5 * L_identity\r\n loss.backward()\r\n\r\n model.G_AB_optim.step()\r\n model.G_BA_optim.step()\r\n model.D_A_optim.step()\r\n model.D_B_optim.step()\r\n\r\n # vis\r\n idx = np.random.choice(self.opts.batch_size)\r\n images_noise_row = {'imageX': imageX[idx].detach().cpu().numpy(),\r\n 'clearX': clearX[idx].clamp(0, 1).mul(255).detach().cpu().numpy(),\r\n 'noisyX': noisyX[idx].clamp(0, 1).mul(255).detach().cpu().numpy()}\r\n images_clear_row = {'imageY': imageY[idx].detach().cpu().numpy(),\r\n 'noisyY': noisyY[idx].clamp(0, 1).mul(255).detach().cpu().numpy(),\r\n 'clearY': clearY[idx].clamp(0, 1).mul(255).detach().cpu().numpy()}\r\n losses = {'loss': loss.item(), 'adversarial loss': L_adv.item(),\r\n 'cyclic loss': L_cyclic.item(), 'identity loss': L_identity.item()}\r\n vis_images(self.train_vis, images_noise_row)\r\n vis_images(self.train_vis, images_clear_row)\r\n vis_loss(self.train_vis, losses)\r\n\r\n print('[{}/{}] [{}/{}] 
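Note that L_dis_noisy above folds a discriminator-style term (dis_noisy_Y scored against fake) and a generator-style term (dis_noisy_Y scored against real) into one loss that is backpropagated through all four networks together. In a conventional LSGAN/CycleGAN loop those two objectives are computed and stepped separately, with the generator output detached for the discriminator update; a sketch reusing the names above (illustrative, not the original training rule):

# discriminator step: real noisy images -> 1, generated ones (detached) -> 0
d_loss_noisy = 0.5 * model.criterionL2(model.D_A(imageX), real) \
             + 0.5 * model.criterionL2(model.D_A(noisyY.detach()), fake)

# generator step: the generator tries to make D_A score noisyY as real
g_loss_noisy = model.criterionL2(model.D_A(noisyY), real)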
loss:{}'.format(epoch, self.opts.num_epochs, i, len(self.train_dataloader), loss.item()))\r\n\r\n if epoch % self.opts.train_interval == 0:\r\n models = {'G_AB': model.G_AB,\r\n 'G_BA': model.G_BA,\r\n 'D_A': model.D_A,\r\n 'D_B': model.D_B}\r\n save = save_model(models, epoch)\r\n save.save_checkpoint()\r\n\r\n # first training\r\n def first_train(self):\r\n self.train_process(self.model, self.opts.start_epoch)\r\n\r\n # resume training\r\n def resume_train(self):\r\n # load model parameters\r\n G_AB_checkpoint = torch.load(self.opts.G_AB_checkpoint)\r\n self.model.G_BA.load_state_dict(G_AB_checkpoint['model'].state_dict())\r\n G_BA_checkpoint = torch.load(self.opts.G_BA_checkpoint)\r\n self.model.G_AB.load_state_dict(G_BA_checkpoint['model'].state_dict())\r\n D_A_checkpoint = torch.load(self.opts.D_A_checkpoint)\r\n self.model.D_A.load_state_dict(D_A_checkpoint['model'].state_dict())\r\n D_B_checkpoint = torch.load(self.opts.D_B_checkpoint)\r\n self.model.D_B.load_state_dict(D_B_checkpoint['model'].state_dict())\r\n # train\r\n self.train_process(self.model, self.opts.start_epoch)\r\n\r\n def train(self):\r\n if self.opts.resume:\r\n print('resume training at epoch {}...'.format(self.opts.start_epoch))\r\n self.resume_train()\r\n else:\r\n print('start first training...')\r\n self.first_train()\r\n\r\n\r\n# visualizer\r\ndef vis_images(vis, images):\r\n vis.img_many(images)\r\n\r\n\r\ndef vis_loss(vis, losses):\r\n vis.plot_many(losses)\r\n\r\n\r\nclass save_model():\r\n def __init__(self, models, epoch):\r\n self.model_folder = \"model_para/\"\r\n if not os.path.exists(self.model_folder):\r\n os.makedirs(self.model_folder)\r\n self.models = models\r\n self.epoch = epoch\r\n\r\n def save_checkpoint(self):\r\n for (key, value) in self.models.items():\r\n checkpoint_path = self.model_folder + '{}_{}.pkl'.format(key, self.epoch)\r\n state_dict = {'epoch': self.epoch, 'model': value}\r\n torch.save(state_dict, checkpoint_path)\r\n print(\"Checkpoint saved to {}\".format(checkpoint_path))\r\n\r\n\r\nif __name__ == '__main__':\r\n train = trainer()\r\n train.train()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"train_v2.py","file_name":"train_v2.py","file_ext":"py","file_size_in_byte":10126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"50070922","text":"#!/usr/bin/python3.5\n# -*-coding:utf-8 -*-\nimport postgresql\nfrom bottle import route, run, debug, template, get, request, default_app, error\nimport re\nimport datetime\n\nlog_broker = open('log-main-application.log', 'a')\n\ntry:\n db = postgresql.open('pq://postgres:password@188.116.57.50:5432/ussc')\nexcept Exception:\n log_broker.writelines('{0} Fail on connect to database\\n'.format(datetime.datetime.now()))\nelse:\n log_broker.writelines('{0} Connect to DB succesfull\\n'.format(datetime.datetime.now()))\n\n\n@property\ndef charset(self, default='UTF-8'):\n if 'charset=' in self.content_type:\n return self.content_type.split('charset=')[-1].split(';')[0].strip()\n return default\n\n@error(500)\n@error(501)\n@error(502)\ndef internralerror(code):\n\treturn 'Servery ploxo {0}'.format(code)\n\ndef isvalidphone(phone):\n return re.match(r'[7-8]{1}[0-9]{9}', phone) and len(phone) == 11\n\n\n@route('/')\ndef main():\n return template('main_forms.tpl')\n\n\n@get('/show')\ndef printdb():\n result = db.query('SELECT * FROM users')\n return template('input.tpl', rows=result)\n\n\ndef stripchar(pattern, sym):\n result = \"\"\n for i in 
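One caveat in isvalidphone above: the pattern [7-8]{1}[0-9]{9} pins down only the first 10 characters while the length check requires 11, so the final digit is never inspected (and re.match anchors only at the start). A tighter variant using fullmatch; a sketch, not the code this app ships:

import re

def isvalidphone(phone):
    # 11 digits total: a leading 7 or 8 followed by exactly 10 more digits.
    return re.fullmatch(r'[7-8][0-9]{10}', phone) is not None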
range(len(pattern)):\n if pattern[i] in sym:\n continue\n else:\n result += pattern[i]\n\n return result\n\n\n@route('/add', method='POST')\ndef add_to_db():\n phone = stripchar(str(request.POST.get('phone').strip()), ['', ' ', '(', ')', '-', '+'])\n user = request.POST.get('name')\n if isvalidphone(phone):\n index = db.query(\"select count(*) from users\")[0][0] + 1\n db.query(\"insert into users (id,pnumber,uname) values({0}, '{1}', '{2}');\".format(index, phone, user))\n return template('success.tpl')\n else:\n return template('errphone.tpl')\n\n# debug(True)\n# run(port=888)\n\nif __name__ == '__main__':\n run(host='188.116.57.50', port=8181)\n# Run bottle in application mode. Required in order to get the application working with uWSGI!\nelse:\n application = app = default_app()\n\n# bottle.run(app=StripPathMiddleware(app),server='python_server',host='188.116.57.50',port=9999)\n# test_ussc = application = default_app()\n# db.close()\nlog_broker.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"125110252","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nimport random\n\n# Create your views here.\n\n@xframe_options_exempt\ndef index(request):\n\n n = list(range(1,46))\n gamesets = []\n \n while len(n) >= 3:\n gameset=[]\n while len(gameset) <6:\n \n if len(n) > 3:\n i = random.randrange(0,len(n))\n gameset.append(n[i])\n n.pop(i)\n \n gameset.sort()\n gamesets.append(gameset)\n print(gameset)\n \n if len(n) == 3:\n \n new_n = list(set(range(1,46)) - set(n)) \n \n for i in range(0,3):\n j = random.randrange(1,len(new_n))\n n.append(new_n[j])\n new_n.pop(j)\n \n \n print(n)\n gamesets.append(n)\n \n \n break\n \n context={ \n 'gamesets':gamesets\n }\n \n return render(request,'main/gl.html',context=context)\n ","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101342461","text":"from .components import CrudComponent, KongError\nfrom .routes import Routes\nfrom .plugins import KongEntityWithPlugins\nfrom .utils import local_ip\n\n\nREMOVE = frozenset(('absent', 'remove'))\nLOCAL_HOST = frozenset(('localhost', '127.0.0.1'))\n\n\nclass Service(KongEntityWithPlugins):\n \"\"\"Object representing a Kong service\n \"\"\"\n @property\n def routes(self):\n return Routes(self)\n\n @property\n def host(self):\n return self.data.get('host')\n\n\nclass Services(CrudComponent):\n \"\"\"Kong Services\n \"\"\"\n Entity = Service\n\n async def delete(self, id_):\n srv = self.wrap({'id': id_})\n await srv.routes.delete_all()\n await srv.plugins.delete_all()\n return await super().delete(id_)\n\n async def apply_json(self, data):\n \"\"\"Apply a JSON data object for a service\n \"\"\"\n if not isinstance(data, list):\n data = [data]\n result = []\n for entry in data:\n if not isinstance(entry, dict):\n raise KongError('dictionary required')\n ensure = entry.pop('ensure', None)\n name = entry.pop('name', None)\n routes = entry.pop('routes', [])\n plugins = entry.pop('plugins', [])\n host = entry.pop('host', None)\n if host in LOCAL_HOST:\n host = local_ip()\n if not name:\n raise KongError('Service name is required')\n if ensure in REMOVE:\n if await self.has(name):\n await self.delete(name)\n continue\n # 
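The lottery view above draws numbers without replacement by popping random indices out of a shrinking pool; random.sample expresses the same draw directly. A sketch of one 6-number line under that approach (a standalone helper, not the original view logic):

import random

def draw_gameset(pool_size=45, picks=6):
    # Sample distinct numbers from 1..pool_size and return them sorted.
    return sorted(random.sample(range(1, pool_size + 1), picks))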
backward compatible with config entry\n config = entry.pop('config', None)\n if isinstance(config, dict):\n entry.update(config)\n if await self.has(name):\n srv = await self.update(name, host=host, **entry)\n else:\n srv = await self.create(name=name, host=host, **entry)\n srv.data['routes'] = await srv.routes.apply_json(routes)\n srv.data['plugins'] = await srv.plugins.apply_json(plugins)\n result.append(srv.data)\n return result\n","sub_path":"kong/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532080437","text":"#!/home/waftab/anaconda3/bin/python\n#coding: utf-8\n\nimport argparse\nimport os\nimport sys\nimport re\nimport time\n\nparser = argparse.ArgumentParser(description='Modify MaxQuant parameter file')\nrequiredNamed = parser.add_argument_group('required named arguments')\nrequiredNamed.add_argument('--input_xml', '-in', type=argparse.FileType('r', encoding='UTF-8'), required=True, help='input xml file')\nrequiredNamed.add_argument('--output_xml', '-out', type=str, required=True, help='output xml file')\nrequiredNamed.add_argument('--raw_files_folder', '-raw', type=str, nargs='+', required=True, help='raw file\\'s folder')\nrequiredNamed.add_argument('--fasta_file_fullpath', '-fasta', type=str, required=True, help='fasta file with full path')\nrequiredNamed.add_argument('--mq_version', '-mq', type=str, default='1_6_6_0', required=True, help='MaxQuant version')\nrequiredNamed.add_argument('--threads', '-t', type=int, default=72, required=True, help='Number of threads')\nrequiredNamed.add_argument('--time', '-run', type=str, required=True, help='Runtime')\nrequiredNamed.add_argument('--partition', '-p', type=str, required=True, help='Partition')\nrequiredNamed.add_argument('--jobname', '-j', type=str, required=True, help='Job Name')\nargs = parser.parse_args()\n\n## read input xml\nmqpar = open(args.input_xml.name, 'r')\nmqpar_text = mqpar.read()\nmqpar.close()\n#print(mqpar)\n#print(mqpar_text)\n\n## replace fasta file's path in the xml\n#fasta_file_fullpath = ('' + re.sub(r\"_\", \".\", str(args.fasta_file_fullpath)) + '')\nfasta_file_fullpath = ('' + str(args.fasta_file_fullpath) + '')\nmqpar_text = re.sub(r'\\(.|\\n|\\r)*\\<\\/fastaFilePath\\>', fasta_file_fullpath, mqpar_text)\n#print(fasta_file_fullpath)\n\n## replace the raw files path in the xml\nfile_counter = 0\nfile_path_repl_text = '\\n'\n\nfor folder in args.raw_files_folder:\n dirs = [f for f in os.listdir(folder) if os.path.isdir(os.path.join(folder, f))]\n # only select directories endind with d\n dirs = [d for d in dirs if d[-2:] == '.d']\n for dir in dirs:\n file_path_repl_text += ('\\t' + os.path.join(os.path.abspath(folder), dir) + '\\n')\n file_counter += 1 \n \nfile_path_repl_text += ' '\n\nmqpar_text = re.sub(r'\\(.|\\n|\\r)*\\<\\/filePaths\\>', file_path_repl_text, mqpar_text)\n\n# replace number of threads\nthreads_tag = ('' + str(args.threads) + '')\nmqpar_text = re.sub(r'\\(.|\\n|\\r)*\\<\\/numThreads\\>', threads_tag, mqpar_text)\n\n# write the MQ version\nMQ_version = ('' + re.sub(r\"_\", \".\", str(args.mq_version)) + '') \nmqpar_text = re.sub(r'\\(.|\\n|\\r)*\\<\\/maxQuantVersion\\>', MQ_version, mqpar_text)\n\n## write output xml file\nout_file = open(args.output_xml, 'w')\nout_file.write(mqpar_text)\nout_file.close()\nprint('XML write success!')\n\n#!/usr/bin/sh\n#SBATCH --job-name=MQ\n#SBATCH --output=MQ.out\n#SBATCH --cpus-per-task=72\n#SBATCH 
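The replacement strings above appear with their XML tags stripped (for example '' + str(args.fasta_file_fullpath) + ''), while the substitution regexes still carry closing tags such as \<\/fastaFilePath\>, so the values were presumably meant to be wrapped in matching tags. A sketch of that wrapping for the fasta path, with the tag name inferred from the surviving regex (a reconstruction, not verified against the original script):

fasta_tagged = '<fastaFilePath>' + str(args.fasta_file_fullpath) + '</fastaFilePath>'
mqpar_text = re.sub(r'<fastaFilePath>(.|\n|\r)*</fastaFilePath>', fasta_tagged, mqpar_text)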
--mem=256000\n#SBATCH --time=30-00:00:00\n#SBATCH --partition=slim18\n\n#source /home/waftab/.bashrc\n#srun mono $MQ_1_6_14_0 /work/project/becimh_005/Shibo3/mqpar_mod_14.xml\n#srun mono $MQ_1_6_15_0 /work/project/becimh_005/Shibo3/mqpar_mod_15.xml\n\n## create the slurm script\nslurm_script = ('#!/usr/bin/sh\\n'\n'#SBATCH --job-name={JOBNAME}\\n'\n'#SBATCH --output={JOBNAME}.out\\n'\n'#SBATCH --cpus-per-task={THREADS}\\n'\n'#SBATCH --mem=256000\\n'\n'#SBATCH --time={TIME}\\n'\n'#SBATCH --partition={PARTITION}\\n\\n'\n'source /home/'+os.getlogin()+'/.bashrc\\n'\n'srun mono ${MQ_VERSION} {MQPAR}\\n'\n)\n\n## create the folder\noutput_folder=\"Slurm_Scripts\"\nif not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n# replace variables in the slurm script\nslurm_script = re.sub(r'{MQ_VERSION}', ('MQ_' + args.mq_version), slurm_script)\nslurm_script = re.sub(r'{MQPAR}', os.path.abspath(args.output_xml), slurm_script)\nslurm_script = re.sub(r'{JOBNAME}', str(args.jobname), slurm_script)\nslurm_script = re.sub(r'{THREADS}', str(args.threads), slurm_script)\nslurm_script = re.sub(r'{TIME}', str(args.time), slurm_script)\nslurm_script = re.sub(r'{PARTITION}', str(args.partition), slurm_script)\n\n# write slurm script - same format as the output folder\nslurm_script_path = os.path.abspath(output_folder)+'/slurm.sh'\nslurm_script_file = open(slurm_script_path, 'w')\nslurm_script_file.write(slurm_script)\nslurm_script_file.close()\nprint('Slurm script write success!')\n","sub_path":"gen_mqpar.py","file_name":"gen_mqpar.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148174906","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pw.learn_pw_support as support\n\ndef tightgauss(x, y):\n sigma = 0.02\n return -0.5 * np.log(2*np.pi) - np.log(sigma) - (x-y)**2 / (2*sigma**2)\n\nif __name__ == '__main__':\n true_const = 4.77\n k = 4\n\n fig, (ax_ll, ax_prior) = plt.subplots(1, 2, sharex=True)\n ax_prior.set_xlim(-10,10)\n ax_prior.set_ylim(0, 1)\n ax_prior.set_title('Prior PDF')\n ax_prior.set_xlabel('x')\n ax_prior.set_ylabel('P(x)')\n xs = np.linspace(-10, 10, 2000)\n ys_prior = 1.0 / 20 * np.ones(xs.shape)\n ax_prior.plot(xs, ys_prior)\n\n ax_ll.set_title('Log likelihood density (k=%s, c=%s)' % (k, true_const))\n ax_ll.set_xlabel('x')\n ax_ll.set_ylabel('log P(y|x)')\n #ax_ll.set_ylim(-800, 0)\n ys_ll = np.array([tightgauss(k, support.commonsteps(x, true_const, -10, 10, k)) for x in xs])\n ax_ll.plot(xs, ys_ll)\n\n fig.subplots_adjust(left=0.10, right=0.97, top=0.87, bottom=0.23, wspace=0.4)\n fig.set_size_inches((10,2))\n fig.savefig('commonsteps.pdf')\n","sub_path":"plot_commonsteps.py","file_name":"plot_commonsteps.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300956106","text":"import sys\nsys.stdin = open('1486.txt', 'r')\n\ndef janghun(n, height):\n global minimum\n if height >= minimum:\n return\n elif n >= N:\n if height >= B and height < minimum:\n minimum = height\n return\n else:\n janghun(n+1, height+H[n])\n janghun(n+1, height)\n\nfor tc in range(1, int(input())+1):\n N, B = map(int, input().split())\n H = list(map(int, input().split()))\n minimum = 987654321\n janghun(0, 0)\n print('#{} {}'.format(tc, minimum-B))","sub_path":"Algorithm/SWEA/파이썬SW문제해결 
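The recursion in the shelf problem above prunes any partial stack that already meets or exceeds the best height found, which is what keeps the 2^N search tractable. For the small N in this problem an explicit brute force over all clerk subsets states the goal directly; a sketch (a hypothetical helper, not the contest submission):

from itertools import combinations

def min_shelf_height(heights, B):
    # Smallest subset sum that reaches B; the answer is its overshoot over B.
    best = float('inf')
    for r in range(1, len(heights) + 1):
        for combo in combinations(heights, r):
            total = sum(combo)
            if B <= total < best:
                best = total
    return best - B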
Tree/1486_장훈이의높은선반.py","file_name":"1486_장훈이의높은선반.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240951442","text":"#!/usr/bin/python\n#abigailc@Actaeon Jan 4 2017; edited july6 2017\n#classes used in OVerall_DTL_Detector.py\nimport sys\nimport argparse\nimport os\nimport re\nimport time\nclass Fasta:\n def __init__(self, name):\n #all ids should be stripped and have \">\" removed for reasons.\n #for now, sequences do not have any stripping applied\n self.name = name\n self.ids = []\n self.original_ids = []\n self.original_seqs = []\n self.seqs = []\n self.species_names = []\n def gen_original_lists(self, fastaname):\n if self.original_ids != []:\n self.ids = []\n self.original_ids = []\n self.original_seqs = []\n self.seqs = []\n self.species_names = []\n with open(fastaname) as fastafile:\n for line in fastafile:\n if \"\\n\" == line:\n pass\n if \">\" in line:\n #write the previous AA seq\n try:\n AAseq=AAseq.strip()\n self.seqs.append(AAseq)\n self.original_seqs.append(AAseq)\n except:\n pass\n #initialize a new AAseq\n AAseq = \"\"\n #format the seqID\n newline = line.strip()\n newline = line.strip(\">\")\n #write the seqID\n self.ids.append(newline.strip())\n self.original_ids.append(newline.strip())\n else:\n AAseq = AAseq+line\n AAseq=AAseq.strip()\n #catch the last AAseq pass\n self.seqs.append(AAseq)\n self.original_seqs.append(AAseq)\n print(\"Initial sequence and ID lists created. Contains \"+str(len(self.ids))+\" sequences\")\n \n def gen_new_fasta(self, new_fasta_name):\n #this should print the changed seqids and changed AA sequences to file.\n newfasta = new_fasta_name\n # print(len(self.original_ids))\n # print(len(self.ids))\n # print(len(self.original_seqs))\n # print(len(self.seqs))\n with open (newfasta, \"w\") as new:\n for i in range(len(self.ids)):\n new.write(\">\"+self.ids[i].strip()+\"\\n\")\n # print(i) #\n #unclear if this needs a \"\\n\" after it... 
check.#TODO\n #print(self.seqs)\n #print(type(self.seqs[i]))\n new.write(self.seqs[i]+\"\\n\")\n print(\"Finished, your new fasta file is located at \"+newfasta)\n #done\n def extract(self, list_of_keeps):\n keep_ids = []\n keep_seq = []\n success = 0\n suc_num = len(list_of_keeps)\n or_num = len(self.original_ids)\n for item in list_of_keeps:\n item = item.strip()\n found = \"n\"\n for thing in self.original_ids:\n if thing.strip() == item:\n keep_ids.append(thing)\n index = self.original_ids.index(item)\n seq = self.original_seqs[index]\n keep_seq.append(seq)\n success += 1\n #print(\"matched:\"+item+\":with:\"+thing.strip())\n found =\"y\"\n if found == \"n\":\n print (\"could not find in .fasta the tip:\"+item)\n if suc_num == success:\n print(\"100% complete extract\")\n else:\n print(str(success)+\"out of \"+str(suc_num)+\" sequences extracted\")\n #print(\"looked for\")\n #print(list_of_keeps)\n #print(\"in\")\n #print(self.original_ids)\n self.ids = keep_ids\n self.seqs = keep_seq\n\n def gen_gis_list(self):\n gilist = []\n for item in self.ids:\n #print(item)\n taxon = re.sub(\"(.*)(gi#\\|?)([0-9]*)(.*)\", \"\\\\3\", item)\n #print(taxon)\n if \"|\" in taxon:\n print(\"TAXON error in gen_gis_lists():\" + taxon)\n bleh, taxon = taxon.split(\"#\")\n #print(taxon)\n gilist.append(taxon)\n self.gis_list = gilist\n return gilist\n\n def gen_species_lists(self):\n speclist = []\n for item in self.ids:\n taxon = re.sub(\"([^_]*)([A-Z][a-z]*_?[A-Z]?[a-z]*[^\\|]*)(.*)\", \"\\\\2\", item)\n if \"|\" in taxon:\n tlist = item.split(\"|\")\n taxon = tlist[-2]\n if \"|\" in taxon:\n print (\"TAXON error in gen_species_lists():\" + taxon)\n speclist.append(taxon)\n self.species_names = speclist\n return speclist\n def gen_species_lists_first_section(self):\n speclist = []\n for item in self.ids:\n parse = item.split(\"|\")\n taxon = parse[0]\n speclist.append(taxon)\n self.species_names = speclist\n return speclist\n\n\n def ret_speclist(self):\n return self.species_names\n\n\nclass Subtree:\n def __init__(self, name):\n #all ids should be stripped and have \">\" removed for reasons.\n #for now, sequences do not have any stripping applied\n # 123\n self.number_name = name\n #Cyanobacteria\n self.string_name = \"\"\n #Bacterua\n self.category = \"\"\n #[A,B,C]\n self.tips = []\n # cyano.fasta\n self.fasta = \"\"\n # object\n self.fasta_object = \"\"\n # object\n self.species_fasta_object = \"\"\n # object\n self.gene_fasta_object = \"\"\n #the subtree as generated in figtree\n self.fasttree_st = \"\"\n #i expect this to be the RAxML_BestTree but it looks like its a fasta instead?\n self.gene_tree = \"\"\n #RAxML_besttree\n self.gene_tree_name = \"\"\n self.species_tree_name = \"\"\n self.gene_tree_species_tips = \"\"\n self.species_tree = \"\"\n self.species_list_original = []\n self.species_list_gene_to_species = []\n self.species_list_after_removal = []\n self.species_list_plus_og_loss = []\n self.besttree_str = \"\"\n self.prefix = \"\"\n self.cladetype = \"\"\n self.projectname = \"\"\n def set_gene_tree_species_tips(self):\n a = self.gene_tree\n if self.fasta_object == \"\":\n if self.fasta == \"\":\n print(\"this isn't going to work\")\n raise SystemExit\n self.fasta_object = Fasta(self.fasta)\n b = self.fasta_object\n c = b.gen_species_lists()\n d = b.gen_gis_list()\n print(d[0])\n old = b.ids\n newt = a\n new = []\n length = len(b.species_names)\n print(length)\n for i in range(length):\n new.append(b.species_names[i]+\"_\"+d[i])\n for item in old:\n index = old.index(item)\n newt = 
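gen_original_lists above has to flush the final sequence after the loop because a FASTA record only ends when the next '>' header (or end of file) arrives. A generator makes that flush pattern compact; a sketch of the same parse (read_fasta is a hypothetical free function, not a method of this class):

def read_fasta(path):
    # Yield (id, sequence) pairs; the last record is flushed after the loop.
    header, seq = None, []
    with open(path) as handle:
        for line in handle:
            line = line.strip()
            if line.startswith('>'):
                if header is not None:
                    yield header, ''.join(seq)
                header, seq = line[1:], []
            elif line:
                seq.append(line)
    if header is not None:
        yield header, ''.join(seq)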
newt.replace(item, new[index])\n self.gene_tree_species_tips = newt\n def set_alignment_with_species_tips(self):\n #make a new .fasta object and then change its tips.\n a = Fasta(self.fasta)\n try:\n a.gen_original_lists(self.fasta)\n except:\n a.gen_original_lists(\"./\"+self.projectname+\"/Gene_Trees/muscle/\"+self.fasta+\"_Muscle.fasta\")\n self.fasta_object_with_species_names = a\n \n b = a.gen_species_lists()\n c = a.gen_gis_list()\n old = a.original_ids\n new = []\n for i in range(len(a.species_names)):\n new.append(a.species_names[i]+\"_\"+c[i])\n\n for item in old:\n index = old.index(item)\n a.ids[index] = new[index]\n fasta_with_species_names = a.gen_new_fasta(\"./\"+self.projectname+\"/Gene_Trees/fasta/\"+self.prefix+\".gene_sp_names.fasta\")\n return fasta_with_species_names\n #edit april ^\n def ret_gene_tree_species_tips(self):\n return self.gene_tree_species_tips\n def set_fasta_object(self):\n MyFasta = Fasta(self.fasta)\n MyFasta.gen_original_lists(self.fasta)\n MyFasta.gen_species_lists()\n self.fasta_object = MyFasta\n \n def set_species_fasta_object(self, input_fasta):\n MyFasta = Fasta(input_fasta)\n MyFasta.gen_original_lists(self.fasta)\n MyFasta.gen_species_lists()\n self.species_fasta_object = MyFasta\n self.species_list_after_removal = MyFasta.gen_species_lists()\n def set_species_list(self, splist):\n self.species_list = splist\n def set_type(self, typ):\n self.cladetype = typ\n def set_prefix(self, pref):\n self.prefix = pref\n def set_species_tree_name(self,speciest):\n self.species_tree_name = speciest\n with open(speciest) as old:\n self.species_tree = old.read().strip()\n def set_gene_tree_name(self, gene_tree):\n self.gene_tree_name = gene_tree\n with open(gene_tree) as old:\n self.gene_tree = old.read().strip()\n def set_fasttree(self, ft):\n self.fasttree_st=ft\n def set_string(self, stringn):\n self.string_name = stringn\n def set_category(self, cat):\n self.category = cat\n def set_fasta(self, fasta):\n self.fasta = fasta\n def set_tips(self, tips):\n if type(tips) == str:\n self.tips.append(str)\n else:\n for item in tips:\n self.tips.append(item)\n def ret_fasta_object(self):\n return self.fasta_object\n def ret_number(self):\n return self.number_name\n def ret_type(self):\n return self.cladetype\n def ret_species_list(self):\n return self.species_list\n def ret_string(self):\n return self.string_name\n def ret_name(self):\n return self.number_name\n def ret_prefix(self):\n return self.prefix\n def ret_cat(self):\n return self.category\n def ret_tips(self):\n return self.tips\n def ret_fasta(self):\n return self.fasta\n def ret_fasttree(self):\n return self.fasttree_st\n def ret_gene_tree(self):\n return self.gene_tree\n def ret_species_tree(self):\n return self.species_tree\n","sub_path":"oxy_mods/Classes_DTL_Detector.py","file_name":"Classes_DTL_Detector.py","file_ext":"py","file_size_in_byte":11483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"322319810","text":"import urllib\nimport numpy\n\"\"\"\nidea, instead of doing the URL list for loop all over again, maybe use string.replace ????\n\"\"\"\nEntireHTMLstring = \"\"\nListBoyNames = []\nListGirlNames = []\npage1boy = \"http://www.prokerala.com/kids/baby-names/boy/\"\nListOfURLSboy = [page1boy]\nfor i in range(170):\n ListOfURLSboy.append(\"http://www.prokerala.com/kids/baby-names/boy/page-\" + str(i+2) + \".html\")\nfor URL in ListOfURLSboy:\n CurrentPageData = urllib.urlopen(URL)\n htmlSource = CurrentPageData.read()\n 
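urllib.urlopen is the Python 2 spelling; under Python 3 the fetch in this scraper needs urllib.request, and the response body arrives as bytes that must be decoded. A Python 3 sketch of the same fetch (assuming UTF-8 pages; the charset may need checking per site):

from urllib.request import urlopen

def fetch(url):
    # Return the page body as text.
    with urlopen(url) as resp:
        return resp.read().decode('utf-8')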
CurrentPageData.close()\n EntireHTMLstring += htmlSource\n\n htmlData = htmlSource.split('nameDetails')\n htmlData = htmlData[1:]\n\n for elem in htmlData:\n elem = elem.splitlines()[0]\n elem = elem[2:-12]\n ListBoyNames.append(elem)\npage1girl = \"http://www.prokerala.com/kids/baby-names/girl/\"\nListOfURLSgirl = [page1girl]\nfor i in range(170):\n ListOfURLSgirl.append(\"http://www.prokerala.com/kids/baby-names/girl/page-\" + str(i+2) + \".html\")\nfor URL in ListOfURLSgirl:\n CurrentPageData = urllib.urlopen(URL)\n htmlSource = CurrentPageData.read()\n CurrentPageData.close()\n htmlData = htmlSource.split('nameDetails')\n htmlData = htmlData[1:]\n for elem in htmlData:\n elem = elem.splitlines()[0]\n elem = elem[2:-12]\n ListGirlNames.append(elem)\ndef nameGenerate(sex):\n if sex == \"male\":\n yield ListBoyNames\n if sex == \"female\":\n yield ListGirlNames\nboy_names_array = numpy.asarray(ListBoyNames)\ngirl_names_array = numpy.asarray(ListGirlNames)\nnumpy.savetxt('boy_names_output_two.dat', boy_names_array, delimiter=\"\\n\", fmt=\"%s\")\nnumpy.savetxt('girl_names_output_two.dat', girl_names_array, delimiter=\"\\n\", fmt=\"%s\")\n# print(len(ListBoyNames))\n# print(len(ListGirlNames))","sub_path":"program_to_pull_names_from_prokerala.py","file_name":"program_to_pull_names_from_prokerala.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"543380199","text":"from controller.behavior import *\nimport data.attck as attck\nimport data.rule as rule\n\nform = {\n \"behaviorType\" : \"ProcessBehavior\",\n \"endpointID\" : \"DESKTOP-3LRRD6K\",\n \"startDate\" : \"2020-04-08\",\n \"endDate\" : \"2020-04-08\",\n \"pageSize\" : \"10\",\n \"pageIndex\" : \"0\"\n}\n\nts_tr = format_daterange(('2020-04-05', '2020-04-07'))\nraw_data = ES.load(WINLOGBEAT_INDEX, '_doc', args={\n 'bool': {\n 'filter': {\n 'range': {\n '@timestamp': {\n 'gte': ts_tr[0],\n 'lte': ts_tr[1]\n }\n }\n }\n }\n})\nbehavior_list = []\n\nfor _, e in raw_data.iterrows():\n eid = str(e['winlog']['event_id'])\n func = SysmonData.eventid_behavior_mappings(eid)\n\n if func:\n try:\n props = [en.split(': ')[1] for en in e['message'].split('\\n')[1:]]\n mid = encode_md5(e['winlog']['computer_name'])\n behav = func(eid, mid, props)\n behavior_list.append(behav)\n except Exception as e:\n log_error('Error: {} {}-{}'.format(e, eid, repr(props)))\n\nattck_techs = attck.load_attcks('../attck.yaml')\n\nabnormals = rule.filter_abnormal_behaviors(behavior_list, attck_techs)\n\nfor abnormal in abnormals:\n print(str(abnormal.attck_ids) + '###' + str(abnormal))\n","sub_path":"core/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"167592742","text":"'''\nhttps://leetcode.com/problems/arranging-coins/description/\n'''\n\ndef arrangeCoins(n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if(n == 0 or n == 1):\n return n\n if(n == 2):\n return 1\n count = 0\n for i in range(1,n):\n count += i\n if(count > n):\n return i-1\n if(count == n):\n return i\n \n\nn = 10\nprint(arrangeCoins(n))\n","sub_path":"LeetCode/1. 
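The loop above walks row sums one at a time, so it is O(sqrt(n)); the triangular-number identity k*(k+1)/2 <= n gives the same answer in O(1) as k = floor((sqrt(8n + 1) - 1) / 2). A sketch using integer arithmetic only:

import math

def arrange_coins_closed_form(n):
    # Largest k with k*(k+1)/2 <= n.
    return (math.isqrt(8 * n + 1) - 1) // 2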
Easy/Arranging Coins.py","file_name":"Arranging Coins.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293827174","text":"def gcd(a, b): \n if a == 0 : \n return b \n \n return gcd(b%a, a) \n \n\nprint(\"Ingrese el primer numero\") \na = int(input())\n\nprint(\"Ingrese el segundo numero\") \nb = int(input())\n\nprint(\"GCD(\", a , \",\" , b, \") = \", gcd(a, b)) \n\n","sub_path":"Seguridad/GCD.py","file_name":"GCD.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"614200927","text":"import pandas as pd\nfrom pandas import Series, DataFrame\n\nemp_list = [{'empno':100, 'name':'나루토', 'job':'DEV'},\n{'empno':120, 'name':'사스케', 'job':'DEV'},\n{'empno':210, 'name':'이타치', 'job':'DEV'}]\n\n#json과 유사하고, 이런 형태로 많이 추출한다\ndf = DataFrame(emp_list)\n#make(transform) dataFrame\nprint(df) #key = column\n\nprint(\"---\")\ndf = df[['empno','job','name']] #컬럼 순서 변경하기 \nprint(df)\n","sub_path":"1109/pandasEx4.py","file_name":"pandasEx4.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187747116","text":"import os\nimport datetime\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q, Count\n\nfrom rest_framework import generics\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import DjangoModelPermissions\n\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import cm\n\nfrom models import OS\nfrom serializers import OSSerializer\nfrom .filters import OSFilter\n\nfrom hospital.equipamentos.models import Setor\n\nclass JSONResponse(HttpResponse):\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\nclass OSList(generics.ListCreateAPIView):\n\n permission_classes = (DjangoModelPermissions,)\n queryset = OS.objects.all()\n paginate_by = None\n serializer_class = OSSerializer\n\nclass OSDetail(generics.RetrieveUpdateDestroyAPIView):\n\n permission_classes = (DjangoModelPermissions,)\n queryset = OS.objects.all()\n serializer_class = OSSerializer\n\ndef MontarPDF(ordem, p):\n p.translate(10,28*cm)\n p.drawString(195,30,\"iHospital - Lista de Ordens de Servico\")\n p.drawString(230,10, ordem.eas.nome)\n\n \t###Descricao do Equipamento###\n p.drawString(0,0, \"Descricao do Equipamento\")\n ##Linhas laterais\n p.line(0,-15,0,-115)\n p.line(575,-15,575,-115)\n\n p.line(0,-15,575,-15)\n p.drawString(0,-30, \" ID do Equipamento: \")\n p.drawString(200,-30, str(ordem.equipamento.id))\n p.line(0,-35,575,-35)\n\n p.drawString(0,-50, \" Tipo: \")\n p.drawString(200,-50, ordem.equipamento.tipo)\n p.line(0,-55,575,-55)\n\n p.drawString(0,-70, \" Fabricante: \")\n p.drawString(200,-70, ordem.equipamento.fabricante)\n p.line(0,-75,575,-75)\n\n p.drawString(0,-90, \" Modelo: \")\n p.drawString(200,-90, ordem.equipamento.modelo)\n p.line(0,-95,575,-95)\n\n p.drawString(0,-110, \" Tombamento: \")\n p.drawString(200,-110, ordem.equipamento.tombamento)\n p.line(0,-115,575,-115)\n\n ###Descricao da Ordem###\n p.drawString(0,-140, \"Descricao da Ordem\")\n ##Linhas laterais\n p.line(0,-155,0,-595)\n 
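Every field in MontarPDF repeats the same drawString/drawString/line triple with hand-tracked y offsets. A table of (label, value) pairs stepped by a fixed row height expresses that block once; a sketch with the same 20-point spacing and 5-point rule offset as above (draw_rows is a hypothetical helper):

def draw_rows(p, rows, y=-30, step=-20):
    # rows: iterable of (label, value); draws a ruled line under each row.
    for label, value in rows:
        p.drawString(0, y, ' ' + label + ': ')
        p.drawString(200, y, str(value))
        p.line(0, y - 5, 575, y - 5)
        y += step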
p.line(575,-155,575,-595)\n\n p.line(0,-155,575,-155)\n p.drawString(0,-170, \" ID Requisicao: \")\n p.drawString(200,-170, str(ordem.rs.id))\n p.line(0,-175,575,-175)\n\n p.drawString(0,-190, \" ID Ordem: \")\n p.drawString(200,-190, str(ordem.id))\n p.line(0,-195,575,-195)\n\n p.drawString(0,-210, \" Empresa: \")\n p.drawString(200,-210, ordem.empresa.nome)\n p.line(0,-215,575,-215)\n\n p.drawString(0,-230, \" Reclamante: \")\n p.drawString(200,-230, ordem.reclamante)\n p.line(0,-235,575,-235)\n \n p.drawString(0,-250, \" Aberto por: \")\n p.drawString(200,-250, ordem.aberto_por)\n p.line(0,-255,575,-255)\n\n p.drawString(0,-270, \" Data abertura: \")\n p.drawString(200,-270, str(ordem.data_abertura))\n p.line(0,-275,575,-275)\n\n p.drawString(0,-290, \" Tipo: \")\n p.drawString(200,-290, ordem.tipo)\n p.line(0,-295,575,-295)\n\n p.drawString(0,-310, \" Servico realizado: \")\n p.drawString(200,-310, ordem.servico_realizado)\n p.line(0,-315,575,-315)\n\n p.drawString(0,-330, \" Falha relatada: \")\n p.drawString(200,-330, ordem.falha_relatada)\n p.line(0,-335,575,-335)\n\n p.drawString(0,-350, \" Orcamento: \")\n p.drawString(200,-350, str(ordem.orcamento))\n p.line(0,-355,575,-355)\n\n p.drawString(0,-370, \" Tipo defeito: \")\n p.drawString(200,-370, ordem.tipo_defeito)\n p.line(0,-375,575,-375)\n\n p.drawString(0,-390, \" Componentes utilizados: \")\n p.drawString(200,-390, ordem.componentes_utilizados)\n p.line(0,-395,575,-395)\n\n p.drawString(0,-410, \" Tipo manutencao: \")\n p.drawString(200,-410, ordem.tipo_manutencao)\n p.line(0,-415,575,-415)\n\n p.drawString(0,-430, \" Entregue a: \")\n p.drawString(200,-430, ordem.entregue_a)\n p.line(0,-435,575,-435)\n\n p.drawString(0,-450, \" Data entrega: \")\n p.drawString(200,-450, str(ordem.data_entrega))\n p.line(0,-455,575,-455)\n\n p.drawString(0,-470, \" Previsao retorno: \")\n p.drawString(200,-470, str(ordem.previsao_retorno))\n p.line(0,-475,575,-475)\n\n p.drawString(0,-490, \" Tecnico responsavel \")\n p.drawString(200,-490, ordem.tecnico_responsavel)\n p.line(0,-495,575,-495)\n\n p.drawString(0,-510, \" Valor: \")\n p.drawString(200,-510, str(ordem.valor))\n p.line(0,-515,575,-515)\n\n p.drawString(0,-530, \" Recebido por: \")\n p.drawString(200,-530, ordem.recebido_por)\n p.line(0,-535,575,-535)\n\n p.drawString(0,-550, \" Data Recebimento: \")\n p.drawString(200,-550, str(ordem.data_recebimento))\n p.line(0,-555,575,-555)\n\n p.drawString(0,-570, \" Data Fechamento: \")\n p.drawString(200,-570, str(ordem.data_fechamento))\n p.line(0,-575,575,-575)\n\n p.drawString(0,-590, \" Observacao: \")\n p.drawString(200,-590, ordem.obs)\n p.line(0,-595,575,-595)\n\n p.showPage()\n p.save()\n\n return p\n\n@api_view(['GET'])\ndef ListPDF(request):\n\n lista_ordens = OS.objects.all()\n\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Ordens.pdf\"'\n\n p = canvas.Canvas(response)\n\n for ordem in lista_ordens:\n MontarPDF(ordem,p)\n\n return response\n\n\ndef DetailPDF(request, ordem_id):\n ordem = get_object_or_404(OS,pk=ordem_id)\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Ordem_%s.pdf\"' % ordem.id\n p = canvas.Canvas(response)\n MontarPDF(ordem, p)\n return response\n\ndef getOrdensFromEq(request,equipamento_id):\n\n ordem_list = OS.objects.filter(equipamento__id = equipamento_id)\n\n serializer = OSSerializer(ordem_list, many=True)\n\n return HttpResponse(content = 
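ListPDF and DetailPDF above rely on HttpResponse being file-like, so ReportLab can paint the PDF straight into the response, and on the Content-Disposition header to trigger a download. A minimal standalone version of that pattern (pdf_response is a hypothetical wrapper; the draw callable is expected to call showPage/save, as MontarPDF does):

from django.http import HttpResponse
from reportlab.pdfgen import canvas

def pdf_response(filename, draw):
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    p = canvas.Canvas(response)
    draw(p)  # e.g. lambda p: MontarPDF(ordem, p)
    return response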
JSONResponse(serializer.data) , status = 201)\n\ndef getOrdensFromReq(request, rs_id):\n\n ordem_list = OS.objects.filter(rs__id = rs_id)\n\n serializer = OSSerializer(ordem_list,many=True)\n\n return HttpResponse(content = JSONResponse(serializer.data) , status = 201)\n\ndef FiltroData(data_min, data_max, situacao):\n\n if data_min and data_min:\n if situacao == 'Todas':\n queryset = OS.objects.filter(Q(data_abertura__gte=data_min) & Q(data_abertura__lte=data_max))\n elif situacao == 'Fechadas':\n queryset = OS.objects.filter(Q(data_abertura__gte=data_min) & Q(data_abertura__lte=data_max) & Q(data_fechamento__lte=datetime.datetime.today()))\n else:\n queryset = OS.objects.filter(Q(data_abertura__gte=data_min) & Q(data_abertura__lte=data_max) & ( Q(data_fechamento__gt=datetime.datetime.today()) | Q(data_fechamento__isnull=True) ) )\n else:\n if situacao == 'Todas':\n queryset = OS.objects.all()\n elif situacao == 'Fechadas':\n queryset = OS.objects.filter(Q(data_fechamento__lte=datetime.datetime.today()))\n else:\n queryset = OS.objects.filter(Q(data_fechamento__gt=datetime.datetime.today()) | Q(data_fechamento__isnull=True))\n\n return queryset\n\n\ndef FiltroOrdens(request):\n\n data_min = request.POST['data_min']\n data_max = request.POST['data_max']\n situacao = request.POST['situacao']\n\n queryset = FiltroData(data_min,data_max,situacao)\n\n filtro = OSFilter(request.POST, queryset)\n serializer = OSSerializer(filtro, many=True)\n response = HttpResponse(content = JSONResponse(serializer.data), status = 200)\n\n return response\n\ndef FiltroPDF(request):\n\n queryset = FiltroData(request.POST['data_min'], request.POST['data_max'], request.POST['situacao'])\n lista_ordens = OSFilter(request.POST, queryset)\n\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Ordens.pdf\"'\n\n p = canvas.Canvas(response)\n\n for ordem in lista_ordens:\n MontarPDF(ordem,p)\n\n return response\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"hospital/ordem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273956690","text":"#!/usr/bin/env python3\r\n\r\nprint('Content-type: text/html\\n')\r\n\r\nfile=open(\"top100moviesAFI.txt\",\"r\")\r\nafi=file.readlines()\r\nfile.close()\r\n\r\nfile=open(\"top100moviesRT.txt\",\"r\")\r\nrt=file.readlines()\r\nfile.close()\r\n\r\nhtml=\"\"\"\r\n \r\n \r\n \r\n Top Movie Comparison\r\n \r\n \r\n
<body>
<h1>Top 100 Film Comparisons</h1>
<table border=1>
<tr><th>Movie</th><th>AFI Rank</th><th>RT Rank</th></tr>
{content}
</table>
</body>
</html>
\r\n \r\n \"\"\"\r\n\r\ntable=\"\"\r\n\r\nfor movie in sorted(set(afi) | set(rt)):\r\n if movie in afi:\r\n afiRank = afi.index(movie)\r\n else:\r\n afiRank = \"--\"\r\n if movie in rt:\r\n rtRank = rt.index(movie)\r\n else:\r\n rtRank = \"--\"\r\n table += \"\"+movie+\"\"+str(afiRank)+\"\"+str(rtRank)+\"\"\r\n\r\n\r\nprint(html.format(content=table))\r\n\r\n\r\n","sub_path":"Weekly_challenge 8.cgi.py","file_name":"Weekly_challenge 8.cgi.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"236827811","text":"# 导入必要的模块\nimport json\nimport pygal_maps_world.maps\nfrom country_codes import get_country_code\nfrom pygal.style import RotateStyle\n\n# 将数据加载到列表中\nfilename = '/home/yyh/Documents/VSCode_work/chapter16/population_data.json'\nwith open(filename) as f:\n pop_data = json.load(f)\n\n# 创建一个包含人口数量的字典\ncc_populations = {} \nfor pop_dict in pop_data: # 将每个字典依次存储在pop_dict中 \n if pop_dict['Year'] == '2010':\n country = pop_dict['Country Name']\n print(country)\n population = int(float(pop_dict['Value']))\n code = get_country_code(country)\n print(code)\n if code:\n cc_populations[code] = population\n\n# 根据人口数量将所有的国家分成三组\ncc_pops_1, cc_pops_2, cc_pops_3 = {}, {}, {}\nfor cc, pop in cc_populations.items():\n if pop < 10000000:\n cc_pops_1[cc] = pop\n elif pop < 1000000000:\n cc_pops_2[cc] = pop\n else:\n cc_pops_3[cc] = pop\n\n# 看看每组分别包含多少个国家\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\n\nwm = pygal_maps_world.maps.World()\nwm_style = RotateStyle('#336699') # 创建实例\nwm = pygal_maps_world.maps.World(style=wm_style)\nwm.title = 'World Population in 2010, by Country'\nwm.add('0-10m', cc_pops_1)\nwm.add('10m-1bn', cc_pops_2)\nwm.add('>1bn', cc_pops_3)\n\nwm.render_to_file('/home/yyh/Documents/jupyter_work/world_population_3.svg')","sub_path":"VSCode_work/chapter16/world_population.py","file_name":"world_population.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"550067423","text":"from vnpy.trader.utils import optimize\nfrom doubleMaIfStrategy import DoubleMaStrategy\nfrom datetime import datetime\nimport os\nimport json\n\ndef setConfig(root=None):\n # 设置策略类\n optimize.strategyClass = DoubleMaStrategy\n # 设置缓存路径,如果不设置则不会缓存优化结果。\n optimize.root = root\n # 设置引擎参数\n optimize.engineSetting = {\n 'dbURI': \"mongodb://192.168.0.104:27017\",\n \"bardbName\": \"VnTrader_1Min_Db_contest\",\n \"timeRange\": {\n \"tradeStart\": datetime(2014,6,1),\n \"tradeEnd\": datetime(2016,6,1),\n \"historyStart\": datetime(2014,3,1),\n },\n \"contract\":[{\n \"slippage\": 0.5,\n \"rate\": 0.0005,\n }]\n }\n # 设置策略固定参数\n optimize.globalSetting = {\n \"symbolList\": [\"IF88:CTP\"],\n # \"barPeriod\": 100,\n }\n # 设置策略优化参数\n optimize.paramsSetting = {\n \"fastPeriod\": range(5,21,5),\n \"slowPeriod\": range(30,81,20)\n }\n path = os.path.split(os.path.realpath(__file__))[0]\n with open(path+\"//CTA_setting.json\") as f:\n globalSetting = json.load(f)[0]\n optimize.globalSetting = globalSetting\n optimize.initOpt()\n\n# 并行优化 无缓存\ndef runSimpleParallel():\n start = datetime.now()\n print(\"run simple | start: %s -------------------------------------------\" % start)\n setConfig()\n # optimize.runParallel() 并行优化,返回回测结果\n report = optimize.runParallel()\n print(report)\n report.sort_values(by = 'sharpeRatio', ascending=False, inplace=True)\n # 将结果保存成csv\n report.to_csv('opt_IF88.csv') \n end = datetime.now()\n print(\"run 
simple | end: %s | expire: %s -----------------------------\" % (end, end-start))\n\ndef main():\n runSimpleParallel()\n\nif __name__ == '__main__':\n main()\n","sub_path":"qfcSection1/doubleMaStrategy/runOptParallel.py","file_name":"runOptParallel.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"122374528","text":"from turtle import Turtle\n\n\nclass Setup(Turtle):\n def __init__(self, position):\n super().__init__()\n self.penup()\n self.goto(0, 295)\n self.setheading(270)\n self.color('white')\n self.width(5)\n self.draw_line()\n # self.shape('turtle')\n self.border(position)\n\n def draw_line(self):\n for _ in range(30):\n self.pendown()\n self.fd(15)\n self.penup()\n self.fd(15)\n\n def border(self, position):\n self.shape('square')\n self.setheading(90)\n self.shapesize(stretch_wid=100, stretch_len=2)\n self.color('white')\n self.penup()\n self.sety(position)\n","sub_path":"Day-22/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233330095","text":"# natarea.py\n# written by: Thomas A. Grate\n# copyright (c) 2017 by Thomas A. Grate, all rights reserved.\n#\n# for OYD Daily program\n\n# class NatAreas_Table - used to query lists of national areas\n# Note: the schema for the NatAreas_Table() is defined in the NatArea() object\nclass NatAreas_Table(object):\n def __init__(self):\n self.areas = []\n self.limit = None\n self.offset = None\n\n # Exectue the SQL query to get Courses from the Database\n def _sql_query (self, c, limit=None, offset=None):\n\n # add limit and offset if presented to query\n limoff = ''\n if self.limit:\n limoff = ' limit ' + str(limit)\n if self.offset:\n limoff += ' offset ' + str(offset)\n\n # execute the query for selected Courses in the database\n c.execute('SELECT * FROM areas' + limoff)\n row = c.fetchone()\n\n # check if a row was returned\n if row:\n # store all rows in the list\n while row:\n self.areas.append(row)\n row = c.fetchone()\n\n # return success\n return 0\n else:\n # return ERROR\n return 1\n\n # Count\n def count(self, db):\n \"\"\" NatAreas_Table.count() function\n Returns number of rows in areas table\n Parameters:\n db - Database() object\n Returns:\n number of rows in courses table\"\"\"\n try:\n c = db.cursor\n\n # execute the query for all users in the database\n c.execute('SELECT count(*) FROM areas')\n row = c.fetchone()\n\n # check if result otherwise return None\n if row:\n return row[0]\n else:\n return None\n except:\n return None\n\n # Query for a range of Students in the Database\n def query_range(self, db, limit, offset):\n self.areas = []\n self.limit = limit\n self.offset = offset\n\n return self._sql_query (c=db.cursor, limit=limit, offset=offset)\n\n # Query for All National Areas in the Database\n def query_all(self, db):\n self.areas = []\n self.limit = None\n self.offset = None\n\n return self._sql_query (c = db.cursor)\n\n# class NatArea - used to manage available OYD National Areas\n# includes the Schema for the National Areas Table - areas\nclass NatArea (object):\n def __init__(self, area_name=None, area_abbrev=None):\n \"\"\"NatArea Object: __init__ Method\n Parameters: (all default to None)\n area_name - national area name\n area_abbrev - abbreviated name of national area\"\"\"\n\n # Dictionary of School Attributes\n self.attrs = {'nat_area_id': None, # internal use only, do not change\n 
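The sweep above expands fastPeriod x slowPeriod into a grid before backtesting; the grid is simply the Cartesian product of the two ranges. An illustration of enumerating it directly (the vnpy optimize module does this internally; the snippet only makes the search space concrete):

from itertools import product

param_grid = {'fastPeriod': range(5, 21, 5), 'slowPeriod': range(30, 81, 20)}
combos = [dict(zip(param_grid, values)) for values in product(*param_grid.values())]
# 4 fast values x 3 slow values -> 12 parameter sets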
'area_name': area_name,\n 'area_abbrev': area_abbrev\n }\n\n # Matching dictionary of UI Elements for NatArea Attributes\n self.ui2 = {\n 0: {'item': 'area_name',\n 'label':'Area Name',\n 'edit_name':'editAreaName',\n 'edit_type':'text',\n 'placeholder':'Area Name',\n 'select_options': None},\n 1: {'item': 'area_abbrev',\n 'label':'Area Abbreviation',\n 'edit_name':'editAreaAbbrev',\n 'edit_type':'text',\n 'placeholder':'Abbreviation',\n 'select_options':None}\n }\n\n # SQL Schema\n # Must match the attrs (attributes) above, line for line\n self.schema = ['nat_area_id',\n 'area_name',\n 'area_abbrev'\n ]\n\n # create the INSERT schema substituion string\n self.schema_insert = \", \".join(self.schema)\n\n # SQL Data Types for the SQL Schema\n # Must match the SQL Schema above, line for line\n self.types = ['integer primary key',\n 'text',\n 'text'\n ]\n\n # make the CREATE schema substituion string\n # used to create the student table\n self.schema_create = ''\n limit = len(self.schema) - 1\n i = 0\n for i in range(0, limit):\n addstr = self.schema[i] + ' ' + self.types[i] + ', '\n self.schema_create += addstr\n self.schema_create += self.schema[limit] + ' ' + self.types[limit]\n\n # VALUE substitution string\n self.schema_insert_sub = '(' + '?,' * (len(self.schema) - 1) + '?)'\n\n # Method to get a tuuple of all school data\n def _get (self):\n \"\"\" NatArea Object: _get method (private)\n Returns a tuple of the NatArea data\n Used by sql_insert \"\"\"\n\n # assuming that dictiionaries are unordered\n # retrive the data in oder as a tuple\n results = []\n for key in self.schema:\n results.append(self.attrs[key])\n\n return tuple(results)\n\n # Method to set all NatArea data from a SQL row tuple\n def _set (self, sql_data):\n \"\"\" NatArea Object: _set method (private)\n Sets NatArea data in the instance of the object.\n Used to set data from a sql query into the object.\n Parameters:\n sql_data - a 'row' tuple returned from a sql query for a National Area\"\"\"\n\n # copy the sql_data, a row, to self.attrs dictionary\n # use self.schema instead of self.attrs.keys() to interate\n # becaause the self.schema is a list and the order will not change\n i = 0\n for key in self.schema:\n self.attrs[key] = sql_data[i]\n i += 1\n\n def _sql_populate (self, c):\n \"\"\" NatArea Object: _sql_populate method (private)\n Populates the instance of NatArea from the database as a new row\n Parameters:\n c = cursor to database\"\"\"\n\n try:\n # Test 1, check for the SQL ID\n if self.attrs['nat_area_id']:\n test = (self.attrs['nat_area_id'], )\n c.execute('SELECT * FROM areas WHERE nat_area_id=?', test)\n # Test 2, check for NatArea Name\n elif self.attrs['area_name']:\n test = (self.attrs['area_name'], )\n c.execute('SELECT * FROM areas WHERE area_name=?', test)\n else:\n # if you did't fill in any information to test, why did you call populate?\n return 1\n except Exception as e:\n print (f\"ERROR: NatArea()._sql_populate: {e}\")\n return 1 # return Error\n\n # check if a row is returned\n row = c.fetchone()\n if row:\n self._set(row)\n return 0\n else:\n return 1\n\n def _sql_insert (self, conn, c):\n \"\"\"NatArea Object: _sql_insert method (private)\n Inserts the instance of NatArea into the database as a new row\n Parameters:\n conn = connection to database\n c = cursor to database\"\"\"\n\n try:\n c.execute('INSERT INTO areas (' + self.schema_insert + \\\n ') VALUES ' + self.schema_insert_sub, self._get())\n except:\n # Insert failed so return Error\n return (1, 'Failed to add New Area!')\n\n # 
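_sql_insert above binds its values through '?' placeholders, which is the safe pattern; the UPDATE path later in this class instead splices str(value) into the SQL text, which breaks on quoted values and invites injection. A parameterized UPDATE sketch over the same schema (sql_update is a hypothetical free function; column names come from the trusted schema list, so only values are bound):

def sql_update(conn, c, table, attrs, schema, key):
    assignments = ', '.join('%s = ?' % col for col in schema)
    values = [attrs[col] for col in schema] + [attrs[key]]
    c.execute('UPDATE %s SET %s WHERE %s = ?' % (table, assignments, key), values)
    conn.commit()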
Save (commit) the changes\n conn.commit()\n\n # sql_id is auto assigned on insert. So, retrive the sql_id from the db\n name = (self.attrs['area_name'], )\n c.execute('SELECT nat_area_id FROM areas WHERE area_name=?', name)\n row = c.fetchone()\n self.attrs['nat_area_id'] = row[0]\n\n return (0, 'Area Successfully Added!')\n\n def _sql_update (self, conn, c):\n \"\"\"NatArea Object: _sql_update method (private)\n Commits all attribute in the instance of NatArea\n to the associated existing row in the database\n Parameters:\n conn = connection to database\n c = cursor to database\"\"\"\n\n if self.attrs['nat_area_id'] is not None:\n try:\n # create the label = value string to UPDATE\n lvl = ''\n for label in self.schema:\n lvl += label + ' = \"' + str(self.attrs[label]) + '\", '\n lvl = lvl [:-2]\n\n # update the row\n c.execute('UPDATE areas SET ' + lvl + \\\n ' WHERE nat_area_id=' + str(self.attrs['nat_area_id']))\n\n # Save (commit) the changes\n conn.commit()\n\n return 0 # return Success\n except Exception as e:\n print (f\"ERROR: NatArea()._sql_update: {e}\")\n return 1 # return Error\n else:\n print (\"ERROR: NatArea()._sql_update: sql_id not yet set\")\n return 1 # return Error\n\n def _sql_delete (self, conn, c):\n \"\"\"NatArea Object: _sql_delete method (private)\n Deletes the row in the database assoicated with the\n instance of School\n Parameters:\n conn = connection to database\n c = cursor to database\"\"\"\n\n # ??? Handle Exceptions Here ???\n if self.attrs['nat_area_id'] is not None:\n try:\n # delete the row\n c.execute('DELETE FROM areas WHERE nat_area_id=' + \\\n str(self.attrs['nat_area_id']))\n\n # Save (commit) the changes\n conn.commit()\n\n return 0 # return Success\n except Exception as e:\n print (f\"ERROR: NatArea()._sql_delete: {e}\")\n return 1 # return Error\n else:\n print (\"ERROR: NatArea()._sql_delete: sql_id not yet set\")\n return 1 # return Error\n\n def get (self, db):\n \"\"\" NatArea Object: get method\n Populates the instance of NatArea from the database as a new row\n Parameters:\n db = Database object from which to retrive the cursor\"\"\"\n\n return self._sql_populate (db.cursor)\n\n def put (self, db):\n \"\"\"NatArea Object: put method\n Inserts the instance of NatArea into the database as a new row\n Parameters:\n db = Database object that contains the\n connection & cursor to database\"\"\"\n\n return self._sql_insert(db.conn, db.cursor)\n\n # commit the data in NatArea to the DB\n def update (self, db):\n \"\"\"Nat Are aObject: update method\n Commits all attributes in the instance of NatArea\n to the associated existing row in the database\n Parameters:\n db = Database object that contains the\n connection & cursor to database\"\"\"\n\n return self._sql_update(db.conn, db.cursor)\n","sub_path":"projects/DailyDataAPI/natarea.py","file_name":"natarea.py","file_ext":"py","file_size_in_byte":10654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103782353","text":"import os\nimport time\n\nfrom fabric.api import task, env, run, sudo\n\nenv.use_ssh_config = True\nenv.forward_agent = True\n\ngit_repo = 'git@github.com:vrtx/dashboard.git'\nenv.hosts = ['mongo-sim-ui-1.vpc3.10gen.cc']\n\nbase_dir = '/opt/10gen/mongo-sim-ui'\ncurrent_link = os.path.join(base_dir, 'current')\nreleases_dir = os.path.join(base_dir, 'releases')\n\ndatetime_format = '%Y%m%d%H%M%S'\n\n@task\ndef deploy():\n deploy_dir = os.path.join(releases_dir, time.strftime(datetime_format))\n\n # clone the repo\n run('git clone 
{0} {1}'.format(git_repo, deploy_dir))\n\n # fix deploy_dir permissions so anyone w/ deploy privs can clean up later\n run('chmod 2775 {0}'.format(deploy_dir))\n\n # update the current symlink\n run('ln -sfn {0} {1}'.format(deploy_dir, current_link))\n\n # restart the service\n sudo('/etc/init.d/mongo-sim-ui restart')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"577909538","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\n\nfrom members.forms import SignupForm\nfrom members.views import User\n\n\n\nclass UserAdmin(BaseUserAdmin):\n fieldsets = BaseUserAdmin.fieldsets + (\n ('추가 정보', {'fields' : ('img_profile', 'age', 'like_posts')}),\n )\n add_fieldsets = BaseUserAdmin.add_fieldsets + (\n ('추가 정보', {\n 'fields': ('img_profile', 'age',),\n }),\n )\n add_form = SignupForm\n\n\nadmin.site.register(User, UserAdmin)","sub_path":"app/members/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"161367421","text":"\"\"\"DPC elements on tensor product cells.\"\"\"\n\nimport sympy\nfrom itertools import product\nfrom ..finite_element import CiarletElement\nfrom ..polynomials import polynomial_set\nfrom ..quadrature import get_quadrature\nfrom ..functionals import PointEvaluation, DotPointEvaluation\n\n\nclass DPC(CiarletElement):\n \"\"\"A dPc element.\"\"\"\n\n def __init__(self, reference, order, variant=\"equispaced\"):\n if order == 0:\n dofs = [\n PointEvaluation(\n tuple(sympy.Rational(1, reference.tdim + 1) for i in range(reference.tdim)),\n entity=(reference.tdim, 0))]\n else:\n points, _ = get_quadrature(variant, order + 1)\n\n dofs = []\n for i in product(range(order + 1), repeat=reference.tdim):\n if sum(i) <= order:\n point = tuple(sympy.Rational(j, order) for j in i[::-1])\n dofs.append(PointEvaluation(point, entity=(reference.tdim, 0)))\n super().__init__(\n reference, order, polynomial_set(reference.tdim, 1, order), dofs, reference.tdim, 1\n )\n self.variant = variant\n\n def init_kwargs(self):\n \"\"\"Return the kwargs used to create this element.\"\"\"\n return {\"variant\": self.variant}\n\n names = [\"dPc\"]\n references = [\"interval\", \"quadrilateral\", \"hexahedron\"]\n min_order = 0\n continuity = \"L2\"\n\n\nclass VectorDPC(CiarletElement):\n \"\"\"Vector dPc finite element.\"\"\"\n\n def __init__(self, reference, order, variant=\"equispaced\"):\n scalar_space = DPC(reference, order, variant)\n dofs = []\n if reference.tdim == 1:\n directions = [1]\n else:\n directions = [\n tuple(1 if i == j else 0 for j in range(reference.tdim))\n for i in range(reference.tdim)\n ]\n for p in scalar_space.dofs:\n for d in directions:\n dofs.append(DotPointEvaluation(p.point, d, entity=p.entity))\n\n super().__init__(\n reference, order,\n polynomial_set(reference.tdim, reference.tdim, order),\n dofs,\n reference.tdim,\n reference.tdim,\n )\n self.variant = variant\n\n def init_kwargs(self):\n \"\"\"Return the kwargs used to create this element.\"\"\"\n return {\"variant\": self.variant}\n\n names = [\"vector dPc\"]\n references = [\"quadrilateral\", \"hexahedron\"]\n min_order = 0\n continuity = 
\"L2\"\n","sub_path":"symfem/elements/dpc.py","file_name":"dpc.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273658887","text":"from Bio import SeqIO\nimport argparse\nimport subprocess\n\nimport re\ncigar_re = re.compile('[0-9]+[MIDNSHPX=]')  # CIGAR token\n\ndef match_length(cigar):\n    \"\"\"Returns total number of bases matched to a reference.\"\"\"\n    sizes = re.findall(r'(\\d+)M', cigar)\n    return sum(map(int, sizes))\n\n\nparser = argparse.ArgumentParser(\n    description='Use bowtie2 to map sequences to HCV reference genotypes '\n                'and subtypes.'\n)\nparser.add_argument('infile', help='Path to FASTA input.')\nparser.add_argument('index', help='Path to Bowtie2 index files.')\nparser.add_argument('outfile', help='Path to write CSV output.')\n\nargs = parser.parse_args()\n\np = subprocess.Popen([\n    'bowtie2',\n    '-f', args.infile,\n    '-x', args.index,\n    '--local',\n    '--no-hd',  # no header lines\n    '--quiet',\n    '-p6'  # number of threads\n], stdout=subprocess.PIPE)\n\noutfile = open(args.outfile, 'w')\noutfile.write('qname,flag,rname,pos,mapq,match.len,seq.len\\n')\n\nfor line in p.stdout:\n    line = line.decode()  # p.stdout yields bytes under Python 3\n    qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual = line.strip('\\n').split('\\t')[:11]\n    seqlen = len(seq)\n    subtype = ''\n    if rname != '*':\n        subtype = rname.split('.')[1]\n\n    outfile.write(','.join(map(str, [qname, flag, subtype, pos, mapq, match_length(cigar), seqlen])))\n    outfile.write('\\n')\n\np.wait()\nif p.returncode:  # returncode is an int attribute, not a callable\n    raise subprocess.CalledProcessError(p.returncode, 'bowtie2')\n\noutfile.close()\n","sub_path":"subtyping.py","file_name":"subtyping.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"131163863","text":"\"\"\"\nCopy your implementation of MultilayerPerceptron class from your notebook here for submission.\nAlso append training related commands, such as model.compile, model.fit, etc.\n\"\"\"\nclass MultilayerPerceptron(keras.Model):  # Subclassing\n    \n    def __init__(self, dim_output, dim_hidden, num_layers=1, activation=keras.activations.linear):\n        super(MultilayerPerceptron, self).__init__(name='multilayer_perceptron')\n        self.dim_output = dim_output\n        self.dim_hidden = dim_hidden\n\n        # Within Model.__init__ we initialize all the layers we will use\n        self.hidden_layers = []\n        for _ in range(num_layers):\n            layer = keras.layers.Dense(units=dim_hidden, activation=activation)\n            self.hidden_layers.append(layer)\n        self.layer_o = keras.layers.Dense(units=dim_output, activation=keras.activations.softmax)\n\n    def call(self, x):  # call defines the flow of the computation, e.g. 
in this particular model\n # we simply call the two layers one after the oter\n h = x\n for layer in self.hidden_layers:\n h = layer(h)\n y = self.layer_o(h)\n return y\n\n def run_to_tensorboard(self):\n model = MultilayerPerceptron(\n dim_output=3,\n dim_hidden=32,\n num_layers=3,\n activation=keras.activations.sigmoid)\n\n model.compile(\n optimizer=keras.optimizers.SGD(learning_rate=0.003),\n loss='categorical_crossentropy', # 'mean_squared_error'\n metrics=['accuracy'])\n\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir=os.path.join(\"logs\", timestamp()),\n histogram_freq=1)\n\n model.fit(\n x=data.x,\n y=data.y,\n batch_size=4,\n epochs=20,\n validation_split=0.2,\n callbacks=[tensorboard_callback], # Callback\n verbose=0) # Supressing text output","sub_path":"week_4/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"201437843","text":"import config\nimport json\nimport datetime\nimport timehandler as timeh\nimport messagecomposer\n\n\n# Class that define Merb info and a bunch of utilities\nclass Merb:\n def __init__(self, name, alias, respawn_time, plus_minus, recurring, windows, tag, tod, pop,\n author_tod, author_pop, accuracy, target, date_rec, date_print):\n\n self.d_rec = date_rec\n self.d_print = date_print\n # Complete name of the Merb\n self.name = name\n # Aliases\n self.alias = alias\n # Respawn Time\n self.respawn_time = respawn_time\n # Variance\n self.plus_minus = plus_minus\n # Windows \n self.windows = windows;\n # Current window\n if len(self.windows) > 0:\n self.current_window = 1;\n else:\n self.current_window = 0\n # If the spawn is recurring. (ex scout)\n self.recurring = recurring\n # Tag of the merb\n self.tag = tag\n # Bool, True if target\n self.target = target\n # Time of Death\n self.tod = datetime.datetime.strptime(tod, self.d_rec)\n # Pop Time\n self.pop = datetime.datetime.strptime(pop, self.d_rec)\n # Author of the last ToD\n self.signed_tod = author_tod\n # Author of the last pop\n self.signed_pop = author_pop\n # Accuracy. 
0 for approx time, 1 for exact time, -1 when pop > tod\n self.accuracy = accuracy\n # Number of spawns since last tod (for recurring mobs)\n self.spawns = 0\n # Spawn Windows {\"start\"} {\"end\"}\n if self.tod > self.pop:\n self.window = self.get_window(self.tod)\n else:\n self.window = self.get_window(self.pop)\n self.accuracy = -2\n # Eta\n self.eta = self.get_eta()\n\n def get_window(self, from_date, skipped = False):\n if not skipped:\n w_start = from_date + datetime.timedelta(hours=self.respawn_time) - datetime.timedelta(hours=self.plus_minus)\n w_end = from_date + datetime.timedelta(hours=self.respawn_time) + datetime.timedelta(hours=self.plus_minus)\n else : \n adjusted_eta = self.eta \n if self.current_window - 2 >= 0:\n adjusted_eta = self.eta + datetime.timedelta(hours=self.windows[self.current_window - 2])\n \n w_start = adjusted_eta + datetime.timedelta(hours=self.respawn_time) - datetime.timedelta(hours=self.windows[self.current_window -1])\n w_end = adjusted_eta + datetime.timedelta(hours=self.respawn_time) + datetime.timedelta(hours=self.windows[self.current_window -1])\n \n return {\"start\": w_start, \"end\": w_end}\n\n def update_tod(self, new_tod, author, approx=1):\n self.tod = new_tod\n self.signed_tod = author\n self.accuracy = approx\n self.window = self.get_window(new_tod)\n self.eta = self.get_eta()\n self.target = False\n if( len(self.windows) > 0 ): \n self.plus_minus = self.windows[0]\n self.current_window = 1;\n\n \n def update_pop(self, new_pop, author):\n self.pop = new_pop\n self.signed_pop = author\n self.window = self.get_window(new_pop)\n self.eta = self.get_eta()\n if( len(self.windows) > 0 ): \n self.plus_minus = self.windows[0]\n self.current_window = 1;\n\n def update_skip(self, skipTime, author):\n if(len(self.windows) > 0 and self.current_window + 1 <= len(self.windows)):\n self.signed_tod = author\n self.window = self.get_window(skipTime, True)\n self.eta = self.get_eta()\n self.target = False\n self.plus_minus = self.windows[self.current_window]\n self.current_window = self.current_window + 1;\n else:\n return {\"Trying to skip last cycle\"}\n\n\n def get_eta(self, virtual_tod=None):\n eta = datetime.datetime.strptime(config.DATE_DEFAULT,config.DATE_FORMAT)\n\n # virtual tod is last saved tod if this function is directly called\n if not virtual_tod:\n virtual_tod = self.tod\n self.spawns = 0\n\n # virtual tod is last saved pop if the latter is newer than the former\n if self.pop > virtual_tod:\n self.accuracy = -1\n virtual_tod = self.pop\n\n # get now date to calculate the timeframe\n now = datetime.datetime.utcnow()\n delta_hour = datetime.timedelta(hours=self.respawn_time)\n\n # merb has no window and spawn in the future\n if self.plus_minus == 0 and now < (virtual_tod + delta_hour):\n eta = virtual_tod + delta_hour\n\n # merb has window and we are before window opens\n if now < self.window[\"start\"] and self.plus_minus:\n eta = self.window[\"start\"]\n\n # we are in window\n if self.window[\"start\"] < now < self.window[\"end\"]:\n eta = self.window[\"end\"]\n\n # if the merb is a recurring one and we are past the calculated eta...\n # set a new tod for recurring mob (scout)\n if self.recurring and self.plus_minus == 0 and now >= virtual_tod + delta_hour and self.spawns < 12:\n self.spawns += 1\n eta = self.get_eta(virtual_tod + delta_hour)\n\n return eta\n\n def in_window(self):\n now = timeh.now()\n if (self.window['start'] < now < self.window['end']) and self.plus_minus:\n return True\n else:\n return False\n\n def 
print_short_info(self):\n self.eta = self.get_eta()\n return messagecomposer.time_remaining(self.name, self.eta, self.plus_minus, self.window,\n self.spawns, self.accuracy, self.target, self.current_window)\n\n def print_long_info(self, timezone):\n self.eta = self.get_eta()\n if self.eta == datetime.datetime.strptime(config.DATE_DEFAULT, config.DATE_FORMAT):\n eta = \"N/A\"\n else:\n eta = timeh.change_naive_to_tz(self.eta, timezone)\n eta = eta.strftime(self.d_print)\n\n tod_tz = timeh.change_naive_to_tz(self.tod, timezone)\n pop_tz = timeh.change_naive_to_tz(self.pop, timezone)\n w_start_tz = timeh.change_naive_to_tz(self.window[\"start\"], timezone)\n w_end_tz = timeh.change_naive_to_tz(self.window[\"end\"], timezone)\n\n tz_print = \"Timezone %s\\n\\n\" % timezone\n \n return tz_print + messagecomposer.detail(self.name,\n tod_tz.strftime(self.d_print),\n pop_tz.strftime(self.d_print),\n self.signed_tod,\n self.signed_pop,\n self.respawn_time,\n self.plus_minus,\n self.tag,\n w_start_tz.strftime(self.d_print),\n w_end_tz.strftime(self.d_print),\n self.accuracy,\n eta,\n self.current_window,\n self.windows\n )\n\n def print_meta(self):\n return messagecomposer.meta(self.name, self.alias, self.tag)\n\n # serialize data\n def serialize(self):\n return ({self.name: {\n \"tod\": self.tod.strftime(self.d_rec),\n \"pop\": self.pop.strftime(self.d_rec),\n \"signed_tod\": self.signed_tod,\n \"signed_pop\": self.signed_pop,\n \"accuracy\": self.accuracy\n }\n })\n\n # Check tag\n def check_tag(self, tag):\n for i in self.tag:\n if i.lower() == tag.lower():\n return True\n return False\n\n\n# Class container of Merbs, load from JSON\nclass MerbList:\n\n def __init__(self, url_entities, url_timers, url_targets,date_format_rec, date_format_print):\n self.url_entities = url_entities\n self.url_timers = url_timers\n self.url_targets = url_targets\n self.max_respawn_time = 0\n\n with open(url_entities) as f:\n json_entities = json.load(f)\n with open(url_timers) as f:\n json_timers = json.load(f)\n with open(url_targets) as f:\n json_targets = json.load(f)\n\n self.merbs = list()\n self.tags = list()\n for i in json_entities:\n # CALCULATE LIMIT HOURS FOR GET ALL REQUESTS\n limit_respawn_time = json_entities[i][\"respawn_time\"] + json_entities[i][\"plus_minus\"]\n if limit_respawn_time > self.max_respawn_time:\n self.max_respawn_time = limit_respawn_time\n if i in json_timers:\n tod = json_timers[i][\"tod\"]\n pop = json_timers[i][\"pop\"]\n signed_tod = json_timers[i][\"signed_tod\"]\n if \"signed_pop\" not in json_timers[i]:\n signed_pop = signed_tod\n else:\n signed_pop = json_timers[i][\"signed_pop\"]\n\n accuracy = json_timers[i][\"accuracy\"]\n else:\n tod = config.DATE_DEFAULT\n pop = config.DATE_DEFAULT\n signed_tod = \"Default\"\n signed_pop = \"Default\"\n accuracy = 0\n if i in json_targets:\n target = True\n else:\n target = False\n if(\"windows\" in json_entities[i]):\n windows = json_entities[i][\"windows\"]\n else:\n windows = []; \n self.merbs.append(Merb(i,\n json_entities[i][\"alias\"],\n json_entities[i][\"respawn_time\"],\n json_entities[i][\"plus_minus\"],\n json_entities[i][\"recurring\"],\n windows,\n json_entities[i][\"tag\"],\n tod,\n pop,\n signed_tod,\n signed_pop,\n accuracy,\n target,\n date_format_rec,\n date_format_print\n ))\n # Create a list of tag\n for tag in json_entities[i][\"tag\"]:\n if not tag.lower() in self.tags and tag:\n self.tags.append(tag.lower())\n self.tags.sort()\n\n def save_timers(self):\n with open(self.url_timers, 'w') as outfile:\n 
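            # persist every merb's {tod, pop, signed_tod, signed_pop, accuracy} dict, keyed by merb name (see Merb.serialize above)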
json.dump(self.serialize(), outfile, indent=4)\n\n def save_targets(self):\n with open(self.url_targets, 'w') as outfile:\n self.order('eta')\n output = list()\n for merb in self.merbs:\n if merb.target:\n output.append(merb.name)\n json.dump(output,outfile, indent=4)\n\n def order(self, order='name'):\n if order == 'name':\n self.merbs.sort(key=lambda merb: merb.name.lower())\n if order == 'eta':\n self.merbs.sort(key=lambda merb: merb.eta)\n self.merbs.sort(key=lambda merb: merb.in_window(), reverse=True)\n\n def get_all_window(self):\n self.order('eta')\n output = list()\n\n for merb in self.merbs:\n if merb.window['start'] <= timeh.now() <= merb.window['end']:\n output.append(merb.print_short_info())\n\n return output\n\n def get_all(self, timezone, mode=\"countdown\", limit_hours=None):\n if not limit_hours:\n limit_hours = self.max_respawn_time\n now = timeh.now()\n self.order('eta')\n output = list()\n\n for merb in self.merbs:\n date_limit = now + datetime.timedelta(hours=limit_hours)\n date_diff = date_limit - merb.eta\n hour_diff = date_diff.total_seconds() / 3600\n if timeh.now() < merb.eta and hour_diff >= 0:\n # Show online merb eta in the future\n if mode == \"countdown\":\n output.append(merb.print_short_info())\n else:\n output.append(merb.print_long_info(timezone))\n return output\n\n def get_all_by_tag(self, tag):\n self.order('eta')\n output = list()\n for merb in self.merbs:\n if merb.check_tag(tag) and timeh.now() < merb.eta:\n output.append(merb.print_short_info())\n return output\n\n def get_all_targets(self):\n self.order('eta')\n output = list()\n for merb in self.merbs:\n if merb.target:\n output.append(merb.print_short_info())\n return output\n\n def get_all_meta(self):\n self.order('name')\n output = list()\n for merb in self.merbs:\n output.append(merb.print_meta())\n return output\n\n def get_all_tags(self):\n output = list()\n for tag in self.tags:\n output.append(\"%s\\n\" % tag)\n return output\n\n def get_re_tags(self):\n output = \"\"\n for tag in self.tags:\n output += \"%s|\" % tag\n output = output[:-1]\n return output\n\n def serialize(self):\n json_output = {}\n for merb in self.merbs:\n json_output.update(merb.serialize())\n return json_output\n","sub_path":"npc.py","file_name":"npc.py","file_ext":"py","file_size_in_byte":13808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"458790056","text":"import clipboard\nfrom pynput.keyboard import Controller, Key, KeyCode, Listener\nimport json\n\n# The key combination to check::\nPASTE_COMBINATIONS = [\n {Key.ctrl, Key.alt, KeyCode(char='v')},\n {Key.ctrl_l, Key.alt_l, KeyCode(char='v')},\n {Key.ctrl_l, Key.alt_r, KeyCode(char='v')},\n {Key.ctrl_r, Key.alt_l, KeyCode(char='v')},\n {Key.ctrl_r, Key.alt_r, KeyCode(char='v')}\n]\n\nCOPY_COMBINATIONS = [\n\t{Key.ctrl, KeyCode(char = 'c')},\n\t{Key.ctrl_l, Key.alt_r, KeyCode(char='c')},\n {Key.ctrl_r, Key.alt_l, KeyCode(char='c')},\n {Key.ctrl_r, Key.alt_r, KeyCode(char='c')}\n]\n\n# The currently active modifiers\ncurrent = set()\nkb_actor = Controller()\n\n#read from json file::\ndata_json = []\nwith open('./clipboard_data/sample.json', 'rb') as f:\n\tdata_json = json.load(f)['records']\n\ndef execute_paste():\n\n\t#empty the current set, since task will be executed now\n\tcurrent.clear()\n\tprint(current)\n\n\t#clipboard mei copy::\n\tclipboard.copy(data_json[0]['text'])\n\tprint('lol before gen event')\n\n\t#emulate paste 
event::\n\tkb_actor.press(Key.ctrl)\n\tkb_actor.press(Key.shift)\n\tkb_actor.press('v')\n\t\n\t#empty the set of the keys added above:\n\tcurrent.clear()\n\n\tkb_actor.release(Key.ctrl)\n\tkb_actor.release(Key.shift)\n\tkb_actor.release('v')\n\n\tprint('lol after gen event')\n\n\ndef execute_copy():\n\tcopied_data = clipboard.paste()\n\n\ndef on_press(key):\n if any([key in COMBO for COMBO in PASTE_COMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in PASTE_COMBINATIONS):\n \tprint('ctrlV detected!')\n \tprint(current)\n \texecute_paste()\n\n\n if any([key in COMBO for COMBO in COPY_COMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in COPY_COMBINATIONS):\n \tprint('ctrlC detected!')\n \texecute_copy()\n \t#empty the current set, since task has been executed now\n \tcurrent.clear()\n\ndef on_release(key):\n\tcurrent.clear()\n\nwith Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()","sub_path":"working_paste.py","file_name":"working_paste.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"221066063","text":"# -*- coding: utf-8 -*-\r\n\r\nimport tkinter as tk\r\nimport tkinter.ttk as ttk\r\nfrom datetime import datetime\r\n\r\nclass NotificationFrame(tk.Frame):\r\n '''Box to display notification in the GUI'''\r\n\r\n def __init__(self, master, _logFile, **kwargs):\r\n '''\r\n The constructor for NotificationsFrame\r\n\r\n :param master: The Tk parent widget\r\n :param _logFile: The file handle for the output log file\r\n '''\r\n\r\n super().__init__(master, **kwargs)\r\n \r\n self.logFile = _logFile\r\n self.label = ttk.Label(self, text=\"Notifications\")\r\n self.label.pack()\r\n \r\n self.scrollbar = ttk.Scrollbar(self)\r\n self.scrollbar.pack(fill=\"x\", expand=1)\r\n \r\n self.text = tk.Text(self.scrollbar, height=4)\r\n self.text.config(state=tk.DISABLED)\r\n self.text.pack()\r\n \r\n\r\n def append_line(self, line):\r\n '''\r\n Prints a line to the notification box and \r\n a timestamped line to the log file\r\n '''\r\n\r\n self.text.config(state=tk.NORMAL)\r\n self.text.insert(tk.END, line + \"\\n\")\r\n now = datetime.now()\r\n timestamp = now.strftime(\"%H:%M:%S\")\r\n self.logFile.write(timestamp + \" - \" + line + \"\\n\") #Print notification to external log file\r\n self.text.see(tk.END) #Scrolls down to show latest notification automatically\r\n self.text.config(state=tk.DISABLED)\r\n \r\n \r\n ","sub_path":"gui/NotificationsFrame.py","file_name":"NotificationsFrame.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30736915","text":"\"\"\"\nLineup optimizations based on predicted scores for each player\n\"\"\"\n\nimport itertools\n\nimport collections\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport db\nimport features as f\nimport model as m\nfrom util import Timed, lineup_csv_filename\nimport fanduel as fd\n\npd.set_option('display.expand_frame_repr', False)\n\nPlayerList = collections.namedtuple(\"PlayerList\", \"pg, sg, sf, pf, c\")\nCombination = collections.namedtuple(\"Combination\", \"players cost\")\nPOSITIONS = [\"PG_1\", \"PG_2\", \"SG_1\", \"SG_2\", \"SF_1\", \"SF_2\", \"PF_1\", \"PF_2\", \"C_1\"]\n\n\nclass Player:\n def __init__(self, **kwargs):\n self.fanduel_id = kwargs[\"fanduel_id\"]\n self.db = kwargs[\"db\"]\n 
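        # the remaining kwargs are unpacked below; cv = fp_std / fppg (coefficient of variation) feeds the cv_cutoff filter in old_filter_players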
self.name = kwargs[\"name\"]\n self.cost = kwargs[\"cost\"]\n self.position = kwargs[\"position\"]\n self.predicted_score = kwargs[\"predicted_score\"]\n self.actual_score = kwargs[\"actual_score\"]\n self.fppg = kwargs[\"fppg\"]\n self.fp_std = kwargs[\"fp_std\"]\n self.team = kwargs[\"team\"]\n self.opponent = kwargs[\"opponent\"]\n self.last_game_fp = kwargs[\"last_game_fp\"]\n self.last_last_game_fp = kwargs[\"last_last_game_fp\"]\n self.last_last_last_game_fp = kwargs[\"last_last_last_game_fp\"]\n self.cv = self.fp_std / self.fppg\n\n import console as c\n self.skew = c.ALLP[c.ALLP.Name == self.name].Skew.iloc[0]\n self.kurtosis = c.ALLP[c.ALLP.Name == self.name].Kurtosis.iloc[0]\n\n def __repr__(self):\n return \"\".format(\n self.db.id,\n self.name,\n self.position,\n self.cost,\n self.predicted_score,\n self.actual_score,\n )\n\n @property\n def csv_str(self):\n return \"{}:{}\".format(self.fanduel_id, self.name)\n\n\nclass Lineup:\n def __init__(self, **kwargs):\n self.pg = kwargs.get(\"pg\")\n self.sg = kwargs.get(\"sg\")\n self.sf = kwargs.get(\"sf\")\n self.pf = kwargs.get(\"pf\")\n self.c = kwargs.get(\"c\")\n\n @property\n def players(self):\n pgs = self.pg.players if self.pg else ()\n sgs = self.sg.players if self.sg else ()\n sfs = self.sf.players if self.sf else ()\n pfs = self.pf.players if self.pf else ()\n cs = self.c.players if self.c else ()\n return pgs + sgs + sfs + pfs + cs\n\n @property\n def cost(self):\n return sum([p.cost for p in self.players])\n\n @property\n def predicted_score(self):\n return sum([p.predicted_score for p in self.players])\n\n @property\n def actual_score(self):\n return sum([p.actual_score for p in self.players])\n\n @property\n def fp_std(self):\n return sum([p.fp_std for p in self.players])\n\n @property\n def cv(self):\n return sum([p.cv for p in self.players])\n\n @property\n def fppg(self):\n return sum([p.fppg for p in self.players])\n\ndef old_filter_players(game_day, db_model):\n f.load_gldb_if_empty(game_day)\n\n cv_cutoff = 0.4\n score_cutoff = 20.0\n\n import console as c\n\n all_players = c.allp(game_day)\n\n manual_drop = []\n injuries_drop = []\n cv_drop = list(all_players[all_players.CV > cv_cutoff].Name)\n\n fanduel_csv = fd.FanduelCSV \\\n .preprocessed(game_day) \\\n .drop_players_not_used_to_train_model() \\\n .add_column_cv(c.allp(game_day)) \\\n .drop_players(manual_drop, \"Manual\") \\\n .drop_players(injuries_drop, \"Injuries\") \\\n .drop_players(cv_drop, \"High CV\") \\\n # .drop_players_with_low_fppg(26.0)\n\n players = []\n for (_, row) in fanduel_csv.df.iterrows():\n name = row.Name\n try:\n db_player = db.session.query(db.Player) \\\n .filter(db.Player.name == name) \\\n .one()\n except NoResultFound as e:\n print(\"ERROR: couldn't find {}, query went to DB\".format(name))\n raise e\n\n try:\n gl = db.session.query(db.GameLog) \\\n .join(db.GameLog.game) \\\n .filter(db.Game.date == game_day) \\\n .filter(db.GameLog.player_id == db_player.id) \\\n .one()\n actual_score = gl.fanduel_score\n except NoResultFound:\n actual_score = 0.0\n\n gls = f.GLDB[\"players\"][name]\n fps = [gl.fanduel_score for gl in gls if gl.zeroes != True]\n\n if gls[0].game.date == game_day:\n last_game_fp = gls[1].fanduel_score\n last_last_game_fp = gls[2].fanduel_score\n last_last_last_game_fp = gls[3].fanduel_score\n else:\n last_game_fp = gls[0].fanduel_score\n last_last_game_fp = gls[1].fanduel_score\n last_last_last_game_fp = gls[2].fanduel_score\n\n players.append(Player(\n fanduel_id=row.Id,\n db=db_player,\n name=name,\n 
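            # Salary, Position and Game are columns of the preprocessed FanDuel CSV row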
cost=row.Salary,\n position=row.Position,\n predicted_score=m.predict(db_model, name, game_day, row.Game)[0],\n actual_score=actual_score,\n fppg=np.array(fps).mean(),\n fp_std=np.array(fps).std(),\n team=row.Team,\n opponent=row.Opponent,\n last_game_fp=last_game_fp,\n last_last_game_fp=last_last_game_fp,\n last_last_last_game_fp=last_last_last_game_fp,\n ))\n return [player for player in players if player.predicted_score > score_cutoff]\n\ndef players_by_position(ps):\n list = PlayerList(\n pg=[],\n sg=[],\n sf=[],\n pf=[],\n c=[])\n for p in ps:\n if p.position == \"PG\":\n list.pg.append(p)\n elif p.position == \"SG\":\n list.sg.append(p)\n elif p.position == \"SF\":\n list.sf.append(p)\n elif p.position == \"PF\":\n list.pf.append(p)\n elif p.position == \"C\":\n list.c.append(p)\n\n return list\n\n\ndef choose(ps, count):\n return [Combination(players=c, cost=sum([p.cost for p in c])) for c in itertools.combinations(ps, count)]\n\ndef all_lineups(players):\n player_list = players_by_position(players)\n pg_combos = choose(player_list.pg, 2)\n sg_combos = choose(player_list.sg, 2)\n sf_combos = choose(player_list.sf, 2)\n pf_combos = choose(player_list.pf, 2)\n c_combos = choose(player_list.c, 1)\n\n n_lineups = len(pg_combos) * len(sg_combos) * len(sg_combos) * len(sf_combos) * len(c_combos)\n print(\"Expected {} lineups to be created, {} players total\".format(n_lineups, len(players)))\n\n lineups = []\n\n for pg in pg_combos:\n lu = Lineup(pg=pg)\n if lu.cost > 60000: continue\n for sg in sg_combos:\n lu.sg = sg\n if lu.cost > 60000: continue\n for sf in sf_combos:\n lu.sf = sf\n if lu.cost > 60000: continue\n for pf in pf_combos:\n lu.pf = pf\n if lu.cost > 60000: continue\n for c in c_combos:\n lu.c = c\n if lu.cost > 60000: continue\n lineups.append(lu)\n lu = Lineup(pg=pg, sg=sg, sf=sf, pf=pf)\n\n print(\"{} lineups\".format(len(lineups)))\n lineups = sorted(lineups, key=lambda l: l.predicted_score, reverse=True)\n\n return lineups\n\ndef old_filter_lineups(lineups):\n # initially didn't filter\n return lineups\n\ndef new_filter_lineups(lineups):\n highest_predicted_score = lineups[0].predicted_score\n score_cutoff = highest_predicted_score - 10 #arbitrary\n\n top_lineups = []\n for lineup in lineups:\n if lineup.predicted_score >= score_cutoff:\n top_lineups.append(lineup)\n else:\n break\n\n print(\"{} lineups within 10 points\".format(len(top_lineups)))\n\n lineups = less_repetitive_lineups(top_lineups)\n return lineups\n\ndef less_repetitive_lineups(all_lineups):\n filtered_lineups = []\n\n for lineup in all_lineups:\n should_add = True\n for filtered_lineup in filtered_lineups:\n if lineups_are_too_similar(filtered_lineup, lineup):\n should_add = False\n break\n\n if should_add:\n filtered_lineups.append(lineup)\n\n print(\"{} lineups passed repetitive filter\".format(len(filtered_lineups)))\n\n return filtered_lineups\n\ndef lineups_are_too_similar(lineup1, lineup2):\n money_cutoff = 45000 # If more than this value is the same, we'll consider them too similar\n\n similar_players = []\n for player in lineup1.players:\n if player in lineup2.players:\n similar_players.append(player)\n\n if cost_of_players(similar_players) > money_cutoff:\n return True\n\n return False\n\ndef cost_of_players(players):\n cost = 0\n for player in players:\n cost += player.cost\n\n return cost\n\ndef pinned_lineups(ps):\n pass\n\n\ndef money(lineup, payout_cutoffs):\n winnings = 0.0\n for award, cutoff in payout_cutoffs.items():\n if lineup.actual_score >= cutoff and award > winnings:\n winnings 
= award\n    return winnings\n\n\n# Idea here is you pass in game_day, the model, a method to filter players, and a method to filter lineups.\n# Should make it easier to swap out model and filtering methods to see how our lineups would be different.\ndef create_lineups(game_day, db_model, filter_players, filter_lineups):\n    with Timed(\"#create_lineups -- lineup.py\", header=True):\n        with Timed(\"Getting Players\"):\n            # players = get_players(FDCSV.df, game_day, db_model, player_cutoff)\n            players = filter_players(game_day, db_model)\n            print(\"Using {} players to create lineups\".format(len(players)))\n\n        with Timed(\"Collecting all line ups\"):\n            all_possible_lineups = all_lineups(players)\n            lineups = filter_lineups(all_possible_lineups)\n            if len(lineups) == 0:\n                # player_cutoff is not defined in this scope, so don't interpolate it here\n                raise Exception(\"0 lineups, you probably need to raise the player score cutoff\")\n\n        with Timed(\"Loading line ups into data frame\"):\n            # NOTE: Should probably not import a module like console, into this module\n            import console as c\n            df = c.ludf(lineups)\n\n        # noinspection PyArgumentList\n        draft = db.FanduelDraft(\n            drafted_at=game_day,\n            player_cutoff=20.0,\n            model=db_model,\n            ldf=df,\n            lineups=lineups,\n            players=players,\n        )\n\n        return draft\n","sub_path":"projects/wakka/lineup.py","file_name":"lineup.py","file_ext":"py","file_size_in_byte":10493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47660539","text":"import os\nimport subprocess\n\ndef image_to_string(img, cleanup=True, plus=''):\n    # if cleanup is True, delete the generated text file once recognition is done\n    # plus holds extra advanced arguments passed through to tesseract\n    subprocess.check_output('tesseract ' + img + ' ' +\n                            img + ' ' + plus, shell=True)  # writes a .txt file with the same base name\n    text = ''\n    with open(img + '.txt', 'r') as f:\n        text = f.read().strip()\n    if cleanup:\n        os.remove(img + '.txt')\n    return text\n\n'''\nfrom skimage import io\nimg=io.imread('/home/longred/Desktop/TM_image/1.png')\nimg1=img[0:80,:,3]\nfor i in range(img.shape[0]):\n    for j in range(img.shape[1]):\n        if (img[i,j,1]<200):\n            pass\n        else:\n            img[i,j,3]=0\nio.imsave('/home/longred/Desktop/1.png',img1)\n'''\nprint(image_to_string('/home/longred/Desktop/1.1.png',cleanup=False,plus='-l test')) \nprint(image_to_string('/home/longred/Desktop/1.2.png',cleanup=False,plus='-l test')) \nprint(image_to_string('/home/longred/Desktop/1.3.png',cleanup=False,plus='-l test')) \nprint(image_to_string('/home/longred/Desktop/1.png',cleanup=False,plus='-l chi_sim')) ","sub_path":"image_ocr.py","file_name":"image_ocr.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"122659044","text":"from os.path import dirname, join\nfrom django.conf.urls.defaults import patterns, include, url\nfrom django.contrib import admin\n\n_dir = dirname(__file__)\nadmin.autodiscover()\n\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'luttesn2.views.home', name='home'),\n    # url(r'^luttesn2/', include('luttesn2.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n    (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(_dir, 'media')}),\n\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
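A minimal sketch of an alternative to the image_to_string wrapper in the record above. Assumptions not in the original: the tesseract binary is on PATH and accepts the standard "stdout" output base, and the function name image_to_string_stdout is mine, not from the record. It skips the temporary .txt file and the shell=True string concatenation:

import subprocess

def image_to_string_stdout(img, plus=''):
    # 'stdout' as the output base makes tesseract print the recognized text
    # instead of writing img + '.txt'; an argv list avoids shell quoting issues.
    cmd = ['tesseract', img, 'stdout'] + plus.split()
    return subprocess.check_output(cmd).decode('utf-8').strip()

With the calls above, image_to_string_stdout('/home/longred/Desktop/1.png', plus='-l chi_sim') should return the same text without leaving a 1.png.txt file behind.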
+{"seq_id":"204916391","text":"from bs4 import BeautifulSoup as bs\nimport requests\nfrom pprint import pprint\n\nurl = 'https://www.kinopoisk.ru'\n\nparams = {'quick_filters':'serials',\n 'tab':'all'}\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'}\n\nresponse = requests.get(url+'/popular/films', params=params, headers=headers)\n\nsoup = bs(response.text, 'html.parser')\n\nserials_list = soup.find_all('div',attrs={'class':'desktop-rating-selection-film-item'})\nprint()\n\nserials = []\n\nfor serial in serials_list:\n serial_data = {}\n serial_link = url + serial.find('a',attrs={'class':'selection-film-item-meta__link'}).get('href')\n serial_name = serial.find('p',attrs={'class':'selection-film-item-meta__name'}).getText()\n serial_genre = serial.find_all('span',attrs={'class':'selection-film-item-meta__meta-additional-item'})[1].getText()\n serial_rating = serial.find('span',attrs={'class':'rating__value'})\n if serial_rating:\n serial_rating = float(serial_rating.getText())\n\n serial_data['link'] = serial_link\n serial_data['name'] = serial_name\n serial_data['genre'] = serial_genre\n serial_data['rating'] = serial_rating\n\n serials.append(serial_data)\n\npprint(serials)\n","sub_path":"lesson 2 kinopoisk.py","file_name":"lesson 2 kinopoisk.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"422740083","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*\n\nimport time\nimport logging\n\nclass plugin(object):\n\t'''Plugin for FSTA\n\t'''\n\tdef __init__(self, file_name):\n\t\t'''Initialisation\n\t\t\t- file_name\n\t\t\t- modified_date\n\t\t'''\n\t\tself.file_name = file_name\n\t\tself.loaded_date = time.time()\n\t\n\tdef load(self, maison):\n\t\t'''Load the plugin with 'maison' as installation name\n\t\t'''\n\t\tlogging.info(\"Load the plugin %s ...\"%(self.file_name))\n\t\ttry:\n\t\t\texecfile(maison.plugin_path+'/'+self.file_name,globals(),locals())\n\t\t\tlogging.info(\"\tplugin loaded.\")\n\t\texcept Exception as e:\n\t\t\tlogging.error(\"\tError on plugin %s : %s\"%(self.file_name, e))","sub_path":"build/lib.linux-armv7l-2.7/FSTA/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"98392519","text":"# GUIBasic2-Expense.py\r\nfrom tkinter import *\r\nfrom tkinter import ttk, messagebox\r\nimport csv\r\nfrom datetime import datetime\r\n# ttk is theme of Tk\r\n\r\nGUI = Tk()\r\nGUI.title('ໂປຣແກຣມບັນທຶກຄ່າໃຊ້ຈ່າຍ version.1.0 by Hery')\r\nGUI.geometry('600x700+500+0')\r\n\r\n# B1 = Button(GUI,text='Hello')\r\n# B1.pack(ipadx=50,ipady=20) # .pack ຕິດປຸ່ມເຂົ້າກັບ GUI ຫຼັກ\r\n\r\n#--------------------MUNU--------------------\r\nmenubar = Menu(GUI)\r\nGUI.config(menu=menubar)\r\n\r\n# File menu\r\nfilemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='file',menu=filemenu)\r\nfilemenu.add_command(label='Import CSV')\r\nfilemenu.add_command(label='Export to Googlesheet')\r\n# Help\r\ndef About():\r\n\tprint(menu=menubar)\r\n\tmessagebox.showinfo('About','ສະບາຍດີ ໂປຣແກຣມນີ້ແມ່ນໂປຣແກຣມບັນທຶກຂໍ້ມູນ\\nສົນໃຈບໍລິຈາກເຮົາບໍ? 
ຂໍ 1 BTCພໍແລ້ວ\\nBTC Address: abc')\r\n\r\n\r\n\r\nhelpmenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Help',menu=helpmenu)\r\nhelpmenu.add_cascade(label='About',command=About)\r\n\r\n# Donate\r\ndef Donate():\r\n\tmessagebox.showinfo('Donate','BTC Address:')\r\ndonatemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Donate',menu=donatemenu)\r\ndonatemenu.add_cascade(label='Donate',command=Donate)\r\n\r\n\r\n\r\n#----------------------------------------\r\n\r\n\r\nTab = ttk.Notebook(GUI)\r\nT1 = Frame(Tab)\r\nT2 = Frame(Tab)\r\nTab.pack(fill=BOTH,expand=1) # expand ເອົາໄວ້ຂະຫຍາຍ ໃຊ້ຄູ່ກັບ fill\r\n\r\nicon_t1 = PhotoImage(file='t1_expense.png') # .subsample(2) = ຍໍ້ຮູບ\r\nicon_t2 = PhotoImage(file='t2_expenselist.png')\r\n\r\n\r\nTab.add(T1,text=f'{\"ເພີ່ມຄ່າໃຊ້ຈ່າຍ\":^{30}}',image=icon_t1,compound='top')\r\nTab.add(T2,text=f'{\"ຄ່າໃຊ້ຈ່າຍທັງໝົດ\":^{30}}',image=icon_t2,compound='top')\r\n\r\n\r\n\r\nF1 = Frame(T1)\r\n#F1.place(x=100,y=50)\r\nF1.pack()\r\n\r\ndays = {'Mon':'ຈັນ',\r\n\t\t'Tue':'ຄານ',\r\n\t\t'Wed':'ພຸດ',\r\n\t\t'Thu':'ພະຫັດ',\r\n\t\t'Fri':'ສຸກ',\r\n\t\t'Sat':'ເສົາ',\r\n\t\t'Sun':'ອາທິດ'}\r\n\r\ndef Save(event=None):\r\n\texpense = v_expense.get()\r\n\tprice = v_price.get()\r\n\tquantity = v_quantity.get()\r\n\r\n\tif expense == '':\r\n\t\tprint('No Data')\r\n\t\tmessagebox.showerror('Error','ກະລຸນາໃສ່ຂໍ້ມູນຄ່າໃຊ້ຈ່າຍ')\r\n\t\treturn\r\n\telif price == '':\r\n\t\tmessagebox.showerror('Error','ກະລຸນາໃສ່ລາຄາ')\r\n\t\treturn\r\n\telif quantity == '':\r\n\t\tmessagebox.showerror('Error','ກະລຸນາໃສ່ຈຳນວນ')\r\n\t\treturn\r\n\r\n\r\n\ttotal = float(price) * float(quantity)\r\n\ttry:\r\n\t\ttotal = float(price) * float(quantity)\r\n\t\t# .get ດຶງຄ່າມາຈາກ v_expense = StringVar()\r\n\t\tprint('ລາຍການ: {} ລາຄາ: {}'.format(expense,price))\r\n\t\tprint('ຈຳນວນ: {} ລວມທັງໝົດ: {} ກີບ'.format(quantity,total)) \r\n\t\ttext = 'ລາຍການ: {} ລາຄາ: {}\\n'.format(expense,price)\r\n\t\ttext = text + 'ຈຳນວນ: {} ລວມທັງໝົດ: {} ກີບ'.format(quantity,total)\r\n\t\tv_result.set(text)\r\n\t\t# clear ຂໍ້ມູນເກົ່າ\r\n\t\tv_expense.set('')\r\n\t\tv_price.set('')\r\n\t\tv_quantity.set('')\r\n\r\n\t\t# ບັນທຶກຂໍ້ມູນລົງ csv ຢ່າລືມ import csv ນຳ\r\n\t\ttoday = datetime.now().strftime('%a') # day['Mon'] = 'ຈັນ'\r\n\t\tprint(today)\r\n\t\tdt = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\r\n\t\tdt = days[today] + '-' + dt\r\n\t\twith open('savedata.csv','a',encoding='utf-8',newline='') as f:\r\n\t\t\t# with ແມ່ນສັ່ງເປີດ file ແລ້ວປິດອັດຕະໂນມັດ\r\n\t\t\t# 'a' ການບັນທຶກເລື່ອຍໆ ເພີ່ມຂໍ້ມູນຈາກຂໍ້ມູນເກົ່າ\r\n\t\t\t# newline='' ເຮັດໃຫ້ຂໍ້ມູນບໍ່ມີບັນທັດວ່າງ\r\n\t\t\tfw = csv.writer(f) # ສ້າງ function ສຳລັບຂຽນຂໍ້ມູນ\r\n\t\t\tdata = [dt,expense,price,quantity,total]\r\n\t\t\tfw.writerow(data)\r\n\t\t\t \r\n\t\t# ເຮັດໃຫ້ເຄີຣເຊີຣ໌ກັບໄປຕຳແໜ່ງບ່ອນໃສ່ E1\r\n\t\tE1.focus()\r\n\t\tupdate_table()\r\n\texcept Exception as e:\r\n\r\n\t\tprint('ERROR:',e)\r\n\t\tmessagebox.showerror('Error','ກະລຸນາໃສ່ຂໍ້ມູນໃໝ່ ເຈົ້າໃສ່ຂໍ້ມູນຜິດ')\r\n\t\tv_expense.set('')\r\n\t\tv_price.set('')\r\n\t\tv_quantity.set('')\r\n\t\t#messagebox.showwarning('Error','ກະລຸນາໃສ່ຂໍ້ມູນໃໝ່ ເຈົ້າໃສ່ຂໍ້ມູນຜິດ')\r\n\t\t#messagebox.showinfo('Error','ກະລຸນາໃສ່ຂໍ້ມູນໃໝ່ ເຈົ້າໃສ່ຂໍ້ມູນຜິດ')\r\n\r\n\r\n\t\t \r\n# ເຮັດໃຫ້ສາມາດກົດ enter ໄດ້ \r\nGUI.bind('',Save) # ຕ້ອງເພີ່ມໃນ def Save(event=None)\r\n\r\nFONT1 = (None,20) # None ປ່ຽນເປັນ 'Phetsarath'\r\n\r\n#-------------image----------------\r\n\r\nmain_icon = PhotoImage(file='icon_money.png')\r\n\r\nMainicon = 
Label(F1,image=main_icon)\r\nMainicon.pack()\r\n\r\n\r\n\r\n#----------------text1-------------------\r\nL = ttk.Label(F1,text='ລາຍການຄ່າໃຊ້ຈ່າຍ',font=FONT1).pack()\r\nv_expense = StringVar()\r\n# StringVar() ແມ່ນ ໂຕແປພິເສດສຳຫຼັບເກັບຂໍ້ມູນໃນ GUI\r\nE1 = ttk.Entry(F1,textvariable=v_expense,font=FONT1)\r\nE1.pack()\r\n#----------------------------------------------\r\n\r\n#----------------text2-------------------\r\nL = ttk.Label(F1,text='ລາຄາ (ກີບ)',font=FONT1).pack()\r\nv_price = StringVar()\r\n# StringVar() ແມ່ນ ໂຕແປພິເສດສຳຫຼັບເກັບຂໍ້ມູນໃນ GUI\r\nE2 = ttk.Entry(F1,textvariable=v_price,font=FONT1)\r\nE2.pack()\r\n#----------------------------------------------\r\n\r\n#----------------text3-------------------\r\nL = ttk.Label(F1,text='ຈຳນວນ (ອັນ)',font=FONT1).pack()\r\nv_quantity = StringVar()\r\n# StringVar() ແມ່ນ ໂຕແປພິເສດສຳຫຼັບເກັບຂໍ້ມູນໃນ GUI\r\nE3 = ttk.Entry(F1,textvariable=v_quantity,font=FONT1)\r\nE3.pack()\r\n#----------------------------------------------\r\n\r\nicon_b1 = PhotoImage(file='b_save.png')\r\n\r\n\r\nB2 = ttk.Button(F1,text=f'{\"Save\": ^{10}}',image=icon_b1,compound='top',command=Save)\r\nB2.pack(ipadx=50,ipady=20,pady=20)\r\n\r\nv_result = StringVar()\r\nv_result.set('-------------ຜົນຮັບ-------------')\r\nresult = ttk.Label(F1,textvariable=v_result,font=FONT1,foreground='black')\r\nresult.pack(pady=20)\r\n\r\n#-----------------TAB2------------------------\r\n\r\ndef read_csv():\r\n\twith open('savedata.csv',newline='',encoding='utf-8') as f:\r\n\t\tfr = csv.reader(f)\r\n\t\tdata = list(fr)\r\n\treturn data\r\n\t\t# print(data)\r\n\t\t# print('-----')\r\n\t\t# print(data[0][0])\r\n\t\t# for a,b,c,d,e in data:\r\n\t\t# \tprint(e)\r\n\r\n# table\r\nL = ttk.Label(T2,text='ຕາຕະລາງສະແດງຜົນລັບທັງໝົດ',font=FONT1).pack(pady=20)\r\n\r\nheader = ['ວັນ-ເວລາ','ລາຍການ','ຄ່າໃຊ້ຈ່າຍ','ຈຳນວນ', 'ລວມ']\r\nresulttable = ttk.Treeview(T2,columns=header,show='headings',height=15)\r\nresulttable.pack()\r\n\r\n# for i in range(len(header)):\r\n# \tresulttable.heading(header[i],text=header[i])\r\n\r\nfor h in header:\r\n\tresulttable.heading(h,text=h)\r\n\r\nheaderwidth = [150,170,80,80,80]\r\nfor h,w in zip(header,headerwidth):\r\n\tresulttable.column(h,width=w) \r\n\r\n# resulttable.insert('',0,value=['ຈັນ','ນ້ຳດື່ມ',5000,5,25000])\r\n# resulttable.insert('','end',value=['ຄານ','ນ້ຳດື່ມ',5000,5,25000])\r\n\r\n\r\ndef update_table():\r\n\tresulttable.delete(*resulttable.get_children())\r\n\t# for c in resulttable.get_children():\r\n\t# \tresulttable.delete(c)\r\n\tdata = read_csv()\r\n\tfor d in data:\r\n\t\tresulttable.insert('',0,value=d)\r\n\r\nupdate_table()\r\nprint('GET CHILD:',resulttable.get_children())\r\nGUI.bind('',lambda x: E2.focus())\r\nGUI.mainloop()\r\n","sub_path":"EP6-GUIBasic2-Expense.py","file_name":"EP6-GUIBasic2-Expense.py","file_ext":"py","file_size_in_byte":8103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"573949595","text":"import os\nimport logging\n\n__copyright__ = \"\"\"\\\nCopyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)\nCopyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)\nSee the documentation for further information on copyrights,\nor contact the author. 
All Rights Reserved.\n\"\"\"\n__version__ = '1.2'\n\n# Name (defaults to program name)\nname = ''\noptionlist = None # List of passed options\n\nclass MyLog(object):\n\n options = []\n # Header (default to program name)\n header = ''\n\n # Synopsis (%(name)s is replaced by the program name)\n synopsis = '%(name)s [option] files...'\n\n # General information printed after the possible options (optional)\n about = ''\n\n # Copyright to show\n copyright = __copyright__\n\n # Version (optional)\n version = ''\n\n def __init__(self, log):\n self.log = log\n self.first_log = True\n self.name = name\n\n def set_filename(self, filename):\n self.filename = filename\n\n def get_filename(self):\n return self.filename\n\n def log_message(self, message):\n if self.first_log:\n self.first_log = False\n self.log.append(\"### In file %s ###\" % self.filename)\n self.log.append(message)\n\n def get_log(self):\n return self.log\n\n def path_mtime(self, filename):\n filename = 'C:/_git/vcs/_1.data/______test_files1/picture1.jpg'\n return os.stat(filename)\n\n def print_header(self):\n\n print('-'*72)\n print(self.header % self.__dict__)\n print('-'*72)\n print()\n\n def handle__copyright(self,arg):\n\n self.print_header()\n copyright = self.copyright % self.__dict__\n print(copyright.strip())\n print()\n return 0\n\n def help(self,note=''):\n\n if self.synopsis:\n print('Synopsis:')\n # To remain backward compatible:\n try:\n synopsis = self.synopsis % self.name\n except (NameError, KeyError, TypeError):\n synopsis = self.synopsis % self.__dict__\n print(' ' + synopsis)\n print()\n self.print_options()\n if self.version:\n print('Version:')\n print(' %s' % self.version)\n print()\n if self.about:\n about = self.about % self.__dict__\n print(about.strip())\n print()\n if note:\n print('-'*72)\n print('Note:',note)\n print()\n\n def notice(self,note):\n\n print('-'*72)\n print('Note:',note)\n print('-'*72)\n print()\n\n def print_options(self):\n\n options = self.options\n print('Options and default settings:')\n if not options:\n print(' None')\n return\n int = [x for x in options if x.prefix == '--']\n short = [x for x in options if x.prefix == '-']\n items = short + int\n for o in options:\n print(' ',o)\n print()\n\n\n \n\nlog = []\n\nccc = MyLog(log)\n\nfilename = 'kkkkk.dat'\nccc.set_filename(filename)\n\nddd = ccc.get_filename()\nprint(ddd)\n\nmesg1 = 'aaaa'\nmesg2 = 'bbbb'\nmesg3 = 'cccc'\n\nccc.log_message(mesg1)\nccc.log_message(mesg2)\nccc.log_message(mesg3)\n\nlineno = 123\nfor_output = 'cccc.txt'\nmsg = \"Line %d: could not convert: %s\"\nccc.log_message(msg % (lineno, for_output))\n\nddd = ccc.get_log()\nprint(ddd)\n\neee = ccc.path_mtime('aaaaa')\nprint(type(eee))\nprint(eee)\n\nfff = ccc.handle__copyright('tttt')\n\nggg = ccc.help('222')\n\nclass BufferedSubFile(object):\n def __init__(self):\n self.mesg_stack = []\n self.mesg_count = 0\n self._closed = False\n\n def push_mesg(self, mesg):\n self.mesg_stack.append(mesg)\n self.mesg_count += 1\n\n def pop_mesg(self):\n if self.mesg_count > 0:\n mesg = self.mesg_stack.pop()\n self.mesg_count -= 1\n else:\n mesg = '無資料'\n return mesg\n\n def close(self):\n self.mesg_stack = []\n self.mesg_count = 0\n self._closed = True\n\n\nccc = BufferedSubFile()\nccc.push_mesg('aaa')\nccc.push_mesg('bbb')\nccc.push_mesg('ccc')\n\nppp = ccc.pop_mesg()\nprint(ppp)\nppp = ccc.pop_mesg()\nprint(ppp)\nppp = ccc.pop_mesg()\nprint(ppp)\nppp = ccc.pop_mesg()\nprint(ppp)\nppp = 
ccc.pop_mesg()\nprint(ppp)\n\n\n\n\n\n\n\n\n\n","sub_path":"_4.python/class/test04_class4.py","file_name":"test04_class4.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489844043","text":"import json\r\n\r\nimport requests\r\nimport time\r\nfrom requests import HTTPError\r\n\r\nimport secret\r\nfrom model import Model\r\nfrom sqlalchemy import (\r\n    Column,\r\n    String,\r\n    exists,\r\n)\r\n\r\nfrom utility import log\r\n\r\n\r\nclass API(Model.base):\r\n    __tablename__ = 'api'\r\n    graph_query = Column(String, primary_key=True)\r\n    response = Column(String)\r\n\r\n    @classmethod\r\n    def _exist(cls, query):\r\n        statement = exists().where(API.graph_query == query)\r\n        r = Model.session.query(statement).scalar()\r\n        log('cache exist', r, query)\r\n        return r\r\n\r\n    @classmethod\r\n    def _get(cls, query):\r\n        result = Model.session.query(API).filter(API.graph_query == query).scalar()\r\n        log('get result for query', query)\r\n        log('get result for query', result)\r\n        return result\r\n\r\n    @classmethod\r\n    def _set(cls, query, response):\r\n        log('add result for query', query)\r\n        c = API(\r\n            graph_query=query,\r\n            response=response,\r\n        )\r\n        Model.session.merge(c)\r\n        Model.session.commit()\r\n\r\n    @classmethod\r\n    def get_v4(cls, query, force=False):\r\n        if not force and cls._exist(query):\r\n            c = cls._get(query)\r\n            r = json.loads(c.response)\r\n            return r\r\n        else:\r\n            url = 'https://api.github.com/graphql'\r\n            json_query = {\r\n                'query': query\r\n            }\r\n            headers = {'Authorization': 'bearer {}'.format(secret.token)}\r\n            r = requests.post(url=url, json=json_query, headers=headers)\r\n            if r.status_code == 200:\r\n                j = r.json()\r\n                cls._set(query, r.text)\r\n                return j\r\n            else:\r\n                message = 'url {} get error code {}'.format(url, r.status_code)\r\n                raise HTTPError(message, response=r)\r\n\r\n    @classmethod\r\n    def get_v3(cls, query, force=False):\r\n        if not force and cls._exist(query):\r\n            c = cls._get(query)\r\n            r = json.loads(c.response)\r\n            return r\r\n        else:\r\n            base = 'https://api.github.com'\r\n            url = '{}{}'.format(base, query)\r\n            log('get v3 url', url)\r\n            headers = {'Authorization': 'bearer {}'.format(secret.token)}\r\n            r = requests.get(url=url, headers=headers)\r\n\r\n            rate_limit = int(r.headers['X-RateLimit-Limit'])\r\n            rate_reset = int(r.headers['X-RateLimit-Reset'])\r\n            rate_remaining = int(r.headers['X-RateLimit-Remaining'])\r\n            log('rate limit <{}> rate remaining <{}>'.format(rate_limit, rate_remaining))\r\n            now = int(time.time())\r\n            log('rate will reset in <{}>'.format(rate_reset - now))\r\n\r\n            if r.status_code == 200:\r\n                log('get v3 r', r)\r\n                j = r.json()\r\n                cls._set(query, r.text)\r\n                return j\r\n            elif rate_remaining == 0:\r\n                log('no rate remaining')\r\n                # rate_reset is a future epoch timestamp; sleep 5 s extra to be safe\r\n                time.sleep(rate_reset - now + 5)\r\n            else:\r\n                message = 'url {} get error code {}'.format(url, r.status_code)\r\n                raise HTTPError(message, response=r)\r\n\r\n    @classmethod\r\n    def get_crawler(cls, query, force=False):\r\n        if not force and cls._exist(query):\r\n            c = cls._get(query)\r\n            html = c.response\r\n            return html\r\n        else:\r\n            base = 'https://github.com'\r\n            url = '{}{}'.format(base, query)\r\n            log('get crawler url', url)\r\n            agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \" \\\r\n                    \"AppleWebKit/537.36 (KHTML, like Gecko) \" \\\r\n                    \"Chrome/62.0.3202.94 Safari/537.36\"\r\n            headers = {'User-Agent': agent}\r\n            r = requests.get(url=url, headers=headers)\r\n            if r.status_code == 200:\r\n                html = r.text\r\n                
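                # store the raw HTML in the api cache table so an identical query is next served from the local DB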
cls._set(query, html)\r\n return html\r\n else:\r\n message = 'url {} get error code {}'.format(url, r.status_code)\r\n raise HTTPError(message, response=r)\r\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"488185281","text":"\n# coding: utf-8\n\n# ### Imports\n\n# In[1]:\n\n\n#get_ipython().magic(u'matplotlib inline')\n\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n# The GPU id to use, usually either \"0\" or \"1\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1,2,3\" \n\nimport numpy as np\nimport glob\nimport cv2\nimport pickle\nfrom random import shuffle\n\nfrom PIL import Image\nimport os\nimport sys\nimport bcolz\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport keras\n\nfrom keras_tqdm import TQDMNotebookCallback\nfrom keras import initializers\nfrom keras.applications.resnet50 import ResNet50, decode_predictions\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\n\nfrom keras.layers import *\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import decode_predictions, preprocess_input\n\nfrom keras.models import Model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import multi_gpu_model\nfrom keras.layers import Input\nfrom keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D\nfrom keras.utils.data_utils import get_file\nfrom keras.utils.layer_utils import convert_all_kernels_in_model\nimport keras.backend as K\n\nfrom vgg16_avg import VGG16_Avg\n\n\n# ### SDK Versions\n# \n\n# In[2]:\n\n\nprint('TensorFlow:',tf.__version__)\nprint('Keras:',keras.__version__)\n\n\n# # Celeb Faces\n\n# In[6]:\n\n\n#def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w'); c.flush()\n#def load_array(fname): return bcolz.open(fname)[:]\n\n\n# In[ ]:\n\nfolder = '../CelebA/img_align_celeba/*jpg'\n\noriginal_img_fnames = []\n\nfor fname in glob.glob(folder, recursive=True):\n original_img_fnames.append(fname)\n \ndef batchGenerator(image_fnames, batch_size=128):\n \n targ = np.zeros((batch_size, 128))\n\n while True:\n\n orginal_image_batch = []\n small_res_image_batch = []\n shuffle(image_fnames)\n \n for fname in image_fnames:\n \n small_res_image = cv2.resize(cv2.cvtColor(cv2.imread(fname.replace('img_align_celeba', 'img_align_celeba_smallres')), cv2.COLOR_BGR2RGB),(22,22), interpolation = cv2.INTER_CUBIC)\n original_image = cv2.resize(cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB),(176,176), interpolation = cv2.INTER_CUBIC)\n \n orginal_image_batch.append(original_image)\n small_res_image_batch.append(small_res_image)\n \n if len(orginal_image_batch) == batch_size:\n #image_batch = [PIL.Image.fromarray(i) for i in image_batch]\n #val_data = [i.resize((INPUT_YOLO_FEATURE_SIZE, INPUT_YOLO_FEATURE_SIZE), PIL.Image.BICUBIC) for i in val_data]\n #image_batch = [np.array(image, dtype=np.float) for image in image_batch]\n #image_batch = [image/255. 
for image in image_batch]\n yield([np.array(small_res_image_batch), np.array(orginal_image_batch)], targ)\n orginal_image_batch = []\n small_res_image_batch = []\n \n# if len(orginal_image_batch) != 0:\n# yield([np.array(small_res_image_batch), np.array(orginal_image_batch)], targ)\n\n\n# In[7]:\n\n\n\"\"\"dpath = '/Users/samwitteveen/Dropbox/ai_learning/Key DL Learning'\n#dpath = '/home/paperspace/Dropbox/ai_learning/Key DL Learning'\nbcolz_hr = '/celeba-176_2k.bc'\nbcolz_lr = '/celeba-44_2k.bc'\nbcolz_elr = '/celeba-22_2k.bc'\nbcolz_test_lr = '/celeba-44_test.bc'\nbcolz_test_hr = '/celeba-176_test.bc'\nbcolz_test_elr ='/celeba-22_test.bc'\n\n#Original Image - Training\narr_hr = load_array(dpath+bcolz_hr)\n#Small Res Image - Training\narr_elr = load_array(dpath+bcolz_elr)\n#Original Image - Test\narr_test_hr = load_array(dpath+bcolz_test_hr)\n#Small Res Image - Test\narr_test_elr = load_array(dpath+bcolz_test_elr)\n\n#arr_lr = load_array(dpath+bcolz_lr)\"\"\"\n\n\n# In[5]:\n\n\n#arr_elr.shape\n\n\n# \n# #### Image Preproc\n\n# In[8]:\n\n\n# vgg preproc\nrn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32) #RGB\npreproc = lambda x: (x - rn_mean)[:, :, :, ::-1] #Switch back to BGR\n\n\n# ### Set up Network parts\n# \n# ConvBlock \n# ResBlock\n\n# In[9]:\n\n\ndef conv_block(x, num_filters, filter_size, stride=(2,2), mode='same', act=True):\n x = Convolution2D(num_filters, filter_size, filter_size, subsample=stride, border_mode=mode)(x)\n x = BatchNormalization()(x)\n return Activation('relu')(x) if act else x\n\ndef res_block(initial_input, num_filters=64):\n x = conv_block(initial_input, num_filters, 3, (1,1))\n x = conv_block(x, num_filters, 3, (1,1), act=False)\n return add([x, initial_input])\n\n\n# Deconvolution / Transposed Conv / Fractionally Strident Convs\n\n# In[10]:\n\n\n# Up Sampling block aka Decon\ndef up_block(x, num_filters, size):\n x = keras.layers.UpSampling2D()(x)\n x = Convolution2D(num_filters, size, size, border_mode='same')(x)\n x = BatchNormalization()(x)\n return Activation('relu')(x)\n\n\n# ### Set up Deconv network - Upsampling network\n\n# In[1]:\n\n\ndef get_upsampling_model_8x():\n inp=Input([22,22,3])\n x=conv_block(inp, 64, 9, (1,1))\n x=res_block(x)\n x=res_block(x)\n x=res_block(x)\n x=res_block(x)\n x=up_block(x, 64, 3)\n x=up_block(x, 64, 3)\n x=up_block(x, 64, 3)\n x=Convolution2D(3, 9, 9, activation='tanh', border_mode='same')(x)\n outp=Lambda(lambda x: (x+1)*127.5)(x)\n return inp,outp\n\n\n# In[13]:\n\n\n#up_model = Lambda(get_upsampling_model_4x(arr_elr))\n\n\n# In[14]:\n\n\n#up_model.summary()\n\n\n# In[16]:\n\n\n# this gets the output \nupsampled_inp,upsampled_output = get_upsampling_model_8x()\n\n\n# In[17]:\n\nwith tf.device('/cpu:0'):\n up_model2 = Model(upsampled_inp,upsampled_output)\n up_model2.summary()\n\n\n# ### VGG network\n# \n# this is only used to for calculating our loss\n# \n\n# In[18]:\n\n\n#vgg input\nvgg_inp=Input([176,176,3])\n\n#vgg network\nvgg= VGG16(include_top=False, input_tensor=vgg_inp)\nfor l in vgg.layers: l.trainable=False\n\n\n# In[19]:\n\n\n# Lambda makes a layer of a function/ this makes the preprocessing a layer\npreproc_layer = Lambda(preproc)\n\n\n# In[20]:\n\n\n# get the vgg output \nvgg_out_layer = vgg.get_layer('block2_conv2').output\n\n# making model Model(inputs, outputs)\nwith tf.device('/cpu:0'):\n vgg_content = Model(vgg_inp, vgg_out_layer)\n vgg_content.summary()\n\n # In[21]:\n\n # this is the VGG model with the HR input\n vgg_hr_image = vgg_content(preproc_layer(vgg_inp))\n\n # this is the 
upsampled network\n vgg_it_op = vgg_content(preproc_layer(upsampled_output))\n\n # ### Loss and Optimisers\n # \n\n # In[22]:\n loss = Lambda(lambda x: K.sqrt(K.mean((x[0]-x[1])**2, (1,2))))([vgg_hr_image, vgg_it_op])\n\n\n# In[23]:\n\nwith tf.device('/cpu:0'):\n sr_model = Model([upsampled_inp, vgg_inp], loss)\n\nparallel_model = multi_gpu_model(sr_model, gpus=4)\n\nparallel_model.compile('adam', 'mse')\n#sr_model.compile('adam', 'mse')\n\n# ### Training\n\n# In[82]:\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n\ncheckpointer = ModelCheckpoint(filepath='./weights/celba_2k_8x_{epoch:02d}.hdf5', verbose=1)\nhistory = parallel_model.fit_generator(batchGenerator(original_img_fnames),\n steps_per_epoch=1582, epochs=100, callbacks=[checkpointer, tensorboard])\nwith open('trainHistoryDict', 'wb') as file_pi:\n pickle.dump(history.history, file_pi)\n#sr_model.fit_generator(batchGenerator(original_img_fnames), 8, 20)\n\n\n# ### Saver\n# \n\n# In[83]:\n\n\nit_model = Model(upsampled_inp, upsampled_output)\nit_model.save_weights('./weights/'+'celba_2k_8x.h5')\n\n\n# In[24]:\n\n\nit_model = Model(upsampled_inp, upsampled_output)\nit_model.load_weights('./weights/'+'celba_2k_8x.h5')\n\n\n# ### Examples\n# \n# show 1. low res 2. hi-res 3. ground truth\n# \n\n# In[25]:\n\n\n#get_ipython().magic(u'time p = it_model.predict(arr_elr[0:50])')\n#p.shape\n\n\n# In[26]:\n\n\ndef compare_pics(x,y):\n fig = plt.figure(figsize=(30,30))\n a=fig.add_subplot(1,2,1)\n imgplot = plt.imshow(x)\n a=fig.add_subplot(1,2,2)\n imgplot = plt.imshow(y)\n\n\n# In[27]:\n\n\ncompare_pics(arr_elr[10].astype('uint8'), p[10].astype('uint8'))\n\n\n# In[32]:\n\n\ncompare_pics(arr_elr[13].astype('uint8'), p[13].astype('uint8'))\n\n\n# In[33]:\n\n\ncompare_pics(arr_hr[13].astype('uint8'),p[13].astype('uint8'))\n\n\n# # Predicting on Test set that the model hasn't seen\n\n# In[34]:\n\n\nget_ipython().magic(u'time p = it_model.predict(arr_test_elr[0:50])')\np.shape\n\n\n# In[35]:\n\n\ncompare_pics(arr_test_elr[24].astype('uint8'),p[24].astype('uint8'))\n\n\n# In[45]:\n\n\ncompare_pics(arr_test_hr[24].astype('uint8'),p[24].astype('uint8'))\n\n\n# # Let's Predict on the Prediction or 64x SR\n\n# In[46]:\n\n\nnew_upsampled_inp,new_upsampled_output = get_upsampling_model_4x(p[20:25])\n\n\n# In[47]:\n\n\nnew_up_model = Model(new_upsampled_inp,new_upsampled_output)\nnew_up_model.summary()\n\n\n# In[48]:\n\n\nnew_new_up_model = Model(new_upsampled_inp, new_upsampled_output)\nnew_new_up_model.load_weights('./weights/'+'celba_2k_8x.h5')\n\n\n# In[50]:\n\n\nget_ipython().magic(u'time new_p = new_new_up_model.predict(p[24:25])')\n\n\n# In[51]:\n\n\nnew_p.shape\n\n\n# In[52]:\n\n\ncompare_pics(new_p[0].astype('uint8'),p[24].astype('uint8'))\n\n\n# In[ ]:\n\n\ncompare_pics(new_p[4].astype('uint8'),arr_test_hr[24].astype('uint8'))\n\n\n# ### Credits\n# \n# Papers: \"Perceptual Losses for Real-Time Style Transfer and Super-Resolution\" by Johnson, et.al\n# http://arxiv.org/abs/1603.08155\n# \n# \"A Neural Algorithm of Artistic Style\" by Gatys et.al \n# http://arxiv.org/abs/1508.06576v2\n# \n# Code ideas inspired by Jermey Howard's SFData Institute Advanced Deep Learning Course \n","sub_path":"Talk03_8xSuperResolution/SuperResolutionforTFTalk-GPU-2k-8x-Presentation.py","file_name":"SuperResolutionforTFTalk-GPU-2k-8x-Presentation.py","file_ext":"py","file_size_in_byte":9464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504401175","text":"# This program is used to produce a 
2-dimension parton shower with an input energy of initial state particle\nimport random\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nDelta_t = 0.5 # Here we assume that the time between a splitting and another is a constant\nEcrit = 20 # Particles with energy below Ecrit will not split\n\nclass Particle(object):\n \n def __init__(self, Px, Py, Xi, Yi, theta):\n self._Px = Px\n self._Py = Py\n self._E = math.sqrt(Px*Px + Py*Py)\n self._M = 1.0\n self._Xi = Xi\n self._Yi = Yi\n self._theta = theta\n self._s = True # s is a flag which indicates whether the particle has already split or not\n \n def Split(self):\n # The final position of a particle is:\n self._Xf = self._Xi + self._Px/self._M * Delta_t\n self._Yf = self._Yi + self._Py/self._M * Delta_t\n # Here we set PDF(z) = 1/(1+z), 0 <= z <= 1, and theta has the PDF = 1/theta where theta is between [0.001, Pi/2] \n u = random.random()*np.log(2)\n z = np.exp(u)-1\n self._z = z\n iran = 0\n while iran < 1:\n newtheta = random.random()*math.pi/2.0\n v = random.random()*1000\n if newtheta < 0.001:\n continue\n if v < 1/newtheta:\n self._newtheta = newtheta\n iran = iran+1\n self._s = False # This means that this particle has already split into two particles\n \n \nuser_input = input(\"Enter the initial particle with an Energy(GeV)\\nPlease Input here: \")\ninputline = user_input\n\nParticleList = []\nE = float(inputline)\nPx = E; Py = 0.0\nx = 0.0; y = 0.0\ntheta0 = 0.0\nPar0 = Particle(Px, Py, x, y, theta0)\nParticleList.append(Par0)\n\n# The splitting starts here\nwhile True:\n for pars in ParticleList:\n if pars._E > Ecrit and pars._s == True:\n pars.Split()\n # The first particle's information\n Px = pars._E*pars._z*math.cos(pars._theta)\n Py = pars._E*pars._z*math.sin(pars._theta+pars._newtheta)\n newpar1 = Particle(Px, Py, pars._Xf, pars._Yf, pars._theta+pars._newtheta)\n ParticleList.append(newpar1)\n \n # The second particle's information\n Px = pars._E*(1-pars._z)*math.cos(pars._theta)\n Py = pars._E*(1-pars._z)*math.sin(pars._theta-pars._newtheta)\n newpar2 = Particle(Px, Py, pars._Xf, pars._Yf, pars._theta-pars._newtheta)\n ParticleList.append(newpar2)\n \n # If all the particles' energies are below Ecrit, the splitting will stop\n ipars = 0\n for pars in ParticleList:\n if pars._E < Ecrit or pars._s == False:\n ipars = ipars+1\n if ipars >= len(ParticleList):\n break\n \n# Drawing the plot of the shower\nfor pars in ParticleList:\n if pars._E > Ecrit:\n plt.plot([pars._Xi, pars._Xf], [pars._Yi, pars._Yf], color='r')\n plt.scatter([pars._Xi, pars._Xf], [pars._Yi, pars._Yf], color='b')\n if pars._E < Ecrit:\n pars._Xf = pars._Xi + pars._Px*4*Delta_t # In fact, there is no splitting here, it's just for calculating Xf and Yf\n pars._Yf = pars._Yi + pars._Py*4*Delta_t\n plt.plot([pars._Xi, pars._Xf], [pars._Yi, pars._Yf], color='r')\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.show()\n","sub_path":"Exercise5_SingleShower_2D.py","file_name":"Exercise5_SingleShower_2D.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520802424","text":"#regexp_corey_shafer.py\n\n\nimport re\n\n\ntext_to_search=\"aaaaaaaaaaaaaaaaaaaaaaaabbbsbsbsbsbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbabc\"\n\npattern = re.compile(r\"abc\")\n\nmatches = pattern.findall(text_to_search)\n\nfor i in matches:\n\tprint(i)\n\n\"\"\"\nfor match in matches:\n\tprint 
match\"\"\"","sub_path":"regexp_corey_shafer.py","file_name":"regexp_corey_shafer.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214008189","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = [\n \"test_basic_pickle\", \"test_hodlr_pickle\",\n]\n\nimport pickle\nimport numpy as np\n\nfrom .. import GP, kernels, BasicSolver, HODLRSolver\n\n\ndef _fake_compute(arg, *args, **kwargs):\n assert 0, \"Unpickled GP shouldn't need to be computed\"\n\n\ndef _test_pickle(solver, success, N=50, seed=123):\n np.random.seed(seed)\n kernel = 0.1 * kernels.ExpSquaredKernel(1.5)\n kernel.pars = [1, 2]\n gp = GP(kernel, solver=solver)\n x = np.random.rand(100)\n gp.compute(x, 1e-2)\n\n s = pickle.dumps(gp, -1)\n gp = pickle.loads(s)\n if success:\n gp.compute = _fake_compute\n gp.lnlikelihood(np.sin(x))\n\n\ndef test_basic_pickle(**kwargs):\n _test_pickle(BasicSolver, True, **kwargs)\n\n\ndef test_hodlr_pickle(**kwargs):\n _test_pickle(HODLRSolver, False, **kwargs)\n","sub_path":"george/testing/test_pickle.py","file_name":"test_pickle.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194284940","text":"\"\"\"ProjectsApp Views\n\nCreated on 10/02/16.\n\"\"\"\nfrom django.shortcuts import render\n\nfrom . import models\nfrom . import forms\nfrom GroupsApp.models import Group\nfrom AuthenticationApp.models import Bookmark\nfrom AuthenticationApp.models import MyUser\nfrom ProjectsApp.models import Project\n\nimport datetime\n\ndef removeProjectComment(request):\n\treturn 0\n\ndef getProjectComment(request):\n\treturn 0\n\ndef getProjects(request):\n\tprojects_list = models.Project.objects.all()\n\treturn render(request, 'projects.html', {\n\t\t'projects': projects_list,\n\t})\n\ndef getBookmarks(request):\n\tif (request.user.is_authenticated()):\n\t\tbookmarks_list = Bookmark.objects.all()\n\t\tpIdsToGet = [bookmark.projectID for bookmark in bookmarks_list if bookmark.userID == request.user.id]\n\t\tprojects_list = Project.objects.filter(id__in=pIdsToGet)\n\t\tcontext = {\n\t\t\t'projects' : projects_list\n\t\t}\n\t\treturn render(request, 'bookmarks.html', context)\n\treturn render(request, 'autherror.html')\n\n\ndef bookmarkProject(request):\n\tif (request.user.is_authenticated()):\n\t\tin_projectName = request.GET.get('name', 'None')\n\t\tin_project = Project.objects.get(name__exact=in_projectName)\n\t\tin_user = MyUser.objects.get(email__exact=request.user.email)\n\t\ttry:\n\t\t\tthis_bookmark = Bookmark.objects.get(userID=request.user.id, projectID=in_project.id)\n\t\t\tthis_bookmark.delete()\n\t\texcept:\n\t\t\tin_bookmark = Bookmark()\n\t\t\tin_bookmark.projectID = in_project.id\n\t\t\tin_bookmark.userID = in_user.id\n\t\t\tin_bookmark.save()\n\n\t\tcontext = {\n\t\t\t'project' : in_project\n\t\t}\n\t\treturn render(request, 'project.html', context)\n\n\treturn render(request, 'autherror.html')\n\ndef getProject(request):\n\tin_name = request.GET.get('name', None)\n\tin_project = models.Project.objects.get(name__exact=in_name)\n\tis_member = in_project.createdBy\n\tgroups_list = Group.objects.all()\n\t# assigned_groups = in_project.project_groups.all()\n\tcontext = {\n\t\t'project': in_project,\n\t\t'userIsMember': is_member,\n\t\t'groups_list': groups_list,\n\t\t# 'assigned_groups': assigned_groups\n\t}\n\tif request.method == 
'POST':\n\t\tgroup_names = request.POST.getlist('dropdownl', 'None')\n\t\tif group_names != 'None':\n\t\t\tfor group_name in group_names:\n\t\t\t\tgroup = Group.objects.get(name__exact=group_name)\n\t\t\t\tgroup.project.add(in_project)\n\t\t\t\tgroup.save()\n\t\telse:\n\t\t\treturn render(request, 'autherror.html')\n\n\treturn render(request, 'project.html', context)\n\n\ndef getProjectForm(request):\n\tif request.user.is_authenticated():\n\t\treturn render(request, 'projectform.html')\n\t# render error page if user is not logged in\n\treturn render(request, 'autherror.html')\n\n\ndef getProjectFormSuccess(request):\n\tprint(\"In function\")\n\tif request.user.is_authenticated():\n\t\tif request.method == 'POST':\n\t\t\tform = forms.ProjectForm(request.POST)\n\t\t\tprint(form.errors)\n\t\t\tif form.is_valid():\n\t\t\t\tif models.Project.objects.filter(name__exact=form.cleaned_data['name']).exists():\n\t\t\t\t\treturn render(request, 'projectform.html', {'error': 'Error: That Project name already exists!'})\n\t\t\t\tnew_project = models.Project(name=form.cleaned_data['name'], description=form.cleaned_data['description'],\n\t\t\t\t\t\t\t\t\t\t\t languages=form.cleaned_data['languages'], experience=form.cleaned_data['experience'],\n\t\t\t\t\t\t\t\t\t\t\t speciality=form.cleaned_data['speciality'], createdBy=request.user.email)\n\t\t\t\tnew_project.created_at = datetime.datetime.now()\n\t\t\t\tnew_project.updated_at = datetime.datetime.now()\n\t\t\t\tnew_project.save()\n\t\t\t\trequest.user.save()\n\t\t\t\tcontext = {\n\t\t\t\t\t'name': form.cleaned_data['name'],\n\t\t\t\t}\n\t\t\t\treturn render(request, 'projectformsuccess.html', context)\n\t\telse:\n\t\t\tform = forms.ProjectForm()\n\t\treturn render(request, 'projectform.html')\n\t# render error page if user is not logged in\n\treturn render(request, 'autherror.html')\n\ndef removeProject(request):\n\tif request.user.is_authenticated():\n\t\tin_project_name = request.GET.get('name', 'None')\n\t\tin_project = models.Project.objects.get(name__exact=in_project_name)\n\t\tin_project.delete()\n\n\t\tprojects_list = models.Project.objects.all()\n\t\treturn render(request, 'projects.html', {\n\t\t\t'projects': projects_list,\n\t\t})\n\n\treturn render(request, 'autherror.html')\n\n\n","sub_path":"ProjectsApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574236820","text":"#!/usr/bin/env python\nimport requests\nimport sys\nimport os\nimport fcntl\nsys.path.append('/build/toolchain/lin32/perl-yaml-libyaml-0.41/lib/site_perl/5.10.0/i686-linux-thread-multi-64int')\nsys.path.append('/build/toolchain/noarch/python-pyyaml-3.11/lib/python2.7/site-packages')\nimport yaml\nimport json\nimport logging\nfrom datetime import datetime\n\nRESULT_BASE_DIR = '/tmp/scratch_results_from_cat/'\n\ndef check_file_exsit(file_name):\n try:\n with open(file_name, 'r') as f:\n return True\n except IOError:\n return False\n\ndef read_yaml_file(yaml_name):\n with open(yaml_name, 'r') as f:\n try:\n return yaml.load(f)\n except yaml.YAMLError as error:\n print(error)\n\ndef read_processed_file(processed_file):\n processed_list = []\n try:\n fp = open(processed_file, 'r')\n for line in fp:\n line = line.rstrip('\\n')\n processed_list.append(line)\n except Exception as e:\n print(\"WARN: Read processed test run id failed: %s\" % e)\n return processed_list\n return processed_list\n\ndef read_url(url):\n #print('[DEBUG]: Getting data from %s' % 
url)\n info = {}\n try:\n result = requests.get(url)\n info = json.loads(result.content)\n except Exception as e:\n print('read %s hit error %s' % (url, e))\n return info\n return info\n\ndef get_processed_filename(area_id, branch_name, changeset):\n return 'run_data/%s/%s/%s/processed_testrunids' \\\n % (area_id, branch_name, changeset)\n\ndef get_changeset_summary_file(area_id, branch_name, changeset):\n summary_path = 'run_data/%s/%s/%s/summary.json' \\\n % (area_id, branch_name, changeset)\n return get_base_filename(summary_path)\n\ndef get_processed_changeset(area_id, branch_name):\n return 'run_data/%s/%s/processed_changeset' % (area_id, branch_name)\n\ndef get_base_filename(filename, result_base_dir=None):\n if result_base_dir is None:\n result_base_dir = RESULT_BASE_DIR\n return os.path.join(result_base_dir, filename)\n\ndef save_to_processed_file(processed_file, prepared_data, seq=None):\n try:\n dir_name = os.path.dirname(processed_file)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n fp = open(processed_file, 'a')\n fcntl.flock(fp, fcntl.LOCK_EX)\n fp.write(prepared_data + '\\n')\n fcntl.flock(fp, fcntl.LOCK_UN)\n fp.close()\n except Exception as e:\n print(seq, \"ERROR: Saving %s to processed file %s failed: \" % (prepared_data, processed_file), e)\n pass\n\ndef save_to_json_file(output_filename, prepared_data):\n try:\n dir_name = os.path.dirname(output_filename)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(output_filename, 'w') as writer:\n json.dump(prepared_data, writer, indent=4)\n except Exception as e:\n print(e, 'ERROR: when saving %s to json file %s ' % (prepared_data, output_filename))\n\ndef check_storage():\n for d in ['/PA/results', '/WDC/results']:\n if not os.path.isdir(d):\n print(d, ' is not mounted')\n sys.exit(1)\n\ndef construct_testruns_link(config_data):\n url = 'https://cat2.eng.vmware.com/#/area/'\n url += '%s/testing' % config_data['area_id']\n url += '?branch_name=%s' % config_data['branch_name']\n url += '&build_type_name=%s' % config_data['build_type_name']\n url += '&count=%s' % config_data['count']\n return url\n\ndef setup_logging(logfile, loglevel=logging.INFO, stdout=True):\n overall_format = '%(asctime)s %(levelname)-8s %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n formatter = logging.Formatter(overall_format, datefmt=date_format)\n logger = logging.getLogger('RBT')\n logging.basicConfig(level=logging.WARN)\n logger.setLevel(loglevel)\n\n fh = logging.FileHandler(logfile)\n fh.setLevel(loglevel)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n if not stdout:\n logger.propagate = False\n return logger\n\nif __name__ == '__main__':\n save_to_json_file('/tmp/ab.json', {'a':'b','c':'d'})\n","sub_path":"myutility.py","file_name":"myutility.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54445953","text":"\"\"\"\r\nDay 3: Simple programming problems (5 hours):\r\nswap two variables,\r\nconvert degrees Celsius to degrees Fahrenheit,\r\ncompute the sum of all digits in a number, check a number for primality,\r\ngenerate a random number, remove duplicates from a list\r\n\"\"\"\r\n#1 swap two variables\r\na = 2\r\nb = 3\r\nprint(f' a = > {a}')\r\nprint(f' b = > {b}')\r\nprint(\"Magika\")\r\na,b = b,a\r\nprint(f' a = > {a}')\r\nprint(f' b = > {b}')\r\n\r\n# Convert Celsius to Fahrenheit\r\ncelsius = float(input('Input °C Celsius : '))\r\ncelsius_to_Fahrenheit = (celsius * (9/5)) + 32\r\nprint(f'Convert {celsius} °C Celsius to Fahrenheit => {celsius_to_Fahrenheit :.2f} °F')\r\n\r\n\r\n# compute the sum of all digits in a number\r\nnum = abs(int(input('Enter a number to sum its digits: ')))\r\nprint('Digit sum:', sum(int(d) for d in str(num)))\r\n\r\n\r\n# check a number for primality\r\nfrom math import sqrt\r\ndef is_prime(n):\r\n if n < 2:\r\n return False\r\n if n == 2:\r\n return True\r\n limit = sqrt(n)\r\n i = 2\r\n while i <= limit:\r\n if n % i == 0:\r\n return False\r\n i += 1\r\n return True\r\n\r\nnum = int(input('Enter a number to check for primality: '))\r\nprint(is_prime(num))\r\n\r\n\r\n# generate a random number\r\nimport random\r\nprint(f'Random number from 1 to 100 => {random.randint(1,100)}\\n')\r\n\r\n# remove duplicates from a list\r\ndef del_dups(mylist):\r\n # remove duplicates from a list\r\n # compare each element with the rest\r\n # do not remove copies; simply skip adding them to the new list\r\n # build a new list and return it\r\n newlist = []\r\n for i in mylist:\r\n if i not in newlist:\r\n newlist.append(i)\r\n return newlist\r\n\r\nmy_list = [8, 8, 9, 9, 7, 15, 15, 2, 20, 13, 2, 24, 6, 11, 7, 12, 4, 10, 18,\r\n 13, 23, 11, 3, 11, 12, 10, 4, 5, 4, 22, 6, 3, 19, 14, 21, 11, 1,\r\n 5, 14, 8, 0, 1, 16, 5, 10, 13, 17, 1, 16, 17, 12, 6, 10, 0, 3, 9,\r\n 9, 3, 7, 7, 6, 6, 7, 5, 14, 18, 12, 19, 2, 8, 9, 0, 8, 4, 5]\r\nprint(f'remove duplicates from the list : {my_list} \\n\\n result => {del_dups(my_list)}')\r\n#print(del_dups(my_list))\r\n# -> [8, 9, 7, 15, 2, 20, 13, 24, 6, 11, 12, 4, 10, 18, 23, 3, 5, 22, 19, 14,\r\n# 21, 1, 0, 16, 17]\r\n\r\n\r\n","sub_path":"Day_3_Simple_problem.py","file_name":"Day_3_Simple_problem.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106909897","text":"import random\n\ndef printGrid(grid):\n for row in grid:\n print(row)\n print()\n\ndef getRow(grid, num):\n return grid[num]\n\ndef getCol(grid, num):\n col = []\n for i in range(5):\n col.append(grid[i][num])\n return col\n\ndef writeRow(grid, num, word):\n for i in range(5):\n grid[num][i] = word[i]\n\ndef writeCol(grid, num, word):\n for i in range(5):\n grid[i][num] = word[i]\n\ndef testWriteRow(grid, num, word):\n grid = [row[:] for row in grid] # copy the rows too, so the original grid is not mutated\n for i in range(5):\n grid[num][i] = word[i]\n return grid\n\ndef testWriteCol(grid, num, word):\n grid = [row[:] for row in grid] # copy the rows too, so the original grid is not mutated\n for i in range(5):\n grid[i][num] = word[i]\n return grid\n\ndef testWrite(grid, num, word):\n if(num < 5):\n return testWriteRow(grid, num, word)\n else:\n return testWriteCol(grid, num - 5, word)\n\ndef fittingWords(present, words):\n words = list(words)\n for i in range(5):\n if(present[i] != ' '):\n for j in range(len(words) - 1, -1, -1):\n if(words[j][i] != present[i]):\n words.pop(j)\n return words\n\ndef getNumPossible(grid, words):\n numPossible = []\n for i in range(5):\n numPossible.append(len(fittingWords(getRow(grid, i), words)))\n for i in range(5):\n numPossible.append(len(fittingWords(getCol(grid, i), words)))\n return numPossible\n\ndef getOptimalWord(grid, position, words):\n max = -1\n maxIndex = -1\n for i in range(len(words)):\n result = testWrite(grid, position, words[i])\n min = getMin(getNumPossible(result, words))\n if(min > max):\n maxIndex = i\n max = min\n return words[maxIndex]\n\ndef getMin(list):\n min = 4267\n for item in list:\n if(item < min):\n min = item\n return min\n\ndictionary = open('5dict.txt', 'r')\nwords = 
[]\nfor word in dictionary:\n words.append(word[:-1])\n\ngrid = [[' ' for i in range(5)] for j in range(5)]\n\n# Add starting words here\n# writeRow(grid, 2, 'happy')\n\nprintGrid(grid)\n\nfilled = []\nfor i in range(5):\n if ' ' in getRow(grid, i):\n filled.append(False)\n else:\n filled.append(True)\nfor i in range(5):\n if ' ' in getCol(grid, i):\n filled.append(False)\n else:\n filled.append(True)\n\nwhile(False in filled):\n numPossible = getNumPossible(grid, words)\n minIndex = -1\n min = 4267\n for i in range(len(numPossible)):\n if(numPossible[i] < min and not filled[i]):\n minIndex = i\n min = numPossible[i]\n\n if(minIndex < 5):\n writeRow(grid, minIndex, getOptimalWord(grid, minIndex, fittingWords(getRow(grid, minIndex), words)))\n else:\n writeCol(grid, minIndex - 5, getOptimalWord(grid, minIndex, fittingWords(getCol(grid, minIndex - 5), words)))\n filled[minIndex] = True\n\n printGrid(grid)\n\n if 0 in getNumPossible(grid, words):\n print(\"NO SOLUTIONS REMAINING\")\n exit()","sub_path":"crossword.py","file_name":"crossword.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644950482","text":"\"\"\" Entry point for `tsrc foreach`. \"\"\"\n\nimport argparse\nimport subprocess\nimport sys\nimport textwrap\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport cli_ui as ui\n\nimport tsrc\nfrom tsrc.cli import (\n add_repos_selection_args,\n add_workspace_arg,\n get_workspace_with_repos,\n)\n\nEPILOG = textwrap.dedent(\n \"\"\"\\\n Usage:\n # Run command directly\n tsrc foreach -- some-cmd --with-option\n Or:\n # Run command through the shell\n tsrc foreach -c 'some cmd'\n \"\"\"\n)\n\nCommand = Union[str, List[str]]\n\n\ndef configure_parser(subparser: argparse._SubParsersAction) -> None:\n parser = subparser.add_parser(\n \"foreach\", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_workspace_arg(parser)\n add_repos_selection_args(parser)\n parser.add_argument(\"cmd\", nargs=\"*\")\n parser.add_argument(\n \"-c\",\n help=\"use a shell to run the command\",\n dest=\"shell\",\n default=False,\n action=\"store_true\",\n )\n parser.set_defaults(run=run)\n\n\ndef run(args: argparse.Namespace) -> None:\n # Note:\n # we want to support both:\n # $ tsrc foreach -c 'shell command'\n # and\n # $ tsrc foreach -- some-cmd --some-opts\n #\n # Due to argparse limitations, `cmd` will always be a list,\n # but we need a *string* when using 'shell=True'.\n #\n # So transform the values from `cmd` and `shell` so that:\n # * action.command is suitable as argument to pass to subprocess.run()\n # * action.description is suitable for display purposes\n command: Command = []\n if args.shell:\n if len(args.cmd) != 1:\n die(\"foreach -c must be followed by exactly one argument\")\n command = args.cmd[0]\n description = args.cmd[0]\n else:\n if not args.cmd:\n die(\"needs a command to run\")\n command = args.cmd\n description = \" \".join(args.cmd)\n shell = args.shell\n\n workspace = get_workspace_with_repos(args)\n\n cmd_runner = CmdRunner(workspace.root_path, command, description, shell=shell)\n tsrc.run_sequence(workspace.repos, cmd_runner)\n ui.info(\"OK\", ui.check)\n\n\nclass CommandFailed(tsrc.Error):\n pass\n\n\nclass CouldNotStartProcess(tsrc.Error):\n pass\n\n\nclass CmdRunner(tsrc.Task[tsrc.Repo]):\n \"\"\"\n Implements a Task that runs the same command on several repositories.\n \"\"\"\n\n def 
__init__(\n self,\n workspace_path: Path,\n command: Command,\n description: str,\n shell: bool = False,\n ) -> None:\n self.workspace_path = workspace_path\n self.command = command\n self.description = description\n self.shell = shell\n\n def display_item(self, repo: tsrc.Repo) -> str:\n return repo.dest\n\n def on_start(self, *, num_items: int) -> None:\n ui.info_1(f\"Running `{self.description}` on {num_items} repos\")\n\n def on_failure(self, *, num_errors: int) -> None:\n ui.error(f\"Command failed for {num_errors} repo(s)\")\n\n def process(self, index: int, count: int, repo: tsrc.Repo) -> None:\n ui.info_count(index, count, repo.dest)\n full_path = self.workspace_path / repo.dest\n if not full_path.exists():\n raise MissingRepo(repo.dest)\n # fmt: off\n ui.info(\n ui.lightgray, \"$ \",\n ui.reset, ui.bold, self.description,\n sep=\"\"\n )\n # fmt: on\n full_path = self.workspace_path / repo.dest\n try:\n rc = subprocess.call(self.command, cwd=full_path, shell=self.shell)\n except OSError as e:\n raise CouldNotStartProcess(\"Error when starting process:\", e)\n if rc != 0:\n raise CommandFailed()\n\n\ndef die(message: str) -> None:\n ui.error(message)\n print(EPILOG, end=\"\")\n sys.exit(1)\n\n\nclass MissingRepo(tsrc.Error):\n def __init__(self, dest: str):\n self.dest = dest\n super().__init__(\"not cloned\")\n","sub_path":"tsrc/cli/foreach.py","file_name":"foreach.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"140532847","text":"class Codec:\n\n url_list = []\n\n def __init__(self):\n self.url_list = []\n\n\n def encode(self, longUrl):\n \"\"\"Encodes a URL to a shortened URL.\n\n :type longUrl: str\n :rtype: str\n \"\"\"\n if longUrl in self.url_list:\n return 'http://tinyurl.com/' + str(self.url_list.index(longUrl))\n else:\n self.url_list.append(longUrl)\n return 'http://tinyurl.com/' + str(len(self.url_list) - 1)\n\n\n def decode(self, shortUrl):\n \"\"\"Decodes a shortened URL to its original URL.\n \n :type shortUrl: str\n :rtype: str\n \"\"\"\n idx = int(shortUrl.replace('http://tinyurl.com/', ''))\n return self.url_list[idx]\n\n\nurl = \"https://leetcode.com/problems/design-tinyurl\"\ncodec = Codec()\ncodec.decode(codec.encode(url))","sub_path":"Solutions/535EncodeAndDecodeTinyURL.py","file_name":"535EncodeAndDecodeTinyURL.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"599873825","text":"#To run this code please run `mpirun -n 4 python dist_pyrescalk_dnations.py` in command line.\n\n\n\n\nimport sys\n\n\nimport pyDRESCALk.config as config\n\nconfig.init(0)\nfrom pyDRESCALk.pyDRESCALk import *\nfrom pyDRESCALk.utils import *\nfrom pyDRESCALk.dist_comm import *\nfrom pyDRESCALk.data_generator import *\nfrom scipy.io import loadmat\n\n\n\ndef dist_rescalk_2d_synthetic():\n\n args = parser()\n args.p_r = 2\n args.p_c = 2\n args.m = 3\n args.n = 12\n args.k = 2\n main_comm = MPI.COMM_WORLD\n rank = main_comm.rank\n size = main_comm.size\n comm = MPI_comm(main_comm, args.p_r, args.p_c)\n args.comm1 = comm.comm\n args.comm = comm\n args.col_comm = comm.cart_1d_column()\n args.row_comm = comm.cart_1d_row()\n args.rank = rank\n args.pgrid = [args.p_r, args.p_c]\n args.shape = [args.m, args.n]\n args.fpath = '../data/tmp/'\n dgen = data_generator(args)\n A_gen, R_gen, X_gen = dgen.fit()\n args.size = size\n args.np = np\n args.fname = 'synthetic'\n args.start_k = 1\n args.end_k = 
4\n args.itr = 500\n args.init = 'rand'\n args.noise_var = 0.015\n args.verbose = True\n args.norm = 'fro'\n args.method = 'mu'\n args.precision = np.float32\n args.results_path = '../Results/'\n pyDRESCALk(X_gen ,factors=None, params=args).fit()\n\n\n\ndist_rescalk_2d_synthetic()\n\n","sub_path":"examples/dist_pyrescalk_synthetic.py","file_name":"dist_pyrescalk_synthetic.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"504891650","text":"import subprocess\nimport os\nimport uuid\nfrom datetime import datetime\n\nos.chdir(\"/Users/itsu/blog/hexo/source/_posts/microblogs/\")\n\nwhile True:\n formatter = '---'\n content = input(\"Pls enter your post: \\n\")\n # input bug: cannot delete Chinese characters in a normal way.(terminal uses bytes objects)\n title = \"title: \" + content\n add_info = str(input(\"if any: \\n\"))\n if len(add_info) > 0:\n title = title + \" >>\"\n while True:\n conf = input(\"Sure to post? (Y/N) \")\n if conf.lower() == \"y\":\n file_name = \"mb_\" + str(datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")) + \".md\"\n date = \"date: \" + datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n vis = \"visible: hide\"\n cat = \"categories: microblogs\"\n\n post = [formatter, title, date, vis, cat, formatter, add_info]\n\n with open(file_name, 'w+') as post_file:\n for line in post:\n post_file.write(line + \"\\n\")\n subprocess.run([\"hexo\", \"g\"])\n subprocess.run([\"hexo\", \"d\"])\n break\n else:\n break\n","sub_path":"prog/mb.py","file_name":"mb.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360072156","text":"import pytest\n\nfrom src.python.review.common.language import Language\nfrom src.python.review.inspectors.checkstyle.checkstyle import CheckstyleInspector\nfrom src.python.review.reviewers.utils.issues_filter import filter_low_measure_issues\nfrom test.python.inspectors import JAVA_DATA_FOLDER\nfrom test.python.inspectors.conftest import gather_issues_test_info, IssuesTestInfo, use_file_metadata\n\nFILE_NAMES_AND_N_ISSUES = [\n ('test_simple_valid_program.java', 0),\n ('test_spaces.java', 14),\n ('test_valid_spaces.java', 0),\n ('test_curly_braces.java', 2),\n ('test_valid_curly_braces.java', 0),\n ('test_invalid_naming.java', 14),\n ('test_valid_naming.java', 0),\n ('test_unused_imports.java', 5),\n ('test_blocks.java', 5),\n ('test_valid_blocks.java', 0),\n ('test_magic_numbers.java', 0),\n ('test_ternary_operator.java', 0),\n ('test_todo.java', 3),\n ('test_upper_ell.java', 1),\n ('test_missing_default.java', 1),\n ('test_valid_default.java', 0),\n ('test_array_type.java', 1),\n ('test_algorithm_with_scanner.java', 0),\n ('test_valid_algorithm_1.java', 0),\n ('test_nested_blocks.java', 2),\n ('test_reassigning_example.java', 2),\n ('test_switch_statement.java', 3),\n ('test_when_only_equals_overridden.java', 1),\n ('test_constants.java', 4),\n # (\"test_empty_lines_btw_members.java\", 2),\n ('test_covariant_equals.java', 1),\n ('test_multi_statements.java', 4),\n ('test_boolean_expr.java', 2),\n ('test_code_with_comments.java', 0),\n ('test_too_long_method.java', 1),\n ('test_cyclomatic_complexity.java', 0),\n ('test_cyclomatic_complexity_bad.java', 1),\n ('test_long_lines.java', 1),\n ('test_indentation_with_spaces.java', 0),\n ('test_indentation_with_tabs.java', 0),\n ('test_indentation_google_style.java', 4),\n]\n\n\n@pytest.mark.parametrize(('file_name', 
'n_issues'), FILE_NAMES_AND_N_ISSUES)\ndef test_file_with_issues(file_name: str, n_issues: int):\n inspector = CheckstyleInspector()\n\n path_to_file = JAVA_DATA_FOLDER / file_name\n with use_file_metadata(path_to_file) as file_metadata:\n issues = inspector.inspect(file_metadata.path, {})\n issues = filter_low_measure_issues(issues, Language.JAVA)\n\n assert len(issues) == n_issues\n\n\nFILE_NAMES_AND_N_ISSUES_INFO = [\n ('test_simple_valid_program.java',\n IssuesTestInfo(n_func_len=1, n_cc=1)),\n\n ('test_invalid_naming.java',\n IssuesTestInfo(n_code_style=14, n_func_len=3, n_cc=3)),\n\n ('test_unused_imports.java',\n IssuesTestInfo(n_best_practices=5, n_func_len=1, n_cc=1)),\n\n ('test_switch_statement.java',\n IssuesTestInfo(n_best_practices=1, n_error_prone=2, n_func_len=5, n_cc=5)),\n\n ('test_boolean_expr.java',\n IssuesTestInfo(n_best_practices=2, n_func_len=3, n_cc=3, n_bool_expr_len=4)),\n\n ('test_too_long_method.java',\n IssuesTestInfo(n_func_len=3, n_cc=3)),\n\n ('test_cyclomatic_complexity.java',\n IssuesTestInfo(n_func_len=5, n_cc=5, n_bool_expr_len=1)),\n\n ('test_cyclomatic_complexity_bad.java',\n IssuesTestInfo(n_func_len=6, n_cc=6, n_bool_expr_len=9)),\n]\n\n\n@pytest.mark.parametrize(('file_name', 'expected_issues_info'),\n FILE_NAMES_AND_N_ISSUES_INFO)\ndef test_file_with_issues_info(file_name: str, expected_issues_info: IssuesTestInfo):\n inspector = CheckstyleInspector()\n\n path_to_file = JAVA_DATA_FOLDER / file_name\n with use_file_metadata(path_to_file) as file_metadata:\n issues = inspector.inspect(file_metadata.path, {})\n\n issues_info = gather_issues_test_info(issues)\n assert issues_info == expected_issues_info\n","sub_path":"test/python/inspectors/test_checkstyle_inspector.py","file_name":"test_checkstyle_inspector.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"521207800","text":"import numpy as np\nimport pylab\nimport matplotlib.pyplot as plt\n\n# Define sigmoid function\ndef sigm(x):\n # Plain logistic sigmoid; note that np.exp(-x) can overflow for large negative x\n return 1./(1. 
+ np.exp(-x))\n\n# Define function for generating target data from feature data\ndef gen_target(data, n_rows, a, b, r):\n r2 = r*r\n target = np.zeros(n_rows)\n for i in range(n_rows):\n temp = (data[i, 0] - a)**2 + (data[i, 1] - b)**2\n if temp < r2:\n target[i] += 1\n\n return target\n\n# Define class to contain parameters for neural network\nclass Parm:\n # Initialize parameters\n def __init__(self, eta, b_size, data_rows, data_cols):\n # Initialize learning rate\n self.eta = eta\n\n # Initialize batch size\n self.batch_size = b_size\n\n # Initialize data row and column sizes\n self.rows = data_rows\n self.cols = data_cols\n\n # Initialize batch\n self.batch = np.random.randint(0, self.rows, self.batch_size)\n\n # Modify values in batch\n def shuffle(self):\n self.batch = np.random.randint(0, self.rows, self.batch_size)\n\n# Define class to contain weights for neural network\nclass Weights:\n # Initialize parameters\n def __init__(self, data_cols, n_hidden):\n # Initialize number of hidden nodes\n self.n_hidden = n_hidden\n\n # Initialize original input bias terms \n self.bi_orig = np.random.uniform(-1, 1, n_hidden)\n\n # Initialize original hidden bias term \n self.bh_orig = np.random.uniform(-1, 1)\n\n # Initialize original input weights\n self.Wi_orig = np.random.uniform(-.1, .1, (n_hidden, data_cols))\n\n # Initialize original hidden weights\n self.Wh_orig = np.random.uniform(-.1, .1, n_hidden)\n\n # Initialize input bias terms \n self.bi = np.copy(self.bi_orig)\n\n # Initialize hidden bias term \n self.bh = np.copy(self.bh_orig)\n\n # Initialize input weights\n self.Wi = np.copy(self.Wi_orig)\n\n # Initialize hidden weights\n self.Wh = np.copy(self.Wh_orig)\n\n # Routine for setting weights and bias terms to original values\n def reset_weights(self):\n self.bi = np.copy(self.bi_orig)\n self.bh = np.copy(self.bh_orig)\n self.Wi = np.copy(self.Wi_orig)\n self.Wh = np.copy(self.Wh_orig)\n\nclass Network:\n # Initialize parameters\n def __init__(self, eta, b_size, data_rows, data_cols, n_hidden):\n self.w = Weights(data_cols, n_hidden)\n self.p = Parm(eta, b_size, data_rows, data_cols)\n\n # Define function for resetting weights\n def reset_w(self):\n self.w.reset_weights()\n\n # Define function to generate output of neural network\n def h_out(self, feature):\n # Compute value at which to evaluate sigmoid function\n x = np.dot(self.w.Wi, feature) + self.w.bi\n return sigm(x)\n\n def o_out(self, x):\n # Compute value at which to evaluate sigmoid function\n temp = np.dot(self.w.Wh, x) + self.w.bh\n return sigm(temp)\n\n # Define error function for neural network\n def err(self, target, output):\n return target - output\n\n # Define total error function for neural network\n def tot_err(self, feature, target, test_size):\n temp = 0\n\n for i in range(test_size):\n o_h = self.h_out(feature[i])\n o = self.o_out(o_h)\n temp += 0.5 * (target[i] - o)**2\n\n print(\" Total error = \", temp)\n\n def update_Wh (self, Err, o, o_h):\n # Compute gradient of Error with respect to hidden bias\n temp = self.p.eta * Err * o * (1 - o)\n # Update hidden bias term\n self.w.bh += temp\n\n # Compute gradient of Error with respect to hidden weights\n update = o_h * temp \n # Update hidden weights\n self.w.Wh += update\n\n def update_Wi (self, feature, Err, o, o_h):\n temp = self.p.eta * Err * o * (1 - o) * self.w.Wh * o_h * (1 - o_h)\n self.w.bi = np.add(self.w.bi, temp)\n\n for j in range(self.w.n_hidden):\n update = feature * temp[j]\n\n self.w.Wi[j] = np.add(self.w.Wi[j], update)\n\n # Define function for 
training neural network on data set\n def train(self, feature, target):\n for i in range(self.p.rows):\n o_h = self.h_out(feature[i])\n o = self.o_out(o_h)\n Err = self.err(target[i], o)\n\n self.update_Wh(Err, o, o_h)\n self.update_Wi(feature[i], Err, o, o_h)\n\n # Define function for testing neural network on data set\n def test(self, feature, target, test_size, data_title):\n num_err = 0\n\n for i in range(test_size):\n o_h = self.h_out(feature[i])\n o = self.o_out(o_h)\n Err = self.err(target[i], o)\n \n if(o >= 0.5):\n o = 1\n else:\n o = 0\n\n if(target[i] != o):\n num_err += 1 \n\n print(\" Number of errors from\", data_title, \":\", num_err)\n\n # Define function for plotting test results from Neural Network on test set\n def plot_test(self, feature, target, test_size, plot_title):\n x_no = []\n x_yes = []\n y_no = []\n y_yes = []\n\n for i in range(test_size):\n o_h = self.h_out(feature[i])\n o = self.o_out(o_h)\n \n # Store predictions based on model developed by Neural Network\n if (o >= 0.5):\n x_yes.append(feature[i, 0])\n y_yes.append(feature[i, 1])\n else:\n x_no.append(feature[i, 0])\n y_no.append(feature[i, 1])\n\n # Commands for plotting classified data points\n ax = plt.gca()\n ax.cla() # Clear the grid for fresh plot\n plt.plot(x_no[:], y_no[:], 'r^', markersize=6, lw=5)\n plt.plot(x_yes[:], y_yes[:], 'go', markersize=6, lw=3)\n circle1 = plt.Circle((0.5, 0.6), 0.4, color='k', lw=3, fill=False)\n plt.gca().set_aspect('equal', adjustable='box')\n ax.add_artist(circle1)\n # set and label axes, set title, and legend\n plt.axis([0, 1, 0, 1])\n plt.title(plot_title)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.grid(True)\n\n plt.savefig(plot_title + '.png')\n\n# Main function\ndef main(epochs):\n # Initialize Neural Network\n N = Network(1, 5, 100, 2, 10)\n\n # Generate training data\n train_feature = np.random.uniform(0, 1, (N.p.rows, N.p.cols))\n train_target = gen_target(train_feature, N.p.rows, 0.5, 0.6, 0.4)\n\n # Generate testing data\n testing_size = 100\n test_feature = np.random.uniform(0, 1, (testing_size, N.p.cols))\n test_target = gen_target(test_feature, testing_size, 0.5, 0.6, 0.4)\n\n # Print some statistics prior to training\n print(\"\\n --------------- Statistics prior to training ---------------\")\n N.test(train_feature, train_target, N.p.rows, \n 'training data prior to training')\n N.tot_err(train_feature, train_target, N.p.rows)\n\n N.test(test_feature, test_target, testing_size, \n 'testing data prior to training')\n N.tot_err(test_feature, test_target, testing_size)\n\n # Train and test over epochs provided as input\n for i in range(epochs.size):\n # Reset weights for next training interval\n N.reset_w()\n\n for j in range(epochs[i]):\n N.train(train_feature, train_target)\n\n # Print some statistics after training\n print(\"\\n -------------- After training over %d epochs --------------\" \n %epochs[i])\n \n N.test(train_feature, train_target, N.p.rows, \n 'training data after training')\n N.tot_err(train_feature, train_target, N.p.rows)\n\n N.test(test_feature, test_target, testing_size, \n 'testing data after training')\n N.tot_err(test_feature, test_target, testing_size)\n\n N.plot_test(train_feature, train_target, N.p.rows, \n 'Plot of Classification of Training Data '\n 'over %d epochs' %epochs[i])\n N.plot_test(test_feature, test_target, testing_size, \n 'Plot of Classification of Testing Data '\n 'over %d epochs' %epochs[i])\n\n print(\"\\n\")\n\n# Run main function\nmain(np.array([100, 250, 500, 2500, 
5000]))\n\n","sub_path":"Problem-2/ssigm2.py","file_name":"ssigm2.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638026070","text":"import time\nimport random\nfrom selenium import webdriver\nfrom PIL import Image\n\nlis = {'0': \"#000000\",\n '1': \"#ffffff\",\n '2': \"#aaaaaa\",\n '3': \"#555555\",\n '4': \"#fed3c7\",\n '5': \"#ffc4ce\",\n '6': \"#faac8e\",\n '7': \"#ff8b83\",\n '8': \"#f44336\",\n '9': \"#e91e63\",\n 'A': \"#e2669e\",\n 'B': \"#9c27b0\",\n 'C': \"#673ab7\",\n 'D': \"#3f51b5\",\n 'E': \"#004670\",\n 'F': \"#057197\",\n 'G': \"#2196f3\",\n 'H': \"#00bcd4\",\n 'I': \"#3be5db\",\n 'J': \"#97fddc\",\n 'K': \"#167300\",\n 'L': \"#37a93c\",\n 'M': \"#89e642\",\n 'N': \"#d7ff07\",\n 'O': \"#fff6d1\",\n 'P': \"#f8cb8c\",\n 'Q': \"#ffeb3b\",\n 'R': \"#ffc107\",\n 'S': \"#ff9800\",\n 'T': \"#ff5722\",\n 'U': \"#b83f27\",\n 'V': \"#795548\"}\n\n\ndef connect_website():\n global driver\n driver = webdriver.Chrome()\n driver.get(\"https://board.mcfx.us/\")\n driver.implicitly_wait(3)\n time.sleep(1)\n\n\ndef trans(c):\n if ord(c) >= ord('a'):\n return 10 + (ord(c) - ord('a'))\n else:\n return ord(c) - ord('0')\n\n\ndef get_rgb(s):\n r = trans(s[1:2]) * 16 + trans(s[2:3])\n g = trans(s[3:4]) * 16 + trans(s[4:5])\n b = trans(s[5:6]) * 16 + trans(s[6:7])\n return r, g, b\n\n\ndef get_color(r, g, b):\n global lis\n min_dis = 100000\n min_who = -1\n for p in lis:\n tmp = get_rgb(lis[p])\n dis = abs(tmp[0] - r) + abs(tmp[1] - g) + abs(tmp[2] - b) # compare like channels: tmp is (r, g, b)\n if dis < min_dis:\n min_dis = dis\n min_who = p\n return min_who\n\n\ndef draw_pixel(x, y, r, g, b, flit):\n if flit:\n if r >= 250 and g >= 250 and b >= 250:\n return\n driver.execute_script(\"$.post('/draw',{{x:{}+{}*1280,v:{}}})\".format(x, y, get_color(r, g, b)))\n\n\ndef print_image(path, dx, dy, shuffled, flit):\n img = Image.open(path)\n width = img.width\n height = img.height\n pix = []\n for x in range(width):\n for y in range(height):\n pix.append((x, y))\n if shuffled:\n random.shuffle(pix)\n for pos in pix:\n p = img.getpixel((pos[0], pos[1]))\n draw_pixel(dx + pos[0], dy + pos[1], p[0], p[1], p[2], flit)\n\n\nconnect_website()\n# draw_pixel(0, 0, 0, 0, 0)\n","sub_path":"mcfx/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"67531310","text":"# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Service loader\n# ----------------------------------------------------------------------\n# Copyright (C) 2007-2016 The NOC Project\n# See LICENSE for details\n# ----------------------------------------------------------------------\n\n# Python modules\nfrom __future__ import absolute_import\n\n# NOC modules\nfrom .base import BaseLoader\nfrom noc.sa.models.service import Service\n\n\nclass ServiceLoader(BaseLoader):\n \"\"\"\n Service loader\n \"\"\"\n\n name = \"service\"\n model = Service\n fields = [\n \"id\",\n \"parent\",\n \"subscriber\",\n \"profile\",\n \"ts\",\n \"logical_status\",\n \"logical_status_start\",\n \"agreement_id\",\n \"order_id\",\n \"stage_id\",\n \"stage_name\",\n \"stage_start\",\n \"account_id\",\n \"address\",\n \"managed_object\",\n \"nri_port\",\n \"cpe_serial\",\n \"cpe_mac\",\n \"cpe_model\",\n \"cpe_group\",\n \"description\",\n ]\n\n mapped_fields = {\n \"parent\": \"service\",\n \"subscriber\": \"subscriber\",\n \"profile\": 
\"serviceprofile\",\n \"managed_object\": \"managedobject\",\n }\n\n discard_deferred = True\n\n def find_object(self, v):\n \"\"\"\n Find object by remote system/remote id\n :param v:\n :return:\n \"\"\"\n if not v.get(\"remote_system\") or not v.get(\"remote_id\"):\n self.logger.warning(\"RS or RID not found\")\n return None\n if not hasattr(self, \"_service_remote_ids\"):\n self.logger.info(\"Filling service collection\")\n coll = Service._get_collection()\n self._service_remote_ids = {\n c[\"remote_id\"]: c[\"_id\"]\n for c in coll.find(\n {\"remote_system\": v[\"remote_system\"].id, \"remote_id\": {\"$exists\": True}},\n {\"remote_id\": 1, \"_id\": 1},\n )\n }\n if v[\"remote_id\"] in self._service_remote_ids:\n return Service.objects.get(id=self._service_remote_ids[v[\"remote_id\"]])\n return None\n","sub_path":"core/etl/loader/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"167105526","text":"class Solution:\n \"\"\"\n @param A: An integer array.\n @param k: A positive integer (k <= length(A))\n @param target: Integer\n @return a list of lists of integer \n \"\"\"\n def kSumII(self, A, k, target):\n # write your code here\n # two pointers\n A = sorted(A)\n ret = []\n self.dfs(A, ret, [], target, k)\n return ret\n \n def dfs(self, A, ret, stack, target, k, pos=0):\n if target < 0 or k < 0:\n return \n if k == 0 and target == 0:\n ret.append(stack[:])\n return\n \n for i in range(pos, len(A)):\n target = target - A[i]\n k = k - 1\n stack.append(A[i])\n self.dfs(A, ret, stack, target, k, i+1)\n stack.pop()\n k = k + 1\n target = target + A[i]\n \n","sub_path":"K_Sum_II.py","file_name":"K_Sum_II.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"40270563","text":"import random # For generating random numbers\r\nimport sys # For closing the game\r\nimport pygame \r\nimport os\r\nfrom pygame.locals import * # For keys\r\n\r\nimport neat\r\n\r\n# Globals variables for the game\r\nFPS = 32 # Frames per second\r\n\r\nscreenwidth = 1112\r\nscreenheight = 627\r\n\r\nscreen = pygame.display.set_mode((screenwidth, screenheight))\r\n\r\ngroundy = screenheight * 0.8\r\ngame_sprites = {}\r\ngame_sounds = {}\r\nplayer = \"gallery/Sprites/bird.png\"\r\nbackground =\"gallery/Sprites/background.png\"\r\npipe = \"gallery/Sprites/pipe.png\"\r\npygame.init()\r\n\r\nclass Bird:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n self.BirdVely = -9 # it is -ve to make bird jump automatically whwn game starts\r\n self.BirdFlapAccv = -8 # velocity while flapping\r\n self.BirdFlapped = True\r\n self.BirdAccY = 1 # For increasing velocity\r\n self.BirdMaxVelY = 10\r\n\r\n def jump(self):\r\n if self.y > 0 :\r\n self.BirdVely = self.BirdFlapAccv\r\n self.BirdFlapped = True\r\n\r\n def move(self):\r\n if self.BirdVely < self.BirdMaxVelY and not self.BirdFlapped:\r\n self.BirdVely += self.BirdAccY\r\n\r\n if self.BirdFlapped:\r\n self.BirdFlapped = False\r\n #BirdHeight = game_sprites['player'].get_height()\r\n \r\n self.y = self.y + self.BirdVely\r\n\r\n\r\n \r\n\r\n\r\n\r\n# Functions\r\n\r\ndef welcomeScreen(): # Shows welcome Screen\r\n \r\n messagex = int((screenwidth - game_sprites['message'].get_width())/2)\r\n messagey = int (screenheight * 0.001)\r\n\r\n font = pygame.font.SysFont(\"Times New Roman\", 30, bold=True, italic=True)\r\n guide_message = font.render(\" 
Press 's' to Start\", True, (0, 0, 0))\r\n basex = 0\r\n while True:\r\n for event in pygame.event.get():\r\n # If user clicks cross button than close the game\r\n\r\n if event.type == pygame.QUIT or (event.type==pygame.KEYDOWN and event.key== K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n quit()\r\n\r\n # If the user presses space or up key, start the game for them\r\n elif event.type==KEYDOWN and (event.key == pygame.K_s or event.key== K_UP):\r\n return\r\n\r\n else:\r\n # screen.blit(game_sprites['background'], (0,0))\r\n # screen.blit(game_sprites['player'], (playerx, playery))\r\n screen.blit(game_sprites['message'], (messagex, messagey))\r\n screen.blit(guide_message, (0.1 * screenwidth, 0.6 *screenheight))\r\n # screen.blit(game_sprites['base'], (basex, groundy))\r\n pygame.display.update() # VERY IMP: Without this no update\r\n FPSclock.tick(FPS)\r\n\r\n#def mainGame():\r\n print(\"In main Game\")\r\n score = 0\r\n playerx = int(screenwidth/5)\r\n playery = int(screenheight/2)\r\n basex = 0\r\n\r\n # Create 2 pipes for blitting on the screen\r\n newPipe1 = getRandomPipe()\r\n newPipe2 = getRandomPipe()\r\n newPipe3 = getRandomPipe()\r\n\r\n # List of upper pipes\r\n upperPipes = [\r\n {'x' : screenwidth+50, 'y' : newPipe1[0]['y']},\r\n {'x' : screenwidth+50+(screenwidth/3), 'y' : newPipe2[0]['y']},\r\n {'x' : screenwidth+50+2*(screenwidth/3) , 'y' : newPipe3[0]['y']},\r\n ]\r\n\r\n # List of Lower pipes\r\n lowerPipes = [\r\n {'x' : screenwidth+50, 'y' : newPipe1[1]['y']},\r\n {'x' : screenwidth+50+(screenwidth/3), 'y' : newPipe2[1]['y']},\r\n {'x' : screenwidth+50+2*(screenwidth/3) , 'y' : newPipe3[1]['y']},\r\n\r\n ]\r\n\r\n pipeVelx = -4\r\n playerVely = -9 # it is -ve to make bird jump automatically whwn game starts\r\n playerMaxVelY = 10\r\n playerMinVelY = -8\r\n playerAccY = 1 # For increasing velocity\r\n\r\n playerFlapAccv = -8 # velocity while flapping\r\n playerFlapped = False # It is true only when bird is flapping\r\n\r\n while True:\r\n # playerVely = -9\r\n for event in pygame.event.get():\r\n if event.type == QUIT or (event.type == KEYDOWN and event.type == K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\r\n if playery > 0 :\r\n playerVely = playerFlapAccv\r\n playerFlapped = True\r\n \r\n #game_sounds[''].play()\r\n crashTest = isCollide(playerx, playery, upperPipes, lowerPipes)\r\n \r\n if crashTest: # If player is crashed\r\n return\r\n # Check for score\r\n playerMidpos = playerx + game_sprites['player'].get_width()/2\r\n for pipe in upperPipes:\r\n pipeMidPos = pipe['x'] + game_sprites['pipe'][0].get_width()/2\r\n if pipeMidPos<= playerMidpos = groundy - 56 or playery<0:\r\n # game_sounds['hit'].play()\r\n return True\r\n \r\n for pipe in upperPipes:\r\n pipeHeight = game_sprites['pipe'][0].get_height()\r\n if (playery < pipeHeight + pipe['y'] and abs(playerx -pipe['x'])+50 < game_sprites['pipe'][0].get_width()):\r\n return True\r\n\r\n for pipe in lowerPipes:\r\n if (playery + game_sprites['player'].get_height() > pipe['y'] and abs(playerx -pipe['x'])+50 < game_sprites['pipe'][0].get_width()):\r\n return True\r\n return False\r\n\r\ndef getRandomPipe():\r\n # generate positions of both pipes\r\n\r\n pipeHeight = game_sprites['pipe'][0].get_height()\r\n offset = screenheight/6\r\n y2 = offset + random.randrange(2, int(screenheight - game_sprites['base'].get_height() + 0.6*offset))\r\n pipex = screenwidth + 10\r\n # y1 = pipeHeight - y2 + offset to make this work change y1 to -y1 on 
'192'\r\n y1 = y2 - offset - pipeHeight\r\n pipe = [\r\n {'x' : pipex, 'y' : y1} , # Upper Pipe\r\n {'x' : pipex, 'y' : y2} # Lower Pipe\r\n ]\r\n return pipe\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n # It is the main function from where the game will start\r\n #pygame.init() # Initialize all pygame modules\r\n FPSclock = pygame.time.Clock() # For controlling FPS\r\n pygame.display.set_caption(\"Flappy Bird by KK\")\r\n game_sprites['numbers'] = (\r\n pygame.image.load('gallery/Sprites/0.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/1.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/2.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/3.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/4.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/5.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/6.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/7.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/8.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/9.png').convert_alpha(),\r\n\r\n )\r\n\r\n # Game sprites\r\n game_sprites['base'] = pygame.image.load('gallery/Sprites/base.png').convert_alpha()\r\n game_sprites['message'] = pygame.image.load('gallery/Sprites/message.jpg').convert_alpha()\r\n game_sprites['pipe'] = (pygame.transform.rotate(pygame.image.load(pipe).convert_alpha(),180),\r\n pygame.image.load(pipe).convert_alpha()\r\n )\r\n game_sprites['background'] = pygame.image.load(background).convert()\r\n game_sprites['player'] = pygame.image.load(player).convert_alpha() \r\n\r\n\r\n\r\n # Game Sounds\r\n\r\n # game_sounds['die'] = pygame.mixer.Sounds('')\r\n # game_sounds['hit'] = pygame.mixer.Sounds('')\r\n # game_sounds['point'] = pygame.mixer.Sounds('')\r\n # game_sounds['swoosh'] = pygame.mixer.Sounds('')\r\n # game_sounds['wing'] = pygame.mixer.Sounds('')\r\n \r\n while True:\r\n welcomeScreen() # Shows welcome screen to the user until he presses a button\r\n mainGame() # This is the main game function\r\n\r\n\r\ndef main(genomes, config):\r\n score = 0\r\n nets = []\r\n ge = []\r\n birds = []\r\n\r\n basex = 0\r\n\r\n # Create 3 pipes for blitting on the screen\r\n newPipe1 = getRandomPipe()\r\n newPipe2 = getRandomPipe()\r\n newPipe3 = getRandomPipe()\r\n\r\n # List of upper pipes\r\n upperPipes = [\r\n {'x' : screenwidth+50, 'y' : newPipe1[0]['y']},\r\n {'x' : screenwidth+50+(screenwidth/3), 'y' : newPipe2[0]['y']},\r\n {'x' : screenwidth+50+2*(screenwidth/3) , 'y' : newPipe3[0]['y']},\r\n ]\r\n\r\n # List of Lower pipes\r\n lowerPipes = [\r\n {'x' : screenwidth+50, 'y' : newPipe1[1]['y']},\r\n {'x' : screenwidth+50+(screenwidth/3), 'y' : newPipe2[1]['y']},\r\n {'x' : screenwidth+50+2*(screenwidth/3) , 'y' : newPipe3[1]['y']},\r\n\r\n ]\r\n\r\n pipeVelx = -4\r\n\r\n run = True\r\n\r\n for _, g in genomes:\r\n net = neat.nn.FeedForwardNetwork.create(g, config)\r\n nets.append(net)\r\n birds.append(Bird(int(screenwidth/5), int(screenheight/2)))\r\n g.fitness = 0\r\n ge.append(g)\r\n\r\n while run:\r\n \r\n\r\n for x,bird in enumerate(birds):\r\n if isCollide(bird.x, bird.y, upperPipes, lowerPipes) == True:\r\n ge[x].fitness -= 1\r\n birds.pop(x)\r\n nets.pop(x)\r\n ge.pop(x)\r\n\r\n # Check for score\r\n birdMidpos = bird.x + game_sprites['player'].get_width()/2\r\n for pipe in upperPipes:\r\n pipeMidPos = pipe['x'] + game_sprites['pipe'][0].get_width()\r\n if pipeMidPos <= birdMidpos < pipeMidPos + 4:\r\n score += 1\r\n\r\n # Find the first upper pipe still ahead of the birds\r\n for x, pipe in enumerate(upperPipes):\r\n if pipe['x'] + game_sprites['pipe'][0].get_width() >= birds[0].x - 50:\r\n indexNextPipe = x\r\n break\r\n else:\r\n return\r\n\r\n 
print(\"YO RA PIPE\")\r\n print(indexNextPipe)\r\n\r\n pipeHeight = game_sprites['pipe'][0].get_height()\r\n # For moving bird\r\n for x, bird in enumerate(birds):\r\n bird.move()\r\n ge[x].fitness += 0.1\r\n\r\n output = nets[x].activate((bird.y, (bird.y - (upperPipes[indexNextPipe]['y'] + pipeHeight) ), (bird.y - (lowerPipes[indexNextPipe]['y']) ) ))\r\n\r\n if output[0] > 0.5:\r\n bird.jump()\r\n\r\n # Blit the sprites\r\n \r\n screen.blit(game_sprites['background'], (0, 0))\r\n\r\n for upperPipe, lowerPipe in zip(upperPipes, lowerPipes):\r\n screen.blit(game_sprites['pipe'][0], (upperPipe['x'], upperPipe['y']))\r\n screen.blit(game_sprites['pipe'][1], (lowerPipe['x'], lowerPipe['y']))\r\n \r\n screen.blit(game_sprites['base'], (basex, groundy))\r\n\r\n for bird in birds:\r\n screen.blit(game_sprites['player'], (bird.x, bird.y))\r\n\r\n myDigits = [int(x) for x in list(str(score))]\r\n width = 0\r\n for digits in myDigits:\r\n width += game_sprites['numbers'][digits].get_width()\r\n \r\n xoffset = (screenwidth - width)/2\r\n\r\n for digit in myDigits:\r\n screen.blit(game_sprites['numbers'][digit], (xoffset, screenwidth*0.12))\r\n xoffset += game_sprites['numbers'][digit].get_width()+2\r\n\r\n pygame.display.update()\r\n FPSclock.tick(FPS)\r\n\r\n # print(\"Yo h size\")\r\n # print(len(birds))\r\n\r\n if len(birds) == 0:\r\n run = False\r\n \r\n\r\ndef run(config_path):\r\n config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet,\r\n neat.DefaultStagnation, config_path)\r\n \r\n p = neat.Population(config)\r\n\r\n p.add_reporter(neat.StdOutReporter(True))\r\n stats = neat.StatisticsReporter()\r\n p.add_reporter(stats)\r\n\r\n print(\"In run\")\r\n\r\n winner = p.run(main,50)\r\n \r\n\r\nif __name__ == \"__main__\":\r\n #print(\"THEEK HAI\")\r\n # It is the main function form where gamw will start\r\n #pygame.init() # Initialize all pygame modules\r\n FPSclock = pygame.time.Clock() # For contolling FPS\r\n pygame.display.set_caption(\"Flappy Bird by KK\")\r\n game_sprites['numbers'] = (\r\n pygame.image.load('gallery/Sprites/0.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/1.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/2.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/3.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/4.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/5.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/6.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/7.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/8.png').convert_alpha(),\r\n pygame.image.load('gallery/Sprites/9.png').convert_alpha(),\r\n\r\n )\r\n\r\n # Game sprites\r\n game_sprites['base'] = pygame.image.load('gallery/Sprites/base.png').convert_alpha()\r\n game_sprites['message'] = pygame.image.load('gallery/Sprites/message.jpg').convert_alpha()\r\n game_sprites['pipe'] = (pygame.transform.rotate(pygame.image.load(pipe).convert_alpha(),180),\r\n pygame.image.load(pipe).convert_alpha()\r\n )\r\n game_sprites['background'] = pygame.image.load(background).convert()\r\n game_sprites['player'] = pygame.image.load(player).convert_alpha() \r\n\r\n\r\n \r\n while True:\r\n welcomeScreen() # Shows welcome screen to the user untill he presses a button\r\n break\r\n \r\n\r\n\r\n local_dir = os.path.dirname(__file__)\r\n config_path = os.path.join(local_dir, \"config-feedforward.txt\")\r\n run(config_path)","sub_path":"AI Plays Flappy Bird/AI Flappy Bird.py","file_name":"AI 
Flappy Bird.py","file_ext":"py","file_size_in_byte":16695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211687636","text":"\"\"\"\n\nWe want make a package of goal kilos of chocolate.\nWe have small bars (1 kilo each) and big bars (5 kilos each). \nReturn the number of small bars to use, assuming we always use big bars before small bars. Return -1 if it can't be done.\n\n\nmake_chocolate(4, 1, 9) → 4\nmake_chocolate(4, 1, 10) → -1\nmake_chocolate(4, 1, 7) → 2\n\n\"\"\"\n\n\ndef make_chocolate(small, big, goal):\n blocks = int(goal / 5)\n if blocks > big:\n blocks = big\n bc = blocks * 5\n restForSmall = 0\n if goal - bc >= 0:\n restForSmall = goal - bc\n if restForSmall - small <= 0:\n result = abs(restForSmall-small)\n small -= result\n return small\n return -1\n\n return -1\n\n\nprint(make_chocolate(4, 1, 4))\n","sub_path":"CodingBat/make_chocalate.py","file_name":"make_chocalate.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252019515","text":"import os\n\n\n# 订单到运单 之间 超时设定\nOVERTIME_ORDER_TO_WAYBILL_HOUR = 36\n\n\n# 工作目录\nWORK_DIR = r'D:\\奇货居\\work'\n\n# 宏文件目录\nVBA_DIR = r\"D:\\奇货居\\work\\vba\"\n\nBEAUTY_VBA_PATH = os.path.join(VBA_DIR, \"美化.xlsm\")\n\nEXCEL_VISIBLE = 0\n\n\n# 备份目录\nBACK_DIR = r'D:\\奇货居\\备份'\nNEW_ORDER_BAK_DIR = os.path.join(BACK_DIR,r\"系统导出\\新增订单\")\nUPDATE_ORDER_BAK_DIR = os.path.join(BACK_DIR,r\"系统导出\\更新订单\")\n\n\n\n# 备份供应商 运单目录\nTRACKING_BACK_DIR = os.path.join(BACK_DIR,r'运单\\源')\n\n\n# 文件下载目录\nEXPORT_DIR = r\"D:\\Downloads\\QHJ_MALL\"\n\n# 文件时间格式\nDATE_FORMAT = \"%Y-%m-%d %H_%M_%S\"\n\n\"\"\"订单处理\n路径 和参数\n\"\"\"\n\n\n\n# 发货商路径\n\nCOMMODITY_BASE_DIR = r\"D:\\奇货居\\素材\\商城图片素材\"\nCOMMODITY_PATH =os.path.join(COMMODITY_BASE_DIR, \"商品信息.xlsx\")\nGOODS_DIR = COMMODITY_BASE_DIR\nGOODS_PATH = COMMODITY_PATH\n\n\n\n# 输出目录\nSENDMAIL_ORDER_DIR = os.path.join(WORK_DIR,r\"外发订单\")\n\n# 订单文件匹配正则\nORDER_DATE_PATT = '^订单\\s+?(?P20\\d{2}(?:-\\d{1,2}){2}\\D+?\\d{1,2}(?:_\\d{1,2}){1,2})\\.xls[xm]?$'\n\n# 可入系统 目录\nINPUT_SYS_DIR = os.path.join(WORK_DIR,r'运单信息\\可入系统')\n\n# 可录入系统的 文件匹配正则表达式\nINPUT_SYS_PATT = r\"运单 (?P\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}_\\d{1,2}_\\d{1,2})\\.xls[xm]?\"\n\n\n# 运单\n\nTRACKING_SRC_DIR = os.path.join(WORK_DIR,r\"运单信息\\外部来源\")\nTRACKING_INPUT_DIR = os.path.join(WORK_DIR,r\"运单信息\\可入系统\")\n\n\n\n\"\"\"\n存储\n\n\"\"\"\n\n# 新订单存储目录\n\nNEW_ORDER_SAVE_DIR = os.path.join(WORK_DIR, r\"外发订单\\新订单\")\n\n# 已发订单 未回运单\nOVERTIME_ORDER_SAVE_DIR = os.path.join(WORK_DIR, r\"外发订单\\已发未收\")\n\n\n# 系统字段 映射 本地字段对应关系\n\nFIELDS_SLM_DIC = {\n\n}\n\n# 本地字段 映射 系统字段\n\nFIELDS_LSM_DIC = {v:k for k, v in FIELDS_SLM_DIC.items()}\n\n\n","sub_path":"ZERO/tools/settings/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"323823618","text":"import numpy as np\nfrom sklearn import datasets\nfrom copy import deepcopy\niris=datasets.load_iris()\n\n#print(iris)\nX=iris.data[:,2:4]\nprint(X)\ny=iris.target\nprint('--------------------------------------------------')\nprint(y)\n\n#a={'shai':\"Hello,laday\",'shouer':\"wuei\",'no':1234}\n\n#print(a['shai'])\n#print(a['shouer'])\n\nfrom matplotlib import pyplot as plt\ny_0=np.where(y==0)\nprint(\"Y is 
:\")\nprint(y_0)\nplt.scatter(X[y_0,0],X[y_0,1])\ny_1=np.where(y==1)\nplt.scatter(X[y_1,0],X[y_1,1])\ny_2=np.where(y==2)\nplt.scatter(X[y_2,0],X[y_2,1])\nplt.show()\n\nprint(len(X))\n\nprint(np.size(X))\nprint(X.shape)\nprint(X.shape[0])\nprint(X.shape[1])\nk=3\nrandom_index=np.random.choice(range(len(X)),k)\nprint(\"The random number is :\")\nprint(random_index)\nprint(random_index[0])\npara1,para2,para3=random_index\ncentriods=X[random_index]\nprint(centriods)\nprint('--------------------')\nprint(X[para1,0],X[para1,1])\nprint(X[para2,0],X[para2,1])\nprint(X[para3,0],X[para3,1])\n\nprint(np.array([[X[para1,0],X[para1,1]],[X[para2,0],X[para2,1]],[X[para3,0],X[para3,1]]]))\nprint('--------------------')\n\n\ndef visual_centroids(X,centroids):\n plt.scatter(X[:,0],X[:,1])\n plt.scatter(centroids[:,0],centroids[:,1],marker='*',s=200,c='#050505')\n plt.show()\n\nvisual_centroids(X,centriods)\n\n\n\n'''import numpy as np\nfrom numpy import linalg as LA\n\n\na = np.array([-3, -5, -7, 2, 6, 4, 0, 2, 8])\nb = a.reshape((3, 3))\nprint(b)\n'''\n\n'''\n\n\nprint(np.linalg.norm(b,axis=1))\nprint(np.linalg.norm(b,axis=0))\n'''\ndef dist(a,b):\n a=np.array(a)\n b=np.array(b)\n divert=a-b\n #print(divert)\n #return np.linalg.norm(divert,axis=1)\n\n return np.linalg.norm(divert,axis=1)\n\n#print(dist([2,4],[5,7]))\n\n#print(np.linalg.norm(dist([2,4],[5,7]),axis=0))\n#print(np.linalg.norm(dist([2,4],[5,7])))\n\ndef assigan_cluster(x,centriods):\n distances=dist(x,centriods)\n cluster=np.argmin(distances)\n return cluster\n\ndef update_centriods(X,centriods,clusters):\n for i in range(k):\n cluster_i=np.where(clusters==i)\n print(cluster_i)\n centriods[i]=np.mean(X[cluster_i],axis=0)\n print(centriods[i])\n\n\n'''\nrandom_test=np.random.randint(1,150)\nprint(\"random_test is : \", random_test)\nrandom_vec=X[random_test]\nprint(\"The random vector is :\")\nprint(random_vec)\nprint('--------------------')\nprint(\"The cluster of vector belongs to cluster_\",assigan_cluster(random_vec,centriods))\n'''\ntol=0.0001\nmax_iter=100\niter=0\ncentriods_diff=100000\nclusters=np.zeros(len(X))\nprint(clusters.shape)\n\n\nwhile itertol:\n for i in range(len(X)):\n clusters[i]=assigan_cluster(X[i],centriods)\n print(\" the item of the i:\",i,\" data:\",X[i],\"belongs to cluster_\",clusters[i])\n\n centroids_prev=deepcopy(centriods)\n update_centriods(X,centriods,clusters)\n iter+=1\n centriods_diff=np.linalg.norm(centriods-centroids_prev)\n print('Iterations: ',str(iter))\n print('centroids:\\n',centriods)\n print('centriods moves: ,{:5.4f}'.format(centriods_diff))\n visual_centroids(X,centriods)\n","sub_path":"K-clusters.py","file_name":"K-clusters.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551618722","text":"import Protein\nimport re\nimport HMM, apply_hmm\nimport sys\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom posterior import*\nfrom globs import*\n\n\n#patters\np_name = r\"protein:)(\\w+)(\\s*)\"\nstates_pattern = r\"[oHST]+\\s+\"\n\n\naa_set = {'A', 'C' , 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'Y', 'X'}\ngroup_set = {'o', 'H', 'S', 'T'}\n\n\ndef replace_symbols(line):\n line = line.replace('H', 'A')\n line = line.replace('S', 'B')\n line = line.replace('o', 'O')\n return line\n\n\ndef parse_file(path):\n \"\"\"\n cleans the vm file lines from garbage (didnt clean the spaces)\n :param path:\n :return:\n \"\"\"\n\n #states\n 
aa_state = False\n group_state= False\n states_state = False\n keywords_state = False\n\n proteins = []\n name = \"No one\"\n aa_seq= \"\"\n group_seq= \"\"\n structure = \"\"\n keywords= \"\"\n\n #print(name)\n\n with open(path) as file:\n for line in file.readlines():\n #print(line)\n line = line.strip()\n if aa_state:\n if all(aa in aa_set for aa in line):\n aa_seq += line\n else:\n aa_state = False\n group_state = True\n group_seq += line\n\n elif group_state:\n char = line[0]\n if char.isdigit():\n group_seq += line\n else:\n group_state = False\n states_state = True\n structure += replace_symbols(line)\n elif states_state:\n if re.fullmatch(states_pattern,line): # maybe we can make more efficient with adding a special symbol like '$' at the beginning of each states line\n structure += replace_symbols(line)\n else:\n states_state = False\n keywords_state = True\n keywords += line\n\n elif keywords_state:\n if line.startswith('++'):\n keywords_state = False\n # print(\"I'm Here\")\n # print(name)\n # print(aa_seq)\n # print(group_seq)\n # print(structure)\n assert group_seq.isdigit(), 'assert 1 ' + group_seq\n assert all(s in 'ABTO' for s in structure), 'assert 2'\n protein = Protein.Protein(name, aa_seq, group_seq, keywords, structure)\n if not protein.to_drop:\n proteins.append(protein)\n name = None\n aa_seq= \"\"\n group_seq= \"\"\n structure = \"\"\n keywords= \"\"\n else:\n keywords += line\n\n elif line.startswith('protein:'):\n name = line[8:]\n print(name)\n aa_state = True\n\n else:\n raise ValueError(\"Wrong string pattern!\")\n\n return proteins\n\n\ndef evaluate(true_structure, our_prediciton):\n matches = 0\n total = 0\n for strc_true, strc_our in zip(true_structure, our_prediciton):\n\n for i in range(len(strc_true)):\n curr_prb = 0\n if len(strc_true) != len(strc_our):\n print(strc_our)\n print(strc_true)\n print(len(strc_true))\n print(len(strc_our))\n print(\"oh no\")\n exit(1)\n if strc_our[i] == strc_true[i]:\n matches += 1\n total += 1\n\n return matches/total\n\n\nif __name__ == '__main__':\n proteins = parse_file('prot_data_human')\n print(len(proteins))\n # print([p.name for p in proteins])\n # for p in p_list:\n # print(p.keywords)\n\n p_train = proteins[:4000]\n p_test = proteins[4000:]\n\n emissions = init_emissions_group(p_train, True)\n transitions = init_transitions(p_train, True)\n emissions_matrix = emission_to_matrix(emissions)\n transitions_matrix = transition_to_matrix(transitions)\n\n # seq = \"0112233446\"\n # posterior = calculate_posterior(seq, transitions_matrix, emissions_matrix)\n # print(posterior, \"\\n\")\n\n true = []\n pred = []\n for p in p_test:\n matrix = calculate_posterior_group(p.group_seq, transitions_matrix, emissions_matrix)\n trace = trace_states(p.group_seq, matrix)\n # matrix, trace = calculate_viterbi(p.group_seq, transitions_matrix, emissions_matrix)\n trace = Protein.revert_structure3(trace)\n pred.append(trace)\n true.append(p.structure)\n try:\n print(trace[:60])\n print(p.structure[:60])\n print(\"======================================\")\n except IndexError:\n continue\n\n err = evaluate(true, pred)\n print(err)\n\n # test_p = p_list[-2:]\n # train_p = p_list[:-2]\n # hmm = apply_hmm.HMM(train_p, test_p)\n","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"170696165","text":"import tensorflow as tf\nimport numpy as np\nimport gym\nimport matplotlib.pyplot as plt\n\nclass model:\n 
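`evaluate` in the Parser.py record counts per-position matches with nested loops and prints diagnostics before exiting on a length mismatch. For aligned structure strings the same per-residue accuracy fits in a few lines; a sketch assuming prediction and truth have equal lengths:

```python
def per_residue_accuracy(true_structures, predicted_structures):
    matches = total = 0
    for true_s, pred_s in zip(true_structures, predicted_structures):
        assert len(true_s) == len(pred_s), "structures must be aligned"
        matches += sum(t == p for t, p in zip(true_s, pred_s))
        total += len(true_s)
    return matches / total
```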
def __init__(self, input_size, output_size, hidden_size, lr=0.00025):\n self.input_size = input_size\n self.outout_size = output_size\n self.X = tf.placeholder(shape=[1, 4], dtype=tf.float32)\n self.Y = tf.placeholder(shape=[1, 2], dtype=tf.float32)\n self.W_input = tf.Variable(tf.random_uniform([input_size, hidden_size], minval=0, maxval=0.001))\n self.W_output = tf.Variable(tf.random_uniform([hidden_size, output_size], minval=0, maxval=0.001))\n self.Qout = tf.matmul(tf.nn.relu(tf.matmul(self.X, self.W_input)), self.W_output)\n\n self.predicted = tf.argmax(self.Qout, 1)\n self.loss = tf.reduce_sum(tf.square(self.Y - self.Qout))\n self.trainer = tf.train.AdamOptimizer(learning_rate=lr).minimize(self.loss)\n\n\nclass batch:\n def __init__(self, size=1000000):\n self.size = size\n self.items = []\n\n def fetch_batch(self, size=50):\n if len(self.items) < size:\n return []\n return np.random.permutation(self.items)[0:size]\n\n def add(self, s, a, r, s1):\n if len(self.items) >= self.size:\n self.items.pop(0)\n self.items.append([s, a, r, s1])\n\n\nclass trainer:\n def __init__(self, epochs, epsilon=0.1, gamma=0.99):\n self.epochs = epochs\n self.epsilon = epsilon\n self.gamma = gamma\n self.model = model(4, 2, 50)\n self.batch = batch()\n self.test_number = 120\n\n def encode(self, s):\n return np.reshape(np.array(s), (1, self.model.input_size))\n\n def train(self):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n env = gym.make('CartPole-v1')\n for i in range(self.epochs):\n d = False\n s = env.reset()\n while not d:\n a, Q = sess.run([self.model.predicted, self.model.Qout], feed_dict={self.model.X: self.encode(s)})\n if np.random.random() < self.epsilon:\n a[0] = env.action_space.sample()\n\n s1, r, d, _ = env.step(a[0])\n self.batch.add(s, a[0], r, s1)\n\n states = self.batch.fetch_batch()\n if not len(states):\n continue\n\n for x in states:\n state, action, reward, state1 = x\n target = sess.run(self.model.Qout, feed_dict={self.model.X: self.encode(state)})\n maxQ = np.max(sess.run(self.model.Qout, feed_dict={self.model.X: self.encode(state1)}))\n target[0][a] = reward + self.gamma * maxQ\n sess.run(self.model.trainer, feed_dict={self.model.X: self.encode(state), self.model.Y: target})\n print(\"episode\", i + 1, \"out of\", self.epochs)\n\n def test(self, render_mode=False):\n print(\"---testing---\")\n #must obtain a score of 195 in 120 consecutive tests\n rewards = []\n env = gym.make('CartPole-v1')\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(self.test_number):\n s = env.reset()\n d = False\n rAll = 0\n while not d:\n a = sess.run(self.model.predicted, feed_dict={self.model.X: self.encode(s)})\n s, r, d, _ = env.step(a[0])\n rAll += r\n if render_mode:\n env.render()\n\n rewards.append(rAll)\n\n plt.plot(rewards)\n plt.plot([195 for i in range(len(rewards))])\n plt.show()\n\nif __name__ == '__main__':\n t = trainer(3000)\n t.train()\n t.test(True)","sub_path":"cartpole.1/RL.py","file_name":"RL.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433760701","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import patterns, url\n\n\nurlpatterns = patterns(\n 'legacy.edit_userprofile.views',\n url(r'^(?P\\d+)$', 'userprofile_form',\n name='edit_userprofile'),\n url(r'^update/(?P\\d+)$', 'userprofile_update',\n 
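The replay loop in RL.py builds its regression target from the one-step Bellman backup, target(s, a) = r + γ·max over a′ of Q(s′, a′). Note that the original writes `target[0][a]`, indexing with the action array left over from the acting phase rather than the `action` stored in the batch, which looks unintended. The update isolated as a plain function (a framework-free sketch; `q_values` and `next_q_values` stand in for network outputs):

```python
import numpy as np

def q_target(q_values, action, reward, next_q_values, gamma=0.99, done=False):
    # Copy current estimates; overwrite only the taken action's entry
    # with the bootstrapped one-step return.
    target = np.array(q_values, dtype=float)
    bootstrap = 0.0 if done else gamma * float(np.max(next_q_values))
    target[action] = reward + bootstrap
    return target

# q_target([0.2, 0.5], action=1, reward=1.0, next_q_values=[0.3, 0.1])
# -> array([0.2  , 1.297])
```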
name='edit_userprofile-update'),\n)\n","sub_path":"legacy/edit_userprofile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262973573","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: ci_knight \n@date: 2016年01月1日\n\"\"\"\n\nDEBUG = True\nFLATPAGES_AUTO_RELOAD = DEBUG\n\n\nclass Config(object):\n\n WeChat_Token = 'x'\n EncodingAESKey = 'x'\n AppID = 'x'\n AppSECRET = 'x'","sub_path":"cat/dist_app_settings.py","file_name":"dist_app_settings.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365978321","text":"# Copyright (c) Meta Platforms, Inc. and affiliates\nimport torch\nfrom torch.testing._internal.common_utils import run_tests\nfrom spmd.testing.common_utils import ( # type: ignore\n DistTensorTestBase,\n with_comms,\n)\nfrom spmd import DeviceMesh, DTensor, Shard, Replicate, distribute_tensor\n\n\nclass TPShardingOpsTest(DistTensorTestBase):\n @with_comms\n def test_sharded_view(self):\n device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))\n torch.manual_seed(0)\n tensor = torch.rand(16, 35, 26)\n sharding = [Shard(0)]\n st = distribute_tensor(tensor, device_mesh, sharding).view(8, 4, 35, 13)\n st_new = distribute_tensor(\n tensor.view(8, 4, 35, 13), device_mesh, sharding\n )\n self.assertEqual(st.to_local(), st_new.to_local())\n self.assertEqual(st.placements[0], st_new.placements[0])\n\n @with_comms\n def test_sharded_transpose(self):\n device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))\n torch.manual_seed(self.rank)\n tensor = torch.rand(3, 5, 6, device=self.device_type)\n sharding = [Shard(0)]\n dist_tensor = DTensor.from_local(tensor, device_mesh, sharding)\n new_dt = dist_tensor.transpose(0, 2)\n self.assertTrue(new_dt.placements[0].is_shard(dim=2))\n self.assertEqual(new_dt.to_local(), tensor.transpose(0, 2))\n new_dt = dist_tensor.transpose(1, 2)\n self.assertTrue(new_dt.placements[0].is_shard(dim=0))\n self.assertEqual(new_dt.to_local(), tensor.transpose(1, 2))\n\n @with_comms\n def test_sharded_permute(self):\n device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))\n torch.manual_seed(self.rank)\n tensor = torch.rand(3, 5, 6, device=self.device_type)\n sharding = [Shard(0)]\n dist_tensor = DTensor.from_local(tensor, device_mesh, sharding)\n new_dt = dist_tensor.permute(1, 0, 2)\n self.assertTrue(new_dt.placements[0].is_shard(dim=1))\n self.assertEqual(new_dt.to_local(), tensor.permute(1, 0, 2))\n\n @with_comms\n def test_replicated_permute(self):\n device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))\n torch.manual_seed(0)\n tensor = torch.rand(3, 5, 6, device=self.device_type)\n sharding = [Replicate()]\n dist_tensor = DTensor.from_local(tensor, device_mesh, sharding)\n new_dt = dist_tensor.permute(1, 0, 2)\n self.assertTrue(new_dt.placements[0].is_replicate())\n self.assertEqual(new_dt.to_local(), tensor.permute(1, 0, 2))\n self.assertEqual(new_dt.stride(), tensor.permute(1, 0, 2).stride())\n\n @with_comms\n def test_sharded_cat(self):\n device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))\n torch.manual_seed(self.rank)\n tensor_1 = torch.rand(3, 5, 6)\n tensor_2 = torch.rand(3, 5, 6)\n tensor_3 = torch.rand(3, 5, 6)\n sharding = [Shard(0)]\n dt_1 = DTensor.from_local(tensor_1, device_mesh, sharding)\n dt_2 = DTensor.from_local(tensor_2, device_mesh, 
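The urls.py record uses the string-based `patterns()` helper, deprecated in Django 1.8 and removed in 1.10; note also that the named capture groups lost their angle-bracketed names during extraction (`(?P\d+)`). On current Django the same two routes would look roughly like this — `<user_id>` is a placeholder for the lost group name, and the view import path is assumed:

```python
from django.urls import re_path

from legacy.edit_userprofile import views

urlpatterns = [
    re_path(r'^(?P<user_id>\d+)$', views.userprofile_form,
            name='edit_userprofile'),
    re_path(r'^update/(?P<user_id>\d+)$', views.userprofile_update,
            name='edit_userprofile-update'),
]
```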
sharding)\n dt_3 = DTensor.from_local(tensor_3, device_mesh, sharding)\n new_dt = torch.cat([dt_1, dt_2, dt_3])\n cat_dt = DTensor.from_local(\n torch.cat([tensor_1, tensor_2, tensor_3]), device_mesh, sharding\n )\n self.assertEqual(new_dt.to_local(), cat_dt.to_local())\n self.assertEqual(new_dt.size(), cat_dt.size())\n\n @with_comms\n def test_sharded_split(self):\n device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))\n torch.manual_seed(self.rank)\n tensor = torch.rand(3, 5, 6, device=self.device_type)\n sharding = [Shard(2)]\n dist_tensor = DTensor.from_local(tensor, device_mesh, sharding)\n dt_list = dist_tensor.split(dist_tensor.size(-1) // 2, dim=-1)\n local_tensors = tensor.split(3, dim=-1)\n for idx, dt in enumerate(dt_list):\n self.assertTrue(dt.placements[0].is_shard(dim=2))\n self.assertEqual(dt.to_local(), local_tensors[idx])\n\n\nif __name__ == \"__main__\":\n run_tests()\n","sub_path":"test/spmd/tensor/test_tp_sharding_ops.py","file_name":"test_tp_sharding_ops.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115555165","text":"\"\"\"\n Publisher MQTT\n\"\"\"\n\nimport paho.mqtt.client as mqtt\nimport time\nfrom Publisher.VehicleCounter import VehicleCounter\n\nclient = mqtt.Client()\n\n\ndef on_connect(_client, _userdata, _flags, rc):\n \"\"\"\n MQTT callback invoked once the connection to the broker is established.\n \"\"\"\n if rc == 0:\n print(\"Connected successfully\")\n else:\n print(\"Connect returned result code: \" + str(rc))\n\n\ndef on_detect(direction):\n \"\"\"\n Callback of the vehicle-detection object, executed when a passing car is detected.\n Publishes the timestamp and direction of the vehicle on two MQTT channels.\n One channel is handled by an MQTT subscriber that counts events and sends the data to MongoDB,\n the other serves real-time display in the mobile application.\n \"\"\"\n timestamp = int(time.time())\n client.publish(\"bigdata/vehicle_traffic\", direction)\n client.publish(\"bigdata/real_time\", str(timestamp) + \" \" + direction, retain=True)\n print(timestamp, direction)\n\n\nif __name__ == '__main__':\n # Set the connection callback\n client.on_connect = on_connect\n\n # Connect to the MQTT broker\n client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLS)\n client.username_pw_set(\"bigdata\", \"Bigdata2021\")\n client.connect(\"cf3d53718bf9452e937c720384e211ae.s1.eu.hivemq.cloud\", 8883)\n\n # Create the vehicle-detection object\n v = VehicleCounter(preview=False)\n\n # Set the callback executed after a vehicle is detected\n v.on_detect = on_detect\n\n # Start detection\n v.start()\n\n\n","sub_path":"Publisher/mqtt_publisher.py","file_name":"mqtt_publisher.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110818587","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\nparams = {'legend.fontsize':'xx-large',\n 'axes.labelsize':'xx-large',\n 'axes.titlesize':'xx-large',\n 'ytick.labelsize':'xx-large',\n 'xtick.labelsize':'xx-large'}#,\n #'axes.facecolor':'white',\n #'figure.facecolor':'white'}\npylab.rcParams.update(params)\n#plt.style.use(['dark_background','seaborn-pastel'])\n\nimport glob\nimport os\n\nfrom astropy.io import fits\n\ndef gauss(a,b,c,x):\n return a*np.exp(-(x-b)**2/c)**2\n\npattern = 
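The transpose/permute tests above all assert one rule: when a sharded tensor's dimensions are permuted, the `Shard` placement follows its dimension to its new position. The bookkeeping can be stated without torch (a sketch of the rule the tests check, not of the DTensor implementation):

```python
def permuted_shard_dim(shard_dim, perm):
    # After tensor.permute(*perm), old dimension `shard_dim` lives at the
    # index where perm references it.
    return perm.index(shard_dim)

assert permuted_shard_dim(0, (2, 1, 0)) == 2  # transpose(0, 2): Shard(0) -> Shard(2)
assert permuted_shard_dim(0, (0, 2, 1)) == 0  # transpose(1, 2) leaves Shard(0) alone
assert permuted_shard_dim(0, (1, 0, 2)) == 1  # permute(1, 0, 2): Shard(0) -> Shard(1)
```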
'/media/maja/Elements/rebinned20180822/rebinned20180822v02*_exp03_multi*.fits.fits'\npattern2 = '/media/maja/Elements/rebinned20180822/rebinned*_exp0?_multi*_042_*LL.fits.fits'\n#pattern = '/media/maja/Elements/work/03946/hetdex/maverick/red1/reductions/201801*/virus/*/exp0?/virus/multi*_042_*LL.fits'\nff = glob.glob(pattern)\nff = np.array(ff)\nff2 = glob.glob(pattern2)\n#print(len(ff))\n\nfiber = []\nhere = [x[-20:-18]!='09' for x in ff]\n\n#print(here)\nfor f in ff[np.where(here)]:\n hdu = fits.open(f)\n fiber.append(hdu['sky_spectrum'].data)\n hdu.close()\nfiber = np.array(fiber)\nfiber.shape\n\nfiberarray = np.concatenate([x[:,4:] for x in fiber])\nfiberarray[np.where(np.isnan(fiberarray))] = 0\n\ni = 5000\nfiberarray[i] = fiberarray[i]+gauss(10,600,8, np.arange(0,fiberarray.shape[-1],1))\n\nfiberarray.shape\n\nmeanspec = np.nanmean(fiberarray[:], axis=0)\nstdspec = np.nanstd(fiberarray[:],axis=0)\n\nfiberarray_0 = (fiberarray-meanspec)/stdspec\n\n#stdspec.shape\nplt.figure(figsize=(20,4))\nplt.plot(meanspec)\nplt.title('mean spectrum')\nplt.show()\n\ncov = np.cov(fiberarray_0.T)\n\neigenvals, eigenvecs = np.linalg.eig(cov)\neigenvals = np.real(eigenvals)\neigenvecs = np.real(eigenvecs)\n\neigenpairs = [(np.abs(eigenvals[i]), eigenvecs[:,i]) for i in np.argsort(abs(eigenvals))[::-1]]\n\n\"\"\"plt.figure(figsize=(20,4))\nplt.plot([x[0] for x in eigenpairs])\nplt.axvline(100)\nplt.axvline(150)\nplt.yscale('log')\"\"\"\n\nncomp = 150\n\nimp = np.array([eigenpairs[i][1] for i in range(ncomp)])\n\nfiberpca = np.dot(fiberarray_0, imp.T)\n\nnew = np.dot(fiberpca, imp)\n\nnewspec = new*stdspec+meanspec\n\nplt.figure(figsize=(20,4))\nplt.plot(fiberarray[i,580:620]-newspec[i,580:620])\nplt.plot(gauss(10,600,8, np.arange(0,fiber.shape[-1],1))[580:620])\nplt.show()\n\nplt.figure(figsize=(20,4))\nplt.plot(fiberarray[i,:])\nplt.plot(newspec[i,:])\nplt.show()\n\nre = (fiberarray-newspec)/fiberarray\n\nstddevi = np.nanstd(re[i])\nprint('\\nstd of re[i]: ',stddevi)\n\nplt.figure(figsize=(20,4))\nplt.plot(re[i])\nplt.axhline(stddevi)\nplt.axhline(-stddevi)\nplt.title('std {:.3f}'.format(stddevi))\nplt.show()\n\n\n\"\"\"plt.figure(figsize=(25, 17))\nplt.subplot(341)\nplt.scatter(fiberpca[:,0], fiberpca[:,1], alpha=0.1)\nplt.xlabel('PC 1')\nplt.ylabel('PC 2')\nplt.subplot(342)\nplt.scatter(fiberpca[:,1], fiberpca[:,2], alpha=0.1)\nplt.xlabel('PC 2')\nplt.ylabel('PC 3')\nplt.subplot(343)\nplt.scatter(fiberpca[:,2], fiberpca[:,3], alpha=0.1)\nplt.xlabel('PC 3')\nplt.ylabel('PC 4')\nplt.subplot(344)\nplt.scatter(fiberpca[:,3], fiberpca[:,4], alpha=0.1)\nplt.xlabel('PC 4')\nplt.ylabel('PC 5')\nplt.subplot(345)\nplt.scatter(fiberpca[:,4], fiberpca[:,5], alpha=0.1)\nplt.xlabel('PC 5')\nplt.ylabel('PC 6')\nplt.subplot(346)\nplt.scatter(fiberpca[:,5], fiberpca[:,6], alpha=0.1)\nplt.xlabel('PC 6')\nplt.ylabel('PC 7')\nplt.subplot(347)\nplt.scatter(fiberpca[:,6], fiberpca[:,7], alpha=0.1)\nplt.xlabel('PC 7')\nplt.ylabel('PC 8')\nplt.subplot(348)\nplt.scatter(fiberpca[:,7], fiberpca[:,8], alpha=0.1)\nplt.xlabel('PC 8')\nplt.ylabel('PC 9')\nplt.subplot(349)\nplt.scatter(fiberpca[:,8], fiberpca[:,9], alpha=0.1)\nplt.xlabel('PC 9')\nplt.ylabel('PC 10')\nplt.subplot(3,4,10)\nplt.scatter(fiberpca[:,9], fiberpca[:,10], alpha=0.1)\nplt.xlabel('PC 10')\nplt.ylabel('PC 11')\nplt.subplot(3,4,11)\nplt.scatter(fiberpca[:,10], fiberpca[:,11], alpha=0.1)\nplt.xlabel('PC 11')\nplt.ylabel('PC 12')\nplt.subplot(3,4,12)\nplt.scatter(fiberpca[:,11], fiberpca[:,12], alpha=0.1)\nplt.xlabel('PC 12')\nplt.ylabel('PC 
13')\n#plt.savefig('fiberpcas.png', bbox_inches='tight')\n\nfor j in range(0,10):\n plt.figure(figsize=(20,4))\n plt.plot(imp[j])\n\"\"\"\n","sub_path":"mixedfiberpca.py","file_name":"mixedfiberpca.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"121126495","text":"# Django settings for newt project.\nimport os\n\nimport djcelery\ndjcelery.setup_loader()\n\nREDIS_SERVER = os.environ.get('REDIS_SERVER' , None)\n\nif REDIS_SERVER :\n _redis_ip , _redis_port = REDIS_SERVER.split()\n BROKER_URL= 'redis://%s:%s/0' %( _redis_ip , _redis_port )\n CELERY_RESULT_BACKEND = BROKER_URL\nelse :\n BROKER_URL= 'redis://cn16358:6379/0' \n #BROKER_URL= ['redis://cn16356:6379//' , 'redis://cn16355:6379//' , 'redis://cn16354:6379//' , ]\n #CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'\n CELERY_RESULT_BACKEND = BROKER_URL\nfrom kombu import Queue,Exchange\nCELERY_DEFAULT_QUEUE = 'default'\ndefault_exchange = Exchange( 'default' , type='direct' )\n#ln_exchange = Exchange('ln' , type='topic')\nCELERY_QUEUES=(\n Queue('default',Exchange('default'),routing_key='task.#'),\n Queue('ln3' ,Exchange('ln'), routing_key='ln.ln3'),\n Queue('ln4',Exchange('ln'),routing_key='ln.ln4'),\n Queue('ln7',Exchange('ln'),routing_key='ln.ln7'),\n\n)\n\n#class MyRouter(object):\n# def route_for_task( self,task,args=None ,kwargs=None ) :\n# if task == 'command.adapters.celery_adapter.execute_task' :\n# print( args , kwargs )\n# return { 'exchange':'ln' , 'exchange_type':'topic','routing_key':'ln.' + kwargs[\"machine\"] }\n\nCELERY_ROUTES = ( 'common.routers.MyRouter' )\n\n#CELERY_ROUTES={\n# 'command.adapters.celery_adapter.execute_task':{\n# 'queue':'ln3' ,\n# 'routing_key' : 'ln.ln3',\n# },\n#}\n\nCELERY_DEFAULT_EXCHANGE='tasks'\nCELERY_DEFAULT_EXCHANGE_TYPE='topic'\nCELERY_DEFAULT_ROUTING_KEY='task.default'\n","sub_path":"newt-p3/newt/celery_settings.py","file_name":"celery_settings.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302358635","text":"url = \"a\"\nname = \"b\"\ndistrict = \"c\"\npage = \"d\"\nfrom_time = \"e\"\nto_time = \"f\"\nparams = \"{0}?iname={1}®ion={2}&page={3}&st={4}&et={5}\"\n\nkwargs = {\n \"name\": name,\n \"from_time\": from_time,\n \"url\": url,\n \"district\": district,\n # \"to_time\": to_time,\n \"page\": page,\n}\n\nargs = [url, name, district, page, from_time, to_time]\n\nprint(params.format(*args))\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500690207","text":"\"\"\"\nCopyright (c) 2020 COTOBA DESIGN, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\nand to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\nTHE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE 
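mixedfiberpca.py standardizes the fiber spectra, eigendecomposes the covariance, keeps the leading components, and inverts the projection to build a sky model whose residuals expose the injected Gaussian. The round trip, stripped to its core (a sketch with synthetic data, not the original pipeline; `np.linalg.eigh` also avoids the real-part juggling that `np.linalg.eig` forces on a symmetric matrix):

```python
import numpy as np

def pca_reconstruct(data, n_components):
    mean, std = data.mean(axis=0), data.std(axis=0)
    z = (data - mean) / std                          # standardize each channel
    eigvals, eigvecs = np.linalg.eigh(np.cov(z.T))   # symmetric -> use eigh
    order = np.argsort(eigvals)[::-1][:n_components]
    basis = eigvecs[:, order]                        # (channels, n_components)
    return (z @ basis) @ basis.T * std + mean        # project, then invert

spectra = np.random.default_rng(0).normal(size=(500, 40))
residual = spectra - pca_reconstruct(spectra, n_components=10)
```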
AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport unittest\n\nfrom programy.config.file.yaml_file import YamlConfigurationFile\nfrom programy.clients.config import ClientConfigurationData\nfrom programy.clients.events.console.config import ConsoleConfiguration\n\n\nclass ClientConfigurationDataTests(unittest.TestCase):\n\n def test_with_data(self):\n yaml = YamlConfigurationFile()\n self.assertIsNotNone(yaml)\n yaml.load_from_text(\"\"\"\n console:\n bot: bot\n prompt: \">>>\"\n bot_selector: programy.clients.client.DefaultBotSelector\n renderer: programy.clients.render.text.TextRenderer\n scheduler:\n name: Scheduler1\n debug_level: 0\n add_listeners: True\n remove_all_jobs: True\n \"\"\", ConsoleConfiguration(), \".\")\n\n bot_config = yaml.get_section(\"console\")\n\n client_config = ClientConfigurationData(\"test\")\n client_config.load_configuration_section(yaml, bot_config, \".\")\n\n self.assertEqual(\"programy.clients.client.DefaultBotSelector\", client_config.bot_selector)\n\n self.assertIsNotNone(client_config.scheduler)\n self.assertEqual(\"Scheduler1\", client_config.scheduler.name)\n self.assertEqual(0, client_config.scheduler.debug_level)\n self.assertTrue(client_config.scheduler.add_listeners)\n self.assertTrue(client_config.scheduler.remove_all_jobs)\n\n self.assertEqual(\"programy.clients.render.text.TextRenderer\", client_config.renderer)\n\n def test_without_data(self):\n yaml = YamlConfigurationFile()\n self.assertIsNotNone(yaml)\n yaml.load_from_text(\"\"\"\n console:\n \"\"\", ConsoleConfiguration(), \".\")\n\n bot_config = yaml.get_section(\"console\")\n\n client_config = ClientConfigurationData(\"test\")\n client_config.load_configuration_section(yaml, bot_config, \".\")\n\n self.assertIsNone(client_config.bot_selector)\n\n self.assertIsNotNone(client_config.scheduler)\n self.assertEqual(None, client_config.scheduler.name)\n self.assertEqual(0, client_config.scheduler.debug_level)\n self.assertFalse(client_config.scheduler.add_listeners)\n self.assertFalse(client_config.scheduler.remove_all_jobs)\n\n self.assertIsNone(client_config.renderer)\n","sub_path":"dialogue-engine/test/programytest/clients/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"538787415","text":"#!/usr/bin/env python\n\n\"\"\"\nMight make this its own library: wit.ai\n- wit\n- witinput\n- sox\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport requests\nimport os\nimport logging\n\nWIT_API_HOST = os.getenv('WIT_URL', 'https://api.wit.ai')\n\n\nclass WitError(Exception):\n\tpass\n\n\nclass Wit(object):\n\t\"\"\"\n\tSimple Wit.ai interface\n\t\"\"\"\n\taccess_token = None\n\n\tdef __init__(self, access_token, logger=None):\n\t\tself.access_token = access_token\n\t\tself.logger = logger or logging.getLogger(__name__)\n\n\tdef message(self, msg):\n\t\t\"\"\"\n\t\tSend a text message to Wit.ai\n\t\t\"\"\"\n\t\tself.logger.debug(\"Message request: msg=%r\", msg)\n\t\tparams = {}\n\t\tif msg:\n\t\t\tparams['q'] = msg\n\t\tresp = self.req('GET', '/message', params)\n\t\tself.logger.debug(\"Message response: %s\", resp)\n\t\treturn resp\n\n\tdef speech(self, 
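The test above checks that a populated scheduler section fills its fields and that an empty `console:` section falls back to `None`/`0`/`False` defaults. Outside the programy framework, the same defaulting pattern can be probed with plain PyYAML (a generic sketch, not programy's API):

```python
import yaml

text = """
console:
  scheduler:
    name: Scheduler1
    debug_level: 0
"""
config = yaml.safe_load(text) or {}
scheduler = (config.get("console") or {}).get("scheduler") or {}
name = scheduler.get("name")                  # "Scheduler1", or None if absent
debug_level = scheduler.get("debug_level", 0)
```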
fileio):\n\t\t\"\"\"\n\t\tSend a wave audio file to Wit.ai\n\t\t\"\"\"\n\t\tself.logger.debug(\"Speech request\")\n\t\tfileio.seek(0)\n\t\tresp = self.req('POST', '/speech', {}, data=fileio.read())\n\t\tself.logger.debug(\"Speech response: %s\", resp)\n\t\treturn resp\n\n\tdef req(self, meth, path, params, **kwargs):\n\t\tif path == '/message':\n\t\t\trsp = requests.request(\n\t\t\t\tmeth,\n\t\t\t\tWIT_API_HOST + path,\n\t\t\t\theaders={\n\t\t\t\t\t'authorization': 'Bearer ' + self.access_token,\n\t\t\t\t\t'accept': 'application/vnd.wit.20160330+json'\n\t\t\t\t},\n\t\t\t\tparams=params,\n\t\t\t\t**kwargs\n\t\t\t)\n\t\telif path == '/speech':\n\t\t\trsp = requests.request(\n\t\t\t\tmeth,\n\t\t\t\tWIT_API_HOST + path + '?v=20160511',\n\t\t\t\theaders={\n\t\t\t\t\t'authorization': 'Bearer ' + self.access_token,\n\t\t\t\t\t# 'accept': 'application/vnd.wit.20160330+json'\n\t\t\t\t\t'Content-Type': 'audio/wav',\n\t\t\t\t\t# 'Content-Type': 'audio/raw;encoding=signed-integer;bits=16;rate=16000;endian=little'\n\t\t\t\t},\n\t\t\t\t# params=params,\n\t\t\t\t**kwargs\n\t\t\t)\n\t\telse:\n\t\t\traise WitError('This library does not support the {0!s} path'.format(path))\n\n\t\tif rsp.status_code > 200:\n\t\t\traise WitError('Wit responded with status: ' + str(rsp.status_code) +\n\t\t\t\t\t\t' (' + rsp.reason + ')')\n\t\tjson = rsp.json()\n\t\tif 'error' in json:\n\t\t\traise WitError('Wit responded with an error: ' + json['error'])\n\t\treturn json\n\n\ndef main():\n\ttoken = os.getenv('WIT')\n\tclient = Wit(token)\n\tresp = client.message('hi')\n\tprint(resp)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"chi/lib/wit.py","file_name":"wit.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"542184059","text":"\"\"\"File with a class and functions for working with ArUco markers.\"\"\"\nimport math\nimport numpy as np\nimport cv2\nimport cv2.aruco as aruco\n\nclass Aruco:\n \"\"\"Class for detecting ArUco markers.\"\"\"\n\n COLOR = (0, 204, 255)\n\n def __init__(self, cap):\n \"\"\"The camera is passed as the argument.\"\"\"\n\n self.CAP = cap\n # self.CAP.set(3, 1280)\n # self.CAP.set(4, 720)\n self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\n self.parameters = aruco.DetectorParameters_create()\n\n def get_markers(self):\n \"\"\"Reads a frame from the camera and returns the corner coordinates and ids of the detected markers\"\"\"\n\n ret, self.frame = self.CAP.read()\n # self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n self.corners, self.ids, _ = aruco.detectMarkers(self.frame, self.aruco_dict,\n parameters=self.parameters)\n return self.corners, self.ids\n\n def render_frame(self):\n \"\"\"Draws the marker information onto the frame\"\"\"\n\n markers_frame = aruco.drawDetectedMarkers(self.frame, self.corners, self.ids,\n borderColor=self.COLOR)\n return markers_frame\n\ndef aruco_center(corner):\n \"\"\"Takes the corner coordinates of a marker, returns the coordinates of the marker center.\"\"\"\n\n x = (corner[0][0][0] + corner[0][2][0]) / 2\n y = (corner[0][0][1] + corner[0][2][1]) / 2\n return int(x), int(y)\n\ndef aruco_angel(corner):\n \"\"\"Takes the corner coordinates of a marker, returns the rotation angle of the marker.\"\"\"\n x1 = corner[0][0][0]\n y1 = corner[0][0][1]\n x2 = corner[0][3][0]\n y2 = corner[0][3][1]\n return math.atan2(x2-x1, y2-y1)*180/math.pi\n","sub_path":"cvi/aruco.py","file_name":"aruco.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
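`aruco_center` above averages one diagonal's endpoints; averaging all four corners is a common alternative and is one numpy call. A sketch assuming the `(1, 4, 2)` corner array that `cv2.aruco.detectMarkers` returns per marker:

```python
import numpy as np

def marker_center(corner):
    # corner has shape (1, 4, 2): four (x, y) vertices of one marker
    cx, cy = np.asarray(corner)[0].mean(axis=0)
    return int(cx), int(cy)
```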
+{"seq_id":"393603090","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 21 16:00:46 2017\n\n@author: jianfengsong\n\"\"\"\nimport xlrd as xl\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom itertools import combinations\nclass fun():\n def excel_data(n):\n train_rows_value=list()\n train_cols_value=list()\n excel=xl.open_workbook(n)\n data_table=excel.sheet_by_index(0)\n rows=data_table.nrows\n cols=data_table.ncols\n for a in range(rows):\n train_rows_value.append(data_table.row_values(a))\n train_row=np.asarray(train_rows_value)\n return train_row\n########################################################\n def data (x1_train,x0_train,test1,test0):\n train=list()\n tar=list()\n test=list()\n test_tar=list()\n for a in range(len(x1_train)):\n train.append(x1_train[a])\n for a in range(len(x0_train)):\n train.append(x0_train[a])\n \n for a in range(int(len(x1_train))):\n tar.append(1)\n for a in range(int(len(x0_train))):\n tar.append(0)\n \n for a in range(len(test1)):\n test.append(test1[a])\n for a in range(len(test0)):\n test.append(test0[a])\n \n for a in range(int(len(test1))):\n test_tar.append(1)\n for a in range(int(len(test0))):\n test_tar.append(0)\n return train,test,tar,test_tar\n##################################################\n def two_class(data_set):\n data_high=list()\n data_low=list()\n data_label=list()\n for a in range(len(data_set)):\n b=len(data_set[a])-1\n if data_set[a][b] == 'High':\n data_set[a][b]=1\n data_high.append(data_set[a])\n if data_set[a][b]=='Low':\n data_set[a][b]=0\n data_low.append(data_set[a])\n if data_set[a][b] =='SFE':\n data_label.append(data_set[a])\n data_high1=np.asarray(data_high)\n data_low1=np.asarray(data_low)\n return data_high1,data_low1,data_label\n##################################################\n def feature_sample(data_set):\n feature_data=[[] for i in range(len(data_set[1])-1)]\n for b in range(len(data_set[1])-1):\n for a in range(len(data_set)):\n feature_data[b].append(float(data_set[a][b]))\n feature_data1=np.asarray(feature_data)\n return feature_data1\n#########################################################\n def get_feature_sample(): #find the feature matrix, for example j[0]is all value of 'C' \n train_row=fun.excel_data('SFE_Train_Data.xlsx')\n test_row=fun.excel_data('SFE_Test_Data.xlsx')\n \n train_high,train_low,train_label=fun.two_class(train_row)\n test_high,test_low,test_label=fun.two_class(test_row)\n \n train_set,test_set,train_label,test_label=fun.data(train_high,train_low,test_high,test_low)\n \n feature_col=fun.feature_sample(train_set)\n test_col=fun.feature_sample(test_set)\n# feature_col1=np.asarray(feature_col.append(train_label))\n return feature_col,train_label,test_col,test_label\n##########################################################\n def ehaustive(num):\n x=fun.get_feature_sample()\n selected_feature_set=list()\n selected_feature1=combinations(range(7),num)\n selected_feature=np.asarray(list(selected_feature1))\n return selected_feature\n############################################################\n def determind(data_set,tar):\n number_of_wrong=0\n for a in range(len(tar)):\n if data_set[a]!=tar[a]:\n number_of_wrong+=1\n else:\n number_of_wrong+=0\n error_rate=number_of_wrong/len(data_set)\n return error_rate\n###########################################################\n def NN3_err(train,tar,test,test_tar,n):\n 
nn3=KNeighborsClassifier(n_neighbors=3)\n nn3.fit(train,tar)\n if n==1:\n nn3_clas=nn3.predict(train)\n nn3error=fun.determind(nn3_clas,tar)\n if n==0:\n nn3_clas=nn3.predict(test)\n nn3error=fun.determind(nn3_clas,test_tar)\n return nn3error\n###############################################################\n def LDA_error(train,tar,test,test_tar,n):\n clf=LDA()\n clf.fit(train,tar)\n if n ==1:\n LDA_cla=clf.predict(train)\n error=fun.determind(LDA_cla,tar)\n if n==0:\n LDA_cla=clf.predict(test)\n error=fun.determind(LDA_cla,test_tar)\n return error\n########################################################\n\n\n\n\n############### MAIN() ########################################\nfeature_data,feature_label,test_set,test_label=fun.get_feature_sample()\nmin_ind_set, min_err_set,min_cla_err_set=list(),list(),list()\nmin3nn_ind_set, min3nn_err_set,min3nn_cla_err_set=list(),list(),list()\nfor a in range(1,6):\n selected_feature=fun.ehaustive(a)\n min_err=1\n min3nn_err=1\n for b in range(len(selected_feature)):\n selected_feature_set1=list()\n LDA_feature1set,LDA_feature0set,test1,test0=list(),list(),list(),list()\n for c in range(len(selected_feature[b])):\n indice=selected_feature[b][c]\n selected_feature_set1.append(feature_data[indice])\n selected_feature_set=np.asarray(selected_feature_set1)\n LDA_feature_1,LDA_feature_0,test_1,test_0=list(),list(),list(),list()\n for d in range(0,12): #with SFE high\n LDA_feature_1.append(feature_data[indice][d])\n# test_1.append(test_set[indice][d])\n for e in range(12,len(feature_data[c])): #with SFE low\n LDA_feature_0.append(feature_data[indice][e])\n# test_0.append(test_set[indice][e])\n for f in range(0,50):\n test_1.append(test_set[indice][f])\n for g in range(50,98):\n test_0.append(test_set[indice][g])\n LDA_feature1set.append(LDA_feature_1)\n LDA_feature0set.append(LDA_feature_0)\n test1.append(test_1)\n test0.append(test_0)\n #LDA APPARENT\n LDA_feature1_set=(np.asarray(LDA_feature1set)).T #as my x1 \n LDA_feature0_set=(np.asarray(LDA_feature0set)).T #as my x0\n test1set=(np.asarray(test1)).T\n test0set=(np.asarray(test0)).T\n train,test,tar,test_tar=fun.data(LDA_feature1_set,LDA_feature0_set,test1set,test0set)\n #find min for LDA\n LDA_err=fun.LDA_error(train,tar,test,test_tar,1)\n LDA_cla_err=fun.LDA_error(train,tar,test,test_tar,0)\n# print(LDA_cla_err)\n nn3error=fun.NN3_err(train,tar,test,test_tar,1)\n# print(nn3error,a)\n nn3_cla_error=fun.NN3_err(train,tar,test,test_tar,0)\n #LDA\n if min_err>LDA_err:\n min_err=LDA_err\n min_ind=b\n min_cla_err=LDA_cla_err\n else:\n min_err=min_err\n min_ind=min_ind\n # 3NN \n if min3nn_err>nn3error:\n min3nn_err=nn3error\n min3nn_ind=b\n min3nncla=nn3_cla_error\n else:\n min3nn_err=min3nn_err\n min3nn_ind=min3nn_ind\n \n min_err_set.append(min_err)#min LDA apparent error\n min_ind_set.append(selected_feature[min_ind])#min LDA error index\n min_cla_err_set.append(min_cla_err)# classification error\n \n min3nn_err_set.append(min3nn_err)#min 3NN apparent error\n min3nn_ind_set.append(selected_feature[min3nn_ind])#min 3nn error index\n min3nn_cla_err_set.append(min3nncla)# classification error\n############################################################################\n# SFS for LDA\nsfs_lda=list()\nsfs_lda_ind=list()\nsfs_ldatrain,sfs_ldatest=list(),list()\nsfs_lda_app,sfs_lda_cla=list(),list()\nsfs_lda_app.append(min_err_set[0])\nsfs_lda_cla.append(min_cla_err_set[0])\nfor a in range(7):\n sfs_lda.append(a)\nsfs_lda.remove(min_ind_set[0][0])\nsfs_lda_ind.append(min_ind_set[0][0])\nfor a in range (4):\n 
sfs_ldatrain.append(feature_data[sfs_lda_ind[a]])\n sfs_ldatest.append(test_set[sfs_lda_ind[a]])\n sfs_min=1\n for b in sfs_lda:\n sfs_ldatrain.append(feature_data[b])\n sfs_ldatest.append(test_set[b])\n sfs_lda_train=(np.asarray(sfs_ldatrain)).T\n sfs_lda_test=(np.asarray(sfs_ldatest)).T\n sfs_lda_app_err=fun.LDA_error(sfs_lda_train,tar,sfs_lda_test,test_label,1)\n sfs_lda_cla_err=fun.LDA_error(sfs_lda_train,tar,sfs_lda_test,test_label,0)\n if sfs_min>sfs_lda_app_err:\n sfs_min=sfs_lda_app_err\n sfs_ind=b\n min_cla_err=sfs_lda_cla_err\n else:\n sfs_min=sfs_min\n sfs_ind=sfs_ind\n sfs_ldatrain.pop(a+1)\n sfs_ldatest.pop(a+1)\n sfs_lda.remove(sfs_ind)\n sfs_lda_app.append(sfs_min)\n sfs_lda_cla.append(min_cla_err)\n sfs_lda_ind.append(sfs_ind)\n# SFS 3NN\nsfs_3nn=list()\nsfs_3nn_ind=list()\nsfs_3nntrain,sfs_3nntest=list(),list()\nsfs_3nn_app,sfs_3nn_cla=list(),list()\nsfs_3nn_app.append(min3nn_err_set[0])\nsfs_3nn_cla.append(min3nn_cla_err_set[0])\n#sfs_3nn_app.append(3)\n#sfs_3nn_cla.append(min3nn_cla_err_set[0])\nfor a in range(7):\n sfs_3nn.append(a) \nsfs_3nn.remove(min3nn_ind_set[0][0])\nsfs_3nn_ind.append(min3nn_ind_set[0][0])\n#sfs_3nn.remove(3)\n#sfs_3nn_ind.append(3)\nfor d in range (4):\n sfs_3nntrain.append(feature_data[sfs_3nn_ind[d]])\n sfs_3nntest.append(test_set[sfs_3nn_ind[d]])\n sfs3nn_min=1\n for c in sfs_3nn:\n sfs_3nntrain.append(feature_data[c])\n sfs_3nntest.append(test_set[c])\n sfs_3nn_train=(np.asarray(sfs_3nntrain)).T\n sfs_3nn_test=(np.asarray(sfs_3nntest)).T\n sfs_3nn_app_err=fun.NN3_err(sfs_3nn_train,tar,sfs_3nn_test,test_label,1)\n sfs_3nn_cla_err=fun.NN3_err(sfs_3nn_train,tar,sfs_3nn_test,test_label,0)\n if sfs3nn_min>sfs_3nn_app_err:\n sfs3nn_min=sfs_3nn_app_err\n sfs3nn_ind=c\n min3nn_cla_err=sfs_3nn_cla_err\n else:\n sfs3nn_min=sfs3nn_min\n sfs3nn_ind=sfs3nn_ind\n sfs_3nntrain.pop(d+1)\n sfs_3nntest.pop(d+1)\n sfs_3nn.remove(sfs3nn_ind)\n sfs_3nn_app.append(sfs3nn_min)\n sfs_3nn_cla.append(min3nn_cla_err)\n sfs_3nn_ind.append(sfs3nn_ind) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Project2/2Assignment2a1.py","file_name":"2Assignment2a1.py","file_ext":"py","file_size_in_byte":10578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"533338027","text":"import numpy as np\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom .models import MLPRegressor\n\n\ndef train_nn(x_train, y_train, x_test, n_splits=5, seed=42):\n input_size = x_train.shape[1]\n train_preds = np.zeros(len(x_train))\n test_preds = np.zeros(len(x_test))\n\n splits = list(StratifiedKFold(\n n_splits=n_splits, random_state=seed).split(x_train, y_train))\n\n for fold, (train_idx, valid_idx) in enumerate(splits):\n print(f'Fold {fold + 1}')\n\n train_fold_x = x_train[train_idx, :]\n train_fold_y = y_train[train_idx]\n valid_fold_x = x_train[valid_idx, :]\n valid_fold_y = y_train[valid_idx]\n\n model = MLPRegressor(input_size)\n model.fit(train_fold_x, train_fold_y, valid_fold_x, valid_fold_y)\n\n train_preds[valid_idx] = model.predict(valid_fold_x)\n test_preds += model.predict(x_test) / n_splits\n\n return train_preds, test_preds\n","sub_path":"models/nn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99362397","text":"# Copyright (C) 2010-2014 Simula Research Laboratory\n#\n# This file is part of CBCPOST.\n#\n# CBCPOST is free software: you can redistribute it and/or modify\n# it under the terms 
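The assignment script above runs an exhaustive wrapper search over feature subsets by maintaining parallel min-trackers for LDA and 3-NN. `itertools.combinations` plus a scoring callback expresses the same search compactly, and the same shape fits the SFS loops that follow. A sketch where `score(subset)` would be, e.g., the LDA apparent error on `X[:, subset]`:

```python
from itertools import combinations

def exhaustive_search(n_features, subset_size, score):
    best_subset, best_err = None, float("inf")
    for subset in combinations(range(n_features), subset_size):
        err = score(subset)
        if err < best_err:
            best_subset, best_err = subset, err
    return best_subset, best_err
```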
of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# CBCPOST is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with CBCPOST. If not, see .\n\"\"\"\nParameterized represents a suggested interface to create classes/objects with\nassociated parameters.\n\"\"\"\nfrom __future__ import division\nfrom cbcpost import get_parse_command_line_arguments\nimport sys\n\n#pylint: disable=R0921\nclass Parameterized(object):\n \"Core functionality for parameterized subclassable components.\"\n def __init__(self, params):\n self.params = self.default_params()\n\n self.params.replace(params)\n if get_parse_command_line_arguments():\n args = sys.argv[1:]\n self.params.parse_args(args)\n\n # Assert for each subclass that we have all keys,\n # i.e. no default_params functions have been skipped\n # in the inheritance chain\n pkeys = set(self.params.keys())\n for cls in type(self).mro()[:-2]: # Skip object and Parameterized\n assert len(set(cls.default_params().keys()) - pkeys) == 0\n\n # --- Default parameter functions ---\n\n @classmethod\n def default_params(cls):\n \"Merges base and user params into one ParamDict.\"\n raise NotImplementedError(\"Missing default_params implementation for \\\n class %s\" % (cls,))\n\n # --- Name functions ---\n\n @classmethod\n def shortname(cls):\n \"\"\"Get a one-word description of what the class represents.\n\n By default uses class name.\"\"\"\n return cls.__name__\n\n @classmethod\n def description(cls):\n \"\"\"Get a one-sentence description of what the class represents.\n\n By default uses first line of class docstring.\"\"\"\n doc = cls.__doc__\n if doc is None:\n return \"Missing description.\"\n else:\n return doc.split('\\n')[0]\n\n def __str__(self):\n return \"%s: %s\" % (self.shortname(), self.description())\n","sub_path":"R_N_partitioned/cbcpost/parameterized.py","file_name":"parameterized.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"526213055","text":"# This goes through the directory \"txt\" and searches the files in it for false positives/ truly relevant articles, then sorts into respective folders.\n\nimport string\nimport os\nimport re\nfrom shutil import copyfile\n\nsecondaryKeywords = ['detention', 'detain', 'children', 'families', 'Mexico', 'Trump', 'protest', 'border', 'facilit'] #these are ones which by themselves might not be indicative of a match, but if there's multiple, it's more likely.\n\ndirectory = os.fsencode(\"txt\") #this is where the text files are\n\nglobal iPositives\niPositives = 0\nglobal iNegatives\niNegatives = 0\n\ndef findKeywords(str): #here we test to see what keywords the text in question contains\n\n iCount = 0\n for keyword in secondaryKeywords: #testing to see whether secondary keywords are in a file, and incrementing the iCount counter for each one that is.\n if re.search(keyword, str) is not None:\n # print(keyword)\n iCount = iCount + 1\n \n\n if re.search('((?i)(refugee|immigration|asylum))', str) is not None: #any one of these keywords means the article is relevant. 
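`Parameterized` above enforces a contract: every concrete subclass supplies `default_params()`, the user's overrides are merged in `__init__`, and each class in the MRO is checked for key completeness. A hypothetical subclass to make that concrete (names invented for illustration; assumes cbcpost's `ParamDict` is importable at package level and accepts keyword initialization):

```python
from cbcpost import ParamDict
from cbcpost.parameterized import Parameterized

class Solver(Parameterized):
    """Example solver with two tunable parameters."""
    @classmethod
    def default_params(cls):
        return ParamDict(timestep=0.1, max_iterations=100)

solver = Solver(ParamDict(timestep=0.05))  # overrides one default, keeps the other
```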
\n return True;\n elif re.search('(?i)\\bICE\\b', str) is not None: #if \" ICE \" is there as a single capitalised word, it's relevant.\n return True;\n elif iCount > 3: #if more than 3 of the other terms are present, it's relevant. Can play with this number to get stricter or less strict about false positives\n return True;\n else:\n return False;\n\nfor file in os.listdir(directory): #for each file, test for keywords, and move into a new folder if they are there.\n filename = os.fsdecode(file)\n #print(filename)\n filepath = 'txt/' + filename\n file = open(filepath, encoding = \"utf8\")\n raw = file.read()\n \n if (findKeywords(raw) is True):\n destination = 'contains-keywords/' + filename \n copyfile(filepath, destination)\n iPositives = iPositives+1 #increment this number to keep track of how many files we have detected and moved\n else:\n iNegatives = iNegatives+1 #increment this number to keep track of how many false positives we identified\n destination = 'false-positives/' + filename\n copyfile(filepath, destination)\n file.close()\n\nprint(\"Total real articles = \" + str(iPositives) + \"; Total false positives = \" + str(iNegatives))\n","sub_path":"data/downloads/everything/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"27616722","text":"import json\n\n\n\n\nlt = [\n {'name':'张一','age':'7','height':'130'},\n {'name':'赵二','age':'13','height':'150'},\n {'name':'黄三','age':'16','height':'160'},\n {'name':'李四','age':'20','height':'170'}\n]\n\nstring = json.dumps(lt,ensure_ascii=False)\n\nobj = json.loads(string)\nprint(type(obj))","sub_path":"Craw Day05/3-json.py","file_name":"3-json.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532478816","text":"\r\n\r\n# players are given a option to start the game or to quit by importing sys module.\r\n\r\nimport sys\r\na=input(\"TO START THE GAME TYPE 'yes' and TO QUIT TYPE 'no'\\n\")\r\nif a.lower()==\"no\":\r\n sys.exit()\r\nelse:\r\n print(\"LET'S START THE GAME\")\r\n# those who need instructions can ask for it, others can start the game directly.\r\n\r\na=input(\"welcome to the game of chance,are you ready to test your fortune ,\\ndo you need instructions type (yes) or (no) \\n\")\r\nif a.lower()==\"yes\":\r\n print(''' 1. player rolls two six-sided dice and adds the numbers rolled together.\r\n 2. On this first roll, a 7 or an 11 automatically wins, and a 2, 3, or 12automatically loses, and play is over.\r\n If a 4, 5, 6, 8, 9, or 10 are rolled on this first roll, that number becomes the 'point.'\r\n 3. The player continues to roll the two dice again until one of two things happens: \r\n either they roll the 'point' again, in which case they win; or they roll a 7, in which case they lose.''')\r\nelif a.lower()==\"no\":\r\n print(\"all the best, player\")\r\n\r\n\r\n\r\n\r\nimport random # for random number generation.\r\n\r\ndef dice_number():\r\n _=input(\"press enter to roll the dice \") #using \"_ \" because python ignores a variable if \"_\" is used. 
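One branch of sort.py can never fire: in the plain string `'(?i)\bICE\b'` each `\b` is consumed by Python as a backspace character before the regex engine sees it, so the whole-word "ICE" check silently always fails and matching falls through to the weaker keyword-count rule. Raw strings fix it; note that `(?i)` also defeats the "single capitalised word" intent, so dropping it restores case sensitivity:

```python
import re

# Plain string: '\b' is a backspace character; this never matches.
assert re.search('(?i)\bICE\b', 'melting ICE sheet') is None

# Raw string: r'\b' reaches the regex engine as a word boundary.
assert re.search(r'\bICE\b', 'melting ICE sheet') is not None
```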
This is an unused variable but important so that we can have the press enter option.\r\n die1 = random.randrange(1,7) # this will enable to select a random number from 1 to 6\r\n die2 = random.randrange(1,7) \r\n return (die1 , die2) #returns the dice_number values in the form of tuple\r\n\r\n \r\ndef two_dice(dices):\r\n die1, die2 = dices\r\n print(\"player- the sum of numbers you have got in die 1 and die 2 are {} + {} = {}\".format(die1,die2,sum(dices)))# using string formatting to input the values of die1 and die2 and then using the sum function for die1+die2 # as previously the return value returned die1 and die2 in tuple, this will convert them into variables.\r\n\r\nvalue=dice_number() #calling the dice_number function to get a value,return the roll and then store that value in value.\r\ntwo_dice(value) \r\nsum_of_dices=sum(value) #using the sum function in value to find the sum of two outcomes.\r\n\r\n#sample executions\r\n\r\nif sum_of_dices in (7,11): # why we are using (in) keyword to find if sum of dices is 7 or 11 to determine the result.\r\n result=\"congratulations you won\"\r\nelif sum_of_dices in (2,3,12): # we are using (in) keyword to find if sum of dices is 2 , 3 , 12 to determine the result.\r\n result=\"you lost, \\ntry again next time\"\r\nelse: #because none of the cases worked above now we will play continously until we win or lose.\r\n result=\"continue your game please\"\r\n currentpoint = sum_of_dices\r\n print(\"good game, your current point is\",currentpoint)\r\n\r\n\r\n# game continues if you have not scored a total of 2 , 3 , 7 , 11 , 12 \r\nwhile result==\"continue your game please\":# this will enable the game to continue in a loop until the outcome is win or lose\r\n value=dice_number()\r\n two_dice(value)\r\n sum_of_dices=sum(value)\r\n if sum_of_dices == currentpoint:\r\n result=\"congratulations you won\"\r\n elif sum_of_dices == 7:\r\n result=\"you lost,\\n try again next time\"\r\n\r\n# when the outcome is clear,this will produce the outcome of the game\r\nif result == \"congratulations you won\":\r\n print(\"congratulations,you won\")\r\nelse:\r\n print(\"you lost, \\ntry again next time\")\r\n\r\n\r\n\r\n","sub_path":"game of craps.py","file_name":"game of craps.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"487002329","text":"from sklearn import svm\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef sk_svr(X, Y):\n '''\n sklearn自带的SVR求解\n X = [x, y]\n Y = -x^2 - y^2\n (a,b)是圆心的坐标\n r是圆的半径\n '''\n clf = svm.SVR(kernel='linear')\n clf.fit(X, Y)\n #rint(clf.coef_, clf.intercept_)\n a = - 1 * clf.coef_[0][0] / 2\n b = - 1 * clf.coef_[0][1] / 2\n R = a * a + b * b - clf.intercept_[0]\n r = math.sqrt(R)\n return [a, b, r]\n\n\ndef my_svr(x, y, epoches, rho, rho_1, rho_2, rho_3):\n '''\n 对朗格朗日对偶函数求梯度下降,得到对偶变量的最优解\n 通过KKT条件,求出原变量a,b和R\n '''\n alpha_1 = np.zeros(len(x))\n alpha_2 = np.zeros(len(x))\n x_1 = np.zeros(len(x))\n y_1 = np.zeros(len(x))\n for i in range(epoches):\n for j in range(len(x)):\n x_1[j] = (alpha_1[j] - alpha_2[j]) * x[j]\n y_1[j] = (alpha_1[j] - alpha_2[j]) * y[j]\n for j in range(len(x)):\n alpha_1[j] = alpha_1[j] - rho * ((np.sum(x_1) * x[j] + np.sum(y_1) * y[j]) * 1 - (x[j] * x[j] + y[j] * y[j]) + (rho_1 + rho_2) * alpha_1[j] - c * rho_1 + rho_3 * (np.sum(alpha_1)-np.sum(alpha_2)))\n alpha_2[j] = alpha_2[j] - rho * ((np.sum(x_1) * x[j] + np.sum(y_1) * y[j]) * (-1) + (x[j] * x[j] + y[j] * y[j]) + (rho_1 + 
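The rules coded in the craps script fix the pass-line win probability at 244/495 ≈ 0.4929, and a short Monte Carlo run is an easy cross-check of the game logic (a sketch, independent of the interactive script above):

```python
import random

def play_pass_line():
    roll = lambda: random.randint(1, 6) + random.randint(1, 6)
    first = roll()
    if first in (7, 11):
        return True
    if first in (2, 3, 12):
        return False
    while True:                 # keep rolling until the point or a 7 shows
        total = roll()
        if total == first:
            return True
        if total == 7:
            return False

trials = 100_000
wins = sum(play_pass_line() for _ in range(trials))
print(wins / trials)            # hovers around 0.493
```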
rho_2) * alpha_2[j] - c * rho_1 + rho_3 * (np.sum(alpha_2)-np.sum(alpha_1)))\n #print(alpha_1)\n #print(alpha_2)\n omega_1 = -1 * np.sum(x_1)\n omega_2 = -1 * np.sum(y_1)\n #print(omega_1)\n #print(omega_2)\n a = -1 * omega_1 / 2\n b = -1 * omega_2 / 2\n R = x[0] * x[0] + omega_1 * x[0] + a * a + y[0] * y[0] + omega_2 * y[0] + b * b\n r = math.sqrt(R)\n return [a, b, r]\n\n\ndef plot_circle(x, y, a, b, r):\n \n theta = np.arange(0, 2 * np.pi, 0.01)\n m = a + r * np.cos(theta)\n n = b + r * np.sin(theta)\n fig = plt.figure()\n axes = fig.add_subplot(111)\n axes.plot(x, y, 'ro')\n axes.plot(m, n)\n axes.axis('equal')\n axes.set_title('data0')\n\n plt.xlabel('x')\n plt.ylabel('y')\n #plt.xlim((0, 1.5))\n plt.show()\n\n\ndef cal_error(x, y, a, b, r):\n\n\n error = 0\n for i in range(len(x)):\n error = error + abs(x[i] * x[i] - 2 * a * x[i] + y[i] * y[i] - 2 * b * y[i] + a * a + b * b - r * r)\n print(error)\n return error\n\n\nx = []\ny = []\nwith open('data/data2.txt') as f:\n line = f.readline()\n while line:\n s = line.split(' ')\n x.append(float(s[0]))\n y.append(float(s[1].replace('\\n', '')))\n line = f.readline()\nx = np.array(x)\ny = np.array(y)\nX = np.zeros((len(x), 2))\nX[:,0] = x\nX[:,1] = y\nY = np.zeros((len(x)))\nfor i in range(len(x)):\n Y[i] = - x[i] * x[i] - y[i] * y[i]\n\n[a, b, r] = sk_svr(X, Y)\nprint([a, b, r])\n\nerror = cal_error(x, y, a, b, r)\n\nplot_circle(x, y, a, b, r)\n\n\nepoches = 100\nrho = 0.1\nc = 1\nrho_1 = 0.1\nrho_2 = 0.5\nrho_3 = 10\n[a, b, r] = my_svr(x, y, epoches, rho, rho_1, rho_2, rho_3)\nprint([a, b, r])\n\nerror = cal_error(x, y, a, b, r)\n\nplot_circle(x, y, a, b, r)","sub_path":"作业/第三次实验/code/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"416873795","text":"# 3、不同的子序列\n# 结果:提交通过\n\ndef cSubS(s,t):\n ls = len(s) + 1\n lt = len(t) + 1\n M = [[0 for j in range(ls)] for i in range(lt)]\n if ls < lt:\n return 0\n M[0][:] = [1 for j in range(ls)]\n\n for i in range(1, lt): #尝试用回溯法,出现索引超出范围,改为循环\n for j in range(1, ls):\n if s[j - 1] == t[i - 1]:\n M[i][j] = M[i - 1][j - 1] + M[i][j - 1]\n else:\n M[i][j] = M[i][j - 1]\n\n return M[-1][-1]\n#\nS = \"rabbbit\"\nT = \"rabbit\"\na = cSubS(S,T)\nprint(a)\n","sub_path":"python/Day5_test/pro3.py","file_name":"pro3.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9924425","text":"def add_parameters(request, **kwargs):\n path = request.get_full_path().split('?')\n new_path = path[0]\n current_parameters = {}\n if '?' 
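Both solvers in svm.py lean on one linearization: points on a circle satisfy x² + y² = 2ax + 2by + c with c = r² − a² − b², which is why regressing −(x² + y²) on (x, y) with a linear kernel recovers the center from the coefficients and the radius from the intercept. The same identity gives a closed-form least-squares fit, useful as a baseline for the gradient version (a sketch):

```python
import numpy as np

def fit_circle(x, y):
    # Solve [2x 2y 1] @ [a, b, c]^T = x^2 + y^2 in least squares.
    A = np.column_stack([2 * x, 2 * y, np.ones_like(x)])
    rhs = x**2 + y**2
    (a, b, c), *_ = np.linalg.lstsq(A, rhs, rcond=None)
    return a, b, np.sqrt(c + a**2 + b**2)
```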
in path:\n symbol = '&'\n temp_parameters = path[1]\n for par in temp_parameters.split('&'):\n tmp = par.split('=')\n current_parameters[tmp[0]] = tmp[1]\n else:\n symbol = '?'\n new_path += symbol\n for key, value in kwargs.items():\n current_parameters[key] = value\n for key, value in current_parameters.items():\n new_path += f'{key}={value}&'\n new_path = new_path[:-1]\n return new_path\n","sub_path":"indussystem/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428839062","text":"import numpy as np\n\ndef dists(X,Y):\n result = np.zeros( (X.shape[0], Y.shape[0]), dtype=X.dtype)\n for i in xrange(X.shape[0]):\n for j in xrange(Y.shape[0]):\n result[i,j] = np.sum( (X[i,:] - Y[j,:]) ** 2)\n return result \n\nd = 100\nX = np.random.randn(1000,d)\nY = np.random.randn(200,d)\n\nimport timer \ntimer.compare_perf(dists, [X,Y])\n\n\n","sub_path":"examples/allpairs_distances_loops.py","file_name":"allpairs_distances_loops.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645448925","text":"from decoder.decoder import Decoder\nfrom decoder.bats.mpitchmsg.segments import *\nfrom decoder.bats.mpitchmsg.constants import *\n\n\nclass Decoder(Decoder):\n def __init__(self, opts, next_decoder):\n super(Decoder, self).__init__('ndq/itchp50', opts, next_decoder)\n self.__parse_options(opts)\n self.__unhandledMessages = dict()\n self.__translation = dict()\n self.__frameCount = 0\n self.__msgCount = dict()\n self.__byteCount = 0\n\n def __parse_options(self, opts):\n pass\n\n def on_message(self, inputContext, payload):\n # parse the packet header\n origPayloadLen = len(payload)\n headers, payload = self.decode_segment(UnitHeader, payload)\n if len(headers) is not 1:\n raise ValueError(\"Internal error processing MPitch packet header\")\n header = headers[0]\n\n # update stats\n self.__byteCount += UnitHeader.WireBytes()\n self.__frameCount += 1\n\n # process each message\n for msgIdx in range(0, header['mpitch-hdr-count']):\n # decode the common fields from the messsage payload\n commons, payload = self.decode_segment(CommonFields, payload)\n if len(commons) is not 1:\n raise ValueError(\"Internal error processing MPitch common message fields\")\n common = commons[0]\n\n # grab the message payload & trim the remaining payload\n msgLen = common['mpitch-length'] - CommonFields.WireBytes()\n messagePayload = payload[:msgLen]\n payload = payload[msgLen:]\n # get the msg type\n msgType = common['mpitch-msg-type']\n\n # decode the message payload\n context = {'sequence-number': header['mpitch-hdr-sequence']+msgIdx}\n\n # update stats\n self.__byteCount += msgLen\n self.__msgCount[msgType] = self.__msgCount.get(msgType, 0) + 1\n\n # get the message type & the descriptor for it\n if msgType not in MsgTypes:\n self.__unhandledMessages[msgType] = self.__unhandledMessages.get(msgType, 0) + 1\n else:\n messages, messagePayload = self.decode_segment(MsgTypes[msgType][0], messagePayload)\n if len(messages) is not 1:\n raise ValueError(\"Internal error processing MPitch message\")\n message = messages[0]\n\n # get the message type name\n typeName = MsgTypes[msgType][1]\n\n message['mpitch-message-type'] = typeName\n context.update(message)\n\n # send to next\n context.update(inputContext)\n context.update(header)\n context.update(common)\n self.dispatch_to_next(context, 
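The nested-loop `dists` in allpairs_distances_loops.py does n·m Python-level iterations; broadcasting (or `einsum`) pushes the same squared-Euclidean computation into numpy:

```python
import numpy as np

def dists(X, Y):
    # (n, 1, d) - (1, m, d) broadcasts to (n, m, d); contract the last axis.
    diff = X[:, None, :] - Y[None, :, :]
    return np.einsum('nmd,nmd->nm', diff, diff)

X = np.random.randn(1000, 100)
Y = np.random.randn(200, 100)
D = dists(X, Y)   # shape (1000, 200)
```

Separately, `add_parameters` in the parameters.py record tests `'?' in path` after `path = request.get_full_path().split('?')` — membership of the literal string `'?'` in a list of fragments, which is always False — so the intended branch is unreachable; checking `len(path) > 1` (or testing the un-split URL) expresses what was meant.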
messagePayload)\n\n def summarize(self):\n unhandled = dict([ (MsgTypes[k][1], v) for k,v in self.__unhandledMessages.iteritems()])\n msgCounts = dict([ (MsgTypes[k][1], v) for k,v in self.__msgCount.iteritems()])\n return {\n 'mpitch-unhandled-messages': unhandled,\n 'mpitch-translation-entries': len(self.__translation),\n 'mpitch-frames': self.__frameCount,\n 'mpitch-bytes': self.__byteCount,\n 'mpitch-msg-counts': msgCounts\n }\n","sub_path":"decoder/bats/mpitch.py","file_name":"mpitch.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"465384003","text":"import datetime # we will use this for date objects\n\n# from datetime import datetime\n\nclass Person:\n\n def __init__(self, name, surname, birthdate, address, telephone, email):\n self.name = name\n self.surname = surname\n self.birthdate = birthdate\n\n self.address = address\n self.telephone = telephone\n self.email = email\n\n self.age = self._age()\n\n self.last_calculated_age = datetime.datetime.now()\n\n # self.age = self.age()\n\n def _age(self):\n today = datetime.date.today()\n age = today.year - self.birthdate.year\n\n if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):\n age -= 1\n\n return age\n\n def age(self):\n if datetime.datetime.now() - self.last_calculated_age > \\\n datetime.timedelta(days=365):\n return self._age()\n else:\n return self.age\n\n def lalala(self):\n pass\n\n # @property\n # def age(self):\n # return self._age()\n #\n # @age.setter\n # def age(self, value):\n # self._age = value\n\n\ndef print_all_attributes(obj):\n for x in dir(obj):\n if hasattr(obj, x):\n print(f'{x}: {getattr(obj, x)}')\n\n\ndef print_all_using_dict(obj):\n for k, v in obj.__dict__.items():\n print(f'{k}: {v}')\n\n\nif __name__ == \"__main__\":\n person = Person(\n \"Jane\",\n \"Doe\",\n datetime.date(1992, 3, 12), # year, month, day\n \"No. 
12 Short Street, Greenville\",\n \"555 456 0987\",\n \"jane.doe@example.com\"\n )\n print('\\n'.join([f'{key}: {value}' for key, value in Person.__dict__.items()]))\n","sub_path":"object_oriented_programming/exercise_5.py","file_name":"exercise_5.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522817524","text":"import openhtf as htf\nfrom fake import get_session\nimport fake\n\n\n# tests need not be declared within a class; they can be reused\n@htf.measures(htf.Measurement(''))\ndef measure_voltage():\n pass\n\n\nclass DOMTest():\n @htf.measures(htf.Measurement('status').equals('OK', type=str))\n def iceboot(test, dom=None, FAKEresults=None):\n # get data from outside the test (including config)\n #test.logger.info('Running iceboot on device: {}'.format(test.test_record.metadata['device']))\n\n test.logger.info('Metadata Keys: {}'.format(test.test_record.metadata.keys()))\n results = fake.do_something(test, dom)\n ### can explicitly STOP this test phase...\n #try:\n # assert results['status'] == 'OK'\n #except AssertionError:\n # return htf.PhaseResult.STOP\n\n # but the measurement above includes what we expect the \"status\" to be\n test.measurements.status = FAKEresults.get('status', None)\n\n # save arbitrary results with attachments\n #test.attach_from_file('/scratch/some_file.i3file', \n # name='{}-data'.format(test.test_record.metadata['subtype']))\n #test.logger.info('Test Attachments {}'.format(test.attachments))\n\n # add state that persists through tests\n test.state['foo'] = lambda x: [x + '-reticulating', x + '-tickling']\n test.state['bar'] = 'I am a bar'\n\n #response = test.test_record.metadata['websession'].get('https://hercules.icecube.wisc.edu/moni20_single_dom_detail/133255/19-60/')\n\n # Example of passing in a \"requests\" web-session which has authenticated with i3live\n # this is similar to having a \n ws = test.test_record.metadata['websession']\n if ws:\n test.logger.info('Requesting data from hercules.... ')\n response = ws.get('https://hercules.icecube.wisc.edu/get_livepulse/')\n assert response.status_code == 200\n test.logger.info('Got livepulse ({}): {}'.format(response.status_code, response.content[0:50]))\n test.attach('fooDocument', response.content)\n else:\n test.logger.info('Skipping web session test')\n test.attach('fooDocument', 'skipped')\n\n\n #response = session.get(\n #data = response.content\n\n @htf.measures(htf.Measurement('bar'))\n def run_foo_command(test, cmd_args=[], cmd_kwargs={}):\n test.measurements.bar = 'nar'\n \n test.logger.info(\"Running cmd with 'foo' with args={} (set in iceboot)\".format(', '.join(cmd_args)))\n result = test.state['foo'](*cmd_args)\n test.logger.info(\"Got result: {}\".format(result))\n\n att = test.get_attachment('fooDocument')\n for name, a in test.attachments.items():\n test.logger.info('Attachment {}: {}'.format(name, a))\n\n\n try:\n if test.test_record.metadata['device']['hacks'].get('FAIL_FOO', None) == True:\n x = 10 / 0\n except ZeroDivisionError:\n test.logger.error('Error! Failure in flux capacitance')\n return htf.PhaseResult.STOP\n return htf.PhaseResult.CONTINUE\n\n\nclass VapeTest():\n '''\n The below could be abstracted with a custom decorator that was simpler to use?\n\n or we could create a class to generates tests for a particular \"test class\"\n with proper measurements ... 
like a \"DOM\" always has \"Voltage\" and \"Current\"\n etc....\n '''\n # can only be declared once (belongs in global)\n htf.util.conf.declare('i3LiveInTheHaus', default_value='bar', description='test config')\n\n @htf.measures(htf.Measurement('taste', units='X'))\n @htf.measures(htf.Measurement('volts', units='V'))\n def coil(test, resist, wattage):\n m = test.measurements\n assert resist > 0\n if resist * wattage < 4.2:\n m.taste = 'good'\n else:\n m.taste = 'burnt'\n test.measurements.volts = resist * wattage\n test.logger.info('{} volts'.format(m.volts))\n \n\n @htf.measures(htf.Measurement('taste'))\n def puff(test):\n test.measurements.taste = 'delectable'\n test.logger.info('delectable')\n\n @htf.measures(htf.Measurement('power_time_series')\n .with_dimensions('ms', 'V', 'A'))\n @htf.measures(htf.Measurement('average_voltage').with_units('V'))\n @htf.measures(htf.Measurement('average_current').with_units('A'))\n @htf.measures(htf.Measurement('resistance').with_units('ohm').in_range(6, 8))\n def run_mod(test):\n import random\n #test.logger.info('Starting Vape test with {}'.format(test.descriptor.metadata['subtype']))\n vci = 0\n vs = 0.0\n cs = 0.0\n for t in range(10):\n resistance = test.test_record.metadata['device']['hacks']['ohms']\n voltage = 10 + 10.0*t\n vs += voltage\n current = voltage / resistance + 0.01 * random.random()\n cs += current\n vci += 1\n dimensions = (t, voltage, current)\n test.measurements['power_time_series'][dimensions] = 0\n\n # When accessing your multi-dim measurement a DimensionedMeasuredValue\n # is returned.\n dim_measured_value = test.measurements['power_time_series']\n\n # Let's convert that to a pandas dataframe\n #power_df = dim_measured_value.to_dataframe(columns=['ms', 'V', 'A', 'n/a'])\n #test.logger.info('This is what a dataframe looks like:\\n%s', power_df)\n #test.measurements['average_voltage'] = power_df['V'].mean()\n test.measurements['average_voltage'] = float(vs) / float(vci)\n\n # We can convert the dataframe to a numpy array as well\n #power_array = power_df.as_matrix()\n #test.logger.info('This is the same data in a numpy array:\\n%s', power_array)\n #test.measurements['average_current'] = power_array.mean(axis=0)[2]\n test.measurements['average_current'] = float(cs) / float(vci)\n test.measurements['resistance'] = (\n test.measurements['average_voltage'] /\n test.measurements['average_current']\n )\n\n","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455147875","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.db.models import ObjectDoesNotExist\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\nfrom users.models import *\nfrom .forms import *\nfrom django.core.paginator import Paginator\nfrom django.http import Http404\n\n# Create your views here.\n@login_required\ndef index(request):\n home = []\n posts = Post.objects.all()\n for post in posts:\n homeItem = {}\n homeItem['post'] =post\n try:\n homeItem['profile'] = Profile.objects.get(user = post.user)\n homeItem['comments'] = Comment.objects.filter(post = post)\n\n except ObjectDoesNotExist:\n print(\"Couldn't retrive profile or comments\")\n home.append(homeItem)\n\n paginator = Paginator(home, 10)\n\n page_number = request.GET.get('page')\n page_obj = 
paginator.get_page(page_number)\n return render(request, 'feed/index.html', {'page_obj': page_obj})\n\n\n@login_required\ndef post(request,id):\n form = CommentForm()\n if request.method == 'POST':\n form = CommentForm(data=request.POST)\n if form.is_valid():\n cleanForm = form.cleaned_data\n user = request.user\n post = Post.objects.get(id=id)\n comment = Comment(user=user,post=post,comment=cleanForm['comment'])\n comment.save()\n\n return HttpResponseRedirect(reverse('index'))\n return render(request,'feed/post.html',{'id':id,'form':form})\n\n homeItem = {}\n try:\n post = Post.objects.get(id=id)\n homeItem['post'] = post\n homeItem['profile'] = Profile.objects.get(user=post.user)\n homeItem['comments'] = []\n comments = list(Comment.objects.filter(post=post))\n for item in comments:\n commentDict = {'comment':item}\n profile = Profile.objects.get(user=item.user)\n commentDict['profile'] = profile\n homeItem['comments'].append(commentDict)\n except ObjectDoesNotExist:\n print(\"Failed to fetch profile or comments\")\n homeItem['comments'] = []\n\n return render(request,'feed/post.html',{'id':id,'form':form,'homeItem':homeItem})\n\n\n@login_required\ndef profile(request,username):\n try:\n user = User.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n except ObjectDoesNotExist:\n profile = None\n return render(request, 'feed/profile.html', {'profile':profile,'username':user.get_username()})\n\n@login_required\ndef editprofile(request):\n \n form = None\n if request.method == 'POST':\n form = EditProfileForm(data=request.POST,files=request.FILES)\n\n if form.is_valid():\n cleanForm = form.cleaned_data\n user = User.objects.get(username=request.user.get_username())\n user.username = cleanForm['username']\n user.save()\n profile = Profile(user=user,bio=cleanForm['bio'],image=cleanForm['image'])\n profile.save()\n return HttpResponseRedirect(reverse('index'))\n\n return render(request, 'feed/editprofile.html',{'form':form})\n\n try:\n user = User.objects.get(username=request.user.get_username())\n profile = Profile.objects.get(user=user)\n form = EditProfileForm(data={'username':user.get_username(),'bio':profile.get_bio(),'image':profile.get_image()})\n except ObjectDoesNotExist:\n form = EditProfileForm(data={'username':request.user.get_username()})\n return render(request, 'feed/editprofile.html',{'form':form})\n\n\n@login_required\ndef newpost(request):\n form = NewPostForm()\n if request.method == 'POST':\n form = NewPostForm(data=request.POST, files= request.FILES)\n if form.is_valid():\n cleanForm = form.cleaned_data\n post = Post(user= request.user, image=cleanForm['image'],caption=cleanForm['caption'])\n post.save()\n\n return HttpResponseRedirect(reverse('index'))\n return render(request,'feed/newpost.html',{'form': form})\n return render(request,'feed/newpost.html',{'form': form})","sub_path":"feed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"488200044","text":"import csv\n\nwith open(\"./astronauts.csv\", newline='') as csvfile: \n reader = csv.DictReader(csvfile) \n x = 1\n majorId = 1\n fieldnames = ['AstronautId', 'MajorId']\n writer = csv.DictWriter(open('gradMajorsRel.csv', 'w'), fieldnames=fieldnames)\n writer.writeheader()\n majors = {}\n for row in reader:\n major = row['Graduate Major'] \n if major not in majors:\n majors[major] = majorId\n majorId += 1\n csvData = {'AstronautId' : x, 'MajorId' : majors[major]}\n 
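 # A compact alternative to the membership check above, for reference:
 # majors.setdefault(major, len(majors) + 1) returns the existing id or
 # inserts the next dense id in one step, since majorId always equals
 # len(majors) + 1 at the moment a new major is first seen.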
writer.writerow(csvData)\n x += 1\n\t\n\n# AstronautId, Name, BirthDate, BirthPlace, Group, Gender, Status, Year, DeathMission, DeathDate, SpaceFlightHours, SpaceFlights, SpaceWalks, Rank, Service\n\n# Name,Year,Group,Status,Birth Date,Birth Place,Gender,Alma Mater,Undergraduate Major,Graduate Major,Military Rank,Military Branch,Space Flights,Space Flight (hr),Space Walks,Space Walks (hr),Missions,Death Date,Death Mission\n\n","sub_path":"gradMajorsRel.py","file_name":"gradMajorsRel.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41808157","text":"from pkgs.injest import *\nfrom pkgs.digest import *\n\ndef main():\n\ti = injest.Injester('./DataStorage/json')\n\td = digest.Digester('./db.sqlite3', './DataStorage/json')\n\t\n#\ti.DataCrawler('class')\n#\ti.DataCrawler('race')\n#\ti.DataCrawler('3v3')\n#\ti.DataCrawler('rbg')\n\n\tciJson = d.GetJsonData('ClassInfo')\n\tarena33Json = d.GetJsonData('ArenaLB33')\n\n\td.InsertData('arena_arenalb33', arena33Json)\n\n\treturn\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"WebService/job_scheduler.py","file_name":"job_scheduler.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362096891","text":"from django.urls import path\nfrom .views import show_cars, taxi_new_car, taxi_edit_car\n\n\nurlpatterns = [\n\n\tpath('', \t\t\t\t\t\t\tshow_cars, name='show_cars'),\n\tpath('new-car/', \t\t\t\t\ttaxi_new_car, name= 'taxi_new_car'),\n\tpath('edit-car//', \t\ttaxi_edit_car, name= 'taxi_edit_car')\n\n\t\t\t ]\n","sub_path":"carsapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"463564782","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\n\n\nclass Continuous_Casting_2D_HeatTransferModel():\n def __init__(self, nx, ny, dt, continuous_caster):\n self.nx, self.ny = nx, ny\n self.dt = dt\n self.time_simulation = int(abs(continuous_caster[\"continuous_caster_length\"] / continuous_caster[\"v_cast\"]))\n self.continuous_caster = continuous_caster\n \n def read_input_data(self):\n self.components, self.h_coolsection, self.coolsection_length, self.measured_value, self.measured_location = [], [], [], [], []\n input_parameters_path = os.getcwd()\n with open(input_parameters_path + \"/major_components_steel_2d.csv\",\"r\") as f:\n for line in f.readlines():\n [key, value] = line.strip().split(',')\n self.components.append([key, float(value)])\n self.components = dict(self.components)\n \n with open(input_parameters_path + \"/continuous_caster_2d.csv\",\"r\") as f:\n for line in f.readlines():\n [length, h] = line.strip().split(',')\n self.h_coolsection.append(float(h))\n self.coolsection_length.append(float(length))\n self.h_coolsection = self.h_coolsection[1:]\n self.h_coolsection = np.array(self.h_coolsection) \n self.h_coolsection = self.h_coolsection.reshape(self.h_coolsection.shape[0], 1)\n self.n_dim = self.h_coolsection.size\n \n with open(input_parameters_path + \"/measured_temperature_2d.csv\",\"r\") as f:\n for line in f.readlines():\n [location, value] = line.strip().split(',')\n self.measured_location.append(float(location))\n self.measured_value.append(float(value))\n self.measured_temperature = {\"location\":np.array(self.measured_location), \"value\": np.array(self.measured_value)}\n \n 
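 # The cooling sections are specified as consecutive segment lengths; the
 # helper below converts them into absolute start/end positions along the
 # strand with a running (prefix) sum, which compute_h later scans to pick
 # the heat-transfer coefficient for a given distance from the meniscus.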
def pre_process_coolsection(self):\n length_temp = [sum(self.coolsection_length[0:i+1]) for i in range(len(self.coolsection_length))]\n self.coolsection_start, self.coolsection_end = length_temp[0:-1], length_temp[1:]\n \n def init_mesh(self):\n self.mesh = {\"nx\":int(self.nx), \"ny\":int(self.ny), \"dx\":0, \"dy\":0,\"tnpts\":0, \"dt\": self.dt}\n self.mesh[\"dx\"] = self.continuous_caster[\"steel_length\"] / (self.mesh[\"nx\"] - 1)\n self.mesh[\"dy\"] = self.continuous_caster[\"steel_thickness\"] / (self.mesh[\"ny\"] - 1)\n self.mesh[\"tnpts\"] = int(self.time_simulation / self.mesh[\"dt\"])\n \n def init_condition(self):\n self.temperature_field = np.zeros((self.mesh[\"nx\"], self.mesh[\"ny\"], self.mesh[\"tnpts\"]))\n self.temperature_field[:,:,0] = self.continuous_caster[\"temperature_cast\"] * np.ones(self.temperature_field[:,:,0].shape)\n \n def solid_liquid_temperature(self):\n temperature_l = 1536.6 - 88.0 * self.components[\"C\"] - 8* self.components[\"Si\"] - 5* self.components[\"Mn\"]\n temperature_s = 1527.0 - 187.0 * self.components[\"C\"] - 700.0* self.components[\"S\"] - 500.0* self.components[\"P\"] - 20.5 * self.components[\"Si\"] - 6.5 * self.components[\"Mn\"] - 5.5 * self.components[\"Al\"]\n return temperature_l, temperature_s\n\n def compute_fs(self, temperature_point, temperature_l, temperature_s):\n if(temperature_point >= temperature_l):\n fs = 0.0\n if(temperature_point > temperature_s and temperature_point < temperature_l):\n fs = (temperature_l - temperature_point) / (temperature_l - temperature_s)\n if(temperature_point <= temperature_s):\n fs = 1.0\n return fs\n \n def compute_physicial_parameters(self, temperature_point): \n L = 268000.0\n temperature_l, temperature_s = self.solid_liquid_temperature()\n fs = self.compute_fs(temperature_point, temperature_l, temperature_s)\n if(temperature_point >= temperature_l):\n ce, k, pho = 540.0, 50.0, 7250\n if(temperature_point > temperature_s and temperature_point < temperature_l):\n ce, k ,pho = 540.0 + L / (temperature_l - temperature_s), fs * 25 + (1 - fs) * 50, 7250\n if(temperature_point < temperature_s):\n ce, k, pho = 540.0, 28, 7250\n steel_para = {\"ce\":ce, \"k\":k, \"pho\":pho, \"temperature_s\":temperature_s, \"temperature_l\":temperature_l}\n return steel_para\n \n def difference_2d_onestep(self, tstep):\n for i in range(self.mesh[\"nx\"]):\n for j in range(self.mesh[\"ny\"]):\n steel_para = self.compute_physicial_parameters(self.temperature_field[i][j][tstep])\n ax, ay = steel_para['k'] * self.mesh[\"dt\"] / (steel_para['ce'] * steel_para['pho'] * (self.mesh[\"dx\"])**2), steel_para['k'] * self.mesh[\"dt\"] / (steel_para['ce'] * steel_para['pho'] * (self.mesh[\"dy\"])**2)\n if not(i == 0) and not(i == (self.mesh[\"nx\"] - 1)) and not(j == 0) and not(j == (self.mesh[\"ny\"] - 1)):\n temperature_up, temperature_down = self.temperature_field[i+1][j][tstep], self.temperature_field[i-1][j][tstep]\n temperature_left, temperature_right = self.temperature_field[i][j-1][tstep], self.temperature_field[i][j+1][tstep]\n temperature_middle = self.temperature_field[i][j][tstep]\n else:\n temperature_up, temperature_down, temperature_middle, temperature_left, temperature_right = self.boundary_condition(i, j, tstep, steel_para)\n self.temperature_field[i][j][tstep+1] = ax *temperature_up + ax * temperature_down + (1 - 2*ax -2*ay) * temperature_middle + ay *temperature_left + ay * temperature_right\n \n def boundary_condition(self, i, j, tstep, steel_para):\n temperature_water, h = 30.0, self.compute_h(self.mesh['dt'] 
* tstep * self.continuous_caster['v_cast'])\n if i == 0 and not(j == 0) and not(j == self.mesh[\"ny\"] - 1):\n temperature_up, temperature_middle = self.temperature_field[i+1][j][tstep], self.temperature_field[i][j][tstep]\n temperature_left, temperature_right = self.temperature_field[i][j-1][tstep], self.temperature_field[i][j+1][tstep]\n temperature_down = temperature_up - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if i == self.mesh[\"nx\"] - 1 and not(j == 0) and not(j == self.mesh[\"ny\"] - 1):\n temperature_down, temperature_middle = self.temperature_field[i-1][j][tstep], self.temperature_field[i][j][tstep]\n temperature_left, temperature_right = self.temperature_field[i][j-1][tstep], self.temperature_field[i][j+1][tstep]\n temperature_up = temperature_down - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if j == 0 and not(i == 0) and not(i == self.mesh[\"nx\"] - 1):\n temperature_up, temperature_down = self.temperature_field[i+1][j][tstep], self.temperature_field[i-1][j][tstep]\n temperature_middle, temperature_right = self.temperature_field[i][j][tstep], self.temperature_field[i][j+1][tstep]\n temperature_left = temperature_right - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if j == self.mesh[\"ny\"] - 1 and not(i==0) and not(i == self.mesh[\"nx\"] - 1):\n temperature_up, temperature_down = self.temperature_field[i+1][j][tstep], self.temperature_field[i-1][j][tstep]\n temperature_middle, temperature_left = self.temperature_field[i][j][tstep], self.temperature_field[i][j-1][tstep]\n temperature_right = temperature_left - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if i == 0 and j == 0:\n temperature_up, temperature_middle = self.temperature_field[i+1][j][tstep], self.temperature_field[i][j][tstep]\n temperature_right = self.temperature_field[i][j+1][tstep]\n temperature_down = temperature_up - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n temperature_left = temperature_right - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if i == 0 and j == self.mesh[\"ny\"] - 1:\n temperature_up, temperature_middle = self.temperature_field[i+1][j][tstep], self.temperature_field[i][j][tstep]\n temperature_left = self.temperature_field[i][j-1][tstep]\n temperature_down = temperature_up - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n temperature_right = temperature_left - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if i == self.mesh[\"nx\"] - 1 and j == 0:\n temperature_down, temperature_middle = self.temperature_field[i-1][j][tstep], self.temperature_field[i][j][tstep]\n temperature_right = self.temperature_field[i][j+1][tstep]\n temperature_up = temperature_down - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n temperature_left = temperature_right - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n if i == self.mesh[\"nx\"] - 1 and j == self.mesh[\"ny\"] - 1:\n temperature_down, temperature_middle = self.temperature_field[i-1][j][tstep], self.temperature_field[i][j][tstep]\n temperature_left = self.temperature_field[i][j-1][tstep]\n temperature_up = temperature_down - 2 * self.mesh[\"dx\"] * h * (temperature_middle - 
temperature_water)/ (steel_para[\"k\"])\n temperature_right = temperature_left - 2 * self.mesh[\"dx\"] * h * (temperature_middle - temperature_water)/ (steel_para[\"k\"])\n \n return temperature_up, temperature_down, temperature_middle, temperature_left, temperature_right\n \n def set_h_coolsection(self, h_coolsection):\n self.h_coolsection = h_coolsection.copy()\n \n def compute_h(self, y):\n coolsection, h = len(self.coolsection_start), 0.0\n if y < self.coolsection_start[0]:\n h = 1400.0 + y * (800 - 1400) / self.coolsection_start[0]\n return h\n for i in range(coolsection):\n if y > self.coolsection_start[i] and y < self.coolsection_end[i]:\n h = self.h_coolsection[i]\n return h\n \n def solve(self):\n for current_tstep in range(self.mesh[\"tnpts\"] - 1):\n self.difference_2d_onestep(current_tstep)\n \n def compute_simulated_temperature(self):\n measured_time_step = ((self.measured_temperature[\"location\"] / self.continuous_caster[\"v_cast\"])/ self.mesh[\"dt\"]).astype(int)\n self.simulated_temperature = self.temperature_field[0][int((self.mesh['ny'] - 1) / 2)][measured_time_step].copy()\n \n def compute_obj_values(self, h_coolsection):\n self.set_h_coolsection(h_coolsection)\n self.solve()\n self.compute_simulated_temperature()\n return ((self.simulated_temperature - self.measured_temperature[\"value\"])**2).sum() / self.simulated_temperature.size\n \n def plot_surface_temperature(self):\n x = self.continuous_caster[\"v_cast\"] * self.dt * (np.linspace(1,self.mesh[\"tnpts\"],self.mesh[\"tnpts\"]).astype(int))\n plt.plot(x, self.temperature_field[0, int((self.ny - 1)/2), :], label = \"simulated_temperature\")\n plt.scatter(self.measured_temperature[\"location\"], self.measured_temperature[\"value\"], label = \"measured_temperature\")\n plt.xlabel(\"the distance from meniscus(m)\")\n plt.ylabel(\"surface temperature(C)\")\n plt.legend()\n plt.show()\n \n def plot_temperature_field(self): \n x, y = np.meshgrid(self.mesh[\"dx\"] * np.arange(self.mesh[\"nx\"]), self.mesh[\"dy\"] * np.arange(self.mesh[\"ny\"]))\n plt.contourf(x,y,self.temperature_field[:,:,-1])\n plt.xlabel(\"x(m)\")\n plt.ylabel(\"y(m)\")\n plt.colorbar()\n plt.show()\n\n\n","sub_path":"IHCP_Continuous_Casting_2D/IHCP_Continuous_Casting_2D_Class.py","file_name":"IHCP_Continuous_Casting_2D_Class.py","file_ext":"py","file_size_in_byte":11967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460073263","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\nimport paramiko\n\nprivate_key_pat = '/root/.ssh/id_rsa'\nkey = paramiko.RSAKey.from_private_key_file(private_key_pat)\n\nt = paramiko.Transport(('192.168.2.23', 22))\nt.connect(username='root', pkey=key)\n\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.get('/tmp/test1.txt', '/root/test1.txt')\nt.close()","sub_path":"learn/day06/paramiko_manager/ssh_ftp_download.py","file_name":"ssh_ftp_download.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"450745763","text":"from createRandomGraph import createRandom\r\nfrom directedGraph import directedGraph\r\nfrom getDataFromFile import getData, writeData\r\n\r\n\r\nclass UI:\r\n def __init__(self, graph):\r\n self.graph = graph\r\n\r\n def ui_change_cost(self):\r\n x = int(input(\">>\"))\r\n y = int(input(\">>\"))\r\n print( self.graph.getCostOfEdge(x, y))\r\n cost = int(input(\"Enter new cost: \"))\r\n self.graph.changeCostOfEdge(x, y, cost)\r\n\r\n def 
ui_dOut_for_key(self):\r\n x = int(input(\">>\"))\r\n print(\"These is the number of vertices leaving from vertex {} :\".format(x))\r\n print(self.graph.getOutDegreeOfVertex(x))\r\n\r\n def ui_dIn_for_key(self):\r\n x = int(input(\">>\"))\r\n print(\"These are the keys of the vertices coming to vertex {}:\".format(x))\r\n print(self.graph.getInDegreeOfVertex(x))\r\n\r\n def ui_is_edge(self):\r\n x = int(input(\">>\"))\r\n y = int(input(\">>\"))\r\n print(self.graph.isEdge(x, y))\r\n\r\n def ui_add_edge(self):\r\n x = int(input(\">>\"))\r\n y = int(input(\">>\"))\r\n cost = int(input(\">>\"))\r\n self.graph.addEdge(x, y, cost)\r\n\r\n\r\n def ui_is_vertex(self):\r\n x = int(input(\">>\"))\r\n print(self.graph.isVertex(x))\r\n\r\n def ui_remove_edge(self):\r\n x = int(input(\">>\"))\r\n y = int(input(\">>\"))\r\n self.graph.removeEdge(x, y)\r\n\r\n\r\n def ui_remove_vertex(self):\r\n print(\"ATTENTION: This vertex will no longer exist and every other vertex will stay the same !\")\r\n x = int(input(\">>\"))\r\n self.graph.removeVertex(x)\r\n\r\n def ui_add_vertex(self):\r\n x = int(input(\">>\"))\r\n self.graph.addVertex(x)\r\n\r\n def ui_get_all_vertices(self):\r\n print(\"These is the number of vertices: {}\".format(self.graph.getNumberOfVertices()))\r\n\r\n\r\n def ui_outdegree(self):\r\n x = int(input(\">>\"))\r\n print(\"These is the number of vertices leaving from vertex {} :\".format(x))\r\n print(self.graph.getOutDegreeOfVertex(x))\r\n\r\n def ui_indegree(self):\r\n x = int(input(\">>\"))\r\n print(\"These are the keys of the vertices coming to vertex {}:\".format(x))\r\n print(self.graph.getInDegreeOfVertex(x))\r\n\r\n def ui_showVertices(self):\r\n print(self.graph.iterateVertices())\r\n\r\n def ui_parse_outbound_edges(self):\r\n x = int(input(\">>\"))\r\n for element in self.graph.parseOutboundEdgesOfVertex(x):\r\n print(element)\r\n\r\n def ui_parse_inbound_edges(self):\r\n x = int(input(\">>\"))\r\n for element in self.graph.parseInboundEdgesOfVertex(x):\r\n print(element)\r\n\r\n def ui_change_graphs(self):\r\n # TODO: -> this\r\n print(\"You just created a copy of this function !\")\r\n self.graph.createCopyOfGraph()\r\n print(self.graph)\r\n print(self.graph.copy)\r\n\r\n print(\"You just changed the graphs between them \")\r\n self.graph, self.graph.copy = self.graph.copy, self.graph\r\n\r\n print(self.graph)\r\n print(self.graph.copy)\r\n\r\n def ui_show_isolated_vertices(self):\r\n print(self.graph.getIsolatedVertices())\r\n\r\n def ui_warshall_algorithm(self):\r\n x = int(input(\">>\"))\r\n y = int(input(\">>\"))\r\n path, cost = self.graph.getPath(x, y)\r\n if path == []:\r\n print(\"No path found!\")\r\n else:\r\n print(\"This is the path {}\".format(path))\r\n print(\"This is the minimum cost {}\".format(cost))\r\n\r\n def ui_kruskal_algorithm(self):\r\n print(self.graph.KruskalAlgorithm())\r\n\r\n def ui_show_cyclic_graph(self):\r\n self.graph.showCycle()\r\n\r\n def ui_show_hamiltonian_cycle(self):\r\n self.graph.hamCycle()\r\n\r\n @staticmethod\r\n def show_menu():\r\n print(\"\"\" \r\n q: Quit and save to 'output.txt'\r\n 1: Check edge,\r\n 2: Add edge,\r\n 3: Check vertex,\r\n 4: Remove edge,\r\n 5: Remove vertex,\r\n 6: Add vertex,\r\n 7: Change the cost of an edge,\r\n 8: Show the number of vertices,\r\n 9: Show the inbound degree of a vertex,\r\n 10: Show the outbound degree of a vertex,\r\n 11: Show the vertices,\r\n 12: Show inbound edges of vertex,\r\n 13: Show outbound edges of vertex.\r\n 15: Show path between two vertices.\r\n 16: Show isolated 
vertices.\r\n 17: Show the path with Warshall Algorithm. \r\n 18: Show Kruskal s algorithm.\r\n 19: Show if it is a cyclic graph\r\n 20: Show hamilitonian cycle\r\n \r\n \"\"\")\r\n\r\n def menu(self):\r\n commands = {\r\n 1: self.ui_is_edge,\r\n 2: self.ui_add_edge,\r\n 3: self.ui_is_vertex,\r\n 4: self.ui_remove_edge,\r\n 5: self.ui_remove_vertex,\r\n 6: self.ui_add_vertex,\r\n 7: self.ui_change_cost,\r\n 8: self.ui_get_all_vertices,\r\n 9: self.ui_indegree,\r\n 10: self.ui_outdegree,\r\n 11: self.ui_showVertices,\r\n 12: self.ui_parse_inbound_edges,\r\n 13: self.ui_parse_outbound_edges,\r\n 14: self.ui_change_graphs,\r\n 16: self.ui_show_isolated_vertices,\r\n 17: self.ui_warshall_algorithm,\r\n 18: self.ui_kruskal_algorithm,\r\n 19:self.ui_show_cyclic_graph,\r\n 20: self.ui_show_hamiltonian_cycle,\r\n 'q': quit,\r\n\r\n }\r\n while True:\r\n try:\r\n self.show_menu()\r\n command = input(\"Enter your command: \")\r\n if command == \"q\":\r\n\r\n if(self.graph.getNumberOfVertices() == 0):\r\n writeData(\"random_graph1.txt\", self.graph, False)\r\n else:\r\n writeData(\"output.txt\",self.graph, True)\r\n quit()\r\n if int(command) not in commands:\r\n raise ValueError(\"Invalid command\")\r\n command = int(command)\r\n commands[command]()\r\n\r\n except Exception as e:\r\n print(e)\r\n","sub_path":"GA/001/userInterface.py","file_name":"userInterface.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"497601994","text":"# -*- coding: utf-8 -*-\nimport os\nimport shutil\nimport win32clipboard\nimport win32con\nimport urllib.request\nimport sys\nimport re\nfrom time import sleep\n\nimport smtplib\nimport email.mime.text\nfrom email.utils import formataddr\n\n\nreportStatus = \"\"\nreportTitle = \"\"\nreportContent = \"\"\n\n\nemailSender = \"developer-send@qq.com\"\nemailReceiver = \"developer-receive@qq.com\"\nemailServer = \"smtp.qq.com\"\nemailPw = \"pcawfmdhyfvaecea\"\n\ndef GetClipboardText():\n win32clipboard.OpenClipboard()\n text = win32clipboard.GetClipboardData(win32con.CF_TEXT)\n win32clipboard.CloseClipboard()\n text = text.decode('utf-8')\n return text\n\ndef SetClipboardText(text):\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardData(win32con.CF_TEXT, text)\n win32clipboard.CloseClipboard()\n\ndef IsUrl(text):\n # http://\n if text[0:7] == \"http://\":\n return True\n elif text[0:8] == \"https://\":\n return True\n return False\n\ndef SendEmailAndQuit(err=False, msg=None):\n if msg == None:\n msg = \"请尝试重新复制链接!\"\n printHelp(msg)\n \n msg = email.mime.text.MIMEText(reportContent)\n msg['From'] = formataddr([\"yif-dev-send\", emailSender])\n msg['To'] = formataddr([\"pasteUrl2-receiver\", emailReceiver])\n msg['Subject'] = \"[pasteUrl2] \" + reportStatus + reportTitle\n\n try:\n server = smtplib.SMTP_SSL(emailServer, port=465)\n server.login(emailSender, emailPw)\n response = server.sendmail(emailSender, emailReceiver, msg.as_string())\n server.quit()\n# except Exception as e:\n# print(e)\n# print(response)\n finally:\n quit()\n\ndef printHelp(msg):\n print()\n print()\n print(msg)\n print(\"\\n\\n如果问题重复出现,请联系您的外孙\")\n sleep(5)\n\n#########################################\n\nprint(\"请稍等...\")\n\nclipboardText = GetClipboardText()\n\n\nif not IsUrl(clipboardText):\n reportStatus += \"ERR: \"\n reportTitle += \"非网址\"\n reportContent += \"剪切板内容\" + '\"' + clipboardText + '\"'\n reportContent += \"不是一个有效的网址\"\n 
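 # SendEmailAndQuit never returns: it prints a help message, mails the
 # report via smtplib, and always calls quit() in its finally block, so a
 # clipboard value that is not a URL stops the script right here.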
SendEmailAndQuit(err=True)\n\nurl = clipboardText\n\ntry:\n request = urllib.request.urlopen(url)\n responseContent = request.read().decode('utf-8')\nexcept Exception as exception:\n reportStatus += \"ERR: \"\n reportTitle += \"无法获取网页\"\n reportContent += url\n reportContent += \"\\n\"\n reportContent += str(exception)\n SendEmailAndQuit(err=True)\n\n\npatternForTitle = \"(?s)<title>\s*?(.+?)\s*?</title>\"\npatternForXIAONIANGAO = '\s*?(.+?)\s*?
'\n\nsearchPattern = \"\"\nif \"xiaoniangao.cn\" in url:\n searchPattern = patternForXIAONIANGAO\nelse:\n searchPattern = patternForTitle\n\nmatches = re.search(searchPattern, responseContent)\n\n\n\n\ntitle = \"\"\ntry:\n title = matches.group(1)\nexcept Exception as exception:\n reportStatus += \"ERR: \"\n reportTitle += \"未找到标题\"\n reportContent += url\n reportContent += \"\\n\"\n reportContent += str(exception)\n SendEmailAndQuit(err=True)\n \ntry:\n fileContent = \"[InternetShortcut]\\nURL=\" + url\n fileName = title + \".url\"\n try:\n existingFile = open(fileName, \"r\")\n except:\n pass\n else:\n reportStatus += \"ERR: \"\n reportTitle += \"文件已经存在\"\n reportContent += url\n reportContent += \"\\n\"\n #reportContent += str(exception)\n SendEmailAndQuit(err=True, msg=\"文章已经保存过了,不用再保存了!\")\n \n saveFile = open(fileName, \"w\")\n saveFile.write(fileContent)\n saveFile.close()\nexcept Exception as exception:\n reportStatus += \"ERR: \"\n reportTitle += \"处理文件时遇到问题\"\n reportContent += url\n reportContent += \"\\n\"\n reportContent += str(exception)\n SendEmailAndQuit(err=True, msg=\"程序遇到错误\")\n\nreportStatus += \"成功: \"\nreportTitle += title\nreportContent += url\nSendEmailAndQuit()\n\n##\n","sub_path":"paste-url-2.py","file_name":"paste-url-2.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"72760624","text":"\"\"\"\r\nPrototyp 1:\r\n- Beräkning av belysningsmatris.\r\n\r\nPrototyp 2:\r\n- Konvertering av belysningsmatrisen till teckenrepresentation, samt utskrift av denna.\r\n- Tog bort xycoord-metod -> kortare kod.\r\n- Omvändning av matris i y-led för att stämma med vanligt koordinatsystem\r\n- Tydligare kommenteringsformat\r\n\r\nPrototyp 3:\r\n- Användarinput för radie och ljuspunkter, med felhantering av värden.\r\n- Hantera radier < 1\r\n- Hantera ljusvärden utanför klotet (negativt under rottecken i z_calc).\r\n\r\nPrototyp 4:\r\n- Lade till en metod för att lägga till en enkel skugga\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\nfrom graphics import *\r\n\r\n\r\n# CLASSES #\r\n\r\nclass Sphere:\r\n # Declares/defines attributes\r\n def __init__(self, radius):\r\n if radius < 1:\r\n raise ValueError('Radius not >= 1')\r\n self.radius = radius\r\n self.lighting = [[None for x in range(self.radius*6+1)] for y in range(self.radius*6+1)]\r\n\r\n # Printable version of the sphere, light intensity represented by less or more covering characters\r\n def __str__(self):\r\n printable = ''\r\n\r\n for y in range(len(self.lighting)):\r\n for x in range(len(self.lighting[y])):\r\n if self.lighting[y][x] is None:\r\n printable += ' ' # Double characters used to make up for chars non-cubic ratio\r\n elif 1.00 >= self.lighting[y][x] > 0.900:\r\n printable += ' '\r\n elif 0.900 >= self.lighting[y][x] > 0.700:\r\n printable += '..'\r\n elif 0.700 >= self.lighting[y][x] > 0.500:\r\n printable += '--'\r\n elif 0.500 >= self.lighting[y][x] > 0.300:\r\n printable += '++'\r\n elif 0.300 >= self.lighting[y][x] > 0.0001:\r\n printable += '**'\r\n elif -1.000 <= self.lighting[y][x] <= 0.0001:\r\n printable += '##'\r\n elif self.lighting[y][x] == -10:\r\n printable += 'SS'\r\n printable += '\\n'\r\n\r\n return printable\r\n\r\n # Calculates z point (3d) on sphere from x and y coordinates (2d)\r\n def __calc_z(self, x, y):\r\n if (self.radius**2 - x**2 - y**2) < 0:\r\n return 0 # Take care of points outside Sphere surface\r\n else:\r\n return sqrt(self.radius**2 - x**2 - y**2)\r\n\r\n # Calculates light 
intensity in point(x,y,z)\r\n def __calc_light(self, x, light_x, y, light_y, z, light_z):\r\n return round((x*light_x + y*light_y + z*light_z)/self.radius**2, 3) # Rounding for easier comparisons\r\n\r\n def __in__ellipse(self, x, y, radius_x, radius_y):\r\n cosv = x / sqrt(x**2 + y**2)\r\n sinv = y / sqrt(x**2 + y**2)\r\n\r\n return (cosv*x + sinv*y)**2 / radius_x + (sinv*x + cosv*y)**2 / radius_y <= 1.000\r\n\r\n # Generates the light intensity for all points(x,y,z) on the visible sphere using input\r\n def generate(self, light_x, light_y):\r\n light_y *= -1 # Flip matrix horizontally\r\n light_z = self.__calc_z(light_x, light_y)\r\n\r\n if light_z < 0.0001:\r\n raise ValueError('Lighting coordinates outside sphere')\r\n\r\n for y in range(self.radius*2+1):\r\n for x in range(self.radius*2+1):\r\n z = self.__calc_z(x - self.radius, y - self.radius)\r\n if not z < 0.0001:\r\n self.lighting[y + self.radius*2][x + self.radius*2] \\\r\n = self.__calc_light(x - self.radius, light_x, y - self.radius, light_y, z, light_z)\r\n\r\n self.__add_shadow(light_x, light_y)\r\n\r\n # Adds values for a simple shadow behind sphere\r\n def __add_simple_shadow(self, light_x, light_y):\r\n for y in range(self.radius*2+1):\r\n for x in range(self.radius*2+1):\r\n if (self.lighting[y + self.radius*2 - light_y*2][x + self.radius*2 - light_x*2] is None) and \\\r\n (self.__calc_z(x-self.radius, y-self.radius) > 0.0001):\r\n self.lighting[y + self.radius*2 - light_y*2][x + self.radius*2 - light_x*2] = -10\r\n\r\n def __add_shadow(self, light_x, light_y):\r\n ellipse_x = light_x*2\r\n ellipse_y = light_y*2\r\n\r\n radius_x = int(sqrt(ellipse_x**2 + ellipse_y**2))\r\n radius_y = int(sqrt(ellipse_x**2 - (ellipse_x/2)**2))\r\n\r\n for y in range(radius_y*2+1):\r\n for x in range(radius_x*2+1):\r\n print(self.__in__ellipse(x - radius_x, y - radius_y, radius_x, radius_y))\r\n if (self.lighting[y + self.radius*2 - light_y*2][x + self.radius*2 - light_x*2] is None) and \\\r\n self.__in__ellipse(x - radius_x, y - radius_y, radius_x, radius_y):\r\n self.lighting[y + self.radius*2 - light_y*2][x + self.radius*2 - light_x*2] = -10\r\n\r\n\r\n# FUNCTIONS #\r\ndef user_interface():\r\n win = GraphWin('P101 - Sphere lighting', 400, 400)\r\n\r\n pt = Point(100, 50)\r\n\r\n pt.draw(win)\r\n\r\n win.getMouse()\r\n win.close()\r\n\r\n\r\n# Handles user questions and user input\r\ndef user_input(questiontype):\r\n try:\r\n if questiontype == 'radius':\r\n return int(input('Choose radius (whole number >=1): ')) # Radius input\r\n if questiontype == 'lighting':\r\n choice = input('Choose lighting coordinates (x,y): ').split(',') # Lighting coordinates input\r\n return int(choice[0]), int(choice[1])\r\n if questiontype == 'wait':\r\n return int(input('1 to rerun, 2 to quit: ')) # Rerun or quit program input\r\n except (ValueError, IndexError) as e:\r\n print('ERROR: Invalid value, try again. [', e, ']') # Handle errors in input values\r\n\r\n\r\n# main\r\ndef main():\r\n \"\"\"\r\n while True:\r\n\r\n while True:\r\n try:\r\n s = Sphere(user_input('radius')) # Create a Sphere-object\r\n break\r\n except (ValueError, TypeError) as e:\r\n print('ERROR: Invalid value, try again. [', e, ']')\r\n\r\n while True:\r\n try:\r\n x, y = user_input('lighting')\r\n s.generate(x, y) # Generate lighting matrix\r\n break\r\n except (ValueError, TypeError) as e:\r\n print('ERROR: Invalid value, try again. 
[', e, ']')\r\n\r\n print(s) # Print character representation of Sphere-object\r\n\r\n if user_input('wait') == 2: # wait for user input to rerun or quit\r\n break\r\n \"\"\"\r\n user_interface()\r\n\r\n\r\n# EXECUTION #\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"P101.py","file_name":"P101.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272930877","text":"#coding: utf-8\n# ©2017 «Gilles Vaillancourt» a.k.a. Jean-Hugues Roy. GNU GPL v3.\n\n# Première version, tirée d'un script utilisé pour moissonner le site du Collège des médecins.\n\nfor a in reversed(range(1930,2017)): # Boucle qui passe toutes les années en ordre inverse, de 2016 à 1930\n\tfor x in range(1001,2000): # Boucle qui passe les 1000 numéros de permis possible à chaque année \n\t\tnoPermis = str(a)[2:] + str(x)[1:]\n\n### Les commentaires du prof sont précédés par trois «#»\n###\n### C'est beau mon Gilles. On voit que tu as beaucoup de temps pour réfléchir à des solutions.\n","sub_path":"devoir1.py","file_name":"devoir1.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507270261","text":"#!flask/bin/python\n\n\nif __name__ == \"__main__\":\n\timport logSetup\n\tlogSetup.initLogging()\n\n# This HAS to be included before the app, to prevent circular dependencies.\n# import WebMirror.runtime_engines\n\nfrom settings import MAX_DB_SESSIONS\nfrom WebMirror.Runner import NO_PROCESSES\nimport WebMirror.Runner\nimport WebMirror.rules\nimport WebMirror.SpecialCase\n\ndef go():\n\n\timport pystuck; pystuck.run_server()\n\n\tlargv = [tmp.lower() for tmp in sys.argv]\n\n\tif not \"noreset\" in largv:\n\t\tprint(\"Resetting any in-progress downloads.\")\n\t\tWebMirror.Runner.resetInProgress()\n\telse:\n\t\tprint(\"Not resetting in-progress downloads.\")\n\n\trules = WebMirror.rules.load_rules()\n\t# WebMirror.Runner.initializeStartUrls(rules)\n\n\tglobal NO_PROCESSES\n\tglobal MAX_DB_SESSIONS\n\tMAX_DB_SESSIONS = NO_PROCESSES + 5\n\n\tprocesses = 16\n\tNO_PROCESSES = processes\n\tMAX_DB_SESSIONS = NO_PROCESSES + 5\n\tif \"maxprocesses\" in largv:\n\t\tprocesses = 24\n\t\tNO_PROCESSES = processes\n\t\tMAX_DB_SESSIONS = NO_PROCESSES + 5\n\telif \"fewprocesses\" in largv:\n\t\tprocesses = 8\n\t\tNO_PROCESSES = processes\n\t\tMAX_DB_SESSIONS = NO_PROCESSES + 5\n\telif \"twoprocess\" in largv:\n\t\tprocesses = 2\n\t\tNO_PROCESSES = processes\n\t\tMAX_DB_SESSIONS = NO_PROCESSES + 2\n\telif \"oneprocess\" in largv:\n\t\tprocesses = 1\n\t\tNO_PROCESSES = processes\n\t\tMAX_DB_SESSIONS = NO_PROCESSES + 2\n\n\trunner = WebMirror.Runner.Crawler(thread_count=NO_PROCESSES)\n\trunner.run()\n\n\t# print(\"Thread halted. 
App exiting.\")\n\nif __name__ == \"__main__\":\n\timport sys\n\n\tlargv = [tmp.lower() for tmp in sys.argv]\n\n\tif \"scheduler\" in sys.argv:\n\t\tprint(\"Please use runScheduler.py instead!\")\n\t\tsys.exit(1)\n\telse:\n\n\t\tstarted = False\n\t\tif not started:\n\t\t\tstarted = True\n\t\t\tgo()\n","sub_path":"runScrape.py","file_name":"runScrape.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"7595043","text":"import pandas as pd\nimport os\nimport time\ndir = 'D:/PythonObject/file/'\nfilePath = os.listdir(dir)\n\nsTime1 = time.time()\nfileList = []\nfor i in filePath:\n fileList.append(os.path.join(dir,i))\nprint(fileList)\neTime1 = time.time()\ndataTime1 = eTime1-sTime1\nprint('拿出列表路径时间:',dataTime1)\n\n\nsTime2 = time.time()\ndfs = []\nfor read in fileList:\n d = dfs.append(pd.read_excel(read))\n \neTime2 = time.time()\ndataTime2 = eTime2-sTime2\nprint('循环读出表格时间:',dataTime2)\n\n\nsTime3 = time.time()\ndataF = pd.DataFrame(dfs)\nprint(dataF)\n# dataFrame.to_excel('D:/PythonObject/file/合并.xlsx',index=False)\n# eTime3 = time.time()\n# dataTime3 = eTime3-sTime3\n","sub_path":"testObject/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"387797418","text":"# coding=utf-8\n# !/usr/bin/env python\nimport sys\nimport os\n\nhadoop_cmd=\"/usr/local/hadoop/bin/hadoop\"#hadoop安装目录\ndata_root=\"/home/test_data/\"\nhdfs_root=\"/user/hadoop/\"\ntaskid=2\n\ndef run_hadoopcmd(cmd):\n hadoopcmd = hadoop_cmd + \" \" + cmd\n os.system(hadoopcmd)\n\ndef get_all_path(local_cover_path,local_stego_path,local_txt_path):\n#读取cover和stego文件夹下的所有图片名称,并且将名称对应的路径写到对应的txt文件中\n if os.path.exists(local_txt_path):\n os.system(\"rm -r \"+local_txt_path)\n if not os.path.exists(local_txt_path):\n os.system(\"mkdir \"+local_txt_path)\n i=0\n for root, dirs, files in os.walk(local_cover_path):\n for file in files:\n file_ext=os.path.splitext(file)[1]\n if file_ext== '.jpg' or file_ext=='.jpeg':\n i=i+1\n content=\"0 \"+hdfs_root+str(taskid)+\"/cover/\"+file\n txt_path=local_txt_path+str(i)+\".txt\"\n with open(txt_path, 'w') as f:\n f.write(content)\n for root, dirs, files in os.walk(local_stego_path):\n for file in files:\n file_ext=os.path.splitext(file)[1]\n if file_ext== '.jpg' or file_ext=='.jpeg':\n i=i+1\n content=\"1 \"+hdfs_root+str(taskid)+\"/stego/\"+file\n txt_path=local_txt_path+str(i)+\".txt\"\n with open(txt_path, 'w') as f:\n f.write(content)\n\ndef hadoop_52(local_cover_path,local_stego_path,local_output,taskid): \n\n local_txt_path = data_root+\"task/\"+str(taskid)+\"/txt/\" \n\n #写入到txt文件后,将图像文件和txt文件上传到hdfs上\n\n fs_task_path=hdfs_root+str(taskid)+\"/\"\n make_dir_path=\"fs -mkdir -p \"+fs_task_path\n run_hadoopcmd(make_dir_path)\n\n copy_cover_cmd= \"fs -copyFromLocal \" + local_cover_path + \" \" + fs_task_path\n copy_stego_cmd= \"fs -copyFromLocal \" + local_stego_path + \" \" + fs_task_path\n copy_txt_cmd= \"fs -copyFromLocal \" + local_txt_path + \" \" + fs_task_path\n\n run_hadoopcmd(copy_cover_cmd)\n run_hadoopcmd(copy_stego_cmd)\n run_hadoopcmd(copy_txt_cmd)\n\n fs_task_path=hdfs_root+str(taskid)+\"/\"\n file_path = \"/home/test_data/5_mapper_reducer/52_train/mapper_train.py,/home/test_data/5_mapper_reducer/52_train/reducer_train.py\" \n input_path = fs_task_path+\"txt\"\n output_path = fs_task_path + \"output\"\n\n run_hadoopcmd(\"jar 
${HADOOP_HOME}/share/hadoop/tools/lib/hadoop-streaming-2.7.4.jar \"\\\n +\"-D mapred.map.tasks=2 -D mapred.job.priority=HIGH -D mapreduce.map.memory.mb=8192\"\\\n\t\t+\" -files \"+file_path+\" -input \" + input_path + \" -output \"+output_path \\\n\t\t+\" -mapper 'python mapper_train.py' -reducer 'python reducer_train.py'\")\n\n\n\n get_output=\"fs -get \" + output_path + \"/part-00000 \" + local_output\n run_hadoopcmd(get_output)\n\n delete_cmd = \"fs -rm -r \" + fs_task_path\n\n run_hadoopcmd(delete_cmd)\n\n local_cover_txt=data_root+\"task/\"+str(taskid)+\"/cover.txt\"\n local_stego_txt=data_root+\"task/\"+str(taskid)+\"/stego.txt\"\n c=[]\n s=[]\n with open(local_output,\"r\") as file_to_read:\n while True :\n lines=file_to_read.readline()\n if not lines:\n break\n line=lines.strip()\n words=line.split(\"\\n\")\n all=words[0].split(\",\")\n if(all[0]==\"0\"):\n c.append(all[1]+\"\\n\")\n else:\n s.append(all[1]+\"\\n\")\n with open(local_cover_txt, 'w') as f:\n\n f.writelines(c)\n with open(local_stego_txt, 'w') as f:\n\n f.writelines(s)\n\n\ndef main():\n\n local_cover_path = data_root+\"task/\"+str(taskid)+\"/cover/\" \n \n local_stego_path = data_root+\"task/\"+str(taskid)+\"/stego/\" \n\n local_txt_path = data_root+\"task/\"+str(taskid)+\"/txt/\" \n\n local_output=data_root+\"task/\"+str(taskid)+\"/result\"\n\n get_all_path(local_cover_path,local_stego_path,local_txt_path)\n\n hadoop_52(local_cover_path,local_stego_path,local_output,taskid)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"back/test_data/5_hadoop/hadoop_train.py","file_name":"hadoop_train.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"208653082","text":"import argparse\n\n#you can force the optional argument to have specified number of parameter\n#with nargs= in add_argument\nmy_parser = argparse.ArgumentParser(\"my arg praser!\")\n\nmy_parser.add_argument('-g', nargs=3, type=str)\n#the nargs keyword can also accept the following:\n\n# ?: a single value, which can be optional, if not provided default is used\n# *: a flexible number of values, which will be gathered into a list\n# +: like *, but requiring at least one value\n# argparse.REMAINDER: all the values that are remaining in the command line\nmy_parser.add_argument('target',\n action='store',\n nargs='?',\n default='my default value')\n\n#becuase target added first, its position is first in the \n#positional arguments \n\nmy_parser.add_argument('input',\n action='store',\n nargs='*',\n default='my default value2')\n\nargs = my_parser.parse_args()\n#if you provide --global then you must use args.global\n#else you must use args.g\nprint(args.g)\nprint(args.input)\nprint(args.target)\n","sub_path":"argparse/arg-number.py","file_name":"arg-number.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"186557776","text":"class Solution:\n def reorderList(self, head: ListNode) -> None:\n if not head or not head.next:\n return\n fast = slow = head\n \n while fast.next and fast.next.next:\n fast = fast.next.next\n slow = slow.next\n \n p1, p2 = slow, slow.next\n slow.next = None\n \n while p2:\n p2.next, p2, p1 = p1, p2.next, p2\n \n first = head\n tail = p1\n \n while tail and first:\n \n first.next, tail.next, first, tail = tail, first.next, first.next, 
tail.next\n","sub_path":"142-Reorder-List/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"386234219","text":"# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport abc\nimport sqlite3\nfrom collections import Counter\n\nfrom nltk import ngrams\n\nfrom thot_utils.libs import config\nfrom thot_utils.libs.utils import split_string_to_words\n\n\nclass LanguageModelProviderInterface(object):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def get_count(self, src_words):\n pass\n\n @abc.abstractmethod\n def get_all_counts(self):\n pass\n\n\nclass LanguageModelFileProvider(LanguageModelProviderInterface):\n def __init__(self, fd, ngrams_length):\n self.fd = fd\n self.ngrams_length = ngrams_length\n self.main_counter = Counter()\n self.run()\n\n def run(self):\n for line in self.fd:\n word_array = split_string_to_words(line)\n self.train_word_array(word_array)\n\n def train_word_array(self, word_array):\n # obtain counts for 0-grams\n self.main_counter.update({\"\": len(word_array)})\n\n # obtain counts for higher order n-grams\n for i in range(1, self.ngrams_length + 1):\n self.main_counter.update(\n ngrams(word_array, i, pad_left=True, pad_right=True, left_pad_symbol=config.bos_str,\n right_pad_symbol=config.eos_str)\n )\n\n def get_count(self, word):\n return self.main_counter[word]\n\n def get_all_counts(self):\n for source, count in self.main_counter.items():\n yield source, count\n\n\nclass LanguageModelDBProvider(LanguageModelProviderInterface):\n def __init__(self, filename):\n self.connection = sqlite3.connect(filename)\n self.cursor = self.connection.cursor()\n\n def get_count(self, word):\n self.cursor.execute('select c from ngram_counts where n=? 
limit 1', [word])\n rows = self.cursor.fetchall()\n if rows:\n return rows[0][0]\n return 0\n\n def get_all_counts(self):\n raise NotImplemented()\n\n def load_from_other_provider(self, provider):\n self.connection.execute('CREATE TABLE ngram_counts (n text primary key not null, c int not null)')\n for key, value in provider.get_all_counts():\n self.cursor.execute('insert into ngram_counts values (?, ?)', [' '.join(key), value])\n self.connection.commit()\n","sub_path":"thot_utils/libs/recase/language_model_provider.py","file_name":"language_model_provider.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541892969","text":"from tkinter import Label as TKLabel\n\nfrom ..utils import load_color\n\ndef Text(root, style = \"\", **options):\n\n props = {\n 'fg': load_color(style)['bgColor']\n }\n\n props.update(**options)\n\n T = TKLabel(root)\n\n T.config(props)\n T.pack()","sub_path":"src/Text/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405215259","text":"from os.path import exists\nfrom os import mkdir\nfrom time import sleep\nimport ctypes\nfrom win32com.shell import shell, shellcon\nfrom requests import get\nfrom bs4 import BeautifulSoup\n\n# Globals\nurl = \"http://deadauthor.org/art/\"\nimage_path = \"\"\nPICTURES_FOLDER = shell.SHGetFolderPath(0, shellcon.CSIDL_MYPICTURES, None, 0)\nDIR = r\"{}\\abstrakt_images\".format(PICTURES_FOLDER)\n\n\ndef download_image(url, path):\n r = get(url)\n with open(path, \"wb\") as f:\n f.write(r.content)\n\n\ndef get_latest_image(url):\n global DIR\n global image_path\n\n html_text = get(url).text\n soup = BeautifulSoup(html_text, \"html.parser\")\n images = soup.findAll(\"a\", href=True)\n last_image_name = images[-1][\"href\"]\n\n image_url = url + last_image_name\n path = DIR + \"/\" + last_image_name\n image_path = path\n\n if not exists(path):\n download_image(image_url, path)\n\n\nif __name__ == \"__main__\":\n if not exists(DIR):\n mkdir(DIR)\n\n while True:\n get_latest_image(url)\n ctypes.windll.user32.SystemParametersInfoW(20, 0, image_path, 0)\n sleep(3660)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"530376503","text":"import sys\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier\n\nimport Indicators as ind\nfrom KSVMeans import KSVMeans\n\nfrom Stock import Stock\n\nnum_clusters = 5\nnxt_day_predict = 7\ndb_dir = 'db'\nextraRandomTree = True\n\nind_dict = {\n 'SMA' : ind.SMA, # (df, n)\n 'EMA' : ind.EMA, # (df, n)\n 'MOM' : ind.MOM, # (df, n)\n 'ROC' : ind.ROC, # (df, n)\n 'ATR' : ind.ATR, # (df, n)\n 'BBANDS' : ind.BBANDS, # (df, n, multiplier, middle)\n 'PPSR' : ind.PPSR, # (df)\n 'PPSRFIBO' : ind.PPSRFIBO,# (df)\n 'STOK' : ind.STOK, # (df)\n 'STO' : ind.STO, # (df, n)\n 'TRIX' : ind.TRIX, # (df, n)\n 'ADX' : ind.ADX, # (df, n, n_ADX)\n 'MACD' : ind.MACD, # (df, n_fast, n_slow)\n 'MASS' : ind.MASS, # (df)\n 'VORTEX' : ind.VORTEX, # (df, n)\n 'KST' : ind.KST, # (df, r1, r2, r3, r4, n1, n2, n3, n4)\n 'RSI' : ind.RSI, # (df, n)\n 'TSI' : 
ind.TSI, # (df, r, s)\n 'ACCDIST' : ind.ACCDIST, # (df, n)\n 'CHAIKIN' : ind.CHAIKIN, # (df)\n 'MFI' : ind.MFI, # (df, n)\n 'OBV' : ind.OBV, # (df, n)\n 'FORCE' : ind.FORCE, # (df, n)\n 'EOM' : ind.EOM, # (df, n)\n 'CCI' : ind.CCI, # (df, n)\n 'COPP' : ind.COPP, # (df, n)\n 'KELCH' : ind.KELCH, # (df, n)\n 'DONCH' : ind.DONCH, # (df, n)\n 'ULTOSC' : ind.ULTOSC # (df)\n }\n\nif extraRandomTree:\n ind_funcs_params = []\n with open('db/FeaturesTestOut2.txt', 'r') as f:\n for line in f:\n line = line.split(',')\n if len(line) == 1:\n ind_funcs_params.append([ind_dict[line[0][:-1]], None])\n else:\n params = line[1].split()\n params = map(int, params)\n ind_funcs_params.append([ind_dict[line[0]], tuple(params)])\n\n_gridSearch_ = True\n_train_test_data_ = True\n\nif __name__ == \"__main__\":\n ticker = 'TSLA'\n\n stock = Stock(ticker, considerOHL = False, train_test_data = _train_test_data_, train_size = 0.8)\n stock.applyIndicators(ind_funcs_params, verbose = True)\n stock.applyPredict(nxt_day_predict)\n stock.fit_kSVMeans(num_clusters = 4,\\\n random_state_kmeans = 40,\\\n random_state_clf = 40,\\\n classifier = 'OneVsOne',\\\n consistent_clusters_multiclass = True)","sub_path":"Tests/Tools_Tests/merge_test.py","file_name":"merge_test.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"334161848","text":"\"\"\"\nThe n-queens puzzle is the problem of placing n queens on an n×n chessboard such that\nno two queens attack each other.\n\nGiven an integer n, return all distinct solutions to the n-queens puzzle.\n\nEach solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.'\nboth indicate a queen and an empty space respectively.\n\nInput: 4\nOutput: [\n [\".Q..\", // Solution 1\n \"...Q\",\n \"Q...\",\n \"..Q.\"],\n\n [\"..Q.\", // Solution 2\n \"Q...\",\n \"...Q\",\n \".Q..\"]\n]\nExplanation: There exist two distinct solutions to the 4-queens puzzle as shown above.\n\"\"\"\n#Programming for the Puzzled -- Srini Devadas\n#A Profusion of Queens\n#Given the dimension of a square \"chess\" board, call it N, find a placement\n#of N queens such that no two Queens attack each other using recursive search\n\nclass Solution(object):\n # This procedure initializes the board to be empty, calls the recursive N-queens\n # procedure and prints the returned solution\n def solveNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: List[List[str]]\n \"\"\"\n ret = []\n board = [-1] * n\n self.rQueens(board, 0, [], ret)\n return self.printBoard(ret)\n\n # This procedure prints the board row by row\n def printBoard(self, board):\n ret = []\n for solution in board:\n temp = []\n for i in solution:\n row = \".\" * i + 'Q' + '.' 
* (len(solution) - i - 1)\n temp.append(row)\n ret.append(temp)\n return ret\n\n # This procedure checks that the most recently placed queen on column current\n # does not conflict with queens in columns to the left.\n def noConflicts(self, board, current):\n for i in range(current):\n if (board[i] == board[current]):\n return False\n if (current - i == abs(board[current] - board[i])):\n return False\n return True\n\n # This procedure places a queens on the board on a given column so it does\n # not conflict with the existing queens, and then calls itself recursively\n # to place subsequent queens till the requisite number of queens are placed\n def rQueens(self, board, current, path, ret):\n if (current == len(board)):\n ret.append(path)\n return\n\n for i in range(len(board)):\n board[current] = i\n if (self.noConflicts(board, current)):\n # tmp = \".\" * len(board)\n # self.rQueens(board, current + 1, path+[tmp[:i]+\"Q\"+tmp[i+1:]], ret)\n self.rQueens(board, current + 1, path + [i], ret)\n\n\nprint(Solution().solveNQueens(4))\n\n\n","sub_path":"51N-Queen.py","file_name":"51N-Queen.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"491561185","text":"# MAIN File\n# Sculpted with love by Thomas Groot en Jonathan Gerbscheid <3\n\n# Naoqi Imports #\nfrom naoqi import ALProxy\nfrom naoqi import ALBroker\nfrom naoqi import ALModule\n\n# additional Imports\nimport math\nimport time\nimport sys\nimport Image\nimport numpy\nimport cv2\n# Local modules #\nimport facedetection\nimport facerecognition\nimport speech\nimport slam\nimport questions_answers\nimport language_processing\nfrom naoqi import qi\nfrom Sound import locateSound # jonathans naoqi stuff\n# from PeopleDetection import peopledetector\n\n# Global variables #\nIP = \"127.0.0.1\"\n# IP = \"pepper.local\"\n# IP = \"146.50.60.15\"\n# IP = \"192.168.131.13\"\nPORT = 9559\n\nTextToSpeech = None\nAnimatedSpeech = None\nVideoDevice = None\nAudioRecorder = None\nAudioDevice = None\nSoundLocator = None\nNavigation = None\nLocalizer = None\nmemory = None\nmotionProxy = None\npostureProxy = None\npplDetectionargs = None\n\n\n#############\n# Functions #\n#############\n\n\n# jonathan comment dit\ndef setup_people_detection():\n global pplDetectionargs\n pplDetectionargs = peopledetector.setup_network()\n\n# jonathan comment dit\ndef detect_people():\n detections = peopledetector.detect_people(VideoDevice, *pplDetectionargs)\n outlist = []\n for detection in detections:\n outlist.append([detection[0][0], detection[0][1], detection[1][0], detection[1][1]])\n return outlist\n\n# return detected faces\ndef make_face_database(tracking=False):\n global VideoDevice\n if tracking:\n if motionProxy == None:\n init_localization()\n print(\"Finding faces...\")\n face_list = facedetection.collect_faces(VideoDevice, motionProxy)\n else:\n print(\"Finding faces...\")\n face_list = facedetection.collect_faces(VideoDevice)\n return face_list\n\n# Detects faces in one image\ndef detect_faces():\n global VideoDevice\n face_list = facedetection.detect_once(VideoDevice)\n return face_list\n\n# Train a set of faces to be associated with a label\ndef train_recognize_faces(face_list, labels, recognizer=None):\n if recognizer == None:\n recognizer = facerecognition.FaceRecognizer()\n print(\"Training faces...\")\n recognizer.train(face_list, labels)\n return recognizer\n\n# Return names from a list of recognized faces\ndef recognize_faces(recognizer):\n global 
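# Quick sanity check for the N-Queens solver above (assumes the Solution
# class is in scope): the distinct-solution counts for n = 4, 5, 6, 8 are
# known to be 2, 10, 4 and 92, and every row must hold exactly one queen.
for n, expected in [(4, 2), (5, 10), (6, 4), (8, 92)]:
    boards = Solution().solveNQueens(n)
    assert len(boards) == expected, (n, len(boards))
    assert all(row.count("Q") == 1 for board in boards for row in board)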
VideoDevice\n print(\"Recognizing faces...\")\n recognized_faces = recognizer.recognize(VideoDevice)\n return recognized_faces\n\n# Make the robot say something\ndef robot_say(text=\"Hi human\"):\n global TextToSpeech\n TextToSpeech.say(text)\n\n# Say something and make some movement\ndef robot_animated_say(text=\"Hi human\"):\n global AnimatedSpeech\n AnimatedSpeech.say(text, {\"bodyLanguageMode\":\"contextual\"})\n\n# Return recognized speech\ndef speech_recognition(max_tries=4):\n global AudioRecorder\n global AudioDevice\n print(\"Recognizing speech...\")\n tries = 0\n sentence = \"\"\n while tries < max_tries and sentence == \"\":\n sentence = speech.wait_for_voice(AudioRecorder, AudioDevice)\n tries += 1\n return sentence\n\n\n######################\n# Proxy Initializers #\n######################\n\n\n# Allows the robot to say text\ndef init_textToSpeech():\n global TextToSpeech\n TextToSpeech = ALProxy(\"ALTextToSpeech\", IP, 9559)\n\ndef init_animatedSpeech():\n global AnimatedSpeech\n AnimatedSpeech = ALProxy(\"ALAnimatedSpeech\", IP, 9559)\n\n# Soundlocator is for locating sound\ndef init_soundLocalization():\n global SoundLocator\n SoundLocator = locateSound.SoundLocatorModule(\"SoundLocator\", IP, PORT)\n\n# Videodevice is for taking images from the videostream\ndef init_videoDevice():\n global VideoDevice\n VideoDevice = ALProxy(\"ALVideoDevice\", IP, 9559)\n\n# AudioRecorder is for sound recording\ndef init_audioRecorder():\n global AudioRecorder\n AudioRecorder = ALProxy(\"ALAudioRecorder\", IP, 9559)\n\n# AudioDevice is for sound level registration\ndef init_audioDevice():\n global AudioDevice\n AudioDevice = ALProxy(\"ALAudioDevice\", IP, 9559)\n\n# Navigation module\ndef init_navigation():\n global Navigation\n Navigation = ALProxy(\"ALNavigation\", IP, 9559)\n\ndef init_motion():\n global motionProxy\n global postureProxy\n motionProxy = ALProxy(\"ALMotion\", IP, PORT)\n postureProxy = ALProxy(\"ALRobotPosture\", IP, PORT)\n motionProxy.wakeUp()\n\ndef init_localization():\n global Localizer\n Localizer = slam.Localization(Navigation)\n\ndef init_memory():\n global memory\n memory = ALProxy(\"ALMemory\", IP, PORT)\n\n\n########\n# Main #\n########\n\ndef turn_to_person():\n detectioncounter = 0\n while True:\n peopleList = detect_people()\n if len(peopleList) > 0:\n boxindex = get_biggest_box_index(peopleList)\n width = peopleList[boxindex][2] - peopleList[boxindex][0]\n height = peopleList[boxindex][3] - peopleList[boxindex][1]\n size = width * height\n if size > 10000:\n detectioncounter += 1\n # turn to person\n box = peopleList[boxindex]\n print(box)\n print(box[0]/2.0)\n boxcenter = box[0] + ((box[2] - box[0])/2.0)\n print(boxcenter)\n im_width = 640\n\n dwidth = im_width/55.20\n pdiff = im_width/2.0 - boxcenter\n turn = (math.radians(pdiff / dwidth))\n print(\"angle: \" + str(turn))\n motionProxy.moveTo(0.0, 0.0, turn)\n if detectioncounter > 3:\n robot_say(\"found you!\")\n return peopleList\n else:\n detectioncounter = 0\n # motionProxy.moveTo(0.0, 0.0, math.radians(30))\n else:\n detectioncounter = 0\n # motionProxy.moveTo(0.0, 0.0, math.radians(30))\n\n# Turn the robot torwards the loudest sound\ndef turn_to_sound():\n SoundLocator.reset_variables()\n while True:\n if SoundLocator.soundFound:\n # move to the source of the sound\n print(\"angle found: \" + str(SoundLocator.soundAngle))\n motionProxy.moveTo(0.0, 0.0, math.radians(SoundLocator.soundAngle))\n SoundLocator.reset_variables()\n break\n\n\ndef get_biggest_box_index(boxlist):\n index = 0\n maxsize = 0\n 
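# Worked check of the pixel-to-angle conversion in turn_to_person() above.
# The 55.20 divisor is assumed to be the camera's horizontal field of view in
# degrees: at 640 px width, one degree spans 640/55.2 ~ 11.6 px, so a box
# centered 64 px off-center needs a turn of about 5.5 degrees.
import math

im_width = 640.0
dwidth = im_width / 55.20          # pixels per degree (assumed HFOV)
pdiff = 64.0                       # offset of box center from image center
turn = math.radians(pdiff / dwidth)
assert abs(math.degrees(turn) - 5.52) < 0.01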
for i in range(len(boxlist)):\n width = boxlist[i][2] - boxlist[i][0]\n height = boxlist[i][3] - boxlist[i][1]\n size = width * height\n if size > maxsize:\n maxsize = size\n index = i\n return index\n\n\n# Wait for a door to open\ndef door_waiter():\n sonar = ALProxy(\"ALSonar\", IP, PORT)\n sonar.subscribe(\"python_client\")\n robot_say(\"Waiting for door to open.\")\n while True:\n # front = memory.getData(\"Device/SubDeviceList/Platform/Front/Sonar/Sensor/Value\")\n fronthorizontal7x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg07/X/Sensor/Value\")\n fronthorizontal8x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg08/X/Sensor/Value\")\n\n print(\"distance to wall 1: \" + str(fronthorizontal7x))\n print(\"distance to wall 2: \" + str(fronthorizontal8x))\n # print(\"right: \" + str(right))\n if fronthorizontal7x > 2.0 and fronthorizontal8x > 2.0:\n print(\"Door opened!\")\n break\n\n # Unsubscribe from sonars, this will stop sonars (at hardware level)\n sonar.unsubscribe(\"python_client\")\n\n\ndef speech_and_person():\n # wait for door to open\n # door_waiter\n\n # move forward to middle of room\n # load Localization\n\n # Localizer.moveTo_to([0,0])\n\n robot_say(\"I want to play a riddle game\")\n time.sleep(5)\n motionProxy.moveTo(0.0, 0.0, math.radians(180))\n time.sleep(2)\n # turn_to_person()\n face_list = []\n while face_list == []:\n face_list, image = detect_faces()\n robot_say(str(\"I found \" + str(len(face_list)) + \" people\"))\n time.sleep(1)\n robot_say(\"I am not very good at faces yet, so I don't know your genders\")\n time.sleep(1)\n robot_say(\"now. Who wants to play riddles with me?\")\n # wait for crowd to surround the robot\n time.sleep(5)\n for i in range(5):\n robot_say(str(\"question \" + str(i+1) + \" please.\"))\n sentence = speech_recognition(max_tries=1)\n if sentence != \"\":\n robot_say(\"You said.\")\n time.sleep(1)\n print(sentence)\n robot_say(str(sentence))\n else:\n robot_say(\"I did not understand the question.\")\n time.sleep(2)\n\n robot_say(\"I am done playing riddles\")\n time.sleep(1)\n robot_say(\"Now we play the blind mans bluff game\")\n time.sleep(5)\n for i in range(5):\n robot_say(str(\"question \" + str(i+1) + \" please.\"))\n for _ in range(2):\n sentence = speech_recognition(max_tries=1)\n turn_to_sound()\n if sentence != \"\":\n robot_say(\"You said\")\n time.sleep(1)\n robot_say(str(sentence))\n time.sleep(1)\n break\n else:\n robot_say(\"I could not understand it, could you repeat it once more?\")\n robot_say(\"I am done answering questions.\")\n time.sleep(1)\n robot_say(\"Thank you all for playing!\")\n\n# Get the drink order from a single person\ndef get_order(person_index, recognizer):\n\n qa = questions_answers.QA()\n face_detection_start_time = time.time()\n # Check if person is standing in front of the camera\n while True:\n faces, image = detect_faces()\n # Break after 30 seconds\n if len(faces) > 0 or face_detection_start_time-time.time() > 30.0:\n break\n name = \"\"\n name_timeout = time.time()\n # Try and ask for the name for 30 seconds\n while name == \"\" and time.time()-name_timeout < 30.0:\n robot_say(qa.ask_for_name())\n name = speech_recognition(max_tries=1)\n time.sleep(1)\n name = language_processing.get_name(name)\n if name != \"noname\":\n robot_say(str(\"Hi \" + name))\n else:\n robot_say(str(\"I did not understand your name so now you are person\"+str(person_index+1)))\n name = str(\"person\"+str(person_index+1))\n time.sleep(1)\n 
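# Hedged helper sketch: the retry loops in get_order() poll until a condition
# holds or ~30 s pass. Elapsed time is time.time() - start; note that the
# face-detection loop above compares in the opposite direction
# (face_detection_start_time - time.time() > 30.0), which is never positive,
# so that loop only exits when a face is found. Standalone version:
import time

def wait_until(condition, timeout=30.0, poll=0.1):
    start = time.time()
    while time.time() - start < timeout:
        if condition():
            return True
        time.sleep(poll)
    return False

assert wait_until(lambda: True, timeout=1.0)
assert not wait_until(lambda: False, timeout=0.3)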
robot_say(\"I am going to try to learn your face\")\n time.sleep(1)\n robot_say(\"Please look straight at me\")\n # Run the facedetection\n face_list = make_face_database(True)\n label_list = []\n for face in face_list:\n label_list.append(person_index)\n # If a recognizer already exists, use that recognizer\n if len(face_list) > 0:\n recognizer = train_recognize_faces(face_list, label_list, recognizer)\n robot_say(\"I learned your face!\")\n time.sleep(1)\n # Taking the order\n drink_list = [\"water\"]\n drink_timeout = time.time()\n while time.time()-drink_timeout < 30.0:\n robot_say(qa.ask_for_drink())\n sentence = speech_recognition(max_tries=1)\n candidate_drink_list = language_processing.get_all_drinks(sentence)\n if len(candidate_drink_list) > 0:\n drink_list = candidate_drink_list\n print(\"Found drinks\", drink_list)\n break\n else:\n robot_say(\"I was unable to understand your order\")\n time.sleep(0.5)\n time.sleep(1)\n robot_say(\"I will order \")\n for drink in drink_list:\n time.sleep(0.5)\n robot_say(str(drink))\n time.sleep(0.3)\n robot_say(\"for you\")\n time.sleep(1)\n robot_say(\"Thank you for you order\")\n time.sleep(1)\n return [name, drink_list, recognizer]\n\ndef repeat_orders(person_info_list):\n face_list = []\n while face_list == []:\n face_list, image = detect_faces()\n robot_say(\"Hi bartender\")\n for person_info in person_info_list:\n time.sleep(1)\n robot_say(str(person_info[0] + \" wants to order\"))\n for drink in person_info[1]:\n time.sleep(0.5)\n robot_say(str(drink))\n\n\ndef move_straight_until_stuck():\n \"\"\"Move until you are stuck and then find a free zone.\"\"\"\n x = 0.5\n y = 0.0\n theta = 0.0\n fronthorizontal7x = 2.0\n fronthorizontal8x = 2.0\n\n print(\"first loop\")\n count1 = 0\n while True:\n # time.sleep(1)\n print(\"==in first loop==\")\n fronthorizontal7x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg07/X/Sensor/Value\")\n fronthorizontal8x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg08/X/Sensor/Value\")\n\n print(\"distance to wall 1: \" + str(fronthorizontal7x))\n print(\"distance to wall 2: \" + str(fronthorizontal8x))\n if fronthorizontal7x > 1.0 or fronthorizontal8x > 1.0:\n motionProxy.moveTo(x, y, theta)\n # time.sleep(1)\n else:\n break\n\n print(\"turning!\")\n # motionProxy.moveTo(0.0, 0.0, math.radians(45))\n\n time.sleep(2)\n fronthorizontal7x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg07/X/Sensor/Value\")\n fronthorizontal8x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg08/X/Sensor/Value\")\n\n count2 = 0\n while True:\n # time.sleep(1)\n print(\"==in second loop==\")\n fronthorizontal7x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg07/X/Sensor/Value\")\n fronthorizontal8x = memory.getData(\"Device/SubDeviceList/Platform/LaserSensor/Front/Horizontal/Seg08/X/Sensor/Value\")\n\n print(\"distance to wall 1: \" + str(fronthorizontal7x))\n print(\"distance to wall 2: \" + str(fronthorizontal8x))\n if fronthorizontal7x > 1.0 or fronthorizontal8x > 1.0:\n motionProxy.moveTo(x, y, theta)\n # time.sleep(1)\n else:\n break\n robot_say(\"Im done!\")\n # Navigation.findFreeZone(2.0, 4.0)\n\n\n# Run the cocktail party challenge\ndef cocktail_party():\n qa = questions_answers.QA()\n\n # Set head straight\n speed = 0.2\n defaultyaw = 0.0\n defaultpitch = -0.6\n\n motionProxy.setAngles(\"HeadPitch\", defaultpitch, speed)\n motionProxy.setAngles(\"HeadYaw\", 
defaultyaw, speed)\n\n print(\"Initializers\")\n # get_all_drinks is ran once so that nltk is loaded\n language_processing.get_all_drinks(\"water\")\n recognizer = None\n person_list = []\n\n robot_say(\"Can the first person please walk up to me?\")\n time.sleep(5)\n person_info = get_order(0, recognizer)\n person_list.append([person_info[0], person_info[1]])\n recognizer = person_info[2]\n # reset head positions\n motionProxy.setAngles(\"HeadPitch\", defaultpitch, speed)\n motionProxy.setAngles(\"HeadYaw\", defaultyaw, speed)\n\n robot_say(\"Can the second person please walk up to me?\")\n time.sleep(5)\n person_info = get_order(1, recognizer)\n person_list.append([person_info[0], person_info[1]])\n recognizer = person_info[2]\n # reset head positions\n motionProxy.setAngles(\"HeadPitch\", defaultpitch, speed)\n motionProxy.setAngles(\"HeadYaw\", defaultyaw, speed)\n\n robot_say(\"Can the third person please walk up to me?\")\n time.sleep(5)\n person_info = get_order(2, recognizer)\n person_list.append([person_info[0], person_info[1]])\n recognizer = person_info[2]\n # reset head positions\n motionProxy.setAngles(\"HeadPitch\", defaultpitch, speed)\n motionProxy.setAngles(\"HeadYaw\", defaultyaw, speed)\n\n time.sleep(5)\n robot_say(\"Can the bartender please come to me?\")\n time.sleep(5)\n repeat_orders(person_list)\n time.sleep(3)\n # STEP 6,7,8: we are skipping these\n robot_say(\"I am done\")\n time.sleep(1)\n robot_say(\"Thank you all\")\n print(\"Done with cocktail party\")\n\n\ndef navigation_things():\n \"\"\"this method does nothing except hold the navigation code that I am still\n working on, but that is not allowed in the main :).\"\"\"\n Localizer.explore(4)\n Localizer.save_exploration()\n result_map = Navigation.getMetricalMap()\n map_width = result_map[1]\n map_height = result_map[2]\n img = numpy.array(result_map[4]).reshape(map_width, map_height)\n img = (100 - img) * 2.55 # from 0..100 to 255..0\n img = numpy.array(img, numpy.uint8)\n cv2.imwrite(\"robocup-nagoya.png\", img)\n # Localizer.stop_exploration()\n # Localizer.explore(1)\n # Navigation.stopLocalization()\n # # Localizer.start_localization()\n # # Localizer.load_exploration(\"/home/nao/.local/share/Explorer/2017-07-19T163238.071Z.explo\")\n # Navigation.loadExploration(\"/home/nao/.local/share/Explorer/2017-07-20T123155.689Z.explo\")\n # # Navigation.getMetricalMap()\n # # print(\"path: \" + str(Localizer.map_path))\n # # Localizer.load_exploration(\"2017-07-20T123155.689Z.explo\")\n # result_map = Navigation.getMetricalMap()\n # map_width = result_map[1]\n # map_height = result_map[2]\n # img = numpy.array(result_map[4]).reshape(map_width, map_height)\n # img = (100 - img) * 2.55 # from 0..100 to 255..0\n # img = numpy.array(img, numpy.uint8)\n # # cv2.imwrite(\"iismap2.png\", img)\n # # pilimage = Image.frombuffer('L', (map_width, map_height), img, 'raw', 'L', 0, 1)\n #\n # # for i in range(120):\n # # for j in range(120):\n # # img[i][j] = 1\n # # cv2.imshow(\"map\", img)\n # # cv2.waitKey(1)\n #\n # est_position_maybe = Navigation.relocalizeInMap([0,0])\n # # est_position = Navigation.getRobotPositionInMap()\n # while True:\n # # Localizer.relocalize([0,0])\n # # est_position = Navigation.getRobotPositionInMap()\n # est_position = Navigation.relocalizeInMap([0,0])\n # print(est_position[1][0])\n # a = est_position[1][0][0]\n # b = est_position[1][0][1]\n # x = map_width * a\n # y = map_height * b\n # print(\"adjusted: \" + str(x) + \", \" + str(y))\n # Navigation.startLocalization()\n # 
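# Worked check of the grayscale conversion in navigation_things() above: per
# the snippet's own comment, (100 - v) * 2.55 maps cell values from 0..100
# onto 255..0 before the map is written out with cv2.imwrite.
import numpy as np

cells = np.array([0, 50, 100])
img = np.array((100 - cells) * 2.55, np.uint8)
assert img.tolist() == [255, 127, 0]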
Navigation.navigateToInMap([0.,0.])\n #\n # # print(\"start talking\")\n # # sentence = speech_recognition()\n # # print(sentence)\n # Localizer.start_localization()\n # Localizer.relocalize([0.,0.])\n # print(\"path: \" + str(Localizer.map_path))\n # # Localizer.moveTo_to([-1., -1.])\n # print(\"estimate location: \" + str(Localizer.get_robot_position()))\n # Localizer.stop_exploration()\n\n\n# Main function that is run once upon startup\ndef main():\n\n lifeProxy = ALProxy(\"ALAutonomousLife\", IP, PORT)\n # lifeProxy.setState(\"disabled\")\n print(\"AutonomousLife: \" + lifeProxy.getState())\n init_soundLocalization()\n init_navigation()\n init_textToSpeech()\n init_animatedSpeech()\n init_videoDevice()\n init_motion()\n init_audioDevice()\n init_audioRecorder()\n init_memory()\n # init_localization()\n\n # ROBOT INSPECTION ================================\n # door_waiter()\n # robot_say(\"door opened!\")\n # move_straight_until_stuck()\n\n # time.sleep(3)\n\n # COCKTAIL PARTY ==================================\n # cocktail_party()\n\n\n # SPEECH AND PERSON RECOGNITION ===================\n # speech_and_person()\n\n # TESTING =========================================\n robot_animated_say(\"Can the first person please walk up to me?\")\n\n\n # OTHER STUFF --------------------------------\n\n # # correct head angle for long distances\n # # currentAngle = motionProxy.getAngles(\"HeadYaw\", True)[0]\n # # motionProxy.setAngles([\"HeadPitch\"], currentAngle + 0.08, 0.2)\n #\n # # not finished\n #\n # # MAIN WHILE LOOP\n # while True:\n # # do a lot of stuff here\n # peopleList = detect_people()\n # print(\"found \" + str(len(peopleList)) + \" people!\")\n # # Localizer.get_map()\n # # finally turn to sound if it was recognized\n # if SoundLocator.soundFound:\n # # move to the source of the sound\n # print(\"angle found: \" + str(SoundLocator.soundAngle))\n # motionProxy.moveTo(0.0, 0.0, math.radians(SoundLocator.soundAngle))\n # SoundLocator.reset_variables()\n #\n print(\"Done\")\n\n\n# Use the main function\nif __name__ == \"__main__\":\n main()\n","sub_path":"robocup2017/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156535111","text":"from random import randint \n\n\n# place = [\n# 'Darkhan',\n# 'Palvan-Ata',\n# 'karavan',\n# 'Manas-ata',\n# 'to society',\n# 'office'\n# ]\n\n# meal = [\n# 'Plov',\n# 'Pizza',\n# 'pirozhok',\n# 'spicy-meat',\n# 'logman',\n# 'Faiza',\n# 'meat',\n# 'tibbon'\n# ]\n\n\n# # jer = place.pop()\n# # food = meal.pop()\n\n# # print(jer,food)\n\n# for i in range(3):\n# print(\"Жер:\",place[randint(0, len(place)-1)], \" -- тамак: \", meal[randint(0, len(meal)-1)])\n\n\n\nname = [\n 'zalkar',\n 'emir',\n 'amanchik',\n 'zarlyk bayke',\n 'iskender bayke',\n 'nurmukhanmed bayke',\n 'bekzat bayke',\n 'aiganysh',\n 'era'\n]\n\nfor a in range(1):\n print(\"team1:\",name[randint(0,len(name)-1)])\n\nfor a in range(1):\n print(\"team2:\",name[randint(0,len(name)-1)])\n\n\nfor a in range(1):\n print(\"team3:\",name[randint(0,len(name)-1)])\n","sub_path":"Practiceself/random2.py","file_name":"random2.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"133892605","text":"\"\"\"\n模拟栈\n实现一个栈,栈初始为空,支持四种操作:\n\n(1) “push x” – 向栈顶插入一个数x;\n\n(2) “pop” – 从栈顶弹出一个数;\n\n(3) “empty” – 判断栈是否为空;\n\n(4) “query” – 
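# Hedged alternative to the team picker above: indexing with
# randint(0, len(name)-1) can hand the same person to several teams;
# random.sample draws without replacement, and random.choice is the
# idiomatic single-pick form.
from random import sample

names = ["zalkar", "emir", "amanchik", "aiganysh", "era"]
teams = sample(names, 3)                # three distinct people
for i, member in enumerate(teams, start=1):
    print("team%d: %s" % (i, member))
assert len(set(teams)) == 3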
查询栈顶元素。\n\n现在要对栈进行M个操作,其中的每个操作3和操作4都要输出相应的结果。\n\n输入格式\n第一行包含整数M,表示操作次数。\n\n接下来M行,每行包含一个操作命令,操作命令为”push x”,”pop”,”empty”,”query”中的一种。\n\n输出格式\n对于每个”empty”和”query”操作都要输出一个查询结果,每个结果占一行。\n\n其中,”empty”操作的查询结果为“YES”或“NO”,”query”操作的查询结果为一个整数,表示栈顶元素的值。\n\n数据范围\n1 ≤ M ≤ 100000,\n1 ≤ x ≤ 10^9\n所有操作保证合法。\n\n输入样例:\n10\npush 5\nquery\npush 6\npop\nquery\npop\nempty\npush 4\nquery\nempty\n输出样例:\n5\n5\nYES\n4\nNO\n\"\"\"\nN = 100000\n\n\nclass stack:\n def __init__(self):\n self.D = [0] * (N + 10)\n self.top = -1 # 直接取\n\n def empty(self):\n return self.top == -1\n\n def push(self, e):\n if self.top < len(self.D):\n self.top += 1\n self.D[self.top] = e\n\n def pop(self):\n if not self.empty():\n self.top -= 1\n return self.D[self.top + 1]\n\n def query(self):\n if not self.empty():\n return self.D[self.top]\n\n\nif __name__ == '__main__':\n s = stack()\n m = int(input())\n while m != 0:\n m -= 1\n op, *p = input().split()\n if op == 'pop':\n s.pop()\n elif op == 'empty':\n print('YES' if s.empty() else 'NO')\n elif op == 'push':\n s.push(int(p[0]))\n elif op == 'query':\n print(s.query())","sub_path":"2021/Algorithm/Python/Base/2_data_structure/828.py","file_name":"828.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258145004","text":"\n\n# Bottom-up approach:\ndef make_palindrome(s):\n if len(s) <= 1:\n return s\n\n table = [['' for i in range(len(s) + 1)] for j in range(len(s) + 1)]\n\n for i in range(len(s)):\n table[i][1] = s[i]\n\n for j in range(2, len(s) + 1):\n for i in range(len(s) - j + 1):\n term = s[i:i + j]\n first, last = term[0], term[-1]\n if first == last:\n table[i][j] = first + table[i+1][j-2] + last\n else:\n one = first + table[i+1][j-1] + first\n two = last + table[i+1][j-1] + last\n if len(one) < len(two):\n table[i][j] = one\n elif len(one) > len(two):\n table[i][j] = two\n else:\n table[i][j] = min(one, two)\n return table[0][-1]\n\nprint(make_palindrome(\"ajdbgoehqgboqgqrhgethkilmpqecgvbih\"))\n","sub_path":"solution/20-02-2020-num34.py","file_name":"20-02-2020-num34.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7292232","text":"#!/usr/bin/python3\nimport rclpy\nfrom rclpy.node import Node\nimport rover_msgs.msg\nimport time\n\nclass ControlMaster(Node):\n\n def __init__(self):\n super().__init__(\"control_master\")\n self.drive_pub = self.create_publisher(rover_msgs.msg.ODrive, \"drive_cmd\", 1)\n # self.arm_pub = self.create_publisher(String, \"arm_cmd\", 10)\n self.telop_drive_sub = self.create_subscription(rover_msgs.msg.ODrive, \"telop_drive_cmd\", self.telop_drive_cmd, 1)\n # self.telop_arm_sub = self.create_subscription(Joy, \"arm_controller_val\", self.telop_arm_cmd, 10)\n self.auto_drive_sub = self.create_subscription(rover_msgs.msg.ODrive, \"auto_drive_cmd\", self.auto_drive_cmd, 1)\n # self.auto_arm_sub = self.create_subscription(String, \"auto_arm_cmd\", self.auto_arm_cmd, 10)\n self.block_time = 0\n self.telop_time = time.time()\n\n def telop_drive_cmd(self, msg):\n self.telop_time = time.time()\n self.block_time = 5\n self.get_logger().info(\"drive_cmd received: \" + str(msg))\n self.drive_pub.publish(msg)\n\n def telop_arm_cmd(self, msg):\n self.telop_time = time.time()\n self.block_time = 5\n self.get_logger().info(\"telop_arm_cmd received: \" + str(msg))\n\n def auto_drive_cmd(self, msg):\n self.get_logger().info(\"auto_drive_cmd received: \" + str(msg))\n if 
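# Quick sanity run for the array-backed stack above (assumes the stack class
# is in scope), mirroring the sample interaction from the problem statement:
# push 5 / query -> 5 / push 6 / pop / query -> 5 / pop / empty -> YES /
# push 4 / query -> 4 / empty -> NO.
s = stack()
s.push(5)
assert s.query() == 5
s.push(6)
s.pop()
assert s.query() == 5
s.pop()
assert s.empty()
s.push(4)
assert s.query() == 4 and not s.empty()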
(time.time() - self.telop_time) >= self.block_time:\n self.block_time = 0\n self.drive_pub.publish(msg)\n\n def auto_arm_cmd(self, msg):\n self.get_logger().info(\"auto_arm_cmd received: \" + str(msg))\n if (time.time() - self.telop_time) >= self.block_time:\n self.block_time = 0\n # self.arm_pub.publish(msg)\n\ndef main(args=None):\n rclpy.init(args=args)\n try:\n control_master = ControlMaster()\n rclpy.spin(control_master)\n except:\n pass\n finally:\n control_master.get_logger().info(\"control_master node shutdown\")\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n control_master.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()\n","sub_path":"ros2ws/rover_main_drive/rover_main_drive/control_master.py","file_name":"control_master.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"469517984","text":"'''\nCreated on Aug 23, 2021\n\n@author: immanueltrummer\n'''\nimport torch.utils.data\nimport transformers\n\nclass CorrelationDS(torch.utils.data.Dataset):\n \"\"\" Represents training data for correlation prediction. \"\"\"\n \n tokenizer = transformers.RobertaTokenizer.from_pretrained('roberta-base')\n \n def __init__(self, columns_1, columns_2, labels):\n \"\"\" Initializes prediction for labeled column pairs.\n \n Args:\n columns_1: names of first columns\n columns_2: names of second columns\n labels: label indicating column correlation\n \"\"\"\n self.encodings = self.tokenizer(\n columns_1, columns_2, \n truncation=True, padding=True)\n self.labels = labels\n \n def __getitem__(self, idx):\n \"\"\" Return item at specified index.\n \n Args:\n idx: index of item to retrieve\n \n Returns:\n item at specified index\n \"\"\"\n item = {key: torch.tensor(val[idx]) \n for key, val in self.encodings.items()}\n item['labels'] = torch.tensor(self.labels[idx])\n return item\n \n def __len__(self):\n \"\"\" Returns number of items. 
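# Hedged sketch of the priority gate ControlMaster implements above: teleop
# commands stamp a time and open a 5 s block window; autonomous commands pass
# only once the window has elapsed. Extracted into a plain class (names are
# illustrative) so the logic can be tested without ROS 2.
import time

class CommandGate:
    def __init__(self, window=5.0):
        self.window = window
        self.last_teleop = 0.0

    def teleop(self):
        self.last_teleop = time.time()

    def auto_allowed(self):
        return (time.time() - self.last_teleop) >= self.window

gate = CommandGate(window=0.2)
gate.teleop()
assert not gate.auto_allowed()     # still inside the teleop window
time.sleep(0.25)
assert gate.auto_allowed()         # window elapsed; autonomy may drive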
\"\"\"\n return len(self.labels)","sub_path":"src/dp/nlp/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438933556","text":"from typing import Tuple, List\nimport numpy as np\nfrom numpy import ndarray\nfrom tensorflow import Tensor\nimport tensorflow as tf\n\nfrom decompose.distributions.distribution import Distribution\nfrom decompose.distributions.distribution import DrawType\nfrom decompose.likelihoods.likelihood import Likelihood\n\n\nclass PostU(object):\n\n def __init__(self, likelihood: Likelihood, prior: Distribution,\n f: int, normalize: bool = False) -> None:\n self.__likelihood = likelihood\n self.__prior = prior\n self.__f = f\n self.__K = prior.shape[0]\n self.__normalize = normalize\n\n def f(self) -> int:\n return(self.__f)\n\n @property\n def prior(self):\n return(self.__prior)\n\n def updateUf(self, Uf, Ufk, k):\n UfUpdated = tf.concat((Uf[:k], Ufk, Uf[k+1:]), 0)\n return(UfUpdated)\n\n def update(self, U: Tensor, X: Tensor, t) -> Tuple[Tensor, Tensor]:\n f, K = self.__f, self.__K\n\n if not t:\n self.prior.update(data=tf.transpose(U[f]))\n\n prepVars = self.__likelihood.lhU[f].prepVars(U, X)\n\n def cond(k, Uf):\n return(tf.less(k, K))\n\n def body(k, U):\n U = self.updateK(k, prepVars, U)\n return(k+1, U)\n\n k = tf.constant(0)\n loop_vars = [k, list(U)]\n\n _, U = tf.while_loop(cond, body, loop_vars)\n return(U[f])\n\n def updateK(self, k, prepVars, U):\n if self.prior.drawType == DrawType.SAMPLE:\n return(self.updateKSample(k, prepVars, U))\n else:\n return(self.updateKSample(k, prepVars, U))\n\n def updateKSample(self, k, prepVars, U):\n f = self.__f\n\n UfShape = U[f].get_shape()\n\n lhUfk = self.__likelihood.lhU[f].lhUfk(U, prepVars, k)\n postfk = lhUfk*self.prior[k].cond()\n Ufk = postfk.draw()\n\n Ufk = tf.expand_dims(Ufk, 0)\n\n isValid = tf.reduce_all(tf.is_finite(Ufk))\n Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),\n lambda: U[f])\n\n # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)\n Uf.set_shape(UfShape)\n U[f] = Uf\n return(U)\n\n def updateKMode(self, k, prepVars, U, normalize, absMax):\n f = self.__f\n\n UfShape = U[f].get_shape()\n\n lhUfk = self.__likelihood.lhU[f].lhUfk(U, prepVars, k)\n postfk = lhUfk*self.prior[k].cond()\n Ufk = postfk.draw()\n if normalize:\n Ufk = Ufk/tf.norm(Ufk)\n Ufk = tf.expand_dims(Ufk, 0)\n\n Ufk = tf.where(tf.greater(tf.abs(Ufk), absMax[k]),\n tf.sign(Ufk)*absMax[k], Ufk)\n\n isValid = tf.reduce_all(tf.is_finite(Ufk))\n Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),\n lambda: U[f])\n\n # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)\n Uf.set_shape(UfShape)\n U[f] = Uf\n\n return(U)\n","sub_path":"decompose/postU/postU.py","file_name":"postU.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"305211529","text":"#16:25\nimport pdb\n\ndef cal(N):\n if cache[N]:\n return cache[N]\n if N == 1:\n return [[1]]\n elif N == 2:\n return [[2]]\n else:\n candidate = merge(add(cal(N-1),1),add(cal(N-2),2))\n candidate = checkRule(candidate)\n return candidate\n\ndef add(lists, a):\n newlists = lists\n for l in newlists:\n l.append(a)\n return newlists\n\ndef merge(lista, listb):\n list = lista\n for e in listb:\n if e not in lista:\n lista.append(e)\n return lista\n\ndef checkRule(candidate):\n for c in candidate:\n index = []\n for i in 
range(len(c)):\n if c[i] == 1:\n index.append(i)\n for j in range(len(index)):\n if j+1 < len(index):\n if index[j+1]-index[j] % 2 == 1:\n candidate.remove(c)\n break\n return candidate\n\n#T = int(input())\n#output = []\n#for index in range(T):\n #R,C = map(int, input().split())\ncache = [[] for x in range(10)]\npdb.set_trace()\nfor i in range(1,10):\n cache[i] = cal(i)\n print(str(i)+\":\"+str(cal(i)))","sub_path":"GoogleCodeJam/2015_R2/DrumDecorator.py","file_name":"DrumDecorator.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54579490","text":"from short_term import Memory\n\"\"\"\n This file is used to pass around Alan's memory and other global values.\n For example, context.short_term_memory will return the same object everywhere\n inside alan. Add references to global data structures and classes here.\n\"\"\"\nshort_term_memory = Memory()\n\n# Boolean to store alan's current sleep state. Default is False.\nsleeping = False\n\n# Boolean to store alan's talking state. Set and unset in alan.speak(). Default to False.\ntalking = False\n\n# Constant phrase to wake alan from sleep state. Needs to match a phrase in keyphrase.list if opearating in passive mode.\nWAKE_PHRASE = \"wake up\"\n\n# This is a list of running background services, the \"stop\" command will use this to kill processes.\nservices = []\n\n# Stop alan from prompting, defaults to false\nno_prompt = False\n\n","sub_path":"memory/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"296496905","text":"#! C:\\Python27\\Python\r\n#initialize the board (10 x 10)\r\n\r\n# line = 'x' * 10\r\n# map = [line] * 10\r\n# x = 0\r\n# y = 0\r\n# userpos = x,y\r\n# for line in map print map\r\n\r\nclass Myclass:\r\n\tx = 0\r\n\ty = 0\r\n\r\n\r\n# def control()\r\n# control = raw_input()\r\n# if control == 'w':\r\n\t# y += 1\r\n# if control == 'a':\r\n\t# x -= 1\r\n# if control == 's':\r\n\t# y -= 1\r\n# if control == 'd':\r\n\t# x += 1\r\n# while true:\r\n","sub_path":"pythontest.py","file_name":"pythontest.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"601259639","text":"\"\"\"Add json_metadata to the tables table.\n\nRevision ID: b46fa1b0b39e\nRevises: ef8843b41dac\nCreate Date: 2016-10-05 11:30:31.748238\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'b46fa1b0b39e'\ndown_revision = 'ef8843b41dac'\n\nfrom alembic import op\nimport logging\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.add_column('tables',\n sa.Column('params', sa.Text(), nullable=True))\n\n\ndef downgrade():\n try:\n op.drop_column('tables', 'params')\n except Exception as e:\n logging.warning(str(e))\n\n","sub_path":"superset/migrations/versions/b46fa1b0b39e_add_params_to_tables.py","file_name":"b46fa1b0b39e_add_params_to_tables.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"488362590","text":"import os\n\nINSTALL_PATH = \"/var/webserver\"\nPORT_NUM = \"80\"\n\nos.system(\"yum install git -y\")\nos.system(\"sudo apt-get install git -y\")\nos.system(\"wget https://bootstrap.pypa.io/get-pip.py --no-check-certificate\")\nos.system(\"python get-pip.py\")\nos.system(\"pip install flask\")\nos.system(\"pip install 
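# Hedged corrected sketch of checkRule() above: `index[j+1]-index[j] % 2 == 1`
# parses as index[j+1] - (index[j] % 2) because % binds tighter than -, and
# calling candidate.remove(c) while iterating over candidate skips elements.
# Filtering with explicit parentheses avoids both pitfalls.
def check_rule(candidates):
    def ok(c):
        idx = [i for i, v in enumerate(c) if v == 1]
        return all((b - a) % 2 == 0 for a, b in zip(idx, idx[1:]))
    return [c for c in candidates if ok(c)]

assert check_rule([[1, 2, 1], [1, 1]]) == [[1, 2, 1]]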
Flask-WTF\")\nos.system(\"pip install simplejson\")\nos.system(\"mkdir \"+INSTALL_PATH)\nos.system(\"rm \"+INSTALL_PATH+\"/fileShare -rf\")\nos.chdir(INSTALL_PATH)\nos.system(\"git clone git://github.com/zhenchaochen/fileShare\")\nos.system(\"iptables -I INPUT -p tcp --dport \"+PORT_NUM+\" -j ACCEPT\")\n\n#create start script\nf = open(\"/etc/init/fileShare.conf\",'w')\nf.write('\\n'+'start on startup')\nf.write('\\n'+'respawn')\nf.write('\\n'+'respawn limit 100 0')\nf.write('\\n'+'exec iptables -I INPUT -p tcp --dport '+PORT_NUM+' -j ACCEPT')\nf.write('\\n'+'chdir '+INSTALL_PATH+'/fileShare')\nf.write('\\n'+'exec git pull')\nf.write('\\n'+'exec python run.py '+PORT_NUM)\nf.close()\n\nos.system(\"start fileShare\")\nos.system(\"sudo start fileShare\")\n\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269273390","text":"import numpy as np\nfrom scipy.sparse import dok_matrix, csr_matrix, coo_matrix\nimport time\nimport math\nfrom error import rmse2\nfrom util import fetch, initUV\nfrom pycuda import driver, compiler, gpuarray, tools\nimport pycuda.autoinit\n\nkernel_code = open('bgsvd_kernel.c', 'r').read()\nmod = compiler.SourceModule(kernel_code)\nmatrixfact = mod.get_function(\"SGD\")\n\ndef sgd(UU,MM,RR, U, V, ulimits, bu, bi, gmean, umin, mmin, latent=30, gpu_steps=1, alpha=0.0002, beta=0.01, delta=0.01, debug=2):\n\n u_gpu = gpuarray.to_gpu(np.array(UU).astype(np.int32))\n v_gpu = gpuarray.to_gpu(np.array(MM).astype(np.int32))\n r_gpu = gpuarray.to_gpu(np.array(RR).astype(np.int32))\n\n a_gpu = gpuarray.to_gpu(np.array(U).astype(np.float32))\n b_gpu = gpuarray.to_gpu(np.array(V).astype(np.float32))\n\n ul_gpu = gpuarray.to_gpu(np.array(ulimits).astype(np.int32))\n bu_gpu = gpuarray.to_gpu(np.array(bu).astype(np.int32))\n bi_gpu = gpuarray.to_gpu(np.array(bi).astype(np.int32))\n \n t7 = time.clock()\n print(\"Ulimits \", ulimits)\n\n if debug>1:\n print(\"Length of uu,mm \", len(UU), len(MM), len(U), len(V) )\n\n matrixfact(\n u_gpu, v_gpu, r_gpu, a_gpu, b_gpu,\n np.int32(latent), ul_gpu, bu_gpu, bi_gpu, np.int32(gmean), np.int32(umin), np.int32(mmin), np.int32(gpu_steps),\n np.float32(alpha), np.float32(beta), np.float32(delta),\n block=(16,16,1),grid=(1,1)\n )\n\n P = a_gpu.get()\n Q = b_gpu.get()\n BU = bu_gpu.get()\n BI = bi_gpu.get()\n\n t8 = time.clock()\n\n if debug>1:\n print(\"Shape of P, Q :\", P.shape, Q.shape)\n\n return P, Q, BU, BI\n\ndef pack(UU, MM, RR, uu, mm, rr, ulimits):\n\n ulimits.append(ulimits[len(ulimits)-1]+len(uu))\n\n UU.extend(uu)\n MM.extend(mm)\n RR.extend(rr)\n\n return UU, MM, RR, ulimits\n\ndef factorize(users, movies, ratings, test_users, test_movies, test_ratings, blocks=1, latent=12, steps=10, gpu_steps=1, alpha=0.0002, beta=0.02, delta=0.01, rmse_repeat_count=3, debug=2, dataset=''):\n\n U, V = initUV( np.max(users)-np.min(users)+1, latent, np.max(movies)-np.min(movies)+1)\n bu = np.zeros(np.max(users)-np.min(users)+1)\n bi = np.zeros(np.max(movies)-np.min(movies)+1)\n global_mean = np.mean(ratings)\n\n U = np.array(U)\n V = np.array(V)\n\n size = max(np.max(users)+1, np.max(movies)+1)\n split = int(size/blocks)\n us = int(math.ceil( np.float(np.max(users))/split ) )\n vs = int(math.ceil( np.float(np.max(movies))/split ) )\n if debug>1:\n print(\"Total splits : \",split, us, vs, us*vs)\n print(\"U, V shapes :\", U.shape, V.shape)\n\n start_time=time.clock()\n y1, y2 = [], []\n count, error = 0, 100\n 
\n for k in range(steps):\n\n if debug>1:\n print(\"Step : \", k)\n\n u1, v1 = 0, 0\n t4 = time.clock()\n\n for i in range(us):\n u1 = i*split\n if np.max(users) < u1:\n u1 = int(np.max(users))\n\n u2 = ((i+1)*split - 1)\n if np.max(users) < u2:\n u2 = int(np.max(users))\n\n stemp = 0\n UU, MM, RR = [], [], []\n ulimits = [0]\n\n for j in range(vs):\n xtemp = int((i+stemp)%us)\n\n print(\"i, j, ii, jj \", i, j, xtemp, j)\n\n u1 = xtemp*split\n if np.max(users) < u1:\n u1 = int(np.max(users))\n\n u2 = ((xtemp+1)*split - 1)\n if np.max(users) < u2:\n u2 = int(np.max(users))\n\n v1 = j*split\n if np.max(movies) < v1:\n v1 = int(np.max(movies))\n \n v2 = (j+1)*split -1\n if np.max(movies) < v2:\n v2 = int(np.max(movies))\n\n print(\"Processing split : \" , i , j, u1, u2, v1, v2)\n\n uu, mm, rr = fetch(u1,u2, v1,v2, users,movies,ratings)\n\n if(len(uu)!=0 and len(mm)!=0):\n UU,MM,RR, ulimits = pack(UU,MM,RR, uu,mm,rr, ulimits)\n\n stemp+=1\n U, V, bu, bi = sgd(UU,MM,RR, U,V, ulimits,bu, bi, global_mean, np.min(users), np.min(movies))\n np.savetxt('x_U'+str(k)+'.txt', U, fmt='%.3f')\n np.savetxt('x_V'+str(k)+'.txt', V, fmt='%.3f')\n\n t5 = time.clock()\n if debug>1:\n print(\" Step time taken : \", round(t5-t4,2))\n\n y1.append(round(t5-start_time,3))\n train_rmse = rmse2(users, movies, ratings, U, V, bu, bi, global_mean)\n test_rmse = rmse2(test_users, test_movies, test_ratings, U, V, bu, bi, global_mean)\n print(\"Train error:\", round(train_rmse, 4) , \" Test error:\", round(test_rmse,4) )\n y2.append(round(test_rmse,3) )\n\n step_error=round(test_rmse,4)\n \n if step_error < delta:\n break\n elif error None:\n filename = os.path.join(base_dir, str.format('{cc}.gif', cc=cc.lower()))\n print(str.format('[~] filename: {}', os.path.basename(filename)))\n\n with open(filename, 'wb') as fd:\n fd.write(image)\n\n\nasync def flag_get(session: aiohttp.ClientSession, url: str, cc: str):\n print(str.format('[~] flag \"{}\" download by url: {}', cc, url))\n\n async with session.get(url) as response:\n image = await response.read()\n\n return image, cc\n\n\nasync def runner(\n base_url: str,\n base_dir: str,\n country_codes: List[str]\n):\n to_do_tasks = []\n\n async with aiohttp.ClientSession() as session:\n\n for cc in country_codes:\n url = str.format('{}/{cc}/{cc}.gif', base_url, cc=cc.lower())\n to_do_tasks.append(flag_get(session, url, cc))\n\n completed_tasks = asyncio.as_completed(to_do_tasks)\n\n for i, completed_task in enumerate(completed_tasks, start=1):\n image, cc = await completed_task\n flag_save(image, base_dir, cc)\n print(str.format('[~] task # {} was completed', i))\n\n\ndef main():\n loop = asyncio.get_event_loop()\n\n try:\n loop.run_until_complete(runner(BASE_URL, BASE_DIR, COUNTRY_CODES))\n finally:\n loop.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"packages/async_packages/aiohttp_package/examples/flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66023577","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AttributeMapping',\n fields=[\n ('attribute_mapping_id', models.AutoField(serialize=False, primary_key=True, db_column=b'ATTRIBUTE_MAPPING_ID')),\n ('source_attribute_name', models.CharField(max_length=500, db_column=b'SOURCE_ATTRIBUTE_NAME')),\n 
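# Hedged variant of the downloader pattern in flags.py above: as_completed()
# fires every request at once; a semaphore caps concurrency, which is gentler
# on the server. The sleep stands in for the aiohttp GET; names are
# illustrative, not part of the original module.
import asyncio

async def fetch_limited(tag, sem):
    async with sem:
        await asyncio.sleep(0.01)      # placeholder for session.get(url)
        return tag

async def runner_limited(tags, limit=3):
    sem = asyncio.Semaphore(limit)
    return await asyncio.gather(*(fetch_limited(t, sem) for t in tags))

print(asyncio.run(runner_limited(["BR", "DE", "IN", "JP"])))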
('target_attribute_name', models.CharField(max_length=500, db_column=b'TARGET_ATTRIBUTE_NAME')),\n ],\n options={\n 'db_table': 'ETL_CONF_ATTRIBUTE_MAPPING',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Process',\n fields=[\n ('process_id', models.AutoField(serialize=False, primary_key=True, db_column=b'PROCESS_ID')),\n ('name', models.CharField(max_length=50, db_column=b'NAME')),\n ],\n options={\n 'db_table': 'ETL_CONF_PROCESS',\n 'verbose_name_plural': 'processes',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Rooting',\n fields=[\n ('rooting_id', models.AutoField(serialize=False, primary_key=True, db_column=b'ROOTING_ID')),\n ('rooting_name', models.CharField(max_length=500, null=True, db_column=b'ROOTING_NAME', blank=True)),\n ('rooting_type', models.CharField(blank=True, max_length=50, null=True, db_column=b'ROOTING_TYPE', choices=[(b'TEMP_FULL_LOAD', b'Temp Full Load'), (b'DELETE_PARTITION_AND_INSERT', b'Delete Partition Insert'), (b'MERGE_SCD_TYPE_1', b'Merge SCD Type 1'), (b'MERGE_SCD_TYPE_2', b'Merge SCD Type 2')])),\n ('sequence', models.IntegerField(help_text=b'Order in which rooting will execute when process is run. Rootings with smaller sequence numbers are executed before those with bigger ones.', null=True, verbose_name=b'sequence number', db_column=b'SEQUENCE', blank=True)),\n ('process', models.ForeignKey(db_column=b'PROCESS', blank=True, to='configurations.Process', null=True)),\n ],\n options={\n 'db_table': 'ETL_CONF_ROOTING',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Rule',\n fields=[\n ('rule_id', models.AutoField(serialize=False, primary_key=True, db_column=b'RULE_ID')),\n ('source_column', models.CharField(db_column=b'SOURCE_COLUMN', max_length=500, blank=True, help_text=b'Leave this blank to compare target attribute to condition value.', null=True, verbose_name=b'source attribute name')),\n ('condition', models.CharField(blank=True, max_length=500, null=True, db_column=b'CONDITION', choices=[(b'EQUAL', b'Equal'), (b'NOT EQUAL', b'Not equal'), (b'GREATER THAN', b'Greater than'), (b'LESS THAN', b'Less than'), (b'GREATER THAN OR EQUAL', b'Greater than or Equal'), (b'LESS THAN OR EQUAL', b'Less than or equal')])),\n ('target_column', models.CharField(db_column=b'TARGET_COLUMN', max_length=500, blank=True, help_text=b'Leave this blank to compare source attribute to condition value.', null=True, verbose_name=b'target attribute name')),\n ('condition_value', models.CharField(help_text=b'Leave this blank to compare source attribute to target attribute.', max_length=500, null=True, db_column=b'CONDITION_VALUE', blank=True)),\n ('operation', models.CharField(max_length=500, db_column=b'OPERATION', choices=[(b'AND', b'AND'), (b'OR', b'OR')])),\n ],\n options={\n 'db_table': 'ETL_CONF_RULES',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Source',\n fields=[\n ('source_id', models.AutoField(serialize=False, primary_key=True, db_column=b'SOURCE_ID')),\n ('source_name', models.CharField(max_length=500, db_column=b'SOURCE_NAME')),\n ('source_query', models.TextField(db_column=b'SOURCE_QUERY')),\n ('source_group', models.CharField(max_length=500, db_column=b'SOURCE_GROUP')),\n ('source_db', models.CharField(max_length=100, db_column=b'SOURCE_DB')),\n ('source_schema', models.CharField(max_length=100, db_column=b'SOURCE_SCHEMA')),\n ],\n options={\n 'db_table': 'ETL_CONF_SOURCE',\n 'verbose_name': 'data source',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n 
name='Structure',\n fields=[\n ('structure_id', models.AutoField(serialize=False, primary_key=True, db_column=b'STRUCTURE_ID')),\n ('sequence', models.IntegerField(help_text=b\"Order in which optional section will appear if there's more than one.\", max_length=500, verbose_name=b'sequence number', db_column=b'SEQUENCE')),\n ('section', models.CharField(max_length=500, verbose_name=b'section type', db_column=b'SECTION', choices=[(b'ON', b'ON'), (b'MATCHED', b'WHEN MATCHED'), (b'NOT MATCHED', b'WHEN NOT MATCHED')])),\n ('rooting_id', models.ForeignKey(related_name='optional_query_sections', db_column=b'ROOTING_ID', verbose_name=b'rooting', to='configurations.Rooting')),\n ],\n options={\n 'db_table': 'ETL_CONF_STRUCTURE',\n 'verbose_name': 'optional query section',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Target',\n fields=[\n ('target_id', models.AutoField(serialize=False, primary_key=True, db_column=b'TARGET_ID')),\n ('target_name', models.CharField(max_length=500, db_column=b'TARGET_NAME')),\n ('target_group', models.CharField(max_length=500, db_column=b'TARGET_GROUP')),\n ('target_db', models.CharField(max_length=100, db_column=b'TARGET_DB')),\n ('target_schema', models.CharField(max_length=100, db_column=b'TARGET_SCHEMA')),\n ],\n options={\n 'db_table': 'ETL_CONF_TARGET',\n 'verbose_name': 'data target',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='rule',\n name='structure_id',\n field=models.ForeignKey(db_column=b'STRUCTURE_ID', verbose_name=b'query section', to='configurations.Structure'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='rooting',\n name='source_id',\n field=models.ForeignKey(db_column=b'SOURCE_ID', verbose_name=b'data source', to='configurations.Source'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='rooting',\n name='target_id',\n field=models.ForeignKey(db_column=b'TARGET_ID', verbose_name=b'data target', to='configurations.Target'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='attributemapping',\n name='rooting_id',\n field=models.ForeignKey(db_column=b'ROOTING_ID', verbose_name=b'rooting', to='configurations.Rooting'),\n preserve_default=True,\n ),\n ]\n","sub_path":"configurations/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"342829305","text":"import numpy as np\n\nfrom matchlistID import matchlistID\n\n\ndef matchFLCdrc(targname,flcFile,drcFile,dir='./',matchtol=1):\n\n flcN = np.genfromtxt(dir+flcFile,names=True)\n flc = np.genfromtxt(dir+flcFile)\n\n drcN = np.genfromtxt(dir+drcFile,names=True)\n drc = np.genfromtxt(dir+drcFile)\n\n colFs = np.array(flcN.dtype.names)\n colDs = np.array(drcN.dtype.names)\n\n xF = np.int(np.where(colFs=='x_DRCtrans')[0])\n yF = np.int(np.where(colFs=='y_DRCtrans')[0])\n f606w_mag = np.int(np.where(colFs=='mean_f606w')[0])\n f814w_mag = np.int(np.where(colFs=='mean_f814w')[0])\n f606w_err = np.int(np.where(colFs=='stdev_f606w')[0])\n f814w_err = np.int(np.where(colFs=='stdev_f814w')[0])\n\n xD = np.int(np.where(colDs=='xcenter_f606w')[0])\n yD = np.int(np.where(colDs=='ycenter_f606w')[0])\n\n idColF = len(colFs)\n newCol = np.zeros((len(flc),1),dtype=int)\n newCol[:,0] = np.arange(0,len(flc),1)\n\n flc_id = np.hstack((flc,newCol))\n\n idColD = len(colDs)\n newCol = np.zeros((len(drc),1),dtype=int)\n newCol[:,0] = np.arange(0,len(drc),1)\n drc_id = 
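# Hedged modern-NumPy sketch of the column-index lookup in matchFLCdrc()
# above: np.int was removed in NumPy 1.24, and np.where(...) returns a tuple
# of arrays, so the current idiom is a plain int() on the first match.
import numpy as np

cols = np.array(["x_DRCtrans", "y_DRCtrans", "mean_f606w"])
xF = int(np.where(cols == "x_DRCtrans")[0][0])
assert xF == 0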
np.hstack((drc,newCol))\n\n master_in = drc_id[:,[idColD,xD,yD]]\n\n idD, xd, yd = 0, 1, 2\n\n cat = flc_id\n\n nF_out = True\n\n minLen = len(drc_id)\n\n while nF_out:\n master, matchids = matchlistID(master_in,cat,matchtol,xd,yd,xF,yF,\n idColF)\n\n if len(master)>=int(0.65*minLen):\n nF_out = False\n print('Minimum Number Reached: %d' % len(master),targname)\n\n else:\n print('Need More Stars')\n print(\"Pixel Tolerance: %d, Number Stars: %d\" % (matchtol,\n len(master)))\n\n matchtol += 1\n if matchtol <= 4:\n master_in = drc_id[:,[idColD,xD,yD]]\n matchids = np.zeros((len(master_in),1))\n else:\n print(\"Sacrificing number of stars for quality of matches.\")\n nF_out = False\n\n master = np.hstack((master,matchids))\n print(targname, len(master)/minLen)\n\n idD, xd, yd, idF = 0, 1, 2, 3\n\n idColF = master[:,idF]\n idxF = np.asarray(idColF,int)\n regF = flc[idxF]\n\n newCols = np.zeros((len(regF),4))\n newCols = regF[:,[f606w_mag,f814w_mag,f606w_err,f814w_err]]\n\n idColD = master[:,idD]\n idxD = np.asarray(idColD,int)\n regD = drc[idxD]\n\n allOut = np.hstack((regD,regF))\n headerD = ' '.join(colDs)\n headerF = ' '.join(colFs)\n\n headerAll = headerD + ' ' + headerF\n np.savetxt(dir+targname+'_fullCat.dat',allOut,header=headerAll)\n\n shortOut = np.hstack((regD,newCols))\n headerShort = headerD + ' meanFLC_f606w meanFLC_f814w err_f606w err_f814w'\n\n np.savetxt(dir+targname+'_wErr.dat',shortOut,header=headerShort)\n\n return None\n\n#\n","sub_path":"codes23Oct/matchFLCdrc.py","file_name":"matchFLCdrc.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"265247128","text":"print(\"hello word\")\nimport turtle\nt=turtle\nprint(0 or 1)\n# t.setheading(-90)\n\nt.circle(34,steps=3)\n\nt.forward(170)\nt.left(90)\nt.forward(34)\nt.left(90)\nt.forward(170)\nprint(__name__)\nt.done()\n","sub_path":"turtle4.py","file_name":"turtle4.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647375419","text":"'''\nGiven a string s, find the length of the longest substring without repeating characters.\n\n\n\nExample 1:\n\nInput: s = \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\nExample 2:\n\nInput: s = \"bbbbb\"\nOutput: 1\nExplanation: The answer is \"b\", with the length of 1.\nExample 3:\n\nInput: s = \"pwwkew\"\nOutput: 3\nExplanation: The answer is \"wke\", with the length of 3.\nNotice that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\nExample 4:\n\nInput: s = \"\"\nOutput: 0\n\n\nConstraints:\n\n0 <= s.length <= 5 * 104\ns consists of English letters, digits, symbols and spaces.\n\n'''\nfrom typing import *\n\nclass Solution:\n # 내 풀이 (52ms, 89%)\n def lengthOfLongestSubstring(self, s: str) -> int:\n maxlen = 0\n substring = ''\n\n for char in s:\n while char in substring:\n substring = substring[1:]\n substring += char\n maxlen = max(len(substring), maxlen)\n\n return maxlen\n\n","sub_path":"ch11/kio/ch11_3_kio.py","file_name":"ch11_3_kio.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100357163","text":"def check(hub, output, expected, print_result: bool):\n result = \"Pass\"\n if expected is None:\n return \"Fail: Missing expected input\"\n if output is None:\n return \"Fail: Module output is None\"\n if isinstance(output, bool):\n 
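# Hedged O(n) alternative to lengthOfLongestSubstring above: repeatedly
# slicing `substring` makes the window shrink cost quadratic in the worst
# case; remembering each character's last index moves the window start in
# O(1) per step.
def length_of_longest_substring(s):
    last = {}
    start = best = 0
    for i, ch in enumerate(s):
        if ch in last and last[ch] >= start:
            start = last[ch] + 1
        last[ch] = i
        best = max(best, i - start + 1)
    return best

for text, want in [("abcabcbb", 3), ("bbbbb", 1), ("pwwkew", 3), ("", 0)]:
    assert length_of_longest_substring(text) == want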
return \"Fail: Module output is a boolean\"\n if isinstance(expected, bool):\n return \"Fail: Assertion value is a boolean\"\n try:\n\n if print_result:\n assert float(expected) > float(\n output\n ), f\"{float(expected)} is not greater than {float(output)}\"\n else:\n assert float(expected) > float(output), \"Result is not greater\"\n except (AssertionError, ValueError) as err:\n result = f\"Fail: {err}\"\n return result\n","sub_path":"validator/validator/assertions/assertGreater.py","file_name":"assertGreater.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554096364","text":"class Account:\n\taccnocounter=10000\n\tdef __init__(self,name,mobileno,aadharno,balance=0):\n\t\tself.accno= Account.accnocounter\n\t\tAccount.accnocounter+=1\n\t\tself.name=name\n\t\tself.mobileno=mobileno\n\t\tself.aadharno=aadharno\n\t\tself.balance=balance\n\t\t\n\tdef withdraw(self,money):\n\t\tif (self.balance >= money) and self.balance!=0:\n\t\t\tself.balance-=money\n\t\telse:\n\t\t\tprint(\"Insufficient balance\")\n\t\t\t\n\tdef deposit(self,money):\n\t\tself.balance+=money\n\t\t\n\tdef __repr__(self):\n\t\treturn \"Name: \"+str(self.name)+\"\\nAccount No: \"+str(self.accno)+\"\\nBalance: \"+str(self.balance)+\"\\nMobile: \"+str(self.mobileno)+\"\\nAadharno: \"+str(self.aadharno)+\"\\n\"\n\t\n\tdef transfer(self,obj,money):\n\t\tif isinstance(obj,Account):\n\t\t\tif (self.balance >= money) and self.balance!=0:\n\t\t\t\tself.balance-=money\n\t\t\t\tobj.balance+=money\n\t\t\telse:\n\t\t\t\tprint(\"Insufficient balance to Transfer\")\n\t\telse:\n\t\t\tprint(\"Error: transfer not possible\")\n\t\t\t\n\t\t\t\ndef main():\n\tc=Account(\"Siddharth\",7387838949,9890544498,500)\n\tprint(c)\n\tc.withdraw(200)\n\tprint(c)\n\tc.deposit(700)\n\tprint(c)\n\td=Account(\"Rohini\",9970337780,8237146020,700)\n\tprint(d)\n\td.transfer(c,300)\n\tprint(c)\n\tprint(d)\n\t\n\t\nif __name__==\"__main__\":\n\tmain()","sub_path":"1.Class/Language_Python-master/Language_Python-master/LC21_1_AccountClass.py","file_name":"LC21_1_AccountClass.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606460079","text":"# coding: utf-8\n\nimport unittest\nimport os\nimport sys\n\nimport requests_mock\nfrom unittest import mock\n\nsys.path.append(\"../yyetsbot\")\nimport yyetsbot as _\n\nfrom fansub import BaseFansub, YYeTsOnline, YYeTsOffline\n\n\nclass TestBaseFunsub(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.ins = BaseFansub()\n cls.cookie_jar = dict(name=\"hello\")\n cls.ins.cookie_file = \"test_cookies.dump\" # generate on tests/test_cookies.dump\n\n @classmethod\n def tearDownClass(cls) -> None:\n cls().ins.redis.flushall()\n os.unlink(cls().ins.cookie_file)\n\n def test_save_cookies(self):\n self.ins.__save_cookies__(self.cookie_jar)\n exists = os.path.exists(self.ins.cookie_file)\n self.assertTrue(exists)\n\n def test_load_cookies(self):\n self.test_save_cookies()\n cookie = self.ins.__load_cookies__()\n self.assertEqual(cookie, self.cookie_jar)\n\n def test_get_from_cache(self):\n value = self.ins.__get_from_cache__(\"http://test.url\", \"__hash__\")\n self.assertEqual(value, self.ins.__hash__())\n\n def test_save_to_cache(self):\n # never expire\n url = \"http://test2.url\"\n self.ins.__save_to_cache__(url, self.cookie_jar)\n cache_copy = self.ins.__get_from_cache__(url, \"never mind method\")\n 
self.assertEqual(cache_copy, self.cookie_jar)\n\n\nclass TestYYeTsTestOnline(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.ins = YYeTsOnline()\n cls.cookie_jar = dict(name=\"hello yyets\")\n cls.ins.cookie_file = \"test_cookies.dump\" # generate on tests/test_cookies.dump\n cls.ins.url = \"http://www.rrys2020.com/resource/1988\"\n with open(\"data/yyets_search.html\") as f:\n cls.search_html = f.read()\n\n @classmethod\n def tearDownClass(cls) -> None:\n cls().ins.redis.flushall()\n # os.unlink(cls().ins.cookie_file)\n\n def test_get_id(self):\n self.assertEqual(self.ins.id, \"1988\")\n\n @requests_mock.mock()\n def test_get_search_html(self, m):\n m.get('http://www.rrys2020.com/search?keyword=abc&type=resource', text=self.search_html)\n response = self.ins.__get_search_html__(\"abc\")\n self.assertEqual(self.search_html, response)\n\n @requests_mock.mock()\n def test_search_preview(self, m):\n kw = \"abc\"\n m.get(f'http://www.rrys2020.com/search?keyword={kw}&type=resource', text=self.search_html)\n results = self.ins.search_preview(kw)\n results.pop(\"source\")\n for name in results.values():\n self.assertIn(kw, name.lower())\n\n # TODO....\n def test_search_result(self):\n pass\n\n\nclass TestYYeTsTestOffline(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.ins = YYeTsOffline(db=\"test\")\n\n @classmethod\n def tearDownClass(cls) -> None:\n cls().ins.mongo.close()\n\n def test_search_preview(self):\n kw = \"逃避\"\n results = self.ins.search_preview(kw)\n self.assertEqual(results[\"source\"], self.ins.label)\n results.pop(\"source\")\n self.assertEqual(3, len(results))\n for name in results.values():\n self.assertIn(kw, name)\n\n def test_search_result(self):\n url = \"http://www.rrys2020.com/resource/34812\"\n results = self.ins.search_result(url)\n self.assertIn(str(results['all']['data']['info']['id']), url)\n self.assertIn(\"逃避可耻\", results[\"cnname\"])\n self.assertIn(\"34812\", results[\"share\"])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_fansub.py","file_name":"test_fansub.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"469386307","text":"# You are climbing a stair case. It takes n steps to reach to the top.\r\n#\r\n# Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\r\n\r\n\r\nclass Solution(object):\r\n def climbStairs(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: int\r\n \"\"\"\r\n if n == 1 or n == 2:\r\n return n\r\n\r\n arr = [0 for i in range(n + 1)]\r\n arr[1] = 1\r\n arr[2] = 2\r\n for i in range(3, n + 1):\r\n arr[i] = arr[i - 1] + arr[i - 2]\r\n return arr[n]\r\n\r\n\r\ns = Solution()\r\nprint(s.climbStairs(3))\r\n","sub_path":"Python/70. Climbing Stairs.py","file_name":"70. 
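# Hedged O(1)-space variant of climbStairs above: the DP table only ever
# reads its two previous entries, so two rolling variables suffice (the
# answer is a Fibonacci number).
def climb_stairs(n):
    a, b = 1, 1                    # ways to reach step 0 and step 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b

assert [climb_stairs(n) for n in (1, 2, 3, 5)] == [1, 2, 3, 8]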
Climbing Stairs.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455781806","text":"'''Byte-at-a-time ECB decryption (Harder)\nAES-128-ECB(random-prefix || attacker-controlled || target-bytes, random-key)\nattack to find target-bytes\nIn this challenge I use a random-prefix of length 3-10; if it is longer, only a few extra alignment steps are needed ''' \nfrom Crypto.Cipher import AES \nfrom os import urandom \nfrom random import randint \n\nrandom_prefix = urandom (randint(3,10)) \ntargets_bytes = urandom(randint(3,10))\n\ndef PKCS7(m,length):\n ch = length - len(m) % length \n return m + bytes([ch]) * ch \n\ndef unPad(c):\n return c[:-c[-1]] \n\ndef encrypt_oracle (s):\n s = random_prefix + s + targets_bytes\n s = PKCS7(s,16)\n cipher = AES.new(key,AES.MODE_ECB)\n return cipher.encrypt(s) \n\ndef length_detect(encrypt_oracle):\n for length in range(2,41):\n s = b'0' * (3*length) \n encode_s = encrypt_oracle(s)\n if encode_s[2*length : 3*length] == encode_s[length : 2*length]:\n return length\n \ndef detect_length_prefix(encrypt_oracle):\n s1 = b'0'\n s2 = b'1'\n length = 15 \n while True : \n e_s1 = encrypt_oracle(s1)\n e_s2 = encrypt_oracle(s2) \n if e_s1[16:32] != e_s2[16:32] : # when s1 + prefix is a block \n return length + 1\n length -= 1 \n s1 = b'0' + s1 \n s2 = b'0' + s2 \n \ndef detect_length_suffix_prefix(encrypt_oracle):\n s = b\"\"\n l1 = len(encrypt_oracle(s)) \n l2 = l1 \n i = 0 \n while l2 == l1 : \n s += b\"0\"\n l2 = len(encrypt_oracle(s))\n i+= 1 \n return l1 - i \n\ndef next_bytes(knowbytes,encrypt_oracle):\n string = b\"0\" * (KEYSIZE - len_prefix) + b\"0\" * (KEYSIZE - len(knowbytes) % KEYSIZE - 1 ) \n encode_s = encrypt_oracle(string)\n for ch in range(256):\n string_guess = string + knowbytes + bytes([ch])\n encode_s_guess = encrypt_oracle(string_guess)\n if encode_s_guess[:len(string_guess) ] == encode_s[:len(string_guess)]:\n return bytes([ch])\n\ndef attack() : \n print(\"[*] attack to find suffix.........\")\n knowbytes = b\"\"\n for i in range(len_target):\n knowbytes += next_bytes(knowbytes,encrypt_oracle)\n return knowbytes \n\nkey = urandom(16) \nif __name__ == \"__main__\": \n global KEYSIZE \n print(\"[*] detect keysize........\")\n KEYSIZE = length_detect(encrypt_oracle)\n print(\"KEYSIZE = %d\" % KEYSIZE)\n\n print(\"[*] Detecting prefix length.......\")\n len_prefix = detect_length_prefix(encrypt_oracle)\n assert len_prefix == len(random_prefix)\n print(\"Prefix length is %d \" % len_prefix)\n\n print(\"[*] Detecting target length........\")\n len_target = detect_length_suffix_prefix(encrypt_oracle) - len_prefix\n assert len_target == len(targets_bytes)\n print(\"Target length is %d \" % len_target)\n\n print(attack())\n","sub_path":"Crypto/AES/byte_at_time.py","file_name":"byte_at_time.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309114750","text":"# Simple brute-force search using BFS\n# Input and initial setup\nH, W = map(int,input().split())\nmaze = list(list(input()) for _ in range(H))\nINF = -1\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\n\n\n\ndef bfs():\n while queue:\n now_row,now_column = queue.pop(0)\n\n # Explore the four neighbouring cells (up, down, left, right)\n # If a neighbour is inside the maze, unvisited and not a wall, update visited and append that cell to the queue (standard BFS)\n for i in range(4):\n next_row, next_column = now_row + dy[i] , now_column + dx[i]\n if 0 <= next_row < H and 0 <= next_column < W and \\\n maze[next_row][next_column] != \"#\" and visited[next_row][next_column] == INF:\n visited[next_row][next_column] = visited[now_row][now_column] + 1\n queue.append([next_row,next_column])\n\n\nans = 0\nfor column in range(W):\n for row in range(H):\n if maze[row][column] == \"#\":\n continue\n # Not sure whether deque is available here, so a plain list is used instead\n queue = [[row, column]]\n # visited stores the minimum number of steps from the start cell\n visited = [[INF] * W for _ in range(H)]\n visited[row][column] = 0\n bfs()\n tmp_max = 0\n for i in range(H):\n if tmp_max < max(visited[i]):\n tmp_max = max(visited[i])\n if tmp_max > ans:\n ans = tmp_max\nprint(ans)\n","sub_path":"ABC/151/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"91527498","text":"\nfrom behave import use_step_matcher, given, when, then, step\nuse_step_matcher(\"re\")\n\n\ndef assert_equal_wrapper(lhs, rhs):\n assert lhs == rhs, f\"'{lhs}' does not equal '{rhs}'\"\n\n\n@given(r'the application is in headless mode')\ndef api_is_up(context):\n from os import environ\n environ['HEADLESS'] = 'True' # os.environ values must be strings, not booleans\n\n\n@when(r'I spin up the application')\ndef start_application_execution(context):\n from PyQt5.QtWidgets import QApplication\n if QApplication.instance() is None:\n app = QApplication([])\n context.app_ = app\n\n\n@when(r'I wait (\\d+) (second|seconds|minute|minutes|hour|hours)')\ndef wait(context, timeout, denomination):\n from time import sleep\n timeout = int(timeout)\n\n if \"second\" in denomination:\n sleep(timeout)\n elif \"minute\" in denomination:\n timeout *= 60\n sleep(timeout)\n elif \"hour\" in denomination:\n timeout *= (60 * 60)\n sleep(timeout)","sub_path":"test/steps/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589991674","text":"import numpy as np\r\n\r\n\r\ndef gauss1D(m, v, N, w):\r\n pos = np.arange(-w, w - w / N, 2 * w / N)\r\n insE = -0.5 * ((pos - m) / v) ** 2\r\n norm = 1 / (v * np.sqrt(2 * np.pi))\r\n res = norm * np.exp(insE)\r\n realDensity = np.stack((pos, res), axis=1)\r\n return realDensity\r\n
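\r\n\r\n# --- Added illustrative sketch (not part of the original file): one assumed way\r\n# to call gauss1D; the mean, spread, point count and window below are invented\r\n# purely for demonstration.\r\nif __name__ == \"__main__\":\r\n demo = gauss1D(0.0, 1.0, 100, 3.0)\r\n print(demo.shape) # (100, 2): column 0 = sample positions, column 1 = density values\r\n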
","sub_path":"gauss1D.py","file_name":"gauss1D.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288947903","text":"import asyncio, uvloop\nfrom albatross import Server\n\n\nclass Handler:\n async def on_get(self, _, res):\n res.write('x' * (1024 * 100))\n\n\nif __name__ == "__main__":\n asyncio.set_event_loop(uvloop.new_event_loop())\n app = Server()\n app.add_route('/', Handler())\n app.serve(port=8080)","sub_path":"benchmarks/albatross_uvloop.py","file_name":"albatross_uvloop.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"449509189","text":"# -*-coding:UTF-8 -*\n\n\"\"\"\nCamille Schneider\n\"\"\"\n\"\"\"Drawing, Style\"\"\"\n\nfrom Drawing import Drawing\nfrom Content import Content\n\nimport Frame\nimport BoundingBox\n\n\nclass Arrow(Drawing):\n \"\"\"This class draws arrows in svg blocks.\n Users set length, angle, thickness and color\"\"\"\n\n numero_triangle = 0\n\n\n #--------------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, width=10, height=10, thickness=5, fe=\"black\", fl=\"black\", style=None, x=None, y=None, x2=None,\n y2=None):\n from Style import Style\n\n \"\"\"Five arguments can be entered in this order :\n First the length of the arrow and the angle in radians that the arrow makes with the abscissa axis.\n The angle is only defined at the end, so that the arrow stays in the lower right corner.\n Then the thickness, the color of the end of the arrow (the triangle) and the color of the line.\n This arrow is drawn in an SVG block and starts at the top left of the block.\n example : a = Arrow(50,100, 3, red, blue)\n To place the ending point (x, y) freely, note that the starting point is the origin of an orthonormal basis\"\"\"\n\n \"\"\" You can put the arguments in this order :\n width : width of the arrow,\n height : height of the arrow,\n thickness : thickness of the arrow,\n fe : for fillcolorend, the color of the end (the triangle) of the arrow,\n fl : for fillcolorline, the color of the line of the arrow,\n style : the style of the arrow,\n x : the coordinate on the x axis of the frame of the arrow,\n y : the coordinate on the y axis of the frame of the arrow,\n \"\"\"\n\n\n self.x1 = x\n self.x2 = x2\n self.y1 = y\n self.y2 = y2\n\n # Test that the arguments are the right object in the right domain.\n # The thickness must be a positive integer\n\n try:\n self.thickness = int(thickness)\n \"\"\"Thickness of the arrow line, also used to size the head.\"\"\"\n if (self.thickness < 0):\n raise ValueError(\"Arrow's thickness must be a positive integer\")\n except ValueError:\n print(\"Value Error : Arrow's thickness must be a positive integer\")\n\n\n #Define the width and height of the head of the arrow\n\n self.width_head = self.thickness\n \"\"\"Width of the arrow head, equal to the thickness.\"\"\"\n self.height_head = self.thickness\n \"\"\"Height of the arrow head, equal to the thickness.\"\"\"\n\n\n #Now we define the size of the canvas :\n self.size_x = \"1000\" #1000#abs(self.x2 - self.x1) + 10 * self.width_head\n self.size_y = \"1000\" #1000#abs(self.y2 - self.y1) + 10 * self.height_head\n \"\"\"Width and height of the SVG canvas, kept as strings.\"\"\"\n\n #Then we set the style by using the placementContent() function explained above :\n if style is None:\n self.style=Style.styleTAB[Style.currentId]\n else:\n self.style = style\n if x is None or y is None:\n self.bBox = self.placementContent()\n else:\n self.bBox = BoundingBox.BoundingBox()\n\n\n self.fillcolorend = fe\n self.fillcolorline = fl\n\n\n def toHtml(self):\n # The function which returns the html code of the arrow\n \"\"\"This method returns the HTML code used to draw arrows in slides using SVG technology.\"\"\"\n\n #Coordinates of the ending point : x2, y2\n #size of the head of the arrow\n\n # NOTE: the SVG markup below is a reconstruction -- the original tags were lost,\n # so the exact attribute layout is assumed from the fourteen format() arguments.\n content = '''\n\t\t\t<svg id=\"{}\" width=\"{}\" height=\"{}\">\n\n\t\t\t<defs>\n\t\t\t<marker id=\"triangle_{}\" markerWidth=\"{}\" markerHeight=\"{}\" refX=\"0\" refY=\"1.5\" orient=\"auto\" fill=\"{}\">\n\t\t\t<path d=\"M 0 0 L 3 1.5 L 0 3 z\"/>\n\t\t\t</marker>\n\t\t\t</defs>\n\t\t\t<line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\" stroke=\"{}\" stroke-width=\"{}\" marker-end=\"url(#triangle_{})\"/>\n\t\t\t</svg>\n\t\t\t'''.format(self.bBox.name,str(self.size_x), str(self.size_y), str(Arrow.numero_triangle) ,\n str(self.width_head), str(self.height_head),self.fillcolorend,\n str(self.x1), str(self.y1), str(self.x2), str(self.y2),\n self.fillcolorline, str(self.thickness), str(Arrow.numero_triangle))\n\n Arrow.numero_triangle = Arrow.numero_triangle + 1\n return content\n\n\n def rotation(self, angle2):\n\n \"\"\"This function should enable the user to change the angle of an arrow (not implemented)\"\"\"\n\n\nif __name__ == \"__main__\":\n arrow = Arrow(1000, 0)\n print(arrow.toHtml())\n\t\n","sub_path":"Python/SINP/trunk/Release/Arrow.py","file_name":"Arrow.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"526248706","text":"__author__ = \"Francisco Caldeira, Faculdade de Ciências da Universidade de Lisboa\"\r\n\r\nimport struct\r\n\r\nclass TypeError(Exception):\r\n\tpass\r\n\r\nclass 
Emitter(object):\r\n\tdef __init__(self):\r\n\t\tself.stack = [{}]\r\n\t\tself.id_stack = [{}]\r\n\t\tself.current_fun = (\"\", \"\")\r\n\t\tself.count = 0\r\n\t\tself.lines = []\r\n\t\tself.labels = []\r\n\t\tself.unary = False\r\n\t\tself.unary_count = 0\r\n\t\tself.isDeclared = False\r\n\t\tself.array_type = []\r\n\t\tself.OR = False\r\n\t\tself.AND = False\r\n\t\tself.array_size = []\r\n\t\tself.current_lambda = (\"\", \"\")\r\n\t\tself.current_lambda_call = \"\"\r\n\t\tself.lambda_call_counter = 0\r\n\t\tself.lambda_array_size = 0\r\n\t\tself.isLambda = False\r\n\t\tself.isLambdaArgs = False\r\n\t\tself.isClosure = False\r\n\t\tself.lambda_var_arg = []\r\n\t\tself.lambda_args = []\r\n\t\tself.lambda_llvm = []\r\n\t\tself.all_lambdas = []\r\n\t\tself.environment = []\r\n\r\n\tdef store_label(self, label):\r\n\t\tself.labels.append(label)\r\n\r\n\tdef get_label(self):\r\n\t\tvar = self.labels\r\n\t\treturn var\r\n\r\n\tdef update_labels(self):\r\n\t\tself.labels = []\r\n\r\n\tdef set_id_name(self, name, value):\r\n\t\tscope = self.id_stack[0]\r\n\t\tscope[name] = value\r\n\r\n\tdef get_id_name(self, name):\r\n\t\tfor scope in self.id_stack:\r\n\t\t\tif name in scope:\r\n\t\t\t\treturn scope[name]\r\n\r\n\tdef set_type(self, name, value):\r\n\t\tscope = self.stack[0]\r\n\t\tscope[name] = value\r\n\r\n\tdef get_type(self, name):\r\n\t\tfor scope in self.stack:\r\n\t\t\tif name in scope:\r\n\t\t\t\treturn scope[name]\r\n\r\n\tdef get_count(self):\r\n\t\tself.count += 1\r\n\t\treturn self.count\r\n\r\n\tdef get_id(self):\r\n\t\tid = self.get_count()\r\n\t\treturn f\"cas_{id}\"\r\n\r\n\tdef __lshift__(self, v):\r\n\t\tself.lines.append(v)\r\n\r\n\tdef get_code(self):\r\n\t\treturn \"\\n\".join(self.lines)\r\n\r\n\tdef get_pointer_name(self, n):\r\n\t\treturn f\"%point_{n}\"\r\n\r\n\tdef get_emitter_type(self, tipo):\r\n\t\tif tipo == \"Int\":\r\n\t\t\ttipo = \"i32\"\r\n\t\telif tipo == \"Float\":\r\n\t\t\ttipo = \"float\"\r\n\t\telif tipo == \"Boolean\":\r\n\t\t\ttipo = \"zeroext i1\"\r\n\t\telif tipo == \"String\":\r\n\t\t\ttipo = \"i8*\"\r\n\t\telif tipo == \"Void\":\r\n\t\t\ttipo = \"void\"\r\n\t\treturn tipo\r\n\r\n\tdef get_emitter_align(self, tipo):\r\n\t\talign = \"\"\r\n\t\tif tipo == \"Int\" or tipo == \"i32\":\r\n\t\t\talign = \"align 4\"\r\n\t\telif tipo == \"Float\" or tipo == \"float\":\r\n\t\t\talign = \"align 4\"\r\n\t\telif tipo == \"Boolean\":\r\n\t\t\talign = \"align 1\"\r\n\t\telif tipo == \"String\" or tipo == \"i8*\":\r\n\t\t\talign = \"align 8\"\r\n\t\telif tipo == \"Void\":\r\n\t\t\talign = \"\"\r\n\t\treturn align\r\n\r\ndef compiler(node, emitter=None):\r\n\tif node[\"nt\"] == \"Program\":\r\n\t\temitter = Emitter()\r\n\r\n\t\tfor using in node[\"Using\"]:\r\n\t\t\tcompiler(using, emitter)\r\n\r\n\t\tfor Decls_Defs in node[\"Decls_Defs\"]:\r\n\t\t\tcompiler(Decls_Defs, emitter)\r\n\t\t\r\n\t\treturn emitter.get_code()\r\n\r\n\telif node[\"nt\"] == \"Using\":\r\n\t\tif node[\"name\"] == \"print\":\r\n\t\t\temitter << \"declare i32 @printf(i8*, ...) 
#1\"\r\n\t\telif node[\"name\"] == \"array_create\":\r\n\t\t\temitter << \"declare i32 @array_create(i32) #1\"\r\n\t\telif node[\"name\"] == \"array_get\":\r\n\t\t\temitter << \"declare i32 @array_get(i8*) #1\"\r\n\r\n\telif node[\"nt\"] == \"Declaration\":\r\n\t\tfun_type = node[\"type\"]\r\n\t\tfun_name = node[\"name\"]\r\n\t\ttipo = emitter.get_emitter_type(fun_type)\r\n\t\tname = fun_name + \"_function\"\r\n\t\temitter.set_type(name, tipo)\r\n\t\temitter.isDeclared = True\r\n\t\treturn \r\n\r\n\telif node[\"nt\"] == \"Definition\":\r\n\t\tfun_type = node[\"type\"]\r\n\t\tfun_name = node[\"name\"]\r\n\t\temitter.current_fun = (fun_name, fun_type)\r\n\t\ttipo = emitter.get_emitter_type(fun_type)\r\n\t\targs_emitter = \"\"\r\n\r\n\t\tname = fun_name + \"_function\"\r\n\t\temitter.set_type(name, tipo)\r\n\r\n\t\tfor arg in node[\"arguments\"]:\r\n\t\t\ttemp = emitter.get_emitter_type(arg[\"type\"])\r\n\t\t\tif \"i1\" in temp:\r\n\t\t\t\ttemp = \"i1 zeroext\"\r\n\t\t\targs_emitter += temp\r\n\t\t\tpname = emitter.get_pointer_name(arg[\"name\"])\r\n\t\t\targs_emitter += f\" {pname}, \"\r\n\t\t\tif arg[\"name\"] == \"Void\":\r\n\t\t\t\targs_emitter = \"\"\r\n\t\t\telse:\r\n\t\t\t\tname = arg[\"name\"] + \"_var_arg\"\r\n\t\t\t\ttipo_arg = arg[\"type\"]\r\n\t\t\t\temitter.set_type(name, tipo_arg)\r\n\r\n\t\targs_emitter = args_emitter[:-2]\r\n\t\tif emitter.isLambda == True:\r\n\t\t\temitter.lambda_llvm.append(f\"define {tipo} @{fun_name}({args_emitter}) #0 {'{'}\")\r\n\t\telse:\r\n\t\t\temitter << f\"define {tipo} @{fun_name}({args_emitter}) #0 {'{'}\"\r\n\r\n\t\tfor arg in node[\"arguments\"]:\r\n\t\t\tif arg[\"name\"] != \"Void\":\r\n\t\t\t\tif arg[\"type\"] == \"i32\":\r\n\t\t\t\t\targ[\"type\"] = \"Int\"\r\n\t\t\t\telif arg[\"type\"] == \"float\":\r\n\t\t\t\t\targ[\"type\"] = \"Float\"\r\n\t\t\t\telif arg[\"type\"] == \"i8*\":\r\n\t\t\t\t\targ[\"type\"] = \"String\"\r\n\t\t\t\targ_tipo = emitter.get_emitter_type(arg[\"type\"])\r\n\t\t\t\tpname = emitter.get_pointer_name(arg[\"name\"])\r\n\t\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\t\talign = emitter.get_emitter_align(arg[\"type\"])\r\n\t\t\t\temitter.set_id_name(pname, registo)\r\n\t\t\t\tif \"i1\" in arg_tipo:\r\n\t\t\t\t\targ_tipo = \"i8\"\r\n\t\t\t\t\ttemp_reg = \"%\" + emitter.get_id()\r\n\t\t\t\t\tif emitter.isLambda == True:\r\n\t\t\t\t\t\temitter.lambda_llvm.append(f\" {registo} = alloca {arg_tipo}, {align}\")\r\n\t\t\t\t\t\temitter.lambda_llvm.append(f\" {temp_reg} = zext i1 {pname} to i8\")\r\n\t\t\t\t\t\temitter.lambda_llvm.append(f\" store {arg_tipo} {temp_reg}, {arg_tipo}* {registo}, {align}\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\temitter << f\" {registo} = alloca {arg_tipo}, {align}\"\r\n\t\t\t\t\t\temitter << f\" {temp_reg} = zext i1 {pname} to i8\"\r\n\t\t\t\t\t\temitter << f\" store {arg_tipo} {temp_reg}, {arg_tipo}* {registo}, {align}\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tif emitter.isLambda == True:\r\n\t\t\t\t\t\temitter.lambda_llvm.append(f\" {registo} = alloca {arg_tipo}, {align}\")\r\n\t\t\t\t\t\temitter.lambda_llvm.append(f\" store {arg_tipo} {pname}, {arg_tipo}* {registo}, {align}\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\temitter << f\" {registo} = alloca {arg_tipo}, {align}\"\r\n\t\t\t\t\t\temitter << f\" store {arg_tipo} {pname}, {arg_tipo}* {registo}, {align}\"\r\n\r\n\t\tfor stmt in node[\"block\"]:\r\n\t\t\tif stmt != None:\r\n\t\t\t\tcompiler(stmt, emitter)\r\n\r\n\t\tif emitter.isLambda == True:\r\n\t\t\temitter.lambda_llvm.append(\"}\")\r\n\t\telse:\r\n\t\t\temitter << \"}\"\r\n\t\treturn \r\n\r\n\telif 
node[\"nt\"] == \"Return\":\r\n\t\tparameters = node[\"r_parameters\"]\r\n\t\tregisto = \"\"\r\n\t\tif parameters[\"nt\"] == \"Lambda\":\r\n\t\t\tif parameters[\"arguments\"][0][\"nt\"] == \"Void\":\r\n\t\t\t\tregisto = compiler(parameters[\"expression\"], emitter)\r\n\t\t\telse:\r\n\t\t\t\tif parameters[\"arguments\"][0][\"type\"] == \"Int\":\r\n\t\t\t\t\tregisto = \"i32 0\"\r\n\t\t\t\telif parameters[\"arguments\"][0][\"type\"] == \"Float\":\r\n\t\t\t\t\tregisto = \"float 0x0\"\r\n\t\t\t\telif parameters[\"arguments\"][0][\"type\"] == \"Boolean\":\r\n\t\t\t\t\tregisto = \"i1 true\"\r\n\t\t\t\telif parameters[\"arguments\"][0][\"type\"] == \"String\":\r\n\t\t\t\t\tn = emitter.get_count()\r\n\t\t\t\t\tstr_decl = f\"\"\"@.casual_str_cas_{n} = private unnamed_addr constant [1 x i8] c\"\\\\00\", align 1\"\"\"\r\n\t\t\t\t\temitter.lines.insert(0, str_decl)\r\n\t\t\t\t\tregisto = f\"i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.casual_str_cas_{n}, i64 0, i64 0)\"\r\n\t\t\tname = \"$lambda$\" + emitter.current_fun[0]\r\n\t\t\temitter.current_lambda = (name, emitter.current_fun[1])\r\n\t\t\tcompiler(parameters, emitter)\r\n\t\telse:\r\n\t\t\tregisto = compiler(node[\"r_parameters\"], emitter)\r\n\t\tif \"i8\" in registo and \"*\" not in registo:\r\n\t\t\ttemp_reg = \"%\" + emitter.get_id()\r\n\t\t\tn = emitter.get_count() - 2\r\n\t\t\tif emitter.isLambda == True:\r\n\t\t\t\temitter.lambda_llvm.append(f\" {temp_reg} = trunc i8 %cas_{n} to i1\")\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {temp_reg} = trunc i8 %cas_{n} to i1\"\r\n\t\t\tregisto = f\"i1 {temp_reg}\"\r\n\t\tif emitter.isLambda == True:\r\n\t\t\temitter.lambda_llvm.append(f\" ret {registo}\")\r\n\t\telse:\r\n\t\t\temitter << f\" ret {registo}\"\r\n\r\n\telif node[\"nt\"] == \"Statments\":\r\n\t\tfor statment in node[\"Statment\"]:\r\n\t\t\tcompiler(statment, emitter)\r\n\r\n\telif node[\"nt\"] == \"fun_call\":\r\n\t\tvname = node[\"name\"]\r\n\t\tname = vname + \"_function\"\r\n\t\tregisto_args = \"\"\r\n\t\temitter.isDeclared = False\r\n\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\tpname = emitter.get_pointer_name(vname) + \"_\" + str(emitter.get_count())\r\n\r\n\t\tif emitter.isLambdaArgs == True:\r\n\t\t\treturn\r\n\r\n\t\tif emitter.isLambda == False:\r\n\t\t\tfor fun in emitter.all_lambdas:\r\n\t\t\t\tif vname == fun[\"fun\"]:\r\n\t\t\t\t\tfor name in fun[\"env\"]:\r\n\t\t\t\t\t\tif isinstance(name, dict):\r\n\t\t\t\t\t\t\tif name[\"nt\"] == \"Array\":\r\n\t\t\t\t\t\t\t\tif name not in node[\"arguments\"]:\r\n\t\t\t\t\t\t\t\t\tnode[\"arguments\"].append(name)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\targ = {'nt': 'Var', 'name': name}\r\n\t\t\t\t\t\t\tif arg not in node[\"arguments\"]:\r\n\t\t\t\t\t\t\t\tnode[\"arguments\"].append(arg)\r\n\r\n\t\tif \"$lambda$\" in vname and emitter.isLambda == False:\r\n\t\t\temitter.isClosure = True\r\n\t\t\temitter.current_lambda_call = vname\r\n\r\n\t\tfor args in node[\"arguments\"]:\r\n\t\t\tif args[\"nt\"] == \"Void\":\r\n\t\t\t\tregisto_args = \"\"\r\n\t\t\telse:\r\n\t\t\t\ttemp = compiler(args, emitter) + \", \"\r\n\t\t\t\tif \"i1\" in temp:\r\n\t\t\t\t\ttemp = temp.replace(\"i1\", \"i1 zeroext\")\r\n\t\t\t\telif \"i8\" in temp and \"*\" not in temp:\r\n\t\t\t\t\tvar = temp.split(\" \")\r\n\t\t\t\t\tvar[1] = var[1].replace(\",\", \"\")\r\n\t\t\t\t\ttemp_reg = \"%\" + emitter.get_id()\r\n\t\t\t\t\temitter << f\" {temp_reg} = trunc i8 {var[1]} to i1\"\r\n\t\t\t\t\ttemp = f\"i1 zeroext {temp_reg}, \"\r\n\t\t\t\tregisto_args += temp\r\n\t\temitter.isClosure = 
False\r\n\t\temitter.lambda_call_counter = 0\r\n\t\tregisto_args = registo_args[:-2]\r\n\t\tif emitter.isLambda == True:\r\n\t\t\tif tipo == \"void\":\r\n\t\t\t\temitter.lambda_llvm.append(f\" call {tipo} @{vname}({registo_args})\")\r\n\t\t\telse:\r\n\t\t\t\temitter.lambda_llvm.append(f\" {pname} = call {tipo} @{vname}({registo_args})\")\r\n\t\telse:\r\n\t\t\tif tipo == \"void\":\r\n\t\t\t\temitter << f\" call {tipo} @{vname}({registo_args})\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {pname} = call {tipo} @{vname}({registo_args})\"\r\n\r\n\t\tif \"i1\" in tipo:\r\n\t\t\ttemp_reg = \"%\" + emitter.get_id()\r\n\t\t\temitter << f\" {temp_reg} = zext i1 {pname} to i8\"\r\n\t\t\ttipo = \"i8\"\r\n\t\t\tpname = temp_reg\r\n\r\n\t\treturn f\"{tipo} {pname}\"\r\n\r\n\telif node[\"nt\"] == \"Var_assignment\":\r\n\t\tvname = node[\"name\"]\r\n\t\texpr = node[\"expr\"]\r\n\t\tname = vname + \"_var\"\r\n\r\n\t\tif expr[\"nt\"] == \"Lambda\":\r\n\t\t\ttipo = emitter.get_type(name)\r\n\t\t\temitter.current_lambda = (vname, tipo)\r\n\t\t\tregisto = compiler(expr, emitter)\r\n\t\t\treturn\r\n\r\n\t\tregisto = compiler(expr, emitter)\r\n\t\tpname = emitter.get_pointer_name(vname)\r\n\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\r\n\t\tif tipo == None:\r\n\t\t\tname = vname + \"_var_arg\"\r\n\t\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\t\t\tpname = emitter.get_id_name(pname)\r\n\r\n\t\temitter << f\" store {registo}, {tipo}* {pname}, {align}\"\r\n\t\treturn\r\n\r\n\telif node[\"nt\"] == \"Array_assignment\":\r\n\t\tvname = node[\"name\"]\r\n\t\texpr = node[\"expr\"]\r\n\t\tindex = node[\"index_type\"]\r\n\t\tname = vname + \"_array\"\r\n\r\n\t\tpname = emitter.get_pointer_name(vname)\r\n\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\r\n\t\tarr_type = \"\"\r\n\t\tfor arr in emitter.array_type:\r\n\t\t\tif arr[\"name\"] == name:\r\n\t\t\t\tarr_type = arr[\"type\"]\r\n\r\n\t\tregisto = compiler(expr, emitter)\r\n\t\treg = compiler(index, emitter)\r\n\t\treg2 = reg.split(\" \")\r\n\t\ti64_reg = \"%\" + emitter.get_id()\r\n\t\ttemp = \"%\" + emitter.get_id()\r\n\t\tif \"%\" in reg2[1]:\r\n\t\t\temitter << f\" {i64_reg} = sext {reg} to i64\"\r\n\t\t\temitter << f\" {temp} = getelementptr inbounds {arr_type}, {arr_type}* {pname}, i64 0, i64 {i64_reg}\"\r\n\t\telse:\r\n\t\t\temitter << f\" {temp} = getelementptr inbounds {arr_type}, {arr_type}* {pname}, i64 0, i64 {reg2[1]}\"\r\n\r\n\t\temitter << f\" store {registo}, {tipo}* {temp}, {align}\"\r\n\r\n\telif node[\"nt\"] == \"create_array\":\r\n\t\treturn compiler(node[\"array_size\"], emitter)\r\n\r\n\telif node[\"nt\"] == \"Array_declaration\":\r\n\t\tvname = node[\"name\"]\r\n\t\tpname = emitter.get_pointer_name(vname)\r\n\t\ttipo = emitter.get_emitter_type(node[\"type\"])\r\n\t\talign = emitter.get_emitter_align(node[\"type\"])\r\n\r\n\t\tname = vname + \"_array\"\r\n\t\temitter.set_type(name, node[\"type\"])\r\n\r\n\t\tif \"i1\" in tipo:\r\n\t\t\ttipo = \"i8\"\r\n\r\n\t\tsize = compiler(node[\"size\"], emitter)\r\n\t\tsize = size.split(\" \")\r\n\t\tif \"%\" in size[1]:\r\n\t\t\traise TypeError(f\"Array '{vname}' size must be a number, it can't be an expression, variable or function call\")\r\n\r\n\t\temitter << f\" {pname} = alloca [{size[1]} x {tipo}], {align}\"\r\n\t\temitter.array_type.append({\"name\": 
name, \"type\": f\"[{size[1]} x {tipo}]\"})\r\n\t\treturn\r\n\r\n\telif node[\"nt\"] == \"double_fun_call\":\r\n\t\tname = node[\"name\"]\r\n\t\tfun1 = {'nt': 'fun_call', 'name': name, 'arguments': node[\"arguments1\"]}\r\n\t\tcompiler(fun1, emitter)\r\n\t\tname = \"$lambda$\" + name\r\n\t\tfun2 = {'nt': 'fun_call', 'name': name, 'arguments': node[\"arguments2\"]}\r\n\t\treturn compiler(fun2, emitter)\r\n\r\n\telif node[\"nt\"] == \"Lambda\":\r\n\t\tindex = 0\r\n\t\tfor line in emitter.lines:\r\n\t\t\tif \"define\" in line:\r\n\t\t\t\tindex = emitter.lines.index(line)\r\n\t\t\r\n\t\temitter.lambda_args = []\r\n\t\temitter.isLambda = True\r\n\r\n\t\tfor arg in node[\"arguments\"]:\r\n\t\t\tif arg != \"Void\":\r\n\t\t\t\temitter.lambda_args.append({'nt': 'Var_decl', 'name': arg[\"name\"], 'type': arg[\"type\"]})\r\n\r\n\t\texpr = node[\"expression\"]\r\n\r\n\t\temitter.isLambdaArgs = True\r\n\t\tcompiler(expr, emitter)\r\n\t\temitter.isLambdaArgs = False\r\n\t\tarray_types = 0\r\n\t\tcounter = 0\r\n\t\tname2 = emitter.current_lambda[0]\r\n\t\tfor name in emitter.environment:\r\n\t\t\tarray_types += 1\r\n\t\tfor name in emitter.environment:\r\n\t\t\tif isinstance(name, dict):\r\n\t\t\t\tindex = name[\"index_type\"][\"value\"]\r\n\t\t\t\tname = name[\"name\"]\r\n\t\t\t\tfor arr in emitter.array_type:\r\n\t\t\t\t\tif name + \"_array\" == arr[\"name\"]:\r\n\t\t\t\t\t\tsize = arr[\"type\"]\r\n\t\t\t\ttipo = emitter.get_emitter_type(emitter.get_type(name + \"_array\"))\r\n\t\t\t\temitter_type = emitter.get_emitter_type(tipo)\r\n\t\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\t\talign = emitter.get_emitter_align(emitter_type)\r\n\t\t\t\temitter << f\" {registo} = getelementptr inbounds {size}, {size}* %point_{name}, i64 0, i64 {index}\"\r\n\t\t\t\temitter << f\" {registo}_2 = load {emitter_type}, {emitter_type}* {registo}, {align}\"\r\n\t\t\t\temitter << f\" store {emitter_type} {registo}_2, {emitter_type}* getelementptr inbounds ([{array_types} x {emitter_type}], [{array_types} x {emitter_type}]* @{name2}_args, i64 0, i64 {counter}), {align}\"\r\n\t\t\t\tcounter += 1\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tif \"$\" in name:\r\n\t\t\t\t\tvar = name.split(\"$$$\")\r\n\t\t\t\t\tlocation = emitter.get_id_name(emitter.get_pointer_name(var[0]))\r\n\t\t\t\telse:\r\n\t\t\t\t\tlocation = emitter.get_pointer_name(name)\r\n\r\n\t\t\t\ttipo = \"\"\r\n\t\t\t\tif \"$\" not in name:\r\n\t\t\t\t\ttipo = emitter.get_type(name + \"_var\")\r\n\t\t\t\telse:\r\n\t\t\t\t\ttipo = emitter.get_type(name)\r\n\t\t\t\temitter_type = emitter.get_emitter_type(tipo)\r\n\t\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\t\talign = emitter.get_emitter_align(emitter_type)\r\n\t\t\t\temitter << f\" {registo} = load {emitter_type}, {emitter_type}* {location}, {align}\"\r\n\t\t\t\temitter << f\" store {emitter_type} {registo}, {emitter_type}* getelementptr inbounds ([{array_types} x {emitter_type}], [{array_types} x {emitter_type}]* @{name2}_args, i64 0, i64 {counter}), {align}\"\r\n\t\t\t\tcounter += 1\r\n\r\n\t\tarray = f\"@{name2}_args = dso_local global [{array_types} x {emitter_type}] zeroinitializer, {align}\"\r\n\t\temitter.lambda_array_size = array_types\r\n\t\temitter.lines.insert(0, array)\r\n\r\n\t\temitter.all_lambdas.append({\"fun\": emitter.current_lambda[0], \"env\": emitter.environment})\r\n\t\tlambda_fun = {'nt': 'Definition', 'name': emitter.current_lambda[0], 'arguments': emitter.lambda_args, \\\r\n\t\t\t\t\t 'type': emitter.current_lambda[1], 'block': [None, {'nt': 'Return', 'r_parameters': 
expr}]}\r\n\t\t\r\n\t\tcompiler(lambda_fun, emitter)\r\n\t\tfor line in emitter.lambda_llvm:\r\n\t\t\temitter.lines.insert(index, line)\r\n\t\t\tindex += 1\r\n\r\n\t\temitter.isLambda = False\r\n\t\temitter.current_lambda = (\"\", \"\")\r\n\t\temitter.lambda_llvm = []\r\n\t\temitter.environment = []\r\n\t\treturn\r\n\r\n\telif node[\"nt\"] == \"Var_declaration\":\r\n\t\tvname = node[\"name\"]\r\n\t\texpr = node[\"expr\"]\r\n\t\tpname = emitter.get_pointer_name(vname)\r\n\t\ttipo = emitter.get_emitter_type(node[\"type\"])\r\n\t\talign = emitter.get_emitter_align(node[\"type\"])\r\n\r\n\t\tif expr[\"nt\"] == \"Lambda\":\r\n\t\t\temitter.current_lambda = (vname, node[\"type\"])\r\n\t\t\tregisto = compiler(expr, emitter)\r\n\t\t\treturn\r\n\t\t\r\n\t\telse:\r\n\t\t\tregisto = compiler(expr, emitter)\r\n\t\t\tname = vname + \"_var\"\r\n\t\t\temitter.set_type(name, node[\"type\"])\r\n\r\n\t\t\tif \"i1\" in tipo:\r\n\t\t\t\ttipo = \"i8\"\r\n\r\n\t\t\temitter << f\" {pname} = alloca {tipo}, {align}\"\r\n\t\t\t\r\n\t\t\tif \"i1\" in registo:\r\n\t\t\t\tif \"true\" in registo:\r\n\t\t\t\t\tregisto = \"i8 1\"\r\n\t\t\t\telif \"false\" in registo:\r\n\t\t\t\t\tregisto = \"i8 0\"\r\n\r\n\t\t\temitter << f\" store {registo}, {tipo}* {pname}, {align}\"\r\n\t\t\treturn\r\n\r\n\telif node[\"nt\"] == \"Var\":\r\n\t\tvname = node[\"name\"]\r\n\t\tname = vname + \"_var\"\r\n\r\n\t\tregisto = \"%\" + emitter.get_id()\r\n\t\tpname = emitter.get_pointer_name(vname)\r\n\t\tif emitter.isLambda == True:\r\n\t\t\tif emitter.isLambdaArgs == True:\r\n\t\t\t\tname = node[\"name\"]\r\n\t\t\t\ttipo = emitter.get_type(name + \"_var\")\r\n\t\t\t\ttipo2 = emitter.get_type(name + \"_var_arg\")\r\n\t\t\t\tif tipo != None:\r\n\t\t\t\t\temitter.environment.append(name)\r\n\t\t\t\t\targ = {'nt': 'Var_decl', 'name': node[\"name\"], 'type': tipo}\r\n\t\t\t\t\tif arg not in emitter.lambda_args:\r\n\t\t\t\t\t\temitter.lambda_args.append(arg)\r\n\t\t\t\telif tipo2 != None:\r\n\t\t\t\t\tname = name + \"$$$\"\r\n\t\t\t\t\temitter.environment.append(name)\r\n\t\t\t\t\targ = {'nt': 'Var_decl', 'name': name, 'type': tipo2}\r\n\t\t\t\t\tif arg not in emitter.lambda_args:\r\n\t\t\t\t\t\temitter.lambda_var_arg.append(name)\r\n\t\t\t\t\t\temitter.set_type(name, tipo2)\r\n\t\t\t\t\t\temitter.lambda_args.append(arg)\r\n\t\t\telse:\r\n\t\t\t\tname = vname + \"$$$\"\r\n\t\t\t\tif name not in emitter.lambda_var_arg:\r\n\t\t\t\t\tname = vname + \"_var_arg\"\r\n\t\t\t\t\tpname = emitter.get_id_name(pname)\r\n\t\t\t\telse:\r\n\t\t\t\t\tpname = emitter.get_id_name(emitter.get_pointer_name(name))\r\n\t\t\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\t\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\t\t\t\t\r\n\t\t\t\tif \"i1\" in tipo:\r\n\t\t\t\t\ttipo = \"i8\"\r\n\t\t\t\temitter.lambda_llvm.append(f\" {registo} = load {tipo}, {tipo}* {pname}, {align}\")\r\n\t\t\t\treturn f\"{tipo} {registo}\"\r\n\r\n\t\telse:\r\n\t\t\tif \"_function_\" in name:\r\n\t\t\t\treturn\r\n\t\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\r\n\t\t\tif tipo == None or emitter.isDeclared == True:\r\n\t\t\t\tname = vname + \"_var_arg\"\r\n\t\t\t\tpname = emitter.get_id_name(pname)\r\n\t\t\t\tif vname in emitter.lambda_var_arg:\r\n\t\t\t\t\tv = vname.split(\"$$$\")\r\n\t\t\t\t\tname = v[0] + \"_var_arg\"\r\n\t\t\t\t\tpname = emitter.get_id_name(emitter.get_pointer_name(v[0]))\t\t\t\t\r\n\t\t\t\ttipo = 
emitter.get_emitter_type(emitter.get_type(name))\r\n\t\t\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\t\t\tif emitter.isClosure == True:\r\n\t\t\t\tn = f\"lambda_var_{emitter.lambda_call_counter}\"\r\n\t\t\t\tarray_types = emitter.lambda_array_size\r\n\t\t\t\temitter << f\" {registo} = load {tipo}, {tipo}* getelementptr inbounds ([{array_types} x {tipo}], [{array_types} x {tipo}]* @{emitter.current_lambda_call}_args, i64 0, i64 {emitter.lambda_call_counter}), {align}\"\r\n\t\t\t\temitter.lambda_call_counter += 1\r\n\t\t\t\treturn f\"{tipo} {registo}\"\r\n\t\t\tif \"i1\" in tipo:\r\n\t\t\t\ttipo = \"i8\"\r\n\t\t\temitter << f\" {registo} = load {tipo}, {tipo}* {pname}, {align}\"\r\n\t\t\treturn f\"{tipo} {registo}\"\r\n\r\n\telif node[\"nt\"] == \"get_array\":\r\n\t\treturn node[\"name\"]\r\n\r\n\telif node[\"nt\"] == \"Array\":\r\n\t\tvname = node[\"name\"]\r\n\t\tindex = node[\"index_type\"]\r\n\t\tif isinstance(vname, dict):\r\n\t\t\tvname = compiler(vname, emitter)\r\n\t\t\r\n\t\tname = vname + \"_array\"\r\n\t\tregisto = \"%\" + emitter.get_id()\r\n\t\tpname = emitter.get_pointer_name(vname)\r\n\r\n\t\tif emitter.isLambda == True:\r\n\t\t\tif emitter.isLambdaArgs == True:\r\n\t\t\t\ttipo = emitter.get_type(name)\r\n\t\t\t\tif tipo != None:\r\n\t\t\t\t\temitter.environment.append(node)\r\n\t\t\t\t\tif isinstance(node[\"name\"], dict):\r\n\t\t\t\t\t\tnode[\"name\"] = node[\"name\"][\"name\"]\r\n\t\t\t\t\targ = {'nt': 'Var_decl', 'name': node[\"name\"], 'type': tipo}\r\n\t\t\t\t\tif arg not in emitter.lambda_args:\r\n\t\t\t\t\t\temitter.lambda_args.append(arg)\r\n\t\t\telse:\r\n\t\t\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\t\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\t\t\t\tpname = emitter.get_id_name(pname)\r\n\r\n\t\t\t\tif \"i1\" in tipo:\r\n\t\t\t\t\ttipo = \"i8\"\r\n\r\n\t\t\t\temitter.lambda_llvm.append(f\" {registo} = load {tipo}, {tipo}* {pname}, {align}\")\r\n\t\t\t\treturn f\"{tipo} {registo}\"\r\n\t\telse:\r\n\t\t\ttipo = emitter.get_emitter_type(emitter.get_type(name)) \r\n\t\t\talign = emitter.get_emitter_align(emitter.get_type(name))\r\n\r\n\t\t\tarr_type = \"\"\r\n\t\t\tfor arr in emitter.array_type:\r\n\t\t\t\tif arr[\"name\"] == name:\r\n\t\t\t\t\tarr_type = arr[\"type\"]\r\n\r\n\t\t\tif \"i1\" in tipo:\r\n\t\t\t\ttipo = \"i8\"\r\n\r\n\t\t\tvar = compiler(index, emitter)\r\n\t\t\tvar2 = var.split(\" \")\r\n\t\t\ti64_reg = \"%\" + emitter.get_id()\r\n\t\t\ttemp = \"%\" + emitter.get_id()\r\n\r\n\t\t\tif emitter.isClosure == True:\r\n\t\t\t\tn = f\"lambda_var_{emitter.lambda_call_counter}\"\r\n\t\t\t\tarray_types = emitter.lambda_array_size\r\n\t\t\t\temitter << f\" {registo} = load {tipo}, {tipo}* getelementptr inbounds ([{array_types} x {tipo}], [{array_types} x {tipo}]* @{emitter.current_lambda_call}_args, i64 0, i64 {emitter.lambda_call_counter}), {align}\"\r\n\t\t\t\temitter.lambda_call_counter += 1\r\n\t\t\t\treturn f\"{tipo} {registo}\"\r\n\t\t\t\r\n\t\t\tif \"%\" in var2[1]:\r\n\t\t\t\temitter << f\" {i64_reg} = sext {var} to i64\"\r\n\t\t\t\temitter << f\" {temp} = getelementptr inbounds {arr_type}, {arr_type}* {pname}, i64 0, i64 {i64_reg}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {temp} = getelementptr inbounds {arr_type}, {arr_type}* {pname}, i64 0, i64 {var2[1]}\"\r\n\r\n\t\t\temitter << f\" {registo} = load {tipo}, {tipo}* {temp}, {align}\"\r\n\t\t\treturn f\"{tipo} {registo}\"\r\n\r\n\telif node[\"nt\"] == \"Int\":\r\n\t\ttipo = emitter.get_emitter_type(node[\"nt\"])\r\n\t\tvalor = 
str(node[\"value\"])\r\n\t\treturn f\"{tipo} {valor}\"\r\n\r\n\telif node[\"nt\"] == \"Boolean\":\r\n\t\ttipo = \"i1\"\r\n\t\tvalor = node[\"value\"]\r\n\t\treturn f\"{tipo} {valor}\"\r\n\r\n\telif node[\"nt\"] == \"Void\":\r\n\t\treturn \"void\"\r\n\r\n\telif node[\"nt\"] == \"String\":\r\n\t\ttipo = emitter.get_emitter_type(node[\"nt\"])\r\n\t\tvalue = node[\"value\"]\r\n\t\tvalor = value.replace('\"', '')\r\n\t\tsize = 1\r\n\t\tcounter = valor.count(\"\\\\n\")\r\n\t\tsize += counter\r\n\t\tvalue = valor.replace(\"\\\\n\", \"\")\r\n\t\tsize += len(value)\r\n\t\tvalor = valor.replace(\"\\\\n\", \"\\\\0A\")\r\n\t\tid = emitter.get_id()\r\n\t\tstr_name = f\"@.casual_str_{id}\"\r\n\t\t\r\n\t\t\t\r\n\t\tstr_decl = f\"\"\"{str_name} = private unnamed_addr constant [{size} x i8] c\"{valor}\\\\00\", align 1\"\"\"\r\n\t\temitter.lines.insert(0, str_decl)\r\n\r\n\t\treturn f\"i8* getelementptr inbounds ([{size} x i8], [{size} x i8]* {str_name}, i64 0, i64 0)\"\r\n\r\n\telif node[\"nt\"] == \"Float\":\r\n\t\ttipo = emitter.get_emitter_type(node[\"nt\"])\r\n\t\tvalue = node[\"value\"]\r\n\t\tfloat_single = struct.unpack('f', struct.pack('f', value))[0]\r\n\t\tvalor = hex(struct.unpack('\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\tregisto_l = compiler(left, emitter)\r\n\t\t\ttemp = compiler(right, emitter)\r\n\t\t\tregisto_r = temp.split(\" \")\r\n\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\tif registo_r[0] == \"float\":\r\n\t\t\t\temitter << f\" {registo} = fcmp ogt {registo_l}, {registo_r[1]}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {registo} = icmp sgt {registo_l}, {registo_r[1]}\"\r\n\t\t\treturn f\"i1 {registo}\"\r\n\r\n\t\telif node[\"operator\"] == \"<\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\tregisto_l = compiler(left, emitter)\r\n\t\t\ttemp = compiler(right, emitter)\r\n\t\t\tregisto_r = temp.split(\" \")\r\n\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\tif registo_r[0] == \"float\":\r\n\t\t\t\temitter << f\" {registo} = fcmp olt {registo_l}, {registo_r[1]}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {registo} = icmp slt {registo_l}, {registo_r[1]}\"\r\n\t\t\treturn f\"i1 {registo}\"\r\n\r\n\t\telif node[\"operator\"] == \">=\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\tregisto_l = compiler(left, emitter)\r\n\t\t\ttemp = compiler(right, emitter)\r\n\t\t\tregisto_r = temp.split(\" \")\r\n\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\tif registo_r[0] == \"float\":\r\n\t\t\t\temitter << f\" {registo} = fcmp oge {registo_l}, {registo_r[1]}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {registo} = icmp sge {registo_l}, {registo_r[1]}\"\r\n\t\t\treturn f\"i1 {registo}\"\r\n\r\n\t\telif node[\"operator\"] == \"<=\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\tregisto_l = compiler(left, emitter)\r\n\t\t\ttemp = compiler(right, emitter)\r\n\t\t\tregisto_r = temp.split(\" \")\r\n\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\tif registo_r[0] == \"float\":\r\n\t\t\t\temitter << f\" {registo} = fcmp ole {registo_l}, {registo_r[1]}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {registo} = icmp sle {registo_l}, {registo_r[1]}\"\r\n\t\t\treturn f\"i1 {registo}\"\r\n\r\n\t\telif node[\"operator\"] == \"!=\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\tregisto_l = compiler(left, emitter)\r\n\t\t\ttemp = compiler(right, emitter)\r\n\t\t\tregisto_r = temp.split(\" \")\r\n\r\n\t\t\tif \"i8\" 
in registo_l:\r\n\t\t\t\treg1 = \"%\" + emitter.get_id()\r\n\t\t\t\treg2 = \"%\" + emitter.get_id()\r\n\t\t\t\temitter << f\" {reg1} = trunc {registo_l} to i1\"\r\n\t\t\t\temitter << f\" {reg2} = zext i1 {reg1} to i32\"\r\n\t\t\t\tregisto_l = f\"i32 {reg2}\"\r\n\t\t\telif \"i1\" in registo_l:\r\n\t\t\t\tif \"true\" in registo_l:\r\n\t\t\t\t\tvalue = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tvalue = 0\r\n\t\t\t\tregisto_l = f\"i32 {value}\"\r\n\r\n\t\t\tif registo_r[0] == \"i1\":\r\n\t\t\t\tif registo_r[1] == \"true\":\r\n\t\t\t\t\tregisto_r[1] = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tregisto_r[1] = 0\r\n\t\t\telif registo_r[0] == \"i8\":\r\n\t\t\t\treg1 = \"%\" + emitter.get_id()\r\n\t\t\t\treg2 = \"%\" + emitter.get_id()\r\n\t\t\t\temitter << f\" {reg1} = trunc {registo_r[0]} {registo_r[1]} to i1\"\r\n\t\t\t\temitter << f\" {reg2} = zext i1 {reg1} to i32\"\r\n\t\t\t\tregisto_r[1] = f\"{reg2}\"\r\n\r\n\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\tif registo_r[0] == \"float\":\r\n\t\t\t\temitter << f\" {registo} = fcmp une {registo_l}, {registo_r[1]}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {registo} = icmp ne {registo_l}, {registo_r[1]}\"\r\n\t\t\treturn f\"i1 {registo}\"\r\n\r\n\t\telif node[\"operator\"] == \"==\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\tregisto_l = compiler(left, emitter)\r\n\t\t\ttemp = compiler(right, emitter)\r\n\t\t\tregisto_r = temp.split(\" \")\r\n\t\t\t\t\t\t\r\n\t\t\tif \"i8\" in registo_l:\r\n\t\t\t\treg1 = \"%\" + emitter.get_id()\r\n\t\t\t\treg2 = \"%\" + emitter.get_id()\r\n\t\t\t\temitter << f\" {reg1} = trunc {registo_l} to i1\"\r\n\t\t\t\temitter << f\" {reg2} = zext i1 {reg1} to i32\"\r\n\t\t\t\tregisto_l = f\"i32 {reg2}\"\r\n\t\t\telif \"i1\" in registo_l:\r\n\t\t\t\tif \"true\" in registo_l:\r\n\t\t\t\t\tvalue = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tvalue = 0\r\n\t\t\t\tregisto_l = f\"i32 {value}\"\r\n\r\n\t\t\tif registo_r[0] == \"i1\":\r\n\t\t\t\tif registo_r[1] == \"true\":\r\n\t\t\t\t\tregisto_r[1] = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tregisto_r[1] = 0\r\n\t\t\telif registo_r[0] == \"i8\":\r\n\t\t\t\treg1 = \"%\" + emitter.get_id()\r\n\t\t\t\treg2 = \"%\" + emitter.get_id()\r\n\t\t\t\temitter << f\" {reg1} = trunc {registo_r[0]} {registo_r[1]} to i1\"\r\n\t\t\t\temitter << f\" {reg2} = zext i1 {reg1} to i32\"\r\n\t\t\t\tregisto_r[1] = f\"{reg2}\"\r\n\r\n\t\t\tregisto = \"%\" + emitter.get_id()\r\n\t\t\tif registo_r[0] == \"float\":\r\n\t\t\t\temitter << f\" {registo} = fcmp oeq {registo_l}, {registo_r[1]}\"\r\n\t\t\telse:\r\n\t\t\t\temitter << f\" {registo} = icmp eq {registo_l}, {registo_r[1]}\"\r\n\t\t\treturn f\"i1 {registo}\"\r\n\r\n\t\telif node[\"operator\"] == \"&&\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\t\t\t\r\n\t\t\tlabels = emitter.get_label()\r\n\t\t\tlabel_and = \"start_and_\" + labels[0][9:]\r\n\t\t\tlabel_or = labels[1]\r\n\t\t\tvar = emitter.get_count()\r\n\t\t\tvar2 = emitter.get_count()\r\n\t\t\ttemp = \"\"\r\n\t\t\ttemp_not = \"\"\r\n\t\t\tif emitter.unary == True:\r\n\t\t\t\t\r\n\t\t\t\treg_l = compiler(left, emitter)\r\n\t\t\t\tif \"OR\" not in reg_l and \"AND\" not in reg_l:\r\n\t\t\t\t\tif \"not\" in reg_l:\r\n\t\t\t\t\t\treg_l = reg_l.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_l[1]}, label %{labels[0]}, label %{label_and}_{var}\"\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\temitter << f\" br {reg_l}, label %{label_and}_{var}, label %{labels[0]}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var}:\"\r\n\r\n\t\t\t\treg_r = compiler(right, emitter)\r\n\t\t\t\tif \"OR\" 
not in reg_r and \"AND\" not in reg_r:\r\n\t\t\t\t\tif \"not\" in reg_r:\r\n\t\t\t\t\t\treg_r = reg_r.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_r[1]}, label %{labels[0]}, label %{label_and}_{var2}\"\r\n\t\t\t\t\telse:\t\t\t\t\t\t\r\n\t\t\t\t\t\temitter << f\" br {reg_r}, label %{label_and}_{var2}, label %{labels[0]}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var2}:\"\r\n\t\t\t\ttemp_not = \"not\"\r\n\t\t\t\temitter.unary = False\r\n\t\t\telse:\r\n\t\t\t\tif emitter.OR == True:\r\n\t\t\t\t\tlabel_or = f\"{label_and}_{var2}\"\r\n\r\n\t\t\t\temitter.AND = True\r\n\t\t\t\treg_l = compiler(left, emitter)\r\n\t\t\t\tif \"OR\" not in reg_l and \"AND\" not in reg_l:\r\n\t\t\t\t\tif \"not\" in reg_l:\r\n\t\t\t\t\t\treg_l = reg_l.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_l[1]}, label %{label_or}, label %{label_and}_{var}\"\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\temitter << f\" br {reg_l}, label %{label_and}_{var}, label %{label_or}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var}:\"\r\n\r\n\t\t\t\temitter.AND = False\r\n\t\t\t\treg_r = compiler(right, emitter)\r\n\t\t\t\tif \"OR\" not in reg_r and \"AND\" not in reg_r:\r\n\t\t\t\t\tif \"not\" in reg_r:\r\n\t\t\t\t\t\treg_r = reg_r.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_r[1]}, label %{labels[1]}, label %{label_and}_{var2}\"\r\n\t\t\t\t\telse:\t\t\t\t\t\t\r\n\t\t\t\t\t\temitter << f\" br {reg_r}, label %{label_and}_{var2}, label %{labels[1]}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var2}:\"\r\n\t\t\t\r\n\t\t\tif \"OR\" in reg_r:\r\n\t\t\t\ttemp = \"1\"\r\n\t\t\treturn f\"AND {temp} {temp_not}\"\r\n\r\n\t\telif node[\"operator\"] == \"||\":\r\n\t\t\tleft = node[\"value_left\"]\r\n\t\t\tright = node[\"value_right\"]\r\n\r\n\t\t\tlabels = emitter.get_label()\r\n\t\t\tlabel_and = \"start_or_\" + labels[0][9:]\r\n\t\t\tlabel_and2 = labels[0]\r\n\t\t\tvar = emitter.get_count()\r\n\t\t\tvar2 = emitter.get_count()\r\n\t\t\ttemp = \"\"\r\n\t\t\ttemp_not = \"\"\r\n\r\n\t\t\tif emitter.unary == True:\r\n\t\t\t\t\r\n\t\t\t\treg_l = compiler(left, emitter)\r\n\t\t\t\tif \"OR\" not in reg_l and \"AND\" not in reg_l:\r\n\t\t\t\t\tif \"not\" in reg_l:\r\n\t\t\t\t\t\treg_l = reg_l.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_l[1]}, label %{label_and}_{var}, label %{labels[1]}\"\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\temitter << f\" br {reg_l}, label %{labels[1]}, label %{label_and}_{var}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var}:\"\r\n\r\n\t\t\t\treg_r = compiler(right, emitter)\r\n\t\t\t\tif \"OR\" not in reg_r and \"AND\" not in reg_r:\r\n\t\t\t\t\tif \"not\" in reg_r:\r\n\t\t\t\t\t\treg_r = reg_r.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_r[1]}, label %{label_and}_{var2}, label %{labels[1]}\"\r\n\t\t\t\t\telse:\t\t\t\t\t\t\r\n\t\t\t\t\t\temitter << f\" br {reg_r}, label %{labels[1]}, label %{label_and}_{var2}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var2}:\"\r\n\r\n\t\t\t\ttemp_not = \"not\"\r\n\t\t\t\temitter.unary = False\r\n\t\t\telse:\r\n\t\t\t\tif emitter.AND == True:\r\n\t\t\t\t\tlabel_and2 = f\"{label_and}_{var2}\"\r\n\r\n\t\t\t\temitter.OR = True\r\n\t\t\t\treg_l = compiler(left, emitter)\r\n\t\t\t\tif \"OR\" not in reg_l and \"AND\" not in reg_l:\r\n\t\t\t\t\tif \"not\" in reg_l:\r\n\t\t\t\t\t\treg_l = reg_l.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_l[1]}, label %{label_and}_{var}, label %{label_and2}\"\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\temitter << f\" br {reg_l}, label %{label_and2}, label %{label_and}_{var}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var}:\"\r\n\r\n\t\t\t\temitter.OR = False\r\n\t\t\t\treg_r = 
compiler(right, emitter)\r\n\t\t\t\tif \"OR\" not in reg_r and \"AND\" not in reg_r:\r\n\t\t\t\t\tif \"not\" in reg_r:\r\n\t\t\t\t\t\treg_r = reg_r.split(\"-\")\r\n\t\t\t\t\t\temitter << f\" br {reg_r[1]}, label %{label_and}_{var2}, label %{labels[0]}\"\r\n\t\t\t\t\telse:\t\t\t\t\t\t\r\n\t\t\t\t\t\temitter << f\" br {reg_r}, label %{labels[0]}, label %{label_and}_{var2}\"\r\n\t\t\t\t\temitter << f\"{label_and}_{var2}:\"\r\n\t\t\r\n\t\t\tif \"AND\" in reg_r:\r\n\t\t\t\ttemp = \"1\"\r\n\t\t\treturn f\"OR {temp} {temp_not}\"","sub_path":"adding lambdas/cas_compiler.py","file_name":"cas_compiler.py","file_ext":"py","file_size_in_byte":40548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319022895","text":"import pandas as pd\nimport numpy as np\n\nfrom utils import time_test\n# from utils_db_mysql import db_commit, close_connection\n# from utils_db_mysql import save_evac as save_table\n# from utils_db_mysql import save_data_bunch\n# from utils_db_mysql import create_table_evac as create_table\n# from utils_db_mysql import clear_table\n# from utils_db_mysql import drop_table\n# from utils_db_mysql import save_query_to_file\n\nfrom utils_mariadb import db_commit, close_connection, save_data_bunch\nfrom utils_mariadb import save_evac, clear_table, create_table_evac\nfrom utils_mariadb import select_data_evac, save_data_to_sql_file\n\n\n#! pick the 2nd number from the list below\n# family_id_global = 40316\nfamily_id_global = 69523\n# family_id_global = 53964\n# 1st file 19031 11769\n# 2nd file 54071\t37997\n# 3rd file 55621\t38780\n# 6th file 59235\t40316\n\n\ndef check(column, i, df):\n data = df[column][i]\n if type(data) == np.int64:\n return int(data)\n elif type(data) == float:\n return str(data).replace(\".0\", \"\")\n elif type(data) == str:\n return data.strip()\n if data == '-':\n data = None\n\n return data\n\n\n@time_test\ndef pars(df):\n all_dict = []\n\n for i in df.index:\n global family_id_global\n family_id = None\n family_id_temp = None\n name = None\n patronymic = None\n family_member = None\n date_of_birth = None\n before_evac_region = None\n before_evac_district = None\n before_evac_city = None\n nationality = None\n before_evac_place_of_work = None\n before_evac_post = None\n evac_district = None\n evac_city = None\n evac_with_company = None\n evac_place_of_work = None\n evac_post = None\n settled_address = None\n search_archive = None\n search_fond = None\n search_inventory = None\n search_case = None\n search_list = None\n other_data = None\n\n family_id_temp = df['номер'][i]\n\n if family_id_temp:\n family_id_global += 1\n\n family_id = family_id_global\n\n surname = str(df['фамилия'][i]).strip()\n name = check('имя', i, df)\n patronymic = check('отчество', i, df)\n family_member = check('отношение', i, df)\n date_of_birth = check('год рождения', i, df)\n before_evac_region = check('область1', i, df)\n before_evac_district = check('район1', i, df)\n before_evac_city = check('город1', i, df)\n nationality = check('национальность', i, df)\n before_evac_place_of_work = check('предприятие', i, df)\n before_evac_post = check('должность1', i, df)\n evac_district = check('район2', i, df)\n evac_city = check('город2', i, df)\n evac_with_company = check('организация', i, df)\n evac_place_of_work = check('место работы', i, df)\n evac_post = check('должность2', i, df)\n settled_address = check('адрес', i, df)\n search_archive = check('архив', i, df)\n search_fond = check('фонд', i, df)\n search_inventory = check('опись', i, df)\n search_case = check('дело', i, df)\n search_list = check('лист', i, df)\n other_data = check('примечание', i, df)\n\n val = (\n family_id, surname, name, patronymic,\n family_member, date_of_birth, before_evac_region,\n before_evac_district, before_evac_city, nationality,\n before_evac_place_of_work, before_evac_post,\n evac_district, evac_city, evac_with_company,\n evac_place_of_work, evac_post, settled_address,\n search_archive, search_fond, search_inventory,\n search_case, search_list, other_data\n )\n\n save_evac(val)\n # try:\n # save_evac(val)\n # except:\n # print(val)\n #all_dict.append(val)\n\n #save_data_bunch(all_dict)\n\n\n@time_test\ndef open_xlsx(path_xlsx):\n xlsx = pd.ExcelFile(path_xlsx)\n test_list = xlsx.sheet_names\n # test_list.pop(0)\n # test_list.pop(0)\n print(test_list)\n\n for sheet in test_list:\n print(sheet)\n df = xlsx.parse(sheet)\n df = df.where((pd.notnull(df)), None)\n pars(df)\n\n\ndef main():\n path_xlsx = 'xlsx/evac30.10.2020.xlsx'\n table = \"`hero_arhiv`.`hero_evac`\"\n\n # create_table_evac(table) # Create the table if it does not exist\n clear_table(table) # Clear the table and reset the id\n open_xlsx(path_xlsx) # Process the whole workbook\n db_commit(\"Data written\")\n \n\n # Save the data to a file\n data = select_data_evac(table)\n save_data_to_sql_file(data)\n close_connection() # Close the connection\n #pass\n\n\nif __name__ == '__main__':\n main()","sub_path":"parser_evac.py","file_name":"parser_evac.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166786636","text":"from config import *\nimport os\nfrom pathlib import Path\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport cv2\nfrom tqdm import trange\nfrom keras.utils import Sequence\nfrom glob import glob\n\n\ndef create_path(path):\n my_file = Path(path)\n if not my_file.exists():\n os.makedirs(path)\n\n\ndef cut_image(img):\n r = np.arange(0, RESIZE_WIDTH, SUB_IMG_WIDTH)\n return [img[i: i+SUB_IMG_WIDTH, j: j+SUB_IMG_WIDTH] for i in r for j in r]\n\n\ndef mask2binary(m):\n binary_mask = m.copy()\n binary_mask[m == 1] = 0\n binary_mask[m != 1] = 1\n return binary_mask\n\n\ndef prepare_train_val_test_split():\n train_idx, test_idx = train_test_split(np.arange(1, TOTAL_IMG_NUM+1),test_size=0.3,random_state=10)\n train_idx, val_idx = train_test_split(train_idx,test_size=0.43,random_state=10)\n return train_idx, val_idx, test_idx\n\n\ndef phase2gray(phase):\n phase = 255 * (phase-0)/(3.2-0)\n phase_ = phase.copy()\n phase_[phase > 255] = 255.0\n phase_[phase < 0] = 0.0\n return phase_.astype(np.uint8)\n\n\ndef gen_path():\n create_path(train_path)\n create_path(val_path)\n create_path(test_path)\n create_path(train_mask_path)\n create_path(val_mask_path)\n create_path(test_mask_path)\n\n\ndef data_gen():\n train_idx, val_idx, test_idx = prepare_train_val_test_split()\n\n idx = 1\n for exp in trange(1, EXP_NUM + 1): # trange takes range() arguments; passing an ndarray would raise TypeError\n for t in np.arange(1, TIMESTEP + 1, 1):\n img = np.load(old_root_path + \"exp{}\\\\phase_npy\\\\{}_phase.npy\".format(exp, t))\n mask = np.load(old_root_path + \"exp{}\\\\afterwater\\\\{}_afterwater.npy\".format(exp, t)).astype(np.float64)\n\n # uint8\n img = phase2gray(img)\n binary_mask = mask2binary(mask).astype(np.uint8)\n\n # resize\n img = cv2.resize(img, (RESIZE_WIDTH, RESIZE_WIDTH))\n binary_mask = cv2.resize(binary_mask, (RESIZE_WIDTH, RESIZE_WIDTH))\n\n # cut images\n sub_mask_list = cut_image(binary_mask)\n sub_img_list = cut_image(img)\n\n # save\n
 for i in range(len(sub_img_list)):\n\n if idx in test_idx:\n cv2.imwrite(test_path + str(idx) + \".png\", sub_img_list[i])\n cv2.imwrite(test_mask_path + str(idx) + \".png\", sub_mask_list[i])\n\n elif idx in val_idx:\n cv2.imwrite(val_path + str(idx) + \".png\", sub_img_list[i])\n cv2.imwrite(val_mask_path + str(idx) + \".png\", sub_mask_list[i])\n\n else:\n cv2.imwrite(train_path + str(idx) + \".png\", sub_img_list[i])\n cv2.imwrite(train_mask_path + str(idx) + \".png\", sub_mask_list[i])\n\n idx += 1\n\n\nclass DataGenerator(Sequence):\n 'Generates data for Keras'\n\n def __init__(self,\n train_im_path=train_path,\n train_mask_path=train_mask_path,\n augmentations=None,\n batch_size=batch_size,\n img_size=256,\n n_channels=3,\n shuffle=True):\n\n 'Initialization'\n self.batch_size = batch_size\n self.train_im_paths = glob(train_im_path + '\\\\*')\n\n self.train_im_path = train_im_path\n self.train_mask_path = train_mask_path\n\n self.img_size = img_size\n\n self.n_channels = n_channels\n self.shuffle = shuffle\n self.augment = augmentations\n self.on_epoch_end()\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n\n # 2957 train img / 16 = 184.8\n return int(np.ceil(len(self.train_im_paths) / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate list of path\n indexes = self.indexes[index * self.batch_size:min((index + 1) * self.batch_size, len(self.train_im_paths))]\n\n # Find list of IDs\n list_IDs_im = [self.train_im_paths[k] for k in indexes]\n\n # Generate data--------------------------------------------------------------> (b)\n X, y = self.data_generation(list_IDs_im)\n\n if self.augment is None:\n return X, np.array(y) / 255\n else:\n im, mask = [], []\n for x, y in zip(X, y):\n augmented = self.augment(image=x, mask=y)\n im.append(augmented['image'])\n mask.append(augmented['mask'])\n return np.array(im), np.array(mask) / 255\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n # 1, 2, 3,..., 2957\n self.indexes = np.arange(len(self.train_im_paths))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def data_generation(self, list_IDs_im):\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((len(list_IDs_im), self.img_size, self.img_size, self.n_channels))\n y = np.empty((len(list_IDs_im), self.img_size, self.img_size, 1))\n\n # Generate data--------------------------------------------------------------> (c)\n for i, im_path in enumerate(list_IDs_im):\n\n # read image\n im = cv2.cvtColor(cv2.imread(im_path), cv2.COLOR_BGR2GRAY)\n mask_path = im_path.replace(self.train_im_path, self.train_mask_path)\n # mask image\n mask = cv2.cvtColor(cv2.imread(mask_path), cv2.COLOR_BGR2GRAY)\n\n # if the image is single-channel (H, W), repeat it to three channels so __getitem__ returns a consistent shape\n if len(im.shape) == 2:\n im = np.repeat(im[..., None], 3, 2)\n\n # Resize sample\n X[i,] = cv2.resize(im, (self.img_size, self.img_size))\n\n # Store class\n y[i,] = cv2.resize(mask, (self.img_size, self.img_size))[..., np.newaxis]\n y[y > 0] = 255\n\n return np.uint8(X), np.uint8(y)\n\n\n
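# --- Added illustrative sketch (not part of the original file): one assumed way\n# to drive DataGenerator with a Keras model; the model object and epoch count\n# are invented purely for demonstration.\n# train_gen = DataGenerator(batch_size=batch_size, img_size=256, shuffle=True)\n# model.fit_generator(train_gen, epochs=10)\n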
","sub_path":"datagen.py","file_name":"datagen.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552347409","text":"from phmac_compiler import py_html_macro_node as node\nfrom phmac_compiler import my_custom\nfrom phmac_compiler import load as file_to_tree\nimport sys\n\ndef make_dict(ins):\n lookin = []\n libs = []\n comp = []\n macros = []\n for x in ins.subs:\n if x.text==\"look-in\":\n for y in x.subs:\n lookin.append(y.text)\n elif x.text==\"libs\":\n for y in x.subs:\n libs.append(y.text)\n elif x.text==\"compile\":\n for y in x.subs:\n comp.append([y.text,y.subs[0].text])\n elif x.text==\"macro-files\":\n for y in x.subs:\n macros.append(y.text)\n else:\n raise Exception(\"Command (\"+str(x.text)+\") not recognized\")\n ou = dict()\n ou[\"look-in\"] = lookin\n ou[\"libs\"] = libs\n ou[\"compile\"] = comp\n ou[\"macros\"] = macros\n return ou\n\ndef addString(b,s):\n w = s.encode(\"utf8\")\n for x in w:\n b.append(x)\n\ndef make_python(d):\n ou = []\n addString(ou,\"import sys\\n\")\n for x in d[\"look-in\"]:\n addString(ou,\"sys.path.append(\\\"\")\n addString(ou,x)\n addString(ou,\"\\\")\\n\")\n addString(ou,\"from phmac_compiler import load_and_run as MACRO_THE_STUFF\\n\")\n addString(ou,\"from phmac_compiler import main as DO_THE_STUFF\\n\")\n for x in d[\"libs\"]:\n addString(ou,\"import phmac_stdlib\\n\")\n for x in d[\"macros\"]:\n addString(ou,\"MACRO_THE_STUFF(\\\"\")\n addString(ou,x)\n addString(ou,\"\\\")\\n\")\n for x in d[\"compile\"]:\n addString(ou,\"DO_THE_STUFF(\\\"\")\n addString(ou,x[0])\n addString(ou,\"\\\",\\\"\")\n addString(ou,x[1])\n addString(ou,\"\\\")\\n\")\n return ou\n\ndef save(data):\n outfile = open(\"make.py\",\"wb\")\n outfile.truncate(0)\n outfile.seek(0,0)\n outfile.write(bytes(data))\n outfile.close()\n\ndef main():\n ins = file_to_tree(sys.argv[1])\n d = make_dict(ins)\n del(ins)\n py = make_python(d)\n del(d)\n save(py)\n\nmain()\n","sub_path":"phmac.py","file_name":"phmac.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"523730517","text":"import csv\nfrom gensim.models import word2vec\nimport gensim\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\n\n\n# Train the word2vec model\ndef train_model():\n sentences = word2vec.LineSentence('sentences.txt') # Path\n model = word2vec.Word2Vec(sentences=sentences, size=100, min_count=3)\n model.wv.save_word2vec_format('word2vec.bin', binary=True) # Path\n\n\n# Compute the similarity of two sentences\ndef vector_similarity(s1, s2, model):\n def sentence_vector(s):\n words = word_tokenize(s)\n v = np.zeros(100)\n for word in words:\n try:\n v += model[word]\n except KeyError: # out-of-vocabulary word\n v += np.zeros(100)\n v /= len(words)\n return v\n\n v1, v2 = sentence_vector(s1), sentence_vector(s2)\n try:\n sim = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n except ValueError:\n sim = 0\n return sim\n\n\n# Compute the similarity between a WordNet description and all candidate Wikidata descriptions\ndef cal_sim_word2vec(all_wordnet_des, all_wikidata_des_list, default_sim):\n model = gensim.models.KeyedVectors.load_word2vec_format('models/word2vec/word2vec.bin', binary=True) # Path\n all_sim_list = []\n for wordnet_des, wikidata_des_list in zip(all_wordnet_des, all_wikidata_des_list):\n sim_list = []\n for wikidata_des in wikidata_des_list:\n if wikidata_des == 'None':\n sim_list.append(default_sim)\n else:\n sim = vector_similarity(s1=wordnet_des, s2=wikidata_des, model=model)\n sim_list.append(round(sim, 4))\n all_sim_list.append(sim_list)\n return all_sim_list\n\n\nif __name__ == '__main__':\n train_model()\n
Then this\n beer will be \"cataloged,\" where a tuple containing the brewer, beer name, price, and price per ounce\n will be appended to a list. After all of Beer Advocate's most popular beers are catalogued, the \"beer catalog\"\n can be sorted based on price or price per ounce.\n\n Example for Boston Beer Company Samuel Adams Boston Lager:\n -> ('Boston Beer Company (Samuel Adams)', 'Samuel Adams Boston Lager', 29.99, 0.09)\n '''\n print('* Cataloging Beer Advocate\\'s Most Popular Beers And Their Current Prices At Total Wine ... *\\n')\n\n for total_wine_beer in BA_data.keys():\n r = requests.get('https://www.totalwine.com/search/all?tab=fullcatalog&text=' + total_wine_beer + '&beervolume=standard-size')\n soup = BeautifulSoup(r.text, 'html.parser')\n uls = soup.find_all('ul')\n\n # Each search will yield either: no listing if beer not avaiable; one listing if only one variation\n # is for sale; or multiple listings if the beer is sold in multiple variations i.e. single, 6 pack,\n # 12 pack, etc. If no listings, continue. If one listing, scrape it. If multiple listings, scrape\n # just the first.\n for listing in uls:\n for beer in listing.find_all('li'):\n price_vals = [] # temporary list that will hold each beer's price and price per ounce\n\n name = beer.find('a', attrs={'class':'analyticsProductName'})\n price = beer.find('span', attrs={'class':'price'})\n\n if name is None:\n continue\n else:\n name = name.text\n name = standardize_string(name)\n\n # Some beers are labeled as beer name followed by '(type of beer)'\n # for any of these, separate beer name from type of beer and set\n # name to actual beer name\n name = name.split('(')[0]\n price = price.text\n price = float(price.lstrip('$ '))\n package = beer.find('div', attrs={'class':'plp-product-qty'}).text\n quantity = package.split('-')\n\n if len(quantity) == 1: # listing is a single beer\n ounces_per_beer = quantity[0]\n try:\n ounces_per_beer = float(ounces_per_beer.split('oz')[0])\n except:\n continue # beer volume in mL's (not ounces)\n total_ounces = ounces_per_beer\n else: # listing is package of beers (4, 6, 12, 24, etc)\n num_beers = quantity[0]\n try:\n num_beers = int(num_beers.rstrip('pk')) # labeled 12 pk; strip string from int\n ounces_per_beer = quantity[1]\n try:\n ounces_per_beer = float(ounces_per_beer.split('oz')[0])\n except:\n continue # Does not list beer quantity or amount\n # or beer volume in mL's (not ounces)\n total_ounces = num_beers * ounces_per_beer\n except:\n continue\n\n price_per_ounce = round(price / total_ounces, 2) # round ppo to 2 sig figs\n price_vals = [price, price_per_ounce]\n\n beer_brewer = BA_data[total_wine_beer][0] # Beer Advocate dictionary key (brewer)\n beer_name = BA_data[total_wine_beer][1] # Beer Advocate dictionary value (beer name)\n beer_price = price_vals[0]\n beer_price_per_ounce = price_vals[1]\n\n beer_data = (beer_brewer, beer_name, beer_price, beer_price_per_ounce)\n\n catalog.append(beer_data)\n\n print(f'{beer_brewer} - {beer_name} Has Been Cataloged ...\\n')\n\n # seaching for some beers will yield multiple packaging of a beer for sale, i.e. 
beer\n            # single, 4 pack, 6 pack, 12 pack, etc; if this is the case just the first occurrence will\n            # be recorded\n            break\n\ndef main():\n    beer_advocate_search()\n    catalog_beer_inventoy()\n\n    # sort most popular beers by price per ounce\n    catalog.sort(key=lambda x: x[3])\n\n    # print list of most popular beers, in ascending order, by price per ounce\n    print(catalog)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"beverage_catalog.py","file_name":"beverage_catalog.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"304281108","text":"timeRem = 2503  # race duration in seconds\n\ndSpeed = 16\ndRunTime = 11\ndRestTime = 162  # rest duration; kept separate from the dRest flag below\ndRestRem = 0\ndRunRem = 11\ndDist = 0\ndRun = 1\ndRest = 0\ndScore = 0\n\ncSpeed = 14\ncRunTime = 10\ncRestTime = 127  # rest duration; kept separate from the cRest flag below\ncRestRem = 0\ncRunRem = 10\ncDist = 0\ncRun = 1\ncRest = 0\ncScore = 0\n\nfor currSec in range(timeRem):\n    if dRun == 1:\n        dRunRem -= 1\n        dDist += dSpeed\n    elif dRest == 1:\n        dRestRem -= 1\n    if dRun == 1 and dRunRem == 0:\n        dRest = 1\n        dRun = 0\n        dRestRem = dRestTime\n    elif dRest == 1 and dRestRem == 0:\n        dRun = 1\n        dRest = 0\n        dRunRem = dRunTime\n    \n    \n    if cRun == 1:\n        cRunRem -= 1\n        cDist += cSpeed\n    elif cRest == 1:\n        cRestRem -= 1\n    if cRun == 1 and cRunRem == 0:\n        cRest = 1\n        cRun = 0\n        cRestRem = cRestTime\n    elif cRest == 1 and cRestRem == 0:\n        cRun = 1\n        cRest = 0\n        cRunRem = cRunTime\n    if cDist > dDist:\n        cScore += 1\n    elif dDist > cDist:\n        dScore += 1\n\nprint(cScore, \" \", dScore)\n    \n","sub_path":"Day 14/Day14Part2.py","file_name":"Day14Part2.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"231588419","text":"# -*- coding:utf-8 -*-\n# @File: 多进程.py\n# ==============================\n# @Time: 2018/8/14 15:21\n# @Author: hunzai\n# ==============================\n\nimport multiprocessing\nimport time,threading\n\ndef thread_run():\n    print(threading.get_ident())\ndef run(name):\n    time.sleep(2)\n    print('hello',name)\n    t = threading.Thread(target=thread_run)\n    t.start()\n\nif __name__ == '__main__':\n    for i in range(10):\n        p = multiprocessing.Process(target=run,args=('hunzai %s' %i,))\n        p.start()","sub_path":"day10(进程)/多进程.py","file_name":"多进程.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600230176","text":"import boto3\nimport numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('fan.mp4')\nwith open('rootkey.csv', 'r') as creds:\n    creds = creds.read().split()\n    access_key_id = creds[0]\n    secret_access_key = creds[1]\n\nclient = boto3.client('rekognition',\n                      aws_access_key_id=access_key_id,\n                      aws_secret_access_key=secret_access_key)\n\nwhile(cap.isOpened()):\n    ret, photo = cap.read()\n    if not ret:  # stop when the video runs out of frames\n        break\n\n    cv2.imwrite('frame.jpg', photo)\n\n    with open('frame.jpg', 'rb') as source_image:\n        source_bytes = source_image.read()\n\n    response = client.detect_faces(Image={'Bytes': source_bytes}, Attributes=['ALL'])\n\n\n\n    counter = 0\n\n    for faceDetail in response['FaceDetails']:\n        counter += 1\n\n    cv2.putText(photo, str(counter), (0, 500), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)\n\n    cv2.imshow('Window', photo)\n    if cv2.waitKey(1) & 0xFF == ord('q'):  # let HighGUI draw the frame; press q to quit\n        break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","sub_path":"recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"282254530","text":"import os\r\nimport 
traceback\r\nimport xlwt\r\nimport boto3\r\nimport datetime\r\nfrom azure.common.credentials import ServicePrincipalCredentials\r\nfrom azure.mgmt.resource import ResourceManagementClient\r\nfrom azure.mgmt.network import NetworkManagementClient\r\nfrom azure.mgmt.compute import ComputeManagementClient\r\nfrom azure.mgmt.compute.models import DiskCreateOption\r\nfrom msrestazure.azure_exceptions import CloudError\r\nfrom azure.mgmt.resource import SubscriptionClient\r\nfrom azure.mgmt.web import WebSiteManagementClient\r\nfrom botocore.exceptions import ClientError\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.application import MIMEApplication\r\nfrom email.mime.multipart import MIMEMultipart\r\nimport urllib3\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\ntoday_date=datetime.date.today()\r\nAWS_PROFILE='myprofile1'\r\nemail_from = 'IEG@duffandphelps.com'\r\nemail_to = ['IEG@duffandphelps.com']\r\nboto3.setup_default_session(profile_name=AWS_PROFILE,region_name='us-east-1')\r\nsts = boto3.client('sts')\r\nwb = xlwt.Workbook(encoding=\"utf-8\")\r\nsheet2=wb.add_sheet(\"Subscription\")\r\n\r\nsvms = []\r\n\r\nsheet2.write(0,0,\"Subscription ID\")\r\nsheet2.write(0,1,\"Subscription Name\")\r\nsheet2.write(0,2,\"Authorization Source\")\r\n\r\nsheet3=wb.add_sheet(\"Resource Group\")\r\n\r\nvms = []\r\n\r\nsheet3.write(0,0,\"Resouce Group Name\")\r\nsheet3.write(0,1,\"Subscription\")\r\nsheet3.write(0,2,\"Location\")\r\nsheet3.write(0,3,\"Managed By\")\r\nsheet3.write(0,4,\"Provisioning State\")\r\nsheet3.write(0,5,\"ApplicationName\")\r\nsheet3.write(0,6,\"BusinessOwner\")\r\nsheet3.write(0,7,\"Environment\")\r\nsheet3.write(0,8,\"ServiceLine\")\r\nsheet3.write(0,9,\"TechnologyOwner\")\r\n\r\ndef get_credentials(): \r\n subscription_id = \"3a130925-d7da-48a2-b023-80148f77c31d\"\r\n credentials = ServicePrincipalCredentials(\r\n client_id=\"8c33058c-0ca8-49c4-ba75-8207eec88153\",\r\n secret=\"b6j=-XrH8XX4UtFuKjt+3Wlsci:tEsDp\",\r\n tenant=\"781802be-916f-42df-a204-78a2b3144934\",\r\n )\r\n return credentials\r\ndef run_example():\r\n\r\n subscription_id = \"3a130925-d7da-48a2-b023-80148f77c31d\"\r\n credentials = get_credentials()\r\n resource_client = ResourceManagementClient(credentials, subscription_id)\r\n compute_client = ComputeManagementClient(credentials, subscription_id)\r\n network_client = NetworkManagementClient(credentials, subscription_id)\r\n resource_client = ResourceManagementClient(credentials, subscription_id)\r\n web_client = WebSiteManagementClient(credentials, subscription_id) \r\n \r\n try:\r\n k=0\r\n m=0\r\n subscriptionClient = SubscriptionClient(credentials)\r\n for subscription in subscriptionClient.subscriptions.list():\r\n sub_id = subscription.subscription_id\r\n svms.append(sub_id)\r\n sheet2.write(m+1,0,sub_id)\r\n sheet2.write(m+1,1,subscription.display_name)\r\n sheet2.write(m+1,2,subscription.authorization_source)\r\n m =m+1\r\n print(\"Total VMs Count : \"+str(len(svms)))\r\n for subscription in subscriptionClient.subscriptions.list():\r\n sub_id = subscription.subscription_id\r\n resource_client1 = ResourceManagementClient(credentials, sub_id)\r\n web_client1 = WebSiteManagementClient(credentials, sub_id)\r\n groups = resource_client1.resource_groups.list()\r\n for vm in groups:\r\n vms.append(vm.name)\r\n sheet3.write(k+1,0,vm.name)\r\n sheet3.write(k+1,1,subscription.display_name)\r\n sheet3.write(k+1,2,vm.location)\r\n sheet3.write(k+1,3,vm.managed_by)\r\n sheet3.write(k+1,4,vm.properties.provisioning_state)\r\n 
try:\r\n for tag,tag1 in vm.tags.items():\r\n try:\r\n if tag == 'BusinessOwner': busname = tag1\r\n except: busname = \"UNKNOWN\"\r\n try:\r\n if tag == 'ApplicationName': appname = tag1\r\n except: appname = \"UNKNOWN\"\r\n try:\r\n if tag == 'Environment': envname = tag1\r\n except: envname = \"UNKNOWN\"\r\n try:\r\n if tag == 'ServiceLine': servname = tag1\r\n except: servname = \"UNKNOWN\"\r\n try:\r\n if tag == 'TechnologyOwner': techname = tag1\r\n except: techname = \"UNKNOWN\"\r\n sheet3.write(k+1,5,appname)\r\n sheet3.write(k+1,6,busname)\r\n sheet3.write(k+1,7,envname)\r\n sheet3.write(k+1,8,servname)\r\n sheet3.write(k+1,9,techname)\r\n except: pass\r\n k =k+1\r\n print(\"Total VMs Count : \"+str(len(vms)))\r\n day = today_date.strftime(\"%B_%Y\")\r\n file_name = 'Azure_Subscription_ResourceGroup_'+day+'.xls'\r\n wb.save(file_name)\r\n except CloudError:\r\n print('A VM operation failed:\\n{}'.format(traceback.format_exc()))\r\n try:\r\n prim_assume_itbn = sts.assume_role(\r\n RoleArn='arn:aws:iam::104436734642:role/3-Prd-Analyst-Access',\r\n RoleSessionName='ITBN',\r\n DurationSeconds=1800,\r\n )\r\n\r\n Prim_ITBN_RoleAccessKeyId = prim_assume_itbn[\"Credentials\"][\"AccessKeyId\"]\r\n Prim_ITBN_RoleSecretAccessKey = prim_assume_itbn[\"Credentials\"][\"SecretAccessKey\"]\r\n Prim_ITBN_RoleSessionToken = prim_assume_itbn[\"Credentials\"][\"SessionToken\"]\r\n\r\n \r\n CHARSET = \"utf-8\"\r\n #Sending Email for Unused Resources\r\n msg = MIMEMultipart()\r\n body_text = (\r\n \"Attached herewith is the latest Azure Subscription and Resource Groups Inventory \\r\\r\\n\"\r\n \"Total Number of Subscriptions: \" + str(len(svms)) + \" \\r\\r\\n\"\r\n \"Total Number of Resource Groups: \" + str(len(vms)) + \" \\r\\r\\n\"\r\n )\r\n #html = str(sys.argv[3])\r\n msg['Subject'] = \"Azure Subscription and Resource Group Inventory List\"\r\n msg['From'] = email_from\r\n msg['To'] = ', '.join(email_to)\r\n body = MIMEText(body_text.encode(CHARSET), 'plain', CHARSET)\r\n msg.attach(body)\r\n # What a recipient sees if they don't use an email reader\r\n msg.preamble = 'Multipart message.\\n' \r\n part = MIMEApplication(open(file_name, \"rb\").read())\r\n part.add_header('Content-Disposition', 'attachment', filename=file_name)\r\n part.add_header('Content-Type', 'application/vnd.ms-excel; charset=UTF-8')\r\n msg.attach(part)\r\n # Create a new SES resource and specify a region.\r\n ses = boto3.client('ses',\r\n aws_access_key_id=Prim_ITBN_RoleAccessKeyId,\r\n aws_secret_access_key=Prim_ITBN_RoleSecretAccessKey,\r\n aws_session_token=Prim_ITBN_RoleSessionToken,\r\n region_name='us-east-1',\r\n verify=False)\r\n \r\n # Try to send the email.\r\n #Provide the contents of the email.\r\n response=ses.send_raw_email(\r\n Source=email_from,\r\n Destinations=email_to,\r\n RawMessage={\r\n 'Data': msg.as_string(),\r\n }\r\n ) \r\n # Display an error if something goes wrong.\t\r\n except ClientError as e:\r\n print(e.response['Error']['Message'])\r\n else:\r\n print(\"Email sent! 
Message ID:\"),\r\n print(response['MessageId'])\r\nif __name__ == \"__main__\":\r\n run_example()\r\n","sub_path":"SubscriptionResourceGroup.py","file_name":"SubscriptionResourceGroup.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439769385","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport sqlite3\nimport subprocess\nimport collections\nimport pybedtools as pbt\nimport pysam\nimport time\n\nimport gemini_constants\nfrom gemini.annotations import annotations_in_region, guess_contig_naming\n\ndef add_requested_column(col_name, update_cursor):\n \"\"\"\n Attempt to add a new, user-defined column to the\n variants table. Exit if the column already exists.\n \"\"\"\n try:\n alter_qry = \"ALTER TABLE variants ADD COLUMN \" + col_name + \" BOOL DEFAULT NULL\"\n update_cursor.execute(alter_qry)\n except sqlite3.OperationalError:\n pass\n #sys.exit(\"ERROR: column \\\"\" + col_name + \"\\\" already exists in variants table\") \n\ndef _annotate_variants(args, conn, get_val_fn):\n \"\"\"Generalized annotation of variants with a new column.\n \n get_val_fn takes a list of annotations in a region and returns\n the value for that region to update the database with.\n\n Separates selection and identification of values from update, to avoid concurrent\n database access errors from sqlite3, especially on NFS systems. The retained\n to_update list is small, but batching could help if memory issues emerge.\n \"\"\"\n # For each, use Tabix to detect overlaps with the user-defined\n # annotation file. Update the variant row with T/F if overlaps found.\n annos = pysam.Tabixfile(args.anno_file)\n naming = guess_contig_naming(annos)\n select_cursor = conn.cursor()\n select_cursor.execute(\"SELECT chrom, start, end, variant_id FROM variants\")\n to_update = []\n for row in select_cursor:\n to_update.append((str(row[\"variant_id\"]),\n get_val_fn(annotations_in_region(row, annos, \"tuple\", naming))))\n update_cursor = conn.cursor()\n add_requested_column(args.col_name, update_cursor)\n for variant_id, val in to_update:\n update_qry = \"UPDATE variants SET \" + args.col_name + \" = \" + str(val) + \\\n \" WHERE variant_id = \" + variant_id\n update_cursor.execute(update_qry)\n\ndef annotate_variants_bool(args, conn):\n \"\"\"\n Populate a new, user-defined column in the variants\n table with a BOOLEAN indicating whether or not\n overlaps were detected between the variant and the \n annotation file.\n \"\"\"\n def has_anno_hit(hits):\n has_hit = 0\n for hit in hits:\n has_hit = 1\n break\n return has_hit\n return _annotate_variants(args, conn, has_anno_hit)\n\ndef annotate_variants_count(args, conn):\n \"\"\"\n Populate a new, user-defined column in the variants\n table with a INTEGER indicating the count of overlaps\n between the variant and the \n annotation file.\n \"\"\"\n def get_hit_count(hits):\n count = 0\n for hit in hits:\n count += 1\n return count\n return _annotate_variants(args, conn, get_hit_count)\n\ndef annotate_variants_list(args, conn):\n \"\"\"\n Populate a new, user-defined column in the variants\n table with a INTEGER indicating the count of overlaps\n between the variant and the \n annotation file.\n \"\"\"\n def get_hit_list(hits):\n hit_list = []\n for hit in hits:\n try:\n hit_list.append(hit[int(args.col_extract) - 1])\n except IndexError:\n sys.exit(\"Column \" + args.col_extract + \" exceeds \\\n the number of columns in your \\\n annotation file. 
Exiting.\")\n if len(hit_list) > 0:\n val = \",\".join(hit_list)\n return \"'%s'\" % val\n else:\n return \"NULL\"\n return _annotate_variants(args, conn, get_hit_list)\n\ndef annotate(parser, args):\n\n if (args.db is None):\n parser.print_help()\n exit()\n\n if not os.path.exists(args.db):\n sys.stderr.write(\"Error: cannot find database file.\")\n exit(1)\n if not os.path.exists(args.anno_file):\n sys.stderr.write(\"Error: cannot find annotation file.\")\n exit(1)\n\n conn = sqlite3.connect(args.db)\n conn.row_factory = sqlite3.Row # allow us to refer to columns by name\n conn.isolation_level = None\n\n if args.col_type == \"boolean\":\n annotate_variants_bool(args, conn)\n elif args.col_type == \"count\":\n annotate_variants_count(args, conn) \n elif args.col_type == \"list\":\n if args.col_extract is None:\n sys.exit(\"You must specify which column to extract (-e) from the annotation file.\")\n else:\n annotate_variants_list(args, conn)\n else:\n sys.exit(\"Unknown column type requested. Exiting.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gemini/gemini_annotate.py","file_name":"gemini_annotate.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"622646014","text":"import sys\r\nimport json\r\nimport numpy as np\r\n\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5 import QtGui\r\n\r\nimport matplotlib\r\n\r\nmatplotlib.use('Qt5Agg')\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nfrom matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass SC_GAP_Fitting_Window():\r\n\r\n def setupUi(self, Form):\r\n Form.setObjectName(\"Form\")\r\n Form.resize(796, 488)\r\n self.widget_superconductor_STM = QWidget(Form)\r\n self.widget_superconductor_STM.setGeometry(QtCore.QRect(20, 10, 761, 401))\r\n self.widget_superconductor_STM.setObjectName(\"widget_superconductor_STM\")\r\n #\r\n self.dat_file_list = QListWidget(self.widget_superconductor_STM)\r\n self.dat_file_list.setGeometry(QtCore.QRect(10, 40, 51, 351))\r\n self.dat_file_list.setObjectName(\"dat_file_list\")\r\n #\r\n self.groupBox_Single_Pixel = QGroupBox(self.widget_superconductor_STM)\r\n self.groupBox_Single_Pixel.setGeometry(QtCore.QRect(70, 10, 441, 381))\r\n self.groupBox_Single_Pixel.setObjectName(\"groupBox_Single_Pixel\")\r\n #\r\n self.groupBox_fitting_parameters = QGroupBox(self.widget_superconductor_STM)\r\n self.groupBox_fitting_parameters.setGeometry(QtCore.QRect(520, 120, 231, 271))\r\n self.groupBox_fitting_parameters.setObjectName(\"groupBox_fitting_parameters\")\r\n #\r\n self.label_parameter_A = QLabel(self.groupBox_fitting_parameters)\r\n self.label_parameter_A.setGeometry(QtCore.QRect(90, 30, 41, 16))\r\n self.label_parameter_A.setObjectName(\"label_parameter_A\")\r\n self.label_parameter_B = QLabel(self.groupBox_fitting_parameters)\r\n self.label_parameter_B.setGeometry(QtCore.QRect(90, 90, 41, 16))\r\n self.label_parameter_B.setObjectName(\"label_parameter_B\")\r\n self.label_parameter_C = QLabel(self.groupBox_fitting_parameters)\r\n self.label_parameter_C.setGeometry(QtCore.QRect(90, 150, 41, 16))\r\n self.label_parameter_C.setObjectName(\"label_parameter_C\")\r\n self.label_parameter_D = QLabel(self.groupBox_fitting_parameters)\r\n self.label_parameter_D.setGeometry(QtCore.QRect(90, 220, 41, 16))\r\n 
self.label_parameter_D.setObjectName(\"label_parameter_D\")\r\n self.horizontalSlider_parameter_A = QSlider(self.groupBox_fitting_parameters)\r\n self.horizontalSlider_parameter_A.setGeometry(QtCore.QRect(50, 50, 131, 16))\r\n self.horizontalSlider_parameter_A.setOrientation(QtCore.Qt.Horizontal)\r\n self.horizontalSlider_parameter_A.setObjectName(\"horizontalSlider_parameter_A\")\r\n self.horizontalSlider_parameter_B = QSlider(self.groupBox_fitting_parameters)\r\n self.horizontalSlider_parameter_B.setGeometry(QtCore.QRect(50, 110, 131, 16))\r\n self.horizontalSlider_parameter_B.setOrientation(QtCore.Qt.Horizontal)\r\n self.horizontalSlider_parameter_B.setObjectName(\"horizontalSlider_parameter_B\")\r\n self.horizontalSlider_parameter_C = QSlider(self.groupBox_fitting_parameters)\r\n self.horizontalSlider_parameter_C.setGeometry(QtCore.QRect(50, 170, 131, 16))\r\n self.horizontalSlider_parameter_C.setOrientation(QtCore.Qt.Horizontal)\r\n self.horizontalSlider_parameter_C.setObjectName(\"horizontalSlider_parameter_C\")\r\n self.horizontalSlider_parameter_D = QSlider(self.groupBox_fitting_parameters)\r\n self.horizontalSlider_parameter_D.setGeometry(QtCore.QRect(50, 240, 131, 16))\r\n self.horizontalSlider_parameter_D.setOrientation(QtCore.Qt.Horizontal)\r\n self.horizontalSlider_parameter_D.setObjectName(\"horizontalSlider_parameter_D\")\r\n self.lineEdit_parameter_D_min = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_D_min.setGeometry(QtCore.QRect(10, 240, 31, 20))\r\n self.lineEdit_parameter_D_min.setObjectName(\"lineEdit_parameter_D_min\")\r\n self.lineEdit_parameter_D_max = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_D_max.setGeometry(QtCore.QRect(190, 240, 31, 20))\r\n self.lineEdit_parameter_D_max.setObjectName(\"lineEdit_parameter_D_max\")\r\n self.lineEdit_parameter_C_min = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_C_min.setGeometry(QtCore.QRect(10, 170, 31, 20))\r\n self.lineEdit_parameter_C_min.setObjectName(\"lineEdit_parameter_C_min\")\r\n self.lineEdit_parameter_C_max = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_C_max.setGeometry(QtCore.QRect(190, 170, 31, 20))\r\n self.lineEdit_parameter_C_max.setObjectName(\"lineEdit_parameter_C_max\")\r\n self.lineEdit_parameter_B_min = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_B_min.setGeometry(QtCore.QRect(10, 110, 31, 20))\r\n self.lineEdit_parameter_B_min.setObjectName(\"lineEdit_parameter_B_min\")\r\n self.lineEdit_parameter_B_max = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_B_max.setGeometry(QtCore.QRect(190, 110, 31, 20))\r\n self.lineEdit_parameter_B_max.setObjectName(\"lineEdit_parameter_B_max\")\r\n self.lineEdit_parameter_A_max = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_A_max.setGeometry(QtCore.QRect(190, 50, 31, 20))\r\n self.lineEdit_parameter_A_max.setObjectName(\"lineEdit_parameter_A_max\")\r\n self.lineEdit_parameter_A_min = QLineEdit(self.groupBox_fitting_parameters)\r\n self.lineEdit_parameter_A_min.setGeometry(QtCore.QRect(10, 50, 31, 20))\r\n self.lineEdit_parameter_A_min.setObjectName(\"lineEdit_parameter_A_min\")\r\n self.pushButton_move_A = QPushButton(self.groupBox_fitting_parameters)\r\n self.pushButton_move_A.setGeometry(QtCore.QRect(170, 20, 51, 23))\r\n self.pushButton_move_A.setObjectName(\"pushButton_move_A\")\r\n self.pushButton_move_D = QPushButton(self.groupBox_fitting_parameters)\r\n 
self.pushButton_move_D.setGeometry(QtCore.QRect(170, 210, 51, 23))\r\n self.pushButton_move_D.setObjectName(\"pushButton_move_D\")\r\n self.pushButton_move_C = QPushButton(self.groupBox_fitting_parameters)\r\n self.pushButton_move_C.setGeometry(QtCore.QRect(170, 140, 51, 23))\r\n self.pushButton_move_C.setObjectName(\"pushButton_move_C\")\r\n self.pushButton_move_B = QPushButton(self.groupBox_fitting_parameters)\r\n self.pushButton_move_B.setGeometry(QtCore.QRect(170, 80, 51, 23))\r\n self.pushButton_move_B.setObjectName(\"pushButton_move_B\")\r\n #\r\n self.Load_dat_file_btn = QPushButton(self.widget_superconductor_STM)\r\n self.Load_dat_file_btn.setGeometry(QtCore.QRect(10, 10, 51, 23))\r\n self.Load_dat_file_btn.setObjectName(\"Load_dat_file_btn\")\r\n #\r\n self.groupBox_fitting = QGroupBox(self.widget_superconductor_STM)\r\n self.groupBox_fitting.setGeometry(QtCore.QRect(520, 10, 231, 101))\r\n self.groupBox_fitting.setObjectName(\"groupBox_fitting\")\r\n #\r\n self.pushButton_fitting = QPushButton(self.groupBox_fitting)\r\n self.pushButton_fitting.setGeometry(QtCore.QRect(80, 70, 51, 20))\r\n self.pushButton_fitting.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)\r\n self.pushButton_fitting.setObjectName(\"pushButton_fitting\")\r\n self.comboBox_isotropic = QComboBox(self.groupBox_fitting)\r\n self.comboBox_isotropic.setGeometry(QtCore.QRect(10, 40, 67, 22))\r\n self.comboBox_isotropic.setObjectName(\"comboBox_isotropic\")\r\n self.comboBox_isotropic.addItem(\"\")\r\n self.comboBox_isotropic.addItem(\"\")\r\n self.comboBox_isotropic.addItem(\"\")\r\n self.comboBox_isotropic.addItem(\"\")\r\n self.checkBox_isotropic = QCheckBox(self.groupBox_fitting)\r\n self.checkBox_isotropic.setGeometry(QtCore.QRect(10, 10, 84, 32))\r\n sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.checkBox_isotropic.sizePolicy().hasHeightForWidth())\r\n self.checkBox_isotropic.setSizePolicy(sizePolicy)\r\n self.checkBox_isotropic.setMinimumSize(QtCore.QSize(0, 32))\r\n self.checkBox_isotropic.setObjectName(\"checkBox_isotropic\")\r\n self.checkBox_anisotropic = QCheckBox(self.groupBox_fitting)\r\n self.checkBox_anisotropic.setGeometry(QtCore.QRect(140, 10, 84, 32))\r\n self.checkBox_anisotropic.setMinimumSize(QtCore.QSize(0, 32))\r\n self.checkBox_anisotropic.setObjectName(\"checkBox_anisotropic\")\r\n self.comboBox_anisotropic = QComboBox(self.groupBox_fitting)\r\n self.comboBox_anisotropic.setGeometry(QtCore.QRect(140, 40, 67, 22))\r\n self.comboBox_anisotropic.setObjectName(\"comboBox_anisotropic\")\r\n self.comboBox_anisotropic.addItem(\"\")\r\n self.comboBox_anisotropic.addItem(\"\")\r\n self.comboBox_anisotropic.addItem(\"\")\r\n self.comboBox_anisotropic.addItem(\"\")\r\n\r\n self.retranslateUi(Form)\r\n QtCore.QMetaObject.connectSlotsByName(Form)\r\n\r\n def retranslateUi(self, Form):\r\n _translate = QtCore.QCoreApplication.translate\r\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\r\n self.dat_file_list.setToolTip(_translate(\"Form\", \"存放加载上来的文件名\"))\r\n self.groupBox_Single_Pixel.setTitle(_translate(\"Form\", \"single_pixel\"))\r\n self.groupBox_fitting_parameters.setToolTip(_translate(\"Form\", \"显示拟合参数并可以自行调节\"))\r\n self.groupBox_fitting_parameters.setTitle(_translate(\"Form\", \"拟合参数\"))\r\n self.label_parameter_A.setToolTip(_translate(\"Form\", \"显示参数A的拟合结果\"))\r\n self.label_parameter_A.setText(_translate(\"Form\", \"参数A:\"))\r\n 
self.label_parameter_B.setToolTip(_translate(\"Form\", \"显示参数B的拟合结果\"))\r\n self.label_parameter_B.setText(_translate(\"Form\", \"参数B:\"))\r\n self.label_parameter_C.setToolTip(_translate(\"Form\", \"显示参数C的拟合结果\"))\r\n self.label_parameter_C.setText(_translate(\"Form\", \"参数C:\"))\r\n self.label_parameter_D.setToolTip(_translate(\"Form\", \"显示参数D的拟合结果\"))\r\n self.label_parameter_D.setText(_translate(\"Form\", \"参数D:\"))\r\n self.lineEdit_parameter_B_min.setToolTip(_translate(\"Form\", \"可输入参数B的下限,否则用默认值\"))\r\n self.lineEdit_parameter_B_max.setToolTip(_translate(\"Form\", \"可输入参数B的上限,否则用默认值\"))\r\n self.lineEdit_parameter_A_max.setToolTip(_translate(\"Form\", \"可输入参数A的上限,否则用默认值\"))\r\n self.lineEdit_parameter_A_min.setToolTip(_translate(\"Form\", \"可输入参数A的下限,否则用默认值\"))\r\n self.pushButton_move_A.setToolTip(_translate(\"Form\", \"连续变化参数A,可在拟合之间简单尝试\"))\r\n self.pushButton_move_A.setText(_translate(\"Form\", \"move_A\"))\r\n self.pushButton_move_D.setToolTip(_translate(\"Form\", \"连续变化参数D,可在拟合之间简单尝试\"))\r\n self.pushButton_move_D.setText(_translate(\"Form\", \"move_D\"))\r\n self.pushButton_move_C.setToolTip(_translate(\"Form\", \"连续变化参数C,可在拟合之间简单尝试\"))\r\n self.pushButton_move_C.setText(_translate(\"Form\", \"move_C\"))\r\n self.pushButton_move_B.setToolTip(_translate(\"Form\", \"连续变化参数B,可在拟合之间简单尝试\"))\r\n self.pushButton_move_B.setText(_translate(\"Form\", \"move_B\"))\r\n self.Load_dat_file_btn.setToolTip(_translate(\"Form\", \"加载dat文件\"))\r\n self.Load_dat_file_btn.setText(_translate(\"Form\", \"Load\"))\r\n self.groupBox_fitting.setToolTip(_translate(\"Form\", \"进行超导拟合\"))\r\n self.groupBox_fitting.setTitle(_translate(\"Form\", \"拟合\"))\r\n self.pushButton_fitting.setToolTip(_translate(\"Form\", \"进行拟合\"))\r\n self.pushButton_fitting.setText(_translate(\"Form\", \"拟合\"))\r\n self.comboBox_isotropic.setToolTip(_translate(\"Form\", \"选择一种各项同性的拟合方式\"))\r\n self.comboBox_isotropic.setItemText(0, _translate(\"Form\", \"s\"))\r\n self.comboBox_isotropic.setItemText(1, _translate(\"Form\", \"p\"))\r\n self.comboBox_isotropic.setItemText(2, _translate(\"Form\", \"d\"))\r\n self.comboBox_isotropic.setItemText(3, _translate(\"Form\", \"2s\"))\r\n self.checkBox_isotropic.setToolTip(_translate(\"Form\", \"是否进行各项同性拟合\"))\r\n self.checkBox_isotropic.setText(_translate(\"Form\", \"isotropic\"))\r\n self.checkBox_anisotropic.setToolTip(_translate(\"Form\", \"是否进行各项异性拟合\"))\r\n self.checkBox_anisotropic.setText(_translate(\"Form\", \"anisotropic\"))\r\n self.comboBox_anisotropic.setToolTip(_translate(\"Form\", \"选择一种各项异性的拟合方式\"))\r\n self.comboBox_anisotropic.setItemText(0, _translate(\"Form\", \"s\"))\r\n self.comboBox_anisotropic.setItemText(1, _translate(\"Form\", \"p\"))\r\n self.comboBox_anisotropic.setItemText(2, _translate(\"Form\", \"d\"))\r\n self.comboBox_anisotropic.setItemText(3, _translate(\"Form\", \"2s\"))\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n mainWindow = QMainWindow()\r\n form = SC_GAP_Fitting_Window()\r\n form.setupUi(mainWindow)\r\n mainWindow.show()\r\n app.exec_()\r\n","sub_path":"SuperConductors.py","file_name":"SuperConductors.py","file_ext":"py","file_size_in_byte":13397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"403376309","text":"from document.agent.catalog import Agent_Catalog_Document\nfrom document.agent.instance import Agent_Instance_Document\nfrom document.exec_env import Exec_Env_Document\nfrom marshmallow import Schema, validate, validates_schema\nfrom marshmallow.fields 
import Bool, Constant, DateTime as Date_Time, Nested, Raw, Str\nfrom schema.agent.catalog import Agent_Catalog_Schema, AGENT_STATUS\nfrom schema.base import Base_Schema\nfrom schema.exec_env import Exec_Env_Schema\nfrom schema.validate import In, Unique_List\nfrom utils.datetime import FORMAT\n\n__all__ = [\n    'Agent_Instance_Schema'\n]\n\n\nclass Agent_Instance_Action_Schema(Schema):\n    \"\"\"Action of the agent instance installed in an execution environment.\"\"\"\n\n    id = Str(required=True, example='list',\n             description='Action id.')\n    data = Raw(required=True, example='drop: all',\n               description='Action data.')\n    timestamp = Date_Time(format=FORMAT, required=True, readonly=True,\n                          description=\"Timestamp of the last time the action was executed correctly.\")\n\n\nclass Agent_Instance_Parameter_Schema(Schema):\n    \"\"\"Parameter of the agent instance installed in an execution environment.\"\"\"\n\n    id = Str(required=True, example='period',\n             description='Parameter id.')\n    value = Raw(required=True, example='10s',\n                description='Parameter value.')\n    timestamp = Date_Time(format=FORMAT, required=True, readonly=True,\n                          description=\"Timestamp of the last time the parameter was set correctly.\")\n\n\nclass Agent_Instance_Resource_Schema(Schema):\n    \"\"\"Resource of the agent instance installed in an execution environment.\"\"\"\n\n    id = Str(required=True, example='/opt/firewall.xml',\n             description='Resource path.')\n    content = Str(required=True,\n                  description='Resource content.')\n    timestamp = Date_Time(format=FORMAT, required=True, readonly=True,\n                          description=\"Timestamp of the last time the resource data was updated or created correctly.\")\n\n\nclass Agent_Instance_Operation_Schema(Base_Schema):\n    \"\"\"Represents the operations to perform with the agent instance installed in an execution environment.\"\"\"\n\n    actions = Nested(Agent_Instance_Action_Schema, many=True, unknown='INCLUDE',\n                     description='List of agent instance actions.',\n                     validate=Unique_List.apply('id'),\n                     error_messages=Unique_List.error_messages)\n    parameters = Nested(Agent_Instance_Parameter_Schema, many=True, unknown='INCLUDE',\n                        description='List of agent instance parameters.',\n                        validate=Unique_List.apply('id'),\n                        error_messages=Unique_List.error_messages)\n    resources = Nested(Agent_Instance_Resource_Schema, many=True, unknown='INCLUDE',\n                       description='List of agent instance resources.',\n                       validate=Unique_List.apply('id'),\n                       error_messages=Unique_List.error_messages)\n\n\nclass Agent_Instance_Schema(Base_Schema):\n    \"\"\"Represents an agent instance installed in an execution environment.\"\"\"\n    doc = Agent_Instance_Document\n\n    id = Str(required=True, example='filebeat@apache',\n             description='Id of the agent instance installed in an execution environment.')\n    agent_catalog_id = Str(required=True, readonly=True, example='filebeat',\n                           description='Id of the agent in the catalog.',\n                           validate=In.apply(Agent_Catalog_Document.get_ids),\n                           error_messages=In.error_messages)\n    exec_env_id = Str(required=True, readonly=True, example='apache',\n                      description='Id of the execution environment where the agent instance is installed.',\n                      validate=In.apply(Exec_Env_Document.get_ids),\n                      error_messages=In.error_messages)\n    status = Str(required=True, readonly=True, enum=AGENT_STATUS, example=AGENT_STATUS[0],\n                 description='Status of the agent instance.',\n                 validate=validate.OneOf(AGENT_STATUS))\n    actions = Nested(Agent_Instance_Action_Schema, many=True, unknown='INCLUDE',\n                     description='List of agent instance actions.',\n                     
validate=Unique_List.apply('id'),\n                     error_messages=Unique_List.error_messages)\n    parameters = Nested(Agent_Instance_Parameter_Schema, many=True, unknown='INCLUDE',\n                        description='List of agent instance parameters.',\n                        validate=Unique_List.apply('id'),\n                        error_messages=Unique_List.error_messages)\n    resources = Nested(Agent_Instance_Resource_Schema, many=True, unknown='INCLUDE',\n                       description='List of agent instance resources.',\n                       validate=Unique_List.apply('id'),\n                       error_messages=Unique_List.error_messages)\n    description = Str(example='Collect system metrics from execution environments.',\n                      description='Short description of the agent installed in the execution environment.')\n","sub_path":"schema/agent/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"277883399","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\nstep = []\nfor _ in range(n):\n    step.append(int(input()))\ndp = [step[0]]\nfor i in range(1,n):\n    one = dp[i-2]+step[i]\n    two = step[i-1]+step[i]\n    if i-3>-1:\n        two+=dp[i-3]\n    dp.append(max(dp[i-1],one,two))  # also allow skipping glass i\nprint(dp[-1])","sub_path":"BOJ/BOJ_2156_포도주시식.py","file_name":"BOJ_2156_포도주시식.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"411478416","text":"def ct2(n, m=0, depth=0):\n    indent = ' '*depth\n    print(indent, '(%d,%d)' % (n, m))\n    if (n <= 0):\n        result = int(str(n)+str(m))\n    else:\n        result = ct2(n//2, m+1, depth+1) + ct2(n-4, m+2, depth+1)\n    print(indent, '-->', result)\n    return result\nct2(3)	# Note:	you must correctly indicate newlines and indents\n      # Hint: prints 10 lines","sub_path":"Quizzes/Recursion/CT-S17-2.py","file_name":"CT-S17-2.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600330950","text":"import random\nimport time\n\n\ndef enemy_generation():\n    enemy_list = [\"gorgon\", \"dragon\", \"wicked fairie\", \"pirate\"]\n    enemy = random.choice(enemy_list)\n    return enemy\n\n\ndef print_pause(sentence):\n    print(sentence)\n    time.sleep(2)\n\n\ndef start_game(item, enemy):\n\n    item = item\n    enemy = enemy\n\n    print_pause(\"You find yourself standing in an open field, \"\n                \"filled with grass and yellow wildflowers.\")\n    print_pause(f\"Rumor has it that a {enemy} is somewhere around here, \"\n                \"and has been terrifying the nearby village.\")\n    print_pause(\"In front of you is a house.\")\n\n    print_pause(\"To your right is a dark cave.\")\n    print_pause(\"In your hand you hold your trusty \"\n                \"(but not very effective) dagger.\\n\")\n\n    put_knock_or_cave_options(item, enemy)\n\n\ndef put_knock_or_cave_options(item, enemy):\n\n    item = item\n    enemy = enemy\n\n    print_pause(\"Enter 1 to knock on the door of the house.\")\n    print_pause(\"Enter 2 to peer into the cave.\")\n    print_pause(\"What would you like to do?\")\n\n    get_knock_or_cave_decision(item, enemy)\n\n\ndef get_knock_or_cave_decision(item, enemy):\n\n    item = item\n    enemy = enemy\n\n    prompt = \"(Please enter 1 or 2.)\\n\"\n\n    options = [\"1\", \"2\"]\n\n    choice = validate_input(prompt, options)\n\n    if item == \"dagger\" and choice == \"1\":\n        knock_dagger(item, enemy)\n    elif item == \"dagger\" and choice == \"2\":\n        cave_dagger(item, enemy)\n    elif item == \"sword\" and choice == \"1\":\n        knock_sword(item, enemy)\n    elif item == \"sword\" and choice == 
\"2\":\n cave_sword(item, enemy)\n else:\n get_knock_or_cave_decision(item, enemy)\n\n\ndef cave_dagger(item, enemy):\n\n enemy = enemy\n\n print_pause(\"You peer cautiously into the cave.\")\n print_pause(\"It turns out to be only a very small cave.\")\n print_pause(\"Your eye catches a glint of metal behind a rock.\")\n print_pause(\"You have found the magical Sword of Ogoroth!\")\n print_pause(\"You discard your silly old dagger \"\n \"and take the sword with you.\")\n print_pause(\"You walk back out to the field.\\n\")\n\n item = \"sword\"\n\n put_knock_or_cave_options(item, enemy)\n\n\ndef cave_sword(item, enemy):\n\n item = item\n enemy = enemy\n\n print_pause(\"You peer cautiously into the cave.\")\n print_pause(\"You've been here before, and gotten all the good stuff. \"\n \"It's just an empty cave now.\\n\")\n\n put_knock_or_cave_options(item, enemy)\n\n\ndef knock_dagger(item, enemy):\n\n print_pause(\"You approach the door of the house.\")\n print_pause(\"You are about to knock when the door opens and \"\n f\"out steps a {enemy}.\")\n print_pause(f\"Eep! This is the {enemy}'s house!\")\n print_pause(f\"The {enemy} attacks you!\")\n print_pause(\"You feel a bit under-prepared for this, what with \"\n \"only having a tiny dagger.\")\n\n put_fight_or_flight_options(item, enemy)\n\n\ndef knock_sword(item, enemy):\n\n item = item\n enemy = enemy\n\n print_pause(\"You approach the door of the house.\")\n print_pause(\"You are about to knock when the door opens \"\n f\"and out steps a {enemy}.\")\n print_pause(f\"Eep! This is the {enemy}'s house!\")\n print_pause(f\"The {enemy} attacks you!\")\n\n put_fight_or_flight_options(item, enemy)\n\n\ndef put_fight_or_flight_options(item, enemy):\n\n item = item\n enemy = enemy\n\n prompt = \"Would you like to (1) fight or (2) run away?\\n\"\n\n options = [\"1\", \"2\"]\n\n choice = validate_input(prompt, options)\n\n if choice == \"1\" and item == \"dagger\":\n fight_dagger(item, enemy)\n elif choice == \"2\":\n flee_generic(item, enemy)\n elif choice == \"1\" and item == \"sword\":\n fight_sword(item, enemy)\n else:\n put_fight_or_flight_options(item, enemy)\n\n\ndef fight_dagger(item, enemy):\n\n item = item\n enemy = enemy\n\n print_pause(\"You do your best...\")\n print_pause(f\"but your dagger is no match for the {enemy}.\")\n print_pause(\"You have been defeated!\")\n\n play_again()\n\n\ndef flee_generic(item, enemy):\n\n enemy = enemy\n item = item\n\n print_pause(\"You run back into the field. Luckily, \"\n \"you don't seem to have been followed.\\n\")\n\n put_knock_or_cave_options(item, enemy)\n\n\ndef flee_dagger(item, enemy):\n\n item = item\n enemy = enemy\n\n print_pause(\"You run back into the field. Luckily, \"\n \"you don't seem to have been followed.\\n\")\n\n put_knock_or_cave_options(item, enemy)\n\n\ndef fight_sword(item, enemy):\n\n item = item\n enemy = enemy\n\n print_pause(f\"As the {enemy} moves to attack, \"\n \"you unsheath your new sword.\")\n print_pause(\"The Sword of Ogoroth shines brightly in your hand \"\n \"as you brace yourself for the attack.\")\n print_pause(f\"But the {enemy} takes one look at your shiny \"\n \"new toy and runs away!\")\n print_pause(f\"You have rid the town of the {enemy}. You are victorious!\")\n\n play_again()\n\n\ndef validate_input(prompt, options):\n while True:\n response = input(prompt).lower()\n for option in options:\n if option == response:\n return response\n\n\ndef play_again():\n prompt = \"Would you like to play again? 
(y/n)\\n\"\n\n options = [\"y\", \"n\"]\n\n choice = validate_input(prompt, options)\n\n if choice == \"y\":\n print_pause(\"Excellent! Restarting the game ...\")\n play_game()\n elif choice == \"n\":\n exit_game()\n\n\ndef exit_game():\n print_pause(\"Thanks for playing! See you next time.\")\n quit()\n\n\ndef play_game():\n item = \"dagger\"\n enemy = enemy_generation()\n start_game(item, enemy)\n\n\nplay_game()\n","sub_path":"adventuregame.py","file_name":"adventuregame.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"14400121","text":"import numpy #loading our favorite library\nfrom matplotlib import pyplot #and the useful plotting library\nfrom numpy import sin,cos,pi,linspace,ones,zeros,abs,min,max\\\n,exp, shape, empty_like , size , arange, log, log10, loadtxt, savetxt\nimport argparse\nfrom decimal import Decimal\nimport csv\n\npyplot.rc('legend',**{'loc':'upper left'});\npyplot.rcParams[u'legend.fontsize'] = 16\npyplot.rcParams[u'legend.edgecolor']='white'\npyplot.rcParams[u'legend.facecolor']='0.8'\npyplot.rcParams[u'font.weight']='normal'\npyplot.rcParams[u'xtick.labelsize']=15\npyplot.rcParams[u'ytick.labelsize']=15\npyplot.rcParams[u'axes.titlesize']=16\npyplot.rcParams[u'axes.labelsize']=16\npyplot.rcParams[u'axes.spines.right']='false';\npyplot.rcParams[u'axes.spines.top']='false';\npyplot.rcParams[u'lines.linewidth'] = 1.5;\npyplot.rcParams[u'lines.markersize'] = 8;\n\n#===================================\n# Loading Data \n#===================================\nparser = argparse.ArgumentParser(description='python_DG_argument_parsing');\n\nparser.add_argument('-f', type=str, dest='python_input');\n\nargs = parser.parse_args();\n\nwith open(args.python_input) as file: \n \n reader=csv.reader(file, delimiter=':');\n\n for row in reader:\n if row[0]=='mode':\n mode=str(row[1]);\n elif row[0] == 'wave':\n wave = str(row[1]);\n elif row[0]=='CFL': \n CFL=Decimal(row[1]);\n elif row[0] == 'CFL_upw':\n CFL_upw = Decimal(row[1]);\n elif row[0] == 'dt':\n dt_=str(row[1]);\n elif row[0]=='DGp':\n DG=str(row[1]);\n elif row[0] == 'RK': \n RK=str(row[1]);\n elif row[0] == 'Nelem': \n Nelem_=str(int(row[1]));\n elif row[0] == 'T': \n T=Decimal(row[1]);\n elif row[0] =='dir': \n dir1 = str(row[1]);\n elif row[0] == 'errors': \n errors = str(row[1]);\n elif row[0]=='Beta':\n Beta = Decimal(row[1]);\n\nCFL = Decimal(CFL.quantize(Decimal('.001')));\nCFL_upw = Decimal(CFL_upw.quantize(Decimal('.001')));\nT=Decimal(T.quantize(Decimal('.001')));\nBeta=Decimal(Beta.quantize(Decimal('.01')));\ndt = float(dt_);\n\nif mode=='dt_const':\n fname_errors = dir1+errors+str(\"_dt\")+dt_+\"_Beta\"+str(Beta)\\\n +'_'+str(T)+str(\"T.dat\");\n fname_upw = dir1+errors+str(\"_dt\")+dt_+\"_Beta1.00\"\\\n +'_'+str(T)+str(\"T.dat\");\nelif mode=='CFL_const':\n fname_errors = dir1 + errors + str(\"_CFL\") + str(CFL) \\\n + \"_Beta\" + str(Beta) + '_' + str(T) + str(\"T.dat\");\n fname_upw = dir1 + errors + str(\"_CFL\") + str(CFL_upw) \\\n + \"_Beta1.00\" + '_' + str(T) + str(\"T.dat\");\n\ndata_errors = loadtxt(fname_errors);\ndata_upw = loadtxt(fname_upw);\n\nNelem = data_errors[:,0]; \nnDOF = Nelem * (int(DG)+1);\n\nL1_proj= data_errors[:,1];\nL1_aver= data_errors[:,2];\nL2_proj= data_errors[:,3];\nL2_aver= data_errors[:,4];\n\nL1_proj_upw = data_upw[:, 1];\nL1_aver_upw = data_upw[:, 2];\nL2_proj_upw = data_upw[:, 3];\nL2_aver_upw = data_upw[:, 4];\n\n# theoritcal curve:\ntheoretical_curve = exp(-(int(DG)+1) * 
log(nDOF)) ;\nshift = -0.8*log10(L2_proj[0]) + log10(theoretical_curve[0]);\ntheoretical_curve = theoretical_curve / 10**shift;\n\n#===================================\n# Order Calculations:\n#===================================\norder_L1_proj = zeros(size(L1_proj)-1);\norder_L1_proj_upw = zeros(size(L1_proj_upw)-1);\norder_L1_aver = zeros(size(L1_aver)-1);\norder_L2_proj = zeros(size(L2_proj)-1);\norder_L2_proj_upw = zeros(size(L2_proj_upw)-1);\norder_L2_aver = zeros(size(L2_aver)-1);\norder_exact = zeros(size(theoretical_curve)-1);\n\nfor i in range(1,size(L2_proj)):\n order_L1_proj[i-1] = log10(L1_proj[i-1]/L1_proj[i])/log10(2);\n order_L1_aver[i-1] = log10(L1_aver[i-1]/L1_aver[i])/log10(2);\n order_L2_proj[i-1] = log10(L2_proj[i-1]/L2_proj[i])/log10(2);\n order_L2_aver[i-1] = log10(L2_aver[i-1]/L2_aver[i])/log10(2);\n order_exact[i-1] = \\\n log10(theoretical_curve[i-1]/theoretical_curve[i])/log10(2);\n\nfor i in range(1,size(L2_proj_upw)):\n order_L1_proj_upw[i-1] = log10(L1_proj_upw[i-1]/L1_proj_upw[i])/log10(2);\n order_L2_proj_upw[i-1] = log10(L2_proj_upw[i-1]/L2_proj_upw[i])/log10(2);\n\nNelem_ = zeros(size(Nelem)-1);\nfor i in range(1,size(Nelem)):\n\tNelem_[i-1] = int(Nelem[i]);\n\norder_print = numpy.transpose([Nelem_,order_L1_proj,order_L1_aver\\\n ,order_L2_proj,order_L2_aver]);\norder_print_upw = numpy.transpose([Nelem_,order_L1_proj_upw,order_L2_proj_upw]);\n\norder_L1_print = numpy.transpose([Nelem_,order_L1_proj,order_L1_aver]);\norder_L2_print = numpy.transpose([Nelem_,order_L2_proj,order_L2_aver]);\n\nprint('L1_order',order_L1_print);\nprint('L2_order',order_L2_print);\n\nif mode=='CFL_const':\n order_out_name = dir1+'OAA_DGp'+DG+'_RK'+RK+'_Beta'\\\n +str(Beta)+'_CFL'+str(CFL)+ str(\"_\") \\\n + str(int(T)) + 'T.dat';\n order_out_name_upw = dir1 + 'OAA_DGp' + DG + '_RK' + RK \\\n + '_Beta1.00'+ '_CFL' + str(CFL_upw) \\\n + str(\"_\") +str(int(T)) + 'T.dat';\n\nelif mode=='dt_const':\n order_out_name = dir1+'OAA_DGp'+DG+'_RK'+RK+'_Beta'\\\n +str(Beta)+'_dt'+dt_+ str(\"_\") \\\n + str(int(T)) + 'T.dat';\n order_out_name_upw = dir1 + 'OAA_DGp' + DG + '_RK' + RK \\\n + '_Beta1.00' + '_dt' + dt_ \\\n + str(\"_\") +str(int(T)) + 'T.dat';\n\nsavetxt(order_out_name, order_print\\\n, fmt=\"%02d\"+\" %1.4f\"+\" %1.4f\"+\" %1.4f\"+\" %1.4f\"\\\n,header=\"Nelem, order_L1_proj, order_L1_aver, order_L2_proj, order_L2_aver\"\\\n,delimiter=' ');\n\nsavetxt(order_out_name_upw, order_print_upw\\\n, fmt=\"%02d\"+\" %1.4f\"+\" %1.4f\"\\\n,header=\"Nelem, order_L1_proj, order_L2_proj\"\\\n,delimiter=' ');\n\n#===================================\n#! 
Plotting The figure:\n#===================================\n\n\nif mode=='dt_const':\n title_a = str(\"DGp\") + DG + \" RK\" + RK \\\n + \" with dt=\" + '{:1.2e}'.format(dt) \\\n + ' at t/T=' + str(Decimal(T.quantize(Decimal('.1'))))\n\nelif mode=='CFL_const':\n\n\ttitle_a = str(\"DGp\")+ DG + \" RK\"+ RK\\\n + ' at t/T=' + str(Decimal(T.quantize(Decimal('.1'))));\n\nfig, ax = pyplot.subplots(figsize=(8.0, 6.0));\n\npyplot.plot(nDOF, L2_proj, '-or' \\\n , label=r'hybrid, $\\beta$=' + str(Beta) + ', CFL='+str(CFL));\n#pyplot.plot(nDOF, L2_aver, '-^b' \\\n# , label=r'L$_{2}$ of averages, $\\beta$=' + str(Beta));\npyplot.plot(nDOF, L2_proj_upw \\\n , '--sb', label=r'upwind, $\\beta=$1.00' + ', CFL='+str(CFL_upw));\n#pyplot.plot(nDOF, L2_aver_upw, '--vb', label=r'L$_{2}$ of averages, upwind');\npyplot.plot(nDOF, theoretical_curve, ':k', linewidth=1.5,label='p+1 slope');\n\npyplot.legend(loc='upper right');\npyplot.yscale('log');\npyplot.xscale('log');\n\n#pyplot.title(title_a)\npyplot.xlabel('nDOFs',labelpad=10)\npyplot.ylabel(r'L$_{2}$ error of solution',labelpad=10)\n\n# pyplot.ylim(1.0e-2,6.5e-2)\nfrom matplotlib import ticker\n\n# ax.yaxis.set_major_formatter(ticker.LogFormatter())\nax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=15))\n\nax.grid(which='minor', linestyle=':');\n\nfig.tight_layout()\npyplot.show()\n\n\n","sub_path":"python_tools/DGplot_error_analysis.py","file_name":"DGplot_error_analysis.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15518996","text":"# This file is part of ts_sal.\n#\n# Developed for the LSST Telescope and Site Systems.\n# This product includes software developed by the LSST Project\n# (https://www.lsst.org).\n# See the COPYRIGHT file at the top-level directory of this distribution\n# for details of code ownership.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\n__all__ = [\"make_salpy_lib\"]\n\nimport glob\nimport os\nimport shutil\nimport subprocess\n\n\ndef get_env_dir(name, err_msg):\n path = os.environ.get(name)\n if path is None:\n raise RuntimeError(err_msg)\n if not os.path.isdir(path):\n raise RuntimeError(f\"{path} is not a directory\")\n print(f\"${name} = {path}\")\n return path\n\n\ndef make_salpy_lib(sal_name):\n \"\"\"Make a SALPY library for the given component\"\"\"\n print(f\"*** Make SALPY_{sal_name} library ***\")\n sal_dir = get_env_dir(\"TS_SAL_DIR\", \"ts_sal not setup\")\n xml_dir = get_env_dir(\"TS_XML_DIR\", \"ts_xml not setup\")\n sal_work_dir = get_env_dir(\"SAL_WORK_DIR\", \"$SAL_WORK_DIR must be defined\")\n ld_lib_path_name = \"LD_LIBRARY_PATH\"\n initial_ld_lib_path = os.environ[ld_lib_path_name]\n\n try:\n os.environ[ld_lib_path_name] = f\"{sal_work_dir}/lib:{initial_ld_lib_path}\"\n lib_names = dict(\n cpp=f\"libsacpp_{sal_name}_types.so\",\n python=f\"SALPY_{sal_name}.so\")\n src_lib_paths = dict(\n cpp=os.path.join(sal_work_dir, sal_name, \"cpp\", lib_names[\"cpp\"]),\n python=os.path.join(sal_work_dir, sal_name, \"cpp\", \"src\", lib_names[\"python\"]))\n dest_lib_paths = dict(\n cpp=os.path.join(sal_dir, \"lib\", lib_names[\"cpp\"]),\n python=os.path.join(sal_dir, \"python\", lib_names[\"python\"]))\n\n for path in src_lib_paths.values():\n if os.path.isfile(path):\n print(f\"Remove {path}\")\n os.remove(path)\n\n print(f\"*** Copy {sal_name} XML files ***\")\n interfaces_dir = os.path.join(xml_dir, \"sal_interfaces\")\n std_xml_paths = [os.path.join(interfaces_dir, f\"{n}.xml\") for n in (\"SALGenerics\", \"SALSubsystems\")]\n sal_xml_paths = glob.glob(os.path.join(interfaces_dir, sal_name, \"*.xml\"))\n if not sal_xml_paths:\n raise RuntimeError(f\"Could not find any XML files for SAL component {sal_name}\")\n for xmlpath in std_xml_paths + sal_xml_paths:\n shutil.copy(xmlpath, sal_work_dir)\n\n print(f\"*** Validate and generate {sal_name} libraries ***\")\n for command in (\"validate\", \"html\", \"sal cpp\", \"sal python\"):\n full_cmd = f\"salgenerator {sal_name} {command}\"\n print(full_cmd)\n subprocess.run(full_cmd, check=True, cwd=sal_work_dir, shell=True)\n\n print(f\"*** Move {sal_name} libraries into place ***\")\n for path in src_lib_paths.values():\n if not os.path.isfile(path):\n raise RuntimeError(f\"{path} not generated\")\n for name in src_lib_paths.keys():\n src_path = src_lib_paths[name]\n dest_path = dest_lib_paths[name]\n if os.path.exists(dest_path):\n os.remove(dest_path)\n os.rename(src_path, dest_path)\n\n finally:\n print(f\"*** Cleanup {sal_name} files ***\")\n os.environ[ld_lib_path_name] = initial_ld_lib_path\n xmlfiles = glob.glob(os.path.join(sal_work_dir, \"*.xml\"))\n for f in xmlfiles:\n os.remove(f)\n\n sal_name_dirs = glob.glob(os.path.join(sal_work_dir, f\"{sal_name}*\"))\n for name_dir in sal_name_dirs:\n shutil.rmtree(os.path.join(name_dir), ignore_errors=True)\n\n for subdir in (\"html\", \"idl-templates\", \"include\", \"lib\", \"sql\"):\n shutil.rmtree(os.path.join(sal_work_dir, subdir), ignore_errors=True)\n print(f\"*** Done generating SALPY_{sal_name} ***\")\n","sub_path":"python/lsst/ts/sal/make_salpy_lib.py","file_name":"make_salpy_lib.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437516300","text":"__author__ = 'Matt Wittman'\n\"\"\"From Kaggle website 
https://www.kaggle.com/mcwitt/pkdd-15-predict-taxi-service-trajectory-i/heatmap\n\"\"\"\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#from zipfile import ZipFile\n\nnbin = 1000\nlat_mid = 41.1496100\nlon_mid = -8.6109900\nw = 0.1 # window size\n\n#with ZipFile('../input/train.csv.zip') as zf:\n\ndata = pd.read_csv('../input/train.csv',\n usecols=['POLYLINE'],\n chunksize=10000,\n converters={'POLYLINE': lambda x: json.loads(x)})\n\n# process data in chunks to avoid using too much memory\nz = np.zeros((nbin, nbin))\n\nfor chunk in data:\n\n latlon = np.array([(lat, lon)\n for path in chunk.POLYLINE\n for lon, lat in path if len(path) > 0])\n\n z += np.histogram2d(*latlon.T, bins=nbin,\n range=[[lat_mid - w, lat_mid + w],\n [lon_mid - w, lon_mid + w]])[0]\n\n\nlog_density = np.log(1+z)\nplt.imshow(log_density[::-1,:]) # flip vertically and plot\nplt.axis('off')\nplt.savefig('../working/heatmap.png')\n","sub_path":"Kaggle/PredictTaxiServiceTrajectory/Examples/HeatMap.py","file_name":"HeatMap.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"506133602","text":"#!/usr/bin/env python3\n\nfrom html.parser import HTMLParser\nfrom urllib.parse import urljoin\n\nclass pageCook(HTMLParser):\n url=''\n content=''\n links=[]\n meta={}\n title=''\n state=0\n def handle_starttag(self, tag, attrs):\n if(self.state==0):\n if(tag=='meta' and 'name' in dict(attrs).keys()):\n self.meta[dict(attrs)['name']]=dict(attrs)['content']\n if(tag=='title'):\n self.state=-1\n if(self.state==-1):\n self.state==0\n if(self.state==1):\n if(tag=='script'):\n self.state=2\n try:\n if(tag=='a'):\n self.links.append(urljoin(self.url, dict(attrs)['href']))\n if(tag=='img'):\n self.content+=dict(attrs)['alt']\n except KeyError:\n pass\n # there are always some tags ignoring the rule!\n if(tag=='base'):\n try:\n self.base=dict(attrs)['href']\n except KeyError:\n pass\n # there are always some tags ignoring the rule!\n if(tag=='body'):\n self.state=1\n def handle_endtag(self, tag):\n if(self.state==2 and tag=='script'):\n self.state=1\n if(self.state==1 and tag in ['address', 'article', 'aside',\n 'blockquote', 'br', 'dd', 'dialog', 'div', 'dt', 'h1', 'h2',\n 'h3', 'h4', 'h5', 'h6', 'li', 'tr', 'td', 'th', 'textarea']):\n self.content+='\\n'\n def handle_data(self, data):\n if(self.state==-1):\n self.title=data\n self.state=0\n if(self.state==1):\n self.content+=data\n def getData(self):\n return {'content':self.content, 'links':self.links, 'meta':self.meta, 'title':self.title}\n def init(self, url):\n self.reset()\n self.content=''\n self.links=[]\n self.meta={}\n self.title=''\n self.url=url\n self.base=url\n self.state=0\n\n","sub_path":"modules/pageCook.py","file_name":"pageCook.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575679966","text":"# This file is part of beets.\n# Copyright 2014, Fabrice Laporte.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright 
notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Tests for the 'lyrics' plugin.\"\"\"\n\nimport os\nimport _common\nimport sys\nfrom _common import unittest\nfrom beetsplug import lyrics\nfrom beets.library import Item\nfrom beets.util import confit\n\n\nclass LyricsPluginTest(unittest.TestCase):\n    def setUp(self):\n        \"\"\"Set up configuration\"\"\"\n        lyrics.LyricsPlugin()\n\n    def test_search_artist(self):\n        item = Item(artist='Alice ft. Bob', title='song')\n        self.assertIn(('Alice ft. Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertIn(('Alice', ['song']),\n                      lyrics.search_pairs(item))\n\n        item = Item(artist='Alice feat Bob', title='song')\n        self.assertIn(('Alice feat Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertIn(('Alice', ['song']),\n                      lyrics.search_pairs(item))\n\n        item = Item(artist='Alice feat. Bob', title='song')\n        self.assertIn(('Alice feat. Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertIn(('Alice', ['song']),\n                      lyrics.search_pairs(item))\n\n        item = Item(artist='Alice feats Bob', title='song')\n        self.assertIn(('Alice feats Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertNotIn(('Alice', ['song']),\n                         lyrics.search_pairs(item))\n\n        item = Item(artist='Alice featuring Bob', title='song')\n        self.assertIn(('Alice featuring Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertIn(('Alice', ['song']),\n                      lyrics.search_pairs(item))\n\n        item = Item(artist='Alice & Bob', title='song')\n        self.assertIn(('Alice & Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertIn(('Alice', ['song']),\n                      lyrics.search_pairs(item))\n\n        item = Item(artist='Alice and Bob', title='song')\n        self.assertIn(('Alice and Bob', ['song']),\n                      lyrics.search_pairs(item))\n        self.assertIn(('Alice', ['song']),\n                      lyrics.search_pairs(item))\n\n    def test_search_pairs_multi_titles(self):\n        item = Item(title='1 / 2', artist='A')\n        self.assertIn(('A', ['1 / 2']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['1', '2']), lyrics.search_pairs(item))\n\n        item = Item(title='1/2', artist='A')\n        self.assertIn(('A', ['1/2']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['1', '2']), lyrics.search_pairs(item))\n\n    def test_search_pairs_titles(self):\n        item = Item(title='Song (live)', artist='A')\n        self.assertIn(('A', ['Song']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['Song (live)']), lyrics.search_pairs(item))\n\n        item = Item(title='Song (live) (new)', artist='A')\n        self.assertIn(('A', ['Song']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['Song (live) (new)']), lyrics.search_pairs(item))\n\n        item = Item(title='Song (live (new))', artist='A')\n        self.assertIn(('A', ['Song']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['Song (live (new))']), lyrics.search_pairs(item))\n\n        item = Item(title='Song ft. B', artist='A')\n        self.assertIn(('A', ['Song']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['Song ft. B']), lyrics.search_pairs(item))\n\n        item = Item(title='Song featuring B', artist='A')\n        self.assertIn(('A', ['Song']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['Song featuring B']), lyrics.search_pairs(item))\n\n        item = Item(title='Song and B', artist='A')\n        self.assertNotIn(('A', ['Song']), lyrics.search_pairs(item))\n        self.assertIn(('A', ['Song and B']), lyrics.search_pairs(item))\n\n    def test_remove_credits(self):\n        self.assertEqual(\n            lyrics.remove_credits(\"\"\"It's close to midnight\n                                     Lyrics brought by example.com\"\"\"),\n            \"It's close to midnight\"\n        )\n        self.assertEqual(\n            lyrics.remove_credits(\"\"\"Lyrics brought by example.com\"\"\"),\n            \"\"\n        )\n\n        # don't remove 2nd verse for the only reason it contains 'lyrics' word\n        text = \"\"\"Look at all the shit that i done bought her\n                  See lyrics ain't nothin\n                  if the beat aint crackin\"\"\"\n        self.assertEqual(lyrics.remove_credits(text), text)\n\n    def test_is_lyrics(self):\n        texts = ['LyricsMania.com - Copyright (c) 2013 - All Rights Reserved']\n        texts += [\"\"\"All material found on this site is property\\n\n                     of mywickedsongtext brand\"\"\"]\n        for t in texts:\n            self.assertFalse(lyrics.is_lyrics(t))\n\n    def test_slugify(self):\n        text = u\"http://site.com/\\xe7afe-au_lait(boisson)\"\n        self.assertEqual(lyrics.slugify(text), 'http://site.com/cafe_au_lait')\n\n    def test_scrape_strip_cruft(self):\n        text = u\"\"\"<!--lyrics below-->\n                  &nbsp; one\n                  <br class='myclass'>\n                  two !\n                  <br><br \\>
\n four\"\"\"\n self.assertEqual(lyrics._scrape_strip_cruft(text, True),\n \"one\\ntwo !\\n\\nfour\")\n\n def test_scrape_strip_scripts(self):\n text = u\"\"\"foobaz\"\"\"\n self.assertEqual(lyrics._scrape_strip_cruft(text, True),\n \"foobaz\")\n\n def test_scrape_strip_tag_in_comment(self):\n text = u\"\"\"fooqux\"\"\"\n self.assertEqual(lyrics._scrape_strip_cruft(text, True),\n \"fooqux\")\n\n def test_scrape_merge_paragraphs(self):\n text = u\"one

two

three\"\n self.assertEqual(lyrics._scrape_merge_paragraphs(text),\n \"one\\ntwo\\nthree\")\n\n\nLYRICS_TEXTS = confit.load_yaml(os.path.join(_common.RSRC, 'lyricstext.yaml'))\ndefinfo = dict(artist=u'The Beatles', title=u'Lady Madonna') # default query\n\n\nclass MockFetchUrl(object):\n def __init__(self, pathval='fetched_path'):\n self.pathval = pathval\n self.fetched = None\n\n def __call__(self, url, filename=None):\n self.fetched = url\n url = url.replace('http://', '').replace('www.', '')\n fn = \"\".join(x for x in url if (x.isalnum() or x == '/'))\n fn = fn.split('/')\n fn = os.path.join(_common.RSRC, 'lyrics', fn[0], fn[-1]) + '.txt'\n with open(fn, 'r') as f:\n content = f.read()\n return content\n\n\ndef is_lyrics_content_ok(title, text):\n \"\"\"Compare lyrics text to expected lyrics for given title\"\"\"\n\n setexpected = set(LYRICS_TEXTS[lyrics.slugify(title)].split())\n settext = set(text.split())\n setinter = setexpected.intersection(settext)\n # consider lyrics ok if they share 50% or more with the reference\n if len(setinter):\n ratio = 1.0 * max(len(setexpected), len(settext)) / len(setinter)\n return (ratio > .5 and ratio < 2.5)\n return False\n\n\nclass LyricsGooglePluginTest(unittest.TestCase):\n # Every source entered in default beets google custom search engine\n # must be listed below.\n # Use default query when possible, or override artist and title fields\n # if website don't have lyrics for default query.\n sourcesOk = [\n dict(definfo,\n url=u'http://www.absolutelyrics.com',\n path=u'/lyrics/view/the_beatles/lady_madonna'),\n dict(definfo,\n url=u'http://www.azlyrics.com',\n path=u'/lyrics/beatles/ladymadonna.html'),\n dict(definfo,\n url=u'http://www.chartlyrics.com',\n path=u'/_LsLsZ7P4EK-F-LD4dJgDQ/Lady+Madonna.aspx'),\n dict(definfo,\n url=u'http://www.elyricsworld.com',\n path=u'/lady_madonna_lyrics_beatles.html'),\n dict(definfo,\n url=u'http://www.lacoccinelle.net',\n artist=u'Jacques Brel', title=u\"Amsterdam\",\n path=u'/paroles-officielles/275679.html'),\n dict(definfo,\n url=u'http://www.lyrics007.com',\n path=u'/The%20Beatles%20Lyrics/Lady%20Madonna%20Lyrics.html'),\n dict(definfo,\n url='http://www.lyrics.com/',\n path=u'lady-madonna-lyrics-the-beatles.html'),\n dict(definfo,\n url='http://www.lyricsmania.com/',\n path='lady_madonna_lyrics_the_beatles.html'),\n dict(definfo,\n url=u'http://www.lyrics.net',\n path=u'/lyric/17547916'),\n dict(definfo,\n url=u'http://www.lyricsontop.com',\n artist=u'Amy Winehouse', title=u\"Jazz'n'blues\",\n path=u'/amy-winehouse-songs/jazz-n-blues-lyrics.html'),\n dict(definfo,\n url=u'http://lyrics.wikia.com/',\n path=u'The_Beatles:Lady_Madonna'),\n dict(definfo,\n url='http://www.metrolyrics.com/',\n path='lady-madonna-lyrics-beatles.html'),\n dict(definfo,\n url=u'http://www.onelyrics.net/',\n artist=u'Ben & Ellen Harper', title=u'City of dreams',\n path='ben-ellen-harper-city-of-dreams-lyrics'),\n dict(definfo,\n url=u'http://www.paroles.net/',\n artist=u'Lilly Wood & the prick', title=u\"Hey it's ok\",\n path=u'lilly-wood-the-prick/paroles-hey-it-s-ok'),\n dict(definfo,\n url=u'http://www.reggaelyrics.info',\n artist=u'Beres Hammond', title=u'I could beat myself',\n path=u'/beres-hammond/i-could-beat-myself'),\n dict(definfo,\n url='http://www.releaselyrics.com',\n path=u'/e35f/the-beatles-lady-madonna'),\n dict(definfo,\n url=u'http://www.smartlyrics.com',\n path=u'/Song18148-The-Beatles-Lady-Madonna-lyrics.aspx'),\n dict(definfo,\n url='http://www.songlyrics.com',\n path=u'/the-beatles/lady-madonna-lyrics'),\n 
dict(definfo,\n url=u'http://www.stlyrics.com',\n path=u'/songs/r/richiehavens48961/ladymadonna2069109.html'),\n dict(definfo,\n url=u'http://www.sweetslyrics.com',\n path=u'/761696.The%20Beatles%20-%20Lady%20Madonna.html')]\n\n def setUp(self):\n \"\"\"Set up configuration\"\"\"\n\n try:\n __import__('bs4')\n except ImportError:\n self.skipTest('Beautiful Soup 4 not available')\n if sys.version_info[:3] < (2, 7, 3):\n self.skipTest(\"Python's built-in HTML parser is not good enough\")\n lyrics.LyricsPlugin()\n lyrics.fetch_url = MockFetchUrl()\n\n def test_default_ok(self):\n \"\"\"Test each lyrics engine with the default query\"\"\"\n\n for f in (lyrics.fetch_lyricswiki, lyrics.fetch_lyricscom):\n res = f(definfo['artist'], definfo['title'])\n self.assertTrue(lyrics.is_lyrics(res))\n self.assertTrue(is_lyrics_content_ok(definfo['title'], res))\n\n def test_missing_lyrics(self):\n self.assertFalse(lyrics.is_lyrics(LYRICS_TEXTS['missing_texts']))\n\n def test_sources_ok(self):\n for s in self.sourcesOk:\n url = s['url'] + s['path']\n res = lyrics.scrape_lyrics_from_html(lyrics.fetch_url(url))\n self.assertTrue(lyrics.is_lyrics(res), url)\n self.assertTrue(is_lyrics_content_ok(s['title'], res), url)\n\n def test_is_page_candidate_exact_match(self):\n from bs4 import SoupStrainer, BeautifulSoup\n\n for s in self.sourcesOk:\n url = unicode(s['url'] + s['path'])\n html = lyrics.fetch_url(url)\n soup = BeautifulSoup(html, \"html.parser\",\n parse_only=SoupStrainer('title'))\n self.assertEqual(lyrics.is_page_candidate(url, soup.title.string,\n s['title'], s['artist']),\n True, url)\n\n def test_is_page_candidate_fuzzy_match(self):\n url = u'http://www.example.com/lazy_madonna_beatles'\n urlTitle = u'example.com | lazy madonna lyrics by the beatles'\n title = u'Lady Madonna'\n artist = u'The Beatles'\n # very small diffs (typo) are ok\n self.assertEqual(lyrics.is_page_candidate(url, urlTitle, title,\n artist), True, url)\n # reject different title\n urlTitle = u'example.com | busy madonna lyrics by the beatles'\n self.assertEqual(lyrics.is_page_candidate(url, urlTitle, title,\n artist), False, url)\n # (title, artist) != (artist, title)\n urlTitle = u'example.com | the beatles lyrics by Lazy Madonna'\n self.assertEqual(lyrics.is_page_candidate(url, urlTitle, title,\n artist), False, url)\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromName(__name__)\n\nif __name__ == '__main__':\n unittest.main(defaultTest='suite')\n","sub_path":"test/test_lyrics.py","file_name":"test_lyrics.py","file_ext":"py","file_size_in_byte":13568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70651803","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport socket\nimport json\nfrom ddGoodsList_GLSpider import getSku\n\nsocket.setdefaulttimeout(20)\n\nuser_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\nheaders = {\"user_agent\":user_agent}\n\nurl = 'http://category.dangdang.com/cp01.54.06.00.00.00.html'\n\nbkDict = {}\n\nfor i in range(2,21):\n\tprint(i)\n\tpageUrl = url[:29]+'pg'+str(i)+'-'+url[29:]\n\tp = getSku(pageUrl)\n\tp.parse()\n\tgs = p.getSkuUrl()\n\tbkDict['page'+str(i)] = gs\n\tprint(gs)\n\tprint('\\n')\n\t# p.savePageUrl()\n\njsObj = json.dumps(bkDict,ensure_ascii=False)\nfileObj = 
open('bkDict.json','w')\nfileObj.write(jsObj)\nfileObj.close()","sub_path":"ddSpider/ddSpider.py","file_name":"ddSpider.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137521413","text":"from Stack import Stack\n\nclass SQLink(Stack):\n\n class Node:\n def __init__(self, data, next):\n self.data = data\n self.next = next\n\n def __init__(self):\n self.size = 0\n self.tail = None\n\n def push(self, ele):\n new_node = self.Node(ele, None)\n if self.is_empty():\n new_node.next = new_node\n else:\n new_node.next = self.tail.next\n self.tail.next = new_node\n\n self.tail = new_node\n self.size += 1\n\n\n def is_empty(self):\n return self.size == 0\n\n # 첫번째 원소 삭제\n def pop(self):\n old_head = self.tail.next\n ele = old_head.data\n if self.size > 1:\n self.tail.next = old_head.next\n old_head = None\n else:\n self.tail = None\n\n self.size -= 1\n return ele\n\n def top(self):\n return self.tail.next.data\n\n\n\ns = SQLink()\ns.push(1)\n\nprint(s.top())\nprint(s.is_empty())\ns.push(2)\n\nprint(s.pop())\nprint(s.top())\nprint(s.pop())\nprint(s.is_empty())\n","sub_path":"Stack_Queue_Link.py","file_name":"Stack_Queue_Link.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187501794","text":"# Written by Eric Martin for COMP9021\n\n\n'''\nDefines Monomial and Polynomial classes.\nA polynomial is built from a string that represents a polynomial,\nthat is, a sum or difference of monomials.\n- The leading monomial can be either an integer,\n or an integer followed by x,\n or an integer followed by x\\^ followed by a nonnegative integer.\n- The other monomials can be either a nonnegative integer,\n or an integer followed by x,\n or an integer followed by x\\^ followed by a nonnegative integer.\nSpaces can be inserted anywhere in the string.\n'''\n\n\nimport re\n\n\nclass PolynomialError(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass Monomial:\n def __init__(self, coefficient = 0, degree = 0):\n self.coefficient = coefficient\n self.degree = degree\n self.next_monomial = None\n\n\nclass Polynomial:\n def __init__(self, input_polynomial = None):\n '''\n >>> Polynomial('1 2')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> Polynomial('-')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> Polynomial('+0')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> Polynomial('0x^-1')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> Polynomial('2x + +2')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> Polynomial('2x + -2')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> Polynomial('2x - +2')\n Traceback (most recent call last):\n ...\n PolynomialError: Incorrect input\n >>> print(Polynomial('0'))\n 0\n >>> print(Polynomial('0x'))\n 0\n >>> print(Polynomial('0x^0'))\n 0\n >>> print(Polynomial('0x^5'))\n 0\n >>> print(Polynomial('x'))\n x\n >>> print(Polynomial('1x'))\n x\n >>> print(Polynomial('1x^1'))\n x\n >>> print(Polynomial('2'))\n 2\n >>> print(Polynomial('2x^0'))\n 2\n >>> print(Polynomial('1 + 2-3 +10'))\n 10\n >>> print(Polynomial('x + x - 2x -3x^1 + 3x'))\n 0\n >>> print(Polynomial('x + 2 + x - x -3x^1 + 3x + 5x^0'))\n x + 7\n >>> print(Polynomial('-2x + 7x^3 +x - 0 + 2 -x^3 
+ x^23 - 12x^8 + 45 x ^ 6 -x^47'))\n -x^47 + x^23 - 12x^8 + 45x^6 + 6x^3 - x + 2\n '''\n if input_polynomial is None:\n self.head = None\n return\n if re.search('\\d\\s+\\d', input_polynomial):\n raise PolynomialError('Incorrect input')\n input_polynomial = input_polynomial.replace(' ', '')\n if not input_polynomial:\n raise PolynomialError('No input')\n if input_polynomial[0] == '+':\n raise PolynomialError('Incorrect input')\n if input_polynomial[0] == '-' and len(input_polynomial) > 1 and input_polynomial[1] == '0':\n raise PolynomialError('Incorrect input')\n for i in range(1, len(input_polynomial)):\n if (input_polynomial[i] in '+-' and\n not input_polynomial[i - 1].isdigit() and\n input_polynomial[i - 1] != 'x'):\n raise PolynomialError('Incorrect input')\n input_polynomial = input_polynomial.replace('-', '+-').split('+')\n # For the case where the leading factor is negative.\n if not input_polynomial[0]:\n input_polynomial = input_polynomial[1: ]\n monomial = self._get_monomial(input_polynomial[0])\n if not monomial:\n raise PolynomialError('Incorrect input')\n self.head = monomial\n for input_monomial in input_polynomial[1: ]:\n monomial = self._get_monomial(input_monomial)\n if not monomial:\n raise PolynomialError('Incorrect input')\n if not monomial.coefficient:\n continue\n self._add_monomial(monomial)\n\n def _copy(self):\n copy = Polynomial()\n if not self.head:\n return copy\n copy.head = Monomial(self.head.coefficient, self.head.degree)\n node = self.head.next_monomial\n node_copy = copy.head\n while node:\n node_copy.next_monomial = Monomial(node.coefficient, node.degree)\n node = node.next_monomial\n node_copy = node_copy.next_monomial\n return copy\n \n def _get_monomial(self, input_monomial):\n monomial_parts = input_monomial.split('x')\n if len(monomial_parts) > 2:\n return False\n if len(monomial_parts) == 1:\n try:\n coefficient = int(monomial_parts[0])\n return Monomial(coefficient, 0)\n except:\n return False\n # The case of 'x'.\n if not monomial_parts[0] and not monomial_parts[1]:\n return Monomial(1, 1)\n if not monomial_parts[0]:\n coefficient = 1\n elif monomial_parts[0] == '-':\n coefficient = -1\n else:\n try:\n coefficient = int(monomial_parts[0])\n except:\n return False\n # Needed for the leading monomial.\n if coefficient == 0:\n degree = 0\n else:\n if not monomial_parts[1]:\n degree = 1\n else:\n if monomial_parts[1][0] != '^':\n return False\n try:\n degree = int(monomial_parts[1][1: ])\n if degree < 0:\n return False\n except:\n return False \n return Monomial(coefficient, degree)\n\n def _add_monomial(self, monomial):\n if not self.head:\n self.head = monomial\n return\n if monomial.degree > self.head.degree:\n monomial.next_monomial = self.head\n self.head = monomial\n return\n if monomial.degree == self.head.degree:\n self._add_monomial_of_same_degree(None, self.head, monomial)\n return \n node = self.head\n while node.next_monomial and monomial.degree < node.next_monomial.degree:\n node = node.next_monomial\n if not node.next_monomial:\n node.next_monomial = monomial\n elif monomial.degree == node.next_monomial.degree:\n self._add_monomial_of_same_degree(node, node.next_monomial, monomial)\n else:\n monomial.next_monomial = node.next_monomial\n node.next_monomial = monomial\n \n def _add_monomial_of_same_degree(self, parent, node, monomial):\n if node.coefficient + monomial.coefficient:\n node.coefficient += monomial.coefficient\n elif not parent:\n if not self.head.next_monomial:\n self.head = Monomial()\n else:\n self.head = 
self.head.next_monomial\n else:\n parent.next_monomial = parent.next_monomial.next_monomial\n\n def _multiply_monomial(self, monomial):\n if not monomial.coefficient:\n self.head.coefficient = 0\n self.head.degree = 1\n self.head.next_monomial = None\n return\n node = self.head\n while node:\n node.coefficient *= monomial.coefficient\n node.degree += monomial.degree\n node = node.next_monomial\n \n def __str__(self):\n if not self.head:\n return ''\n if not self.head.degree:\n return str(self.head.coefficient)\n if self.head.coefficient == 1:\n output = ''\n elif self.head.coefficient == -1:\n output = '-'\n else:\n output = str(self.head.coefficient)\n output += 'x'\n if self.head.degree > 1:\n output += '^'\n output += str(self.head.degree)\n node = self.head\n while node.next_monomial:\n node = node.next_monomial\n if node.coefficient > 0:\n output += ' + '\n else:\n output += ' - '\n if abs(node.coefficient) != 1 or node.degree == 0:\n output += str(abs(node.coefficient))\n if node.degree:\n output += 'x'\n if node.degree > 1: \n output += '^'\n output += str(node.degree)\n return output\n \n def __add__(self, polynomial):\n '''\n >>> poly_1 = Polynomial('2x^5 - 71x^3 + 8x^2 - 93x^4 -6x + 192')\n >>> poly_2 = Polynomial('192 -71x^3 + 8x^2 + 2x^5 -6x - 93x^4')\n >>> print(poly_1 + poly_2)\n 4x^5 - 186x^4 - 142x^3 + 16x^2 - 12x + 384\n >>> print(poly_1)\n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n >>> print(poly_2)\n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n '''\n copy = self._copy()\n node = polynomial.head\n while node:\n copy._add_monomial(Monomial(node.coefficient, node.degree))\n node = node.next_monomial\n return copy\n\n def __mul__(self, polynomial):\n '''\n >>> poly = Polynomial('2x^5 - 71x^3 + 8x^2 - 93x^4 -6x + 192')\n >>> print(poly * poly)\n 4x^10 - 372x^9 + 8365x^8 + 13238x^7 + 3529x^6 + 748x^5 - 34796x^4 - 27360x^3 + 3108x^2 \\\n- 2304x + 36864\n >>> print(poly)\n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n >>> poly_1 = Polynomial('-11x^4 + 3x^2 + 7x + 9')\n >>> poly_2 = Polynomial('5x^2 -8x - 6')\n >>> print(poly_1 * poly_2)\n -55x^6 + 88x^5 + 81x^4 + 11x^3 - 29x^2 - 114x - 54\n >>> print(poly_1)\n -11x^4 + 3x^2 + 7x + 9\n >>> print(poly_2)\n 5x^2 - 8x - 6\n >>> poly_1 = Polynomial('-2x + 7x^3 +x - 0 + 2 -x^3 + x^23 - 12x^8 + 45 x ^ 6 -x^47')\n >>> poly_2 = Polynomial('2x^5 - 71x^3 + 8x^2 - 93x^4 -6x + 192')\n >>> print(poly_1 * poly_2)\n -2x^52 + 93x^51 + 71x^50 - 8x^49 + 6x^48 - 192x^47 + 2x^28 - 93x^27 - 71x^26 + 8x^25 - 6x^24 \\\n+ 192x^23 - 24x^13 + 1116x^12 + 942x^11 - 4281x^10 - 3123x^9 - 1932x^8 - 828x^7 + 8212x^6 + 145x^5 \\\n- 151x^4 + 1002x^3 + 22x^2 - 204x + 384\n >>> print(poly_1)\n -x^47 + x^23 - 12x^8 + 45x^6 + 6x^3 - x + 2\n >>> print(poly_2)\n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n '''\n product = Polynomial()\n node = polynomial.head\n while node:\n product_by_monomial = self._copy()\n product_by_monomial._multiply_monomial(Monomial(node.coefficient, node.degree))\n second_node = product_by_monomial.head\n while second_node:\n product._add_monomial(Monomial(second_node.coefficient, second_node.degree))\n second_node = second_node.next_monomial\n node = node.next_monomial\n return product\n\n def __sub__(self, polynomial):\n '''\n >>> poly = Polynomial('2x^5 - 71x^3 + 8x^2 - 93x^4 -6x + 192')\n >>> print(poly - poly)\n 0\n >>> print(poly) \n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n '''\n return self.__add__(polynomial.__mul__(Polynomial('-1')))\n\n def __truediv__(self, polynomial):\n '''\n >>> poly_1 = Polynomial('2x^5 - 71x^3 + 8x^2 - 93x^4 -6x + 192')\n 
>>> poly_2 = Polynomial('4x^5 - 186x^4 - 142x^3 + 16x^2 - 12x + 384')\n >>> print(poly_1 / poly_2)\n None\n >>> print(poly_1)\n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n >>> print(poly_2)\n 4x^5 - 186x^4 - 142x^3 + 16x^2 - 12x + 384\n >>> poly_1 = Polynomial('-55x^6 + 88x^5 + 81x^4 + 11x^3 - 29x^2 - 114x - 54')\n >>> poly_2 = Polynomial('-11x^4 + 3x^2 + 7x + 9')\n >>> poly_3 = Polynomial('5x^2 -8x - 6')\n >>> print(poly_1 / poly_2)\n 5x^2 - 8x - 6\n >>> print(poly_1 / poly_3)\n -11x^4 + 3x^2 + 7x + 9\n >>> print(poly_1)\n -55x^6 + 88x^5 + 81x^4 + 11x^3 - 29x^2 - 114x - 54\n >>> print(poly_2)\n -11x^4 + 3x^2 + 7x + 9\n >>> poly_1 = Polynomial('-2x + 7x^3 +x - 0 + 2 -x^3 + x^23 - 12x^8 + 45 x ^ 6 -x^47')\n >>> poly_2 = Polynomial('2x^5 - 71x^3 + 8x^2 - 93x^4 -6x + 192')\n >>> poly_1 = Polynomial('-2x^52 + 93x^51 + 71x^50 - 8x^49 + 6x^48 - 192x^47 + 2x^28 - 93x^27 - \\\n71x^26 + 8x^25 - 6x^24 + 192x^23 - 24x^13 + 1116x^12 + 942x^11 - 4281x^10 - 3123x^9 - 1932x^8 - 828x^7 \\\n+ 8212x^6 + 145x^5 - 151x^4 + 1002x^3 + 22x^2 - 204x + 384')\n >>> poly_2 = Polynomial('-x^47 + x^23 - 12x^8 + 45x^6 + 6x^3 - x + 2')\n >>> poly_3 = Polynomial('2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192')\n >>> print(poly_1 / poly_2)\n 2x^5 - 93x^4 - 71x^3 + 8x^2 - 6x + 192\n >>> print(poly_1 / poly_3)\n -x^47 + x^23 - 12x^8 + 45x^6 + 6x^3 - x + 2\n '''\n quotient = Polynomial()\n copy = self._copy()\n while copy.head.coefficient and copy.head.degree:\n if copy.head.coefficient % polynomial.head.coefficient:\n return\n if copy.head.degree < polynomial.head.degree:\n return\n polynomial_copy = polynomial._copy()\n coefficient = copy.head.coefficient // polynomial.head.coefficient\n degree = copy.head.degree - polynomial.head.degree\n polynomial_copy._multiply_monomial(Monomial(-coefficient, degree))\n copy = copy.__add__(polynomial_copy)\n quotient._add_monomial(Monomial(coefficient, degree)) \n return quotient\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod() \n\n","sub_path":"lab/Lab_9_solutions/polynomial.py","file_name":"polynomial.py","file_ext":"py","file_size_in_byte":13781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"249349129","text":"# coding: utf-8\nimport discord\nfrom discord.ext import commands\nimport setting\n\nTOKEN = setting.token\nPREFIX='$'\nSCH_FILE = '/usr/share/discord_bot/schedule.dat'\nVOTE_FILE = '/usr/share/discord_bot/vote.dat'\nYNVOTE_FILE = '/usr/share/discord_bot/ynvote.dat'\n\n\nclient = discord.Client()\n\ndef receive_message(prefix, message):\n if not message.startswith(prefix):\n return message\n message = message[len(prefix):]\n return receive_message(' ', message)\n\ndef write_schedule(M: int, D:int, h:int, m:int, message:str):\n # 現在,上書きをしている.追記にして複数予定に対応したい.\n if (1 <= M and M <= 12):\n if (1 <= M and M <= 31):\n if (0 <= h and h <= 23):\n if (0 <= m and m <= 59):\n with open(SCH_FILE, \"w\") as f:\n f.write(\"{0:02},{1:02},{2:02},{3:02},{4}\".format(M,D,h,m,message))\n\ndef permitted(user : discord.member.Member):\n allow_users = ['mkakh#3874']\n user = \"{}\".format(user)\n return (user in allow_users)\n\ndef com_hello(message):\n msg = \"{0.author.mention} :wave: Hello!\".format(message)\n return msg\n\ndef com_set(message):\n message.content = receive_message('set', message.content).split(' ')\n sch_content = message.content[4]\n MDhm = list(map(int, message.content[:-1]))\n write_schedule(*MDhm, sch_content)\n msg = '{0.author.mention} {1[0]:02}/{1[1]:02} {1[2]:02}:{1[3]:02}に{2}が設定されました.'.format(message, 
MDhm, sch_content)\n return msg\n\ndef com_check(message):\n with open(SCH_FILE, \"r\") as f:\n raw_date = f.read()\n date = raw_date.split(\",\")\n msg = \"{}/{} {}:{}に{}が設定されています.\".format(*date)\n return msg\n\ndef com_bash(message):\n message.content = receive_message('bash', message.content)\n message.content = \"'\" + message.content + \"'\"\n import subprocess, shlex\n args = shlex.split(message.content)\n p = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout_data, stderr_data = p.communicate()\n msg = stdout_data.decode('utf-8') + stderr_data.decode('utf-8')\n return msg\n\nasync def com_vote_start(message):\n ids = []\n for msg in ['月', '火', '水', '木', '金', '土', '日']:\n new_message = await client.send_message(message.channel, msg)\n await client.add_reaction(new_message, emoji='👍')\n ids = ids + [new_message.id]\n with open(VOTE_FILE, \"w\") as f:\n f.write(\"{}\".format(','.join(ids)))\n\nasync def com_vote_end(message):\n with open(VOTE_FILE, \"r\") as f:\n ids = f.read().split(\",\")\n strs = ['月', '火', '水', '木', '金', '土', '日']\n msg = ''\n for i in range(len(strs)):\n get_message = await client.get_message(message.channel, ids[i])\n votes = sum({react.emoji : react.count for react in get_message.reactions}.values())-1\n msg = msg + strs[i] + ': ' + str(votes) + '\\n'\n return msg\n\nasync def com_ynvote_start(message):\n ids = []\n for msg in ['YES', 'NO']:\n new_message = await client.send_message(message.channel, msg)\n await client.add_reaction(new_message, emoji='👍')\n ids = ids + [new_message.id]\n with open(YNVOTE_FILE, \"w\") as f:\n f.write(\"{}\".format(','.join(ids)))\n\nasync def com_ynvote_end(message):\n with open(YNVOTE_FILE, \"r\") as f:\n ids = f.read().split(\",\")\n strs = ['YES', 'NO']\n msg = ''\n for i in range(len(strs)):\n get_message = await client.get_message(message.channel, ids[i])\n votes = sum({react.emoji : react.count for react in get_message.reactions}.values())-1\n msg = msg + strs[i] + ': ' + str(votes) + '\\n'\n return msg\n\ndef com_help(message):\n return \"**一般権限**\\n$hello: 挨拶\\n$check: 日程確認\\n$help: ヘルプ\\n\\n**管理者権限**\\n$set: 日程セット\\n$bash: Bash\\n$vote_start: 曜日投票開始\\n$vote_end: 曜日投票集計\\n$ynvote_start: YES/NO投票開始\\n$ynvote_end: YES/NO投票集計\"\n \n\n@client.event\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n if message.content.startswith(PREFIX):\n message.content = receive_message(PREFIX, message.content)\n if message.content.startswith('hello'):\n msg = com_hello(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('check'):\n msg = com_check(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('help'):\n msg = com_help(message)\n await client.send_message(message.channel, msg)\n elif permitted(message.author):\n if message.content.startswith('set'):\n msg = com_set(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('bash'):\n msg = com_bash(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('vote_start'):\n await com_vote_start(message)\n elif message.content.startswith('vote_end'):\n msg = await com_vote_end(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('ynvote_start'):\n await com_ynvote_start(message)\n elif message.content.startswith('ynvote_end'):\n msg = await com_ynvote_end(message)\n await 
client.send_message(message.channel, msg)\n else:\n await client.send_message(message.channel, \"???\")\n else:\n await client.send_message(message.channel, \"???\")\n \n\n\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclient.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103553014","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Functions to save data in a Xspec-readable format.\"\"\"\n\nimport subprocess as sp\nimport numpy as np\nfrom astropy import log\nfrom .io import get_file_type\nfrom .io import get_file_extension\n\n\ndef save_as_xspec(fname, direct_save=False, save_lags=True):\n \"\"\"Save frequency spectra in a format readable to FTOOLS and Xspec.\n\n Parameters\n ----------\n fname : str\n Input HENDRICS frequency spectrum file name\n direct_save : bool\n If True, also call `flx2xsp` to produce the output .pha and .rsp files.\n If False (default), flx2xsp has to be called from the user\n\n Notes\n -----\n Uses method described here:\n https://asd.gsfc.nasa.gov/XSPECwiki/fitting_timing_power_spectra_in_XSPEC\n \"\"\"\n ftype, contents = get_file_type(fname)\n\n outroot = fname.replace(get_file_extension(fname), \"\")\n outname = outroot + \"_xsp.dat\"\n outroot_lags = outroot + \"_lags\"\n outname_lags = outroot_lags + \"_xsp.dat\"\n\n if ftype.endswith(\"pds\"):\n flo = contents.freq - contents.df / 2\n fhi = contents.freq + contents.df / 2\n power = contents.power.real * contents.df\n power_err = contents.power_err.real * contents.df\n else:\n raise ValueError(\"Data type not supported for Xspec\")\n\n np.savetxt(outname, np.transpose([flo, fhi, power, power_err]))\n if direct_save:\n sp.check_call(\"flx2xsp {0} {1}.pha {1}.rsp\".format(outname, outroot).split())\n\n if save_lags and ftype == \"cpds\":\n lags, lags_err = contents.time_lag()\n np.savetxt(\n outname_lags,\n np.transpose([flo, fhi, lags * contents.df, lags_err * contents.df]),\n )\n if direct_save:\n sp.check_call(\n \"flx2xsp {0} {1}.pha {1}.rsp\".format(outname_lags, outroot_lags).split()\n )\n\n\ndef main(args=None):\n \"\"\"Main function called by the `HEN2xspec` command line script.\"\"\"\n import argparse\n from .base import _add_default_args, check_negative_numbers_in_args\n\n description = (\n \"Save a frequency spectrum in a qdp file that can be \"\n \"read by flx2xsp and produce a XSpec-compatible spectrum\"\n \"file\"\n )\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\"files\", help=\"List of files\", nargs=\"+\")\n\n parser.add_argument(\n \"--flx2xsp\",\n help=\"Also call flx2xsp at the end\",\n default=False,\n action=\"store_true\",\n )\n _add_default_args(parser, [\"loglevel\", \"debug\"])\n\n args = check_negative_numbers_in_args(args)\n args = parser.parse_args(args)\n files = args.files\n if args.debug:\n args.loglevel = \"DEBUG\"\n\n log.setLevel(args.loglevel)\n with log.log_to_file(\"HEN2Xspec.log\"):\n for f in files:\n save_as_xspec(f, direct_save=args.flx2xsp)\n","sub_path":"hendrics/save_as_xspec.py","file_name":"save_as_xspec.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"565510010","text":"import sys\nimport os\nimport time\nfrom pprint import pprint\nimport telepot\nfrom 
telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton\nfrom redacted import BOT_TOKEN\nimport database as db\n\nsys.path.append(\"python-doodle\") # python-doodle is located in a git submodule\nimport doodle\n\nshow_calendar_link = True\nif show_calendar_link:\n import dropbox\n from dropbox.exceptions import ApiError\n import icalendar\n from redacted import DROPBOX_TOKEN\n\n\ndef chat(msg: dict):\n \"\"\"on chat message\"\"\"\n content_type, chat_type, chat_id = telepot.glance(msg)\n user_to_db(user_id=msg['from']['id'], chat_id=chat_id, username=msg['from'].get('username'),\n first_name=msg['from'].get('first_name'), last_name=msg['from'].get('last_name'))\n if content_type == 'text':\n pprint(msg)\n if msg['text'].lower().startswith('/doodle'):\n command(msg)\n return\n\n urls = get_urls(msg)\n for url in urls:\n if 'doodle' in url:\n doodle_to_db(url, chat_id)\n bot.sendMessage(chat_id, \"Doodle saved!\")\n return\n\n\ndef get_urls(msg: dict) -> list:\n \"\"\"Extract urls from msg\"\"\"\n urls = []\n entities = msg.get('entities')\n if not entities:\n return urls\n url_lengths = [l['length'] for l in entities if l['type'] == 'url']\n words = msg['text'].split()\n urls = [w for w in words if '.' in w and len(w) in url_lengths]\n return urls\n\n\ndef doodle_to_db(url: str, chat_id: int, ical_url: str = None):\n \"\"\"Add Doodle to db\"\"\"\n session = db.Session()\n entry = session.query(db.Doodle).filter_by(chat_id=chat_id).first()\n if entry:\n entry.url = url\n if ical_url:\n entry.ical_url = ical_url\n else:\n entry = db.Doodle(chat_id=chat_id, url=url, ical_url=ical_url)\n session.add(entry)\n session.commit()\n\n\ndef user_to_db(user_id, chat_id, username=None, first_name=None, last_name=None):\n session = db.Session()\n entry = db.User(user_id=user_id, username=username, first_name=first_name,\n last_name=last_name)\n entry.chats.append(db.Chat(chat_id=chat_id))\n session.merge(entry)\n session.commit()\n\n\ndef get_ical_url_from_db(chat_id: int) -> str:\n session = db.Session()\n doodle_entry = session.query(db.Doodle).filter_by(chat_id=chat_id).first()\n session.close()\n if doodle_entry.ical_url:\n return doodle_entry.ical_url\n return \"\"\n\n\ndef command(msg):\n \"\"\"Doodle that Doodle\"\"\"\n content_type, chat_type, chat_id = telepot.glance(msg)\n session = db.Session()\n doodle_entry = session.query(db.Doodle).filter_by(chat_id=chat_id).first()\n if not doodle_entry:\n bot.sendMessage(chat_id, \"No doodle saved\")\n return\n\n chat_entry = session.query(db.Chat).filter_by(chat_id=chat_id).first()\n poll = doodle.Doodle(doodle_entry.url)\n message = DoodleMessage(poll=poll, chat_entry=chat_entry).get_message()\n\n reply_markup = None\n if not poll.is_open:\n if show_calendar_link:\n ical_url = get_ical_url_from_db(chat_id=chat_id)\n if not ical_url:\n ical_url = DropBoxUploader(poll).get_url()\n doodle_to_db(url=doodle_entry.url, chat_id=doodle_entry.chat_id, ical_url=ical_url)\n\n reply_markup = InlineKeyboardMarkup(inline_keyboard=[\n [dict(text='Add to calendar', url=ical_url)]\n ])\n bot.sendMessage(chat_id, message, parse_mode=\"Markdown\", disable_web_page_preview=True, reply_markup=reply_markup)\n session.close()\n\nclass DropBoxUploader(object):\n dbx = dropbox.Dropbox(DROPBOX_TOKEN)\n\n def __init__(self, poll):\n assert not poll.is_open\n self.event_times = poll.final\n self.title = poll.title\n self.location = poll.location\n self.dropbox_folder = \"/doodlebot/\"\n self.filename = f\".{int(time.time())}.ics\"\n self.dropbox_path = 
self.dropbox_folder+self.filename[1:]\n self.dl_url = \"\"\n self.direct_dl_url = \"\"\n\n self.upload()\n\n\n def create_ical(self):\n cal = icalendar.Calendar()\n for start_end in self.event_times:\n event = icalendar.Event()\n event.add('summary', self.title)\n event.add('dtstart', start_end[0])\n event.add('dtend', start_end[1])\n event.add('location', self.location)\n cal.add_component(event)\n return cal.to_ical()\n\n def upload(self):\n with open(self.filename, \"wb+\") as f:\n f.write(self.create_ical())\n\n with open(self.filename, 'rb') as f:\n self.dbx.files_upload(f.read(), path=self.dropbox_path)\n\n # remove original file\n os.remove(self.filename)\n\n def get_url(self):\n try:\n self.dl_url = self.dbx.sharing_create_shared_link_with_settings(self.dropbox_path).url\n except ApiError as e:\n error_string = str(e)\n url_and_more = error_string.split('https://')[1]\n self.dl_url = 'https://' + url_and_more.split(\"',\")[0]\n self.direct_dl_url = self.dl_url.replace('?dl=0', '?dl=1')\n return self.direct_dl_url\n\n\n\nclass DoodleMessage(object):\n def __init__(self, poll, chat_entry, ical_url=None):\n self.poll: doodle.Doodle = poll\n self.chat_entry: db.Chat = chat_entry\n self.ical_url = ical_url\n self.chat_members = {u.user_id: u for u in chat_entry.users}\n self.title: str = f\"*{poll.title}*\"\n self.participants: str = \"\\n\\U00002611\".join([''] + poll.participants).strip()\n self.final_dates: str = '\\n'.join([d[0].strftime('%A %d %B %H:%M').replace(\"00:00\", \"\") for d in poll.final])\n self.missing: str = \"\\n\\U00002610\".join([''] + self.get_missing()).strip()\n\n\n def get_message(self):\n if not self.poll.is_open:\n lines = [self.title]\n if self.ical_url:\n lines.append(f\"[add to calendar]({self.ical_url})\")\n lines.append(self.final_dates)\n return \"\\n\".join(lines)\n return f\"{self.title}\\n{self.poll.url}\\n{self.participants}\\n{self.missing}\"\n\n def get_missing(self) -> list:\n \"\"\"lists chat_members who did not participate in the doodle\"\"\"\n chat_members = self.chat_members.copy()\n participating = self.poll.participants\n\n print(\"chat_members:\")\n for chat_id, u in chat_members.items():\n print(u.first_name)\n\n for doodler in participating:\n chat_id = self.identify(doodler)\n try:\n chat_members.pop(chat_id)\n except KeyError:\n pass\n names = []\n for user in chat_members.values():\n mention_name = f\"@{user.username}\" if user.username else f\"[{user.first_name}](tg://user?id={user.user_id})\"\n names.append(mention_name)\n return names\n\n def identify(self, name) -> int:\n \"\"\"Returns the chat_id of a user\"\"\"\n score = float(\"inf\")\n most_likely = None\n for chat_id, user in self.chat_members.items(): # Check name against different aliases\n user_names = [user.first_name] # john\n if user.username:\n user_names.append(user.username) # @johndoe\n if user.last_name:\n user_names.extend([user.last_name, # doe\n user.first_name + user.last_name, # johndoe\n user.first_name[0] + user.last_name, # jdoe\n user.first_name + user.last_name[0]]) # johnd\n for user_name in user_names:\n edit_distance = self.levenshtein(name, user_name)\n if edit_distance == 0: # perfect match\n return chat_id\n if edit_distance < score:\n score = edit_distance\n most_likely = chat_id\n return most_likely\n\n def levenshtein(self, a, b):\n s1, s2 = a.lower(), b.lower()\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n 
distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]\n\n\nif __name__ == '__main__':\n bot = telepot.Bot(BOT_TOKEN)\n bot.message_loop({'chat': chat})\n print('Listening...')\n while 1:\n time.sleep(10)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"321144543","text":"#Add to number\nnum1=5\nnum2=10\n\nsum = num1 + num2\nprint(\"Sum of {0} and {1} is {2}\".format(num1,num2,sum))\n\n#Adding two number provided by user input\nnumber1 = input(\"First number: \")\nnumber2 = input(\"Second number: \")\n\nsum = float(number1) + float(number2)\nprint(\"The sum of {0} and {1} is {2}\".format(number1,number2,sum))\n\n\n#Add two binary numbers \na = \"1101\"\nb = \"100\"\n\nmax_len = max(len(a), len(b))\na = a.zfill(max_len)\nb = b.zfill(max_len)\n\nresult = ''\ncarry = 0\nfor i in range (max_len-1,-1,-1):\n r = carry\n r += 1 if a[i] == '1' else 0\n r += 1 if b[i] == '1' else 0\n result = ('1'if r%2 == 1 else '0') + result\n \n carry = 0 if r<2 else 1\n \nif carry != 0:\n result = '1' + result \nprint(result.zfill(max_len)) \n\n\n#Add two binary numbers with functions\na = \"1101\"\nb = \"100\"\n\nsum = bin(int(a,2) + int(b,2))\n\nprint(sum[2:])\n\n#Add two octal numbers\n\na = \"123\"\nb = \"456\"\n\nsum = oct(int(a,8) + int(b,8))\nprint(sum[2:])\n\n#Add to hexadecimal numbers\n\na = \"01B\"\nb = \"378\"\n\nsum = hex(int(a, 16) + int(b, 16))\n\nprint(sum[2:])\n \n \n \n \n","sub_path":"addiction.py","file_name":"addiction.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"523707081","text":"import time\nimport warnings\nfrom typing import Union\n\nimport jwt\n\n\ndef _warn_ac():\n warnings.warn(\n \"Starting with 1.0, specifying claims will no longer be \"\n \"supported. 
Authorizations should be configured on the AC \"\n \"instead\",\n DeprecationWarning,\n )\n\n\nclass ClientAuth:\n \"\"\"\n Auth for the ZDS client, using JWT.\n\n Usage:\n\n >>> auth = ClientAuth(\n client_id='zrc',\n secret='my-secret',\n user_id='my-id',\n user_representation='my-name'\n )\n >>> auth.credentials()\n {\n 'Authorization': '..'\n }\n >>> requests.get(url, **auth.credentials())\n \"\"\"\n\n def __init__(\n self,\n client_id: str,\n secret: str,\n user_id: Union[str, None] = None,\n user_representation: Union[str, None] = None,\n **claims\n ):\n self.client_id = client_id\n\n if secret is None:\n warnings.warn(\n \"`None` secret received - casting to empty string\", UserWarning\n )\n secret = \"\"\n\n self.secret = secret\n\n if claims:\n _warn_ac()\n self.claims = claims\n\n self.user_id = user_id or \"\"\n self.user_representation = user_representation or \"\"\n\n def set_claims(self, **kwargs) -> None:\n \"\"\"\n Set the claims for the client.\n \"\"\"\n _warn_ac()\n claims = self.claims.copy()\n claims.update(**kwargs)\n\n # invalidate cache if needed (claims have changed)\n if hasattr(self, \"_credentials\") and claims != self.claims:\n del self._credentials\n\n self.claims = claims\n\n def credentials(self) -> dict:\n \"\"\"\n Return the HTTP Header containing the credentials.\n \"\"\"\n if not hasattr(self, \"_credentials\"):\n payload = {\n # standard claims\n \"iss\": self.client_id,\n \"iat\": int(time.time()),\n # custom claims\n \"client_id\": self.client_id,\n \"user_id\": self.user_id,\n \"user_representation\": self.user_representation,\n }\n if self.claims:\n payload[\"zds\"] = self.claims\n\n # TODO: drop custom header in 1.0\n headers = {\"client_identifier\": self.client_id}\n encoded = jwt.encode(\n payload, self.secret, headers=headers, algorithm=\"HS256\"\n )\n encoded = encoded.decode() # bytestring to string\n\n self._credentials = {\n \"Authorization\": \"Bearer {encoded}\".format(encoded=encoded)\n }\n return self._credentials\n","sub_path":"zds_client/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568029577","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 5 11:38:05 2018\n\n@author: jk\"\"\"\nimport cv2\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport face_dect\n\n\ndef prepare_training_data(data_folder_path):\n#get the directories (one directory for each subject) in data folder\n dirs = os.listdir(data_folder_path)\n #list to hold all subject faces\n faces = []\n#list to hold labels for all subjects\n labels = []\n #let's go through each directory and read images within it\n for dir_name in dirs:\n #our subject directories start with letter 's' so\n#ignore any non-relevant directories if any\n if not dir_name.startswith(\"s\"):\n continue;\n#extract label number of subject from dir_name\n#format of dir name = slabel\n#, so removing letter 's' from dir_name will give us label\n label = int(dir_name.replace(\"s\", \"\"))\n #build path of directory containing images for current subject subject\n#sample subject_dir_path = \"training-data/s1\"\n subject_dir_path = data_folder_path + \"/\" + dir_name\n #get the images names that are inside the given subject directory\n subject_images_names = os.listdir(subject_dir_path)\n \n#------STEP-3--------\n#go through each image name, read image, \n#detect face and add face to list of faces\n for image_name in 
subject_images_names:\n \n#ignore system files like .DS_Store\n if image_name.startswith(\".\"):\n continue;\n \n#build image path\n#sample image path = training-data/s1/1.pgm\n image_path = subject_dir_path + \"/\" + image_name\n \n#read image\n image = cv2.imread(image_path)\n \n#display an image window to show the image \n cv2.imshow(\"Training on image...\", image)\n cv2.waitKey(100)\n #detect face\n face, rect = face_dect.detect_face(image)\n#for the purpose of this tutorial\n#we will ignore faces that are not detected\n #if face is not None:\n#add face to list of faces\n faces.append(face)\n#add label for this face\n labels.append(label)\n \n cv2.destroyAllWindows()\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n \n return faces, labels\n\nprint(\"Preparing data...\")\nfaces, labels = prepare_training_data(\"dataset\")\nprint(\"Data prepared\")\n#print total faces and labels\nprint(\"Total faces: \", len(faces))\nprint(\"Total labels: \", len(labels))\n","sub_path":"preparedata.py","file_name":"preparedata.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"505044509","text":"import cv2\nimport numpy as np\n\nnamefile=input(\"Inserisci nome file (senza estenzione): \")\ncap = cv2.VideoCapture(0)#zero sta per la prima web camere che hai nel coputer e cosi via 1 2 3\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter(namefile+\".avi\", fourcc, 20.0, (640,480))###\nwhile True:\n ret,frame=cap.read()\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n out.write(frame)###registra quello che vede dalla telecamera in formato datogli sopra\n cv2.imshow('frame',frame)\n cv2.imshow('grigio',gray)\n\n if cv2.waitKey(1)& 0xFF ==ord('q'):\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\nexit()\n","sub_path":"try basic function/4 salva video webcam.py","file_name":"4 salva video webcam.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634813283","text":"#encoding:utf-8\nimport wave\nimport os\nimport argparse\n\ncorpus_second = 0.0\ncorpus_min = 0.0\ncorpus_hour = 0.0\naudio_file_count = 0\nmax_length = 0\nmax_length_file = \"unknow\"\nmin_length = 0\nmin_length_file = \"unknow\"\nsample_rate = 0\nparser = argparse.ArgumentParser(description = \"Calculate the duration of the corpus.\")\nparser.add_argument(\"wav_dir\",help=\"audio file dir (wav_dir/.../.wav)\")\nargs = parser.parse_args()\ncurrent_directory = os.path.dirname(os.path.abspath(__file__))\nfile_path = os.path.join(current_directory,args.wav_dir)\n\ndef traverse(file_path):\n global corpus_second,audio_file_count,max_length,max_length_file,min_length,min_length_file\n file_list = os.listdir(file_path)\n for onefile in file_list:\n file_or_dir = os.path.join(file_path,onefile)\n if os.path.isfile(file_or_dir) and file_or_dir.split(\".\")[-1]==\"wav\":\n try:\n onewave = wave.open(file_or_dir,\"rb\")\n channels, width, framerate, frames = onewave.getparams()[:4]\n onewave.close()\n if max_length < frames:\n max_length = frames\n max_length_file = file_or_dir\n if min_length > frames or min_length == 0:\n min_length = frames\n min_length_file = file_or_dir\n second = frames/(framerate*1.0)\n corpus_second += second\n print(file_or_dir + \"\\n\\t\" + \"channels:%d width:%2d framerate:%6d frames:%8d duration:%.03f\"\n %(channels,width,framerate,frames,second))\n audio_file_count+=1\n except:\n print(\"error: \"+ file_or_dir) 
\n elif os.path.isdir(file_or_dir):\n traverse(file_or_dir)\n\ntraverse(file_path)\ncorpus_min = corpus_second/60\ncorpus_hour = corpus_min/60\nti=\"-\"*100\nprint(\"\\n\"+ti+\"\\n\")\nind=\" \"\nprint(ind+\"文件个数 : %d\"%audio_file_count)\nif corpus_min < 1:\n print(ind+\"语料时长 : %f(秒)\"%corpus_second)\nif corpus_min >= 1 and corpus_hour < 1:\n print(ind+\"语料时长 : %.3f(分钟)\"%corpus_min)\nif corpus_hour >= 1:\n print(ind+\"语料时长 : %.3f(小时)\"%corpus_hour)\nprint(ind+\"最长语音 : \" + str(max_length) + \"\\n \" + max_length_file)\nprint(ind+\"最短语音 :\" + str(min_length) + \"\\n \" + min_length_file)\nprint('\\n')\n","sub_path":"count-corpus.py","file_name":"count-corpus.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"375790382","text":"\"\"\"\nAuthor: Zane Thornburg\n\"\"\"\n\n#### Cell Geometry ####\n\nfrom jLM.RegionBuilder import RegionBuilder\nimport jLM\n\nimport pandas as pd\nimport numpy as np\nimport random\n\ndef buildRegions(sim, N_edges, N_2, sim_center, ptn_ratio, dna_monomers, cyto_radius, riboFile, dnaFile, filename, pmap, PartIdxMap, partIdx):\n ext = sim.region(\"extracellular\")\n mem = sim.region(\"membrane\")\n cyt = sim.region(\"cytoplasm\")\n ribo = sim.region(\"ribosomes\")\n dna = sim.region(\"DNA\")\n she = sim.region(\"outer_cytoplasm\")\n\n build = RegionBuilder(sim)\n\n cytoplasm = build.ellipsoid(radius = cyto_radius, center = sim_center)\n cyto_dilation = build.dilate(cytoplasm, se = build.se26)\n cyto_shell = cyto_dilation & ~cytoplasm\n cyto_dilation = build.dilate(cyto_dilation, se = build.se26)\n membrane = cyto_dilation & ~cyto_shell & ~cytoplasm\n extracellular = ~cyto_dilation\n\n \n dnaDF = pd.read_csv(dnaFile, header = None)\n# dnaDF\n\n geneBlocks = []\n genePoints = []\n genome_placement = []\n\n\n for index, row in dnaDF.iterrows():\n x = row[0] + N_2\n y = row[1] + N_2\n z = row[2] + N_2\n\n genome_placement.append([x,y,z])\n\n\n for i in range(len(genome_placement)):\n\n gene_coord = genome_placement[i]\n genePoints.append(gene_coord)\n \n \n genes = np.full((N_edges, N_edges, N_edges), False)\n \n for gene in genePoints:\n # print(gene)\n x = int(gene[0])\n y = int(gene[1])\n z = int(gene[2])\n\n genes[x,y,z] = True\n \n \n riboDF = pd.read_csv(riboFile, header = None)\n \n ribosome_radius = 1e-8/sim.latticeSpacing\n\n ribosome_spheres = []\n ribo_points_x = []\n ribo_points_y = []\n ribo_points_z = []\n\n its = 0\n ribo_points = []\n ribo_center_points = []\n\n for index, row in riboDF.iterrows():\n # if index < 7348:\n x_int = row[1] + N_2\n y_int = row[2] + N_2\n z_int = row[3] + N_2\n \n# x_int = row[2]//2 + N_2\n# y_int = row[3]//2 + N_2\n# z_int = row[4]//2 + N_2\n\n center_point = [x_int,y_int,z_int]\n\n riboPoints = []\n\n xpoint1 = [x_int+1,y_int,z_int]\n xpoint2 = [x_int-1,y_int,z_int]\n ypoint1 = [x_int,y_int+1,z_int]\n ypoint2 = [x_int,y_int-1,z_int]\n zpoint1 = [x_int,y_int,z_int+1]\n zpoint2 = [x_int,y_int,z_int-1]\n\n riboPoints.append(center_point)\n riboPoints.append(xpoint1)\n riboPoints.append(ypoint1)\n riboPoints.append(zpoint1)\n riboPoints.append(xpoint2)\n riboPoints.append(ypoint2)\n riboPoints.append(zpoint2)\n\n ribo_points.append(center_point)\n ribo_center_points.append(center_point)\n\n ribo_points.append(xpoint1)\n ribo_points.append(ypoint1)\n ribo_points.append(zpoint1)\n ribo_points.append(xpoint2)\n ribo_points.append(ypoint2)\n ribo_points.append(zpoint2)\n\n\n ribo_center_points = 
np.array(ribo_center_points,dtype=np.int)\n \n ribosomes = np.full((N_edges, N_edges, N_edges), False)\n for coord in ribo_points:\n x = int(coord[0])\n y = int(coord[1])\n z = int(coord[2])\n\n ribosomes[x,y,z] = True\n \n build.compose(\n (ext, extracellular),\n (cyt, cytoplasm),\n (she, cyto_shell),\n (ribo, ribosomes),\n (mem, membrane),\n (dna, genes))\n \n degradosome = sim.species('Degradosome')\n PartIdxMap['Degradosome'] = partIdx\n partIdx = partIdx + 1\n \n sim, occupied_mem_spaces, membrane_spaces = addDegParticles(sim, pmap, N_edges, ptn_ratio, cyto_shell, ribo_points, genePoints, degradosome)\n \n partIdx = addsecY(sim, occupied_mem_spaces, membrane_spaces, ptn_ratio, PartIdxMap, partIdx)\n \n \n print('Geometry constructed')\n \n return sim, genePoints, ribo_points, ribo_center_points, ext, mem, cyt, ribo, dna, she, cyto_shell, partIdx\n\n\ndef addDegParticles(sim, pmap, N_edges, ptn_ratio, cyto_shell, ribo_points, genePoints, degradosome):\n \n membrane_spaces = []\n\n for i in range(N_edges):\n for j in range(N_edges):\n for k in range(N_edges):\n\n if cyto_shell[i][j][k]:\n membrane_spaces.append([i,j,k])\n \n deg_coords = []\n occupied_mem_spaces = []\n deg_num = 0\n\n for point in ribo_points:\n occupied_mem_spaces.append(point)\n\n for point in genePoints:\n occupied_mem_spaces.append(point)\n\n while deg_num < 120*ptn_ratio: #*700/500\n position = random.choice(membrane_spaces)\n if position not in occupied_mem_spaces:\n deg_num = deg_num + 1\n occupied_mem_spaces.append(position)\n deg_coords.append(position)\n\n else:\n# print(deg_num)\n continue\n\n for coord in deg_coords:\n\n x = int(coord[0])\n y = int(coord[1])\n z = int(coord[2])\n\n degradosome.placeParticle(x,y,z,1)\n\n print('Degradosomes placed ',deg_num)\n \n return sim, occupied_mem_spaces, membrane_spaces\n\ndef addsecY(sim, occupied_mem_spaces, membrane_spaces, ptn_ratio, PartIdxMap, partIdx):\n secy_coords = []\n\n secy_num = 0\n\n secy = sim.species('secY')\n PartIdxMap['secY'] = partIdx\n partIdx = partIdx + 1\n\n while secy_num < 66*ptn_ratio:\n position = random.choice(membrane_spaces)\n if position not in occupied_mem_spaces:\n secy_num = secy_num + 1\n occupied_mem_spaces.append(position)\n secy_coords.append(position)\n\n else:\n print(secy_num)\n continue\n\n for coord in secy_coords:\n\n x = int(coord[0])\n y = int(coord[1])\n z = int(coord[2])\n\n secy.placeParticle(x,y,z,1)\n \n return partIdx\n\n\ndef readDNAoccupancies(dnaPartFile):\n\n print(dnaPartFile)\n\n dnaPartDF = pd.read_csv(dnaPartFile, header = None)\n\n geneOccupancies = []\n\n for index, row in dnaPartDF.iterrows():\n\n occupancy = []\n\n for position in row:\n if position != -1:\n occupancy.append(position)\n\n geneOccupancies.append(occupancy)\n\n print('DNA occupancies read')\n \n return geneOccupancies\n\n\ndef mapDNA(gene_starts, dna_monomers):\n genes_added = 0\n position_added = 0\n\n DNA_map = []\n\n locus_added = []\n\n locus_finished = []\n\n locus_back = gene_starts[genes_added-1][0]\n locusTag = gene_starts[genes_added][0]\n start = gene_starts[genes_added][1]\n\n for i in range(dna_monomers-len(gene_starts)):\n\n if (position_added >= start - 11.9) and (locusTag not in locus_added):\n\n locus_added.append(locusTag)\n\n locusNum = locusTag.split('3A_')[1].lstrip('0')\n\n if locusTag == 'JCVISYN3A_0910':\n\n DNA_map.append('g_909')\n\n DNA_map.append('E_910')\n\n position_added = position_added + 11.9*2\n\n # print('Added ' + geneMetID)\n\n elif locusTag == 'JCVISYN3A_0001':\n\n locus_back = locusTag\n\n genes_added 
= genes_added + 1\n\n locusTag = gene_starts[genes_added][0]\n\n start = gene_starts[genes_added][1]\n\n DNA_map.append('g_1')\n\n position_added = position_added + 11.9\n\n # print('Added ' + geneMetID)\n\n else:\n\n locus_back = locusTag\n\n genes_added = genes_added + 1\n\n locusTag = gene_starts[genes_added][0]\n\n start = gene_starts[genes_added][1]\n\n direction = gene_starts[genes_added-1][2]\n\n locus_end = gene_starts[genes_added-2][0]\n\n direction_end = gene_starts[genes_added-2][2]\n\n locusNumEnd = locus_end.split('3A_')[1].lstrip('0')\n\n if direction_end == 1:\n\n endMetID = 'E_' + locusNumEnd\n\n DNA_map.append(endMetID)\n\n elif direction_end == -1:\n\n endMetID = 'g_' + locusNumEnd\n\n DNA_map.append(endMetID)\n\n if direction == 1:\n\n geneMetID = 'g_' + locusNum\n\n DNA_map.append(geneMetID)\n\n elif direction == -1:\n\n geneMetID = 'E_' + locusNum\n\n DNA_map.append(geneMetID)\n\n position_added = position_added + 11.9*2\n\n # print('Added ' + geneMetID)\n\n\n else:\n\n if (locusTag == 'JCVISYN3A_0910') and (locusTag in locus_added):\n\n locusNum = locusTag.split('3A_')[1].lstrip('0')\n\n intergeneMetID = 'C_' + locusNum\n\n position_added = position_added + 11.9\n\n DNA_map.append(intergeneMetID)\n\n # print('Added ' + intergeneMetID)\n\n else:\n\n locusNum = locus_back.split('3A_')[1].lstrip('0')\n\n intergeneMetID = 'C_' + locusNum\n\n position_added = position_added + 11.9\n\n DNA_map.append(intergeneMetID)\n\n DNA_map.append('g_910') \n print('DNA map written')\n \n return DNA_map\n\n\ndef addDNApart(sim, DNA_map, genePoints, geneOccupancies, ext, mem, cyt, ribo, dna, she, RDME_species_list, PartIdxMap, partIdx):\n \n geneEnds = {}\n geneStarts = {}\n \n for i in range(len(DNA_map)):\n\n for j in range(len(genePoints)):\n\n occupancy = geneOccupancies[j]\n\n if i+1 in occupancy:\n\n coord = genePoints[j]\n x = int(coord[0])\n y = int(coord[1])\n z = int(coord[2])\n\n geneMetID = DNA_map[i]\n \n# print(i,geneMetID,occupancy,coord)\n \n if 'E_' in geneMetID:\n geneEnds[geneMetID] = coord\n RDME_species_list.append(geneMetID)\n PartIdxMap[geneMetID] = partIdx\n partIdx = partIdx + 1\n \n if 'g_' in geneMetID:\n geneStarts[geneMetID] = coord\n RDME_species_list.append(geneMetID)\n PartIdxMap[geneMetID] = partIdx\n partIdx = partIdx + 1\n \n if geneMetID not in RDME_species_list:\n RDME_species_list.append(geneMetID)\n PartIdxMap[geneMetID] = partIdx\n partIdx = partIdx + 1\n\n geneSpecies = sim.species(geneMetID, annotation = geneMetID)\n\n geneSpecies.placeParticle(x,y,z,1)\n\n geneSpecies.diffusionRate(cyt,sim.diffusionZero)\n geneSpecies.diffusionRate(mem,sim.diffusionZero)\n geneSpecies.diffusionRate(ext,sim.diffusionZero)\n geneSpecies.diffusionRate(ribo,sim.diffusionZero)\n geneSpecies.diffusionRate(dna,sim.diffusionZero)\n\n # print('Added ' + geneMetID,x,y,z)\n\n break\n \n print('DNA particles added') \n \n return sim, geneEnds, geneStarts, partIdx","sub_path":"RDME_gCME_ODE/program/regions_and_complexes.py","file_name":"regions_and_complexes.py","file_ext":"py","file_size_in_byte":11106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"93958671","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis code is originally from parser.py. This is the basic grammar and rules\nfrom which the other specific grammars are built. 
This file is not meant to be used by itself.\nIt is imported into the specific grammar files.\n\n#Constants\n    errs\n    rules\n#Functions\n    S\n    literals\n    update\n    dict_sum\n\"\"\"\n\n__author__ = 'Jathan McCollum, Mike Biancaniello, Michael Harding, Michael Shields'\n__editor__ = 'Joseph Malone'\n__maintainer__ = 'Jathan McCollum'\n__email__ = 'jathanism@aol.com'\n__copyright__ = 'Copyright 2006-2013, AOL Inc.; 2013 Saleforce.com'\n\nfrom support import *\n\n# Each production can be any of:\n#   1. string\n#      if no subtags: -> matched text\n#      if single subtag: -> value of that\n#      if list: -> list of the value of each tag\n#   2. (string, object) -> object\n#   3. (string, callable_object) -> object(arg)\n\nsubtagged = set()\n\ndef S(prod):\n    \"\"\"\n    Wrap your grammar token in this to call your helper function with a list\n    of each parsed subtag, instead of the raw text. This is useful for\n    performing modifiers.\n\n    :param prod: The parser product.\n    \"\"\"\n    subtagged.add(prod)\n    return prod\n\ndef literals(d):\n    '''Longest match of all the strings that are keys of 'd'.'''\n    keys = [str(key) for key in d]\n    keys.sort(lambda x, y: len(y) - len(x))\n    return ' / '.join(['\"%s\"' % key for key in keys])\n\ndef update(d, **kwargs):\n    # Check for duplicate subterms, which is legal but too confusing to be\n    # allowed at AOL. For example, a Juniper term can have two different\n    # 'destination-address' clauses, which means that the first will be\n    # ignored. This led to an outage on 2006-10-11.\n    for key in kwargs.iterkeys():\n        if key in d:\n            raise exceptions.ParseError('duplicate %s' % key)\n    d.update(kwargs)\n    return d\n\ndef dict_sum(dlist):\n    dsum = {}\n    for d in dlist:\n        for k, v in d.iteritems():\n            if k in dsum:\n                dsum[k] += v\n            else:\n                dsum[k] = v\n    return dsum\n\n## syntax error messages\nerrs = {\n    'comm_start': '\"comment missing /* below line %(line)s\"',\n    'comm_stop': '\"comment missing */ below line %(line)s\"',\n    'default': '\"expected %(expected)s line %(line)s\"',\n    'semicolon': '\"missing semicolon on line %(line)s\"',\n}\n\nrules = {\n    'digits': '[0-9]+',\n    '<digits>': '[0-9]+',\n    '<ts>': '[ \\\t]+',\n    '<ws>': '[ \\\t\\\n]+',\n    '<EOL>': \"('\r'?,'\n')/EOF\",\n    'alphanums': '[a-zA-Z0-9]+',\n    'word': '[a-zA-Z0-9_.-]+',\n    'anychar': \"[ a-zA-Z0-9.$:()&,/'_-]\",\n    'hex': '[0-9a-fA-F]+',\n    'ipchars': '[0-9a-fA-F:.]+',\n    'ipv4': ('digits, (\".\", digits)*', TIP),\n    'ipaddr': ('ipchars', TIP),\n    'cidr': ('(\"inactive:\", ws+)?, (ipaddr / ipv4), \"/\", digits, (ws+, \"except\")?', TIP),\n    'macaddr': 'hex, (\":\", hex)+',\n    'protocol': (literals(Protocol.name2num) + ' / digits', do_protocol_lookup),\n    'tcp': ('\"tcp\" / \"6\"', Protocol('tcp')),\n    'udp': ('\"udp\" / \"17\"', Protocol('udp')),\n    'icmp': ('\"icmp\" / \"1\"', Protocol('icmp')),\n    'icmp_type': (literals(icmp_types) + ' / digits', do_icmp_type_lookup),\n    'icmp_code': (literals(icmp_codes) + ' / digits', do_icmp_code_lookup),\n    'port': (literals(ports) + ' / digits', do_port_lookup),\n    'dscp': (literals(dscp_names) + ' / digits', do_dscp_lookup),\n    'root': 'ws?, junos_raw_acl / junos_replace_family_acl / junos_replace_acl / junos_replace_policers / ios_acl, ws?',\n}\n","sub_path":"trigger/acl/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"353030705","text":"import sqlite3\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# Scraping books from the website\ndef scrape_books(url):\n    response = requests.get(url)\n    soup = 
BeautifulSoup(response.text, \"html.parser\")\n    books = soup.find_all(\"article\")\n    all_books = []\n    for book in books:\n        book_data = (get_title(book), get_price(book), get_rating(book))\n        all_books.append(book_data)\n    save_books(all_books)\n\n\n# Saving books to a database\ndef save_books(all_books):\n    connection = sqlite3.connect(\"books.db\")\n    c = connection.cursor()\n    c.execute('''CREATE TABLE IF NOT EXISTS books \n                (title TEXT, price REAL, rating INTEGER)''')\n    c.executemany(\"INSERT INTO books VALUES(?,?,?)\", all_books)\n    connection.commit()\n    connection.close()\n\n\n# Extracting the book title\ndef get_title(book):\n    return book.find(\"h3\").find(\"a\")[\"title\"]\n\n\n# Extracting book price\ndef get_price(book):\n    price = book.find(class_=\"price_color\").get_text()\n    return float(price.replace(\"£\", \"\").replace(\"Â\", \"\"))\n\n\n# Extracting book rating and converting it to a number\ndef get_rating(book):\n    word_rating = book.find(class_=\"star-rating\").get_attribute_list(\"class\")[-1]\n    ratings = {\"One\": 1, \"Two\": 2, \"Three\": 3, \"Four\": 4, \"Five\": 5}\n    return ratings[word_rating]\n\n\nscrape_books(\"http://books.toscrape.com/catalogue/category/books/history_32/index.html\")\n","sub_path":"books_to_scrape.py","file_name":"books_to_scrape.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"494446713","text":"#!/usr/bin/python\r\n# -*- coding=utf-8 -*-\r\n\r\nfrom django.core.management.base import BaseCommand\r\nfrom ...sync import *\r\nfrom django.conf import settings\r\n\r\n\r\nclass Command(BaseCommand):\r\n    help = \"this is a command customized for cleaning the database with a period of time(default is 7 days)!\"\r\n    default_time_period = 7\r\n\r\n    def add_arguments(self, parser):\r\n        parser.add_argument('--user_center', help=\"user center ip address.\")\r\n        parser.add_argument('--clean_all', action='store_true', help=\"clear all items\")\r\n        parser.add_argument('--clean_del_flag', action='store_true', help=\"clear all items del_flag=1\")\r\n        parser.add_argument('--ignore_update_time', action='store_true', help=\"sync data ignore update time\")\r\n\r\n    def handle(self, *args, **options):\r\n        # Get the command-line arguments\r\n        clean_all = options['clean_all']\r\n        clean_del_flag = options['clean_del_flag']\r\n        ignore_update_time = options['ignore_update_time']\r\n        user_center_ip = options['user_center']\r\n\r\n        # Get the user center domain\r\n        if user_center_ip:\r\n            uc_domain = \"http://\" + user_center_ip\r\n        else:\r\n            uc_domain = None\r\n\r\n        # Delete rows with del_flag=True from all tables\r\n        if clean_all or clean_del_flag:\r\n            reversed_model_list = settings.MODEL_SYNC_FROM_USER_CENTER[:]\r\n            reversed_model_list.reverse()\r\n            for model_name in reversed_model_list:\r\n                clear_table(model_name, clean_all)\r\n\r\n        # Sync the data\r\n        for model_name in settings.MODEL_SYNC_FROM_USER_CENTER:\r\n            refresh_table(model_name, ignore_update_time, uc_domain)\r\n","sub_path":"applications/user_center/management/commands/sync_user_data.py","file_name":"sync_user_data.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621486502","text":"# -*- coding: utf-8 -*-\nimport base64\nimport json\nimport os\nimport logging\n\nfrom time import gmtime, strftime, sleep\n\nfrom selenium.webdriver.remote.remote_connection import LOGGER\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\n\nfrom scrapy import Request\nfrom scrapy.spiders import 
CrawlSpider\nfrom scrapy import Selector\nfrom mapacheCrawler.items import AlumnoItem, HorarioItem\n\n\nclass LoginSpider(CrawlSpider):\n name = 'login'\n carreras=['INNI', 'INCO', 'INBI', 'INCE']\n\n def __init__(self):\n LOGGER.setLevel(logging.WARNING)\n self.driver = webdriver.Chrome(executable_path='C:\\chromedriver.exe')\n\n self.checkDate()\n self.checkDirs()\n self.loadCredentials(r'C:\\Users\\Sabal\\Desktop\\mapache\\Crawler\\credenciales')\n\n\n def start_requests(self):\n\n return [Request(\n url= 'http://siiauescolar.siiau.udg.mx/wus/gupprincipal.inicio',\n callback= self.navigateSIIAU\n )]\n \n\n def navigateSIIAU(self, response):\n self.driver.get(response.url)\n self.driver.switch_to_frame(1)\n\n self.login()\n\n self.driver.switch_to.frame('Menu')\n if(self.credentials[2] == 'L'):\n self.driver.find_element_by_xpath('//a[text() = \"ALUMNOS\"]').click()\n self.driver.find_element_by_xpath('//a[text() = \"REGISTRO\"]').click()\n self.switchCarrera()\n else: \n self.driver.find_element_by_xpath('//a[text() = \"ALUMNOS SEMS\"]').click()\n self.driver.find_element_by_xpath('//a[text() = \"REGISTRO\"]').click()\n\n\n print(\"Cambiando carrera\")\n\n self.driver.find_element_by_xpath('//a[contains(text(),\"Horario\")]').click()\n\n self.driver.switch_to.default_content()\n self.driver.switch_to_frame(1)\n self.driver.switch_to.frame('contenido')\n\n selector = Selector(text = self.driver.find_element_by_xpath('//table[position()=3]/tbody').get_attribute('innerHTML')).xpath('//tr')\n\n self.driver.get(\"http://siiauescolar.siiau.udg.mx/wus/gupprincipal.salir\")\n\n return self.getHorario(selector)\n\n\n def saveScreen(self, data, name):\n name = './screenshoots/login/' + name + '_' + self.timeDate + '.png'\n\n with open(name, 'wb') as f:\n f.write(base64.b64decode(data))\n\n\n def loadCredentials(self, filepath):\n credentialFile = open(filepath, 'r')\n\n buffer = credentialFile.read()\n\n credentialFile.close()\n data = buffer.split(\",\")\n\n username = data[0]\n password = data[1]\n siiautype = data[2]\n user = data[3]\n \n self.credentials = (username, password, siiautype, user)\n\n\n def login(self):\n code = self.driver.find_element_by_name('p_codigo_c')\n password = self.driver.find_element_by_name('p_clave_c')\n\n code.send_keys(self.credentials[0])\n password.send_keys(self.credentials[1])\n\n self.driver.find_element_by_xpath('//input[@type=\"submit\"]').click()\n\n\n def switchCarrera(self):\n carreras = []\n element = self.driver.find_element_by_name('p_carrera')\n\n selectorCarreras = Selector(text = element.get_attribute('innerHTML')).xpath('//option/text()')\n selectCarreras = Select(element)\n # selectorCarreras = Select(\"INCO-2015-B\")\n print(\"Esto trae: \")\n print(element.get_attribute('innerHTML'))\n if (element == \"BGC-2012-B\"):\n # selectCarreras = Select(\"INCO-2015-B\")\n print(\"Entra\")\n # for i in range(0, len(selectorCarreras)):\n # carreras.append(selectorCarreras[i].extract())\n\n # for i in range(0, len(carreras)):\n # for j in range(0, len(self.carreras)):\n # if(carreras[i].find(self.carreras[j]) != 0):\n # selectCarreras.select_by_index(i)\n\n\n def getHorario(self, selector):\n table = []\n\n currentRow = 0\n\n for i in range(2, len(selector)):\n row = selector[i].xpath('td/text() | td/br | td/table/tbody/tr/td/text()').extract()\n if(row[1][0] == 'I' and len(row[1]) == 5):\n table.append(row)\n currentRow = len(table)\n print('primer [' + '] ['.join(row) + ']')\n else:\n table[currentRow-1].extend(row)\n print('segundo [' + '] ['.join(row) 
+ ']')\n\n for row in table:\n print(row)\n item = AlumnoItem()\n\n numHorario = int(len(row) / 17)\n\n item['nrc'] = row[0]\n item['cve'] = row[1]\n item['materia'] = row[2]\n item['seccion'] = row[3]\n item['creditos'] = row[4]\n\n item['dias'] = ''\n\n for i in range(0, numHorario):\n for j in range(6 + (17*i), 12 + (17*i)):\n if(row[j] != '
'):\n item['dias'] += row[j]\n item['dias'] += '|'\n\n item['edificio'] = ''\n item['aula'] = ''\n\n for i in range(0, numHorario):\n item['horario'] = row[5 + (17*i)]\n item['edificio'] += row[12 + (17*i)][3]\n item['aula'] += row[13 + (17*i)]\n\n item['profesor'] = row[14]\n item['fechaInicia'] = row[15]\n item['fechaFinal'] = row[16]\n\n yield item\n\n\n def checkDirs(self):\n if not os.path.exists('screenshoots/login'):\n os.makedirs('screenshoots/login')\n\n if not os.path.exists('data/login'):\n os.makedirs('data/login')\n\n\n def checkDate(self):\n self.timeDate = strftime(\"%Y_%m_%d_%H_%M_%S\", gmtime())\n","sub_path":"Crawler/mapacheCrawler/spiders/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"268467016","text":"import insightconnect_plugin_runtime\nfrom .schema import StartPatchScanInput, StartPatchScanOutput, Input, Output, Component\n\n# Custom imports below\nfrom insightconnect_plugin_runtime.exceptions import PluginException\nimport time\n\n\nclass StartPatchScan(insightconnect_plugin_runtime.Action):\n\n def __init__(self):\n super(self.__class__, self).__init__(\n name='start_patch_scan',\n description=Component.DESCRIPTION,\n input=StartPatchScanInput(),\n output=StartPatchScanOutput())\n\n def run(self, params={}):\n endpoint_names = params.get(Input.HOSTNAMES, [])\n machine_group_ids = params.get(Input.MACHINE_GROUP_IDS, [])\n use_machine_credential = params.get(Input.USE_MACHINE_CREDENTIAL, False)\n max_poll_time = params.get(Input.MAX_POLL_TIME)\n\n if not endpoint_names and not machine_group_ids:\n raise PluginException(cause='No hostnames or machine group IDs specified.',\n assistance='Either hostnames or machine group IDs must be specified.'\n )\n if use_machine_credential:\n if not endpoint_names:\n raise PluginException(cause='Machine credentials can only be set to true if hostname is specified.',\n assistance='Either provide a valid hostname or set machine credentials to False.')\n payload = {\n \"credentialId\": params.get(Input.CREDENTIAL_ID),\n \"diagnosticTraceEnabled\": params.get(Input.DIAGNOSTIC_TRACE_ENABLED),\n \"endpointNames\": endpoint_names,\n \"machinegroupIds\": machine_group_ids,\n \"name\": params.get(Input.NAME),\n \"runAsCredentialId\": params.get(Input.RUN_AS_CREDENTIAL_ID),\n \"templateId\": params.get(Input.TEMPLATE_ID),\n \"useMachineCredential\": use_machine_credential\n }\n scan_details = self.connection.ivanti_api.start_patch_scan(payload)\n\n i = 0\n # Poll for patch scan completion\n while i < max_poll_time:\n time.sleep(10)\n patch_scan_status_details = self.connection.ivanti_api.get_patch_scan_status_details(scan_details['id'], allow_404=True)\n if patch_scan_status_details is not None:\n if patch_scan_status_details.get('isComplete'):\n scan_details['isComplete'] = patch_scan_status_details['isComplete']\n scan_details['updatedOn'] = patch_scan_status_details['updatedOn']\n break\n i += 10\n\n return {\n Output.SCAN_DETAILS: scan_details\n }\n","sub_path":"ivanti_security_controls/icon_ivanti_security_controls/actions/start_patch_scan/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150039480","text":"##Connection for EVIL protocol\n\n##Connection is created and held by a socket, and held by ONE application\n\n##Connection is responsible for maintaining the sliding window 
ARQ and its\n##in/out buffers, as well as its own state, including termination\n\n##when a connection is first created it is either in:\n##  syn_recv (if it was created with socket.accept)\n##  syn_sent (if it was created with socket.connect)\n\nimport Queue\nimport threading\nfrom util import EVILPacket\nfrom util import debugLog\nimport util\nimport time\n\nMAX_SEND_BYTES = 800\n\nclass STATE():\n    CLOSED = 1\n    LISTEN = CLOSED << 1\n    SYN_RECV = LISTEN << 1\n    SYN_SENT = SYN_RECV << 1\n    ESTABLISHED = SYN_SENT << 1\n    FIN_WAIT_1 = ESTABLISHED << 1\n    FIN_WAIT_2 = FIN_WAIT_1 << 1\n    FIN_CLOSING = FIN_WAIT_2 << 1\n    TIME_WAIT = FIN_CLOSING << 1\n    CLOSE_WAIT = TIME_WAIT << 1\n    LAST_ACK = CLOSE_WAIT << 1\n\n\nclass Connection:\n\n    ##member variables are window size, max window size, in/out buffers, and\n    ##a state enum\n    DEFAULT_TIMEOUT = 5\n\n    def __init__(self, src_port, dst_port, maxWindowSize, state, otherAddress, socket):\n        self.max_window_size = maxWindowSize\n        self.max_send_size = 1\n        self.state = state\n        self.stateCond = threading.Condition()\n        self.otherAddress = (otherAddress,dst_port)\n        self.src_port = src_port\n        self.dst_port = dst_port\n\n        ##constructor, needs max window size for requirements and state for bidir\n        ##should also initialize buffers etc\n        self.currentWindowSize = 1\n        self.missedKeeps = 0\n\n        self.seq = 0\n        self.ack = 0\n\n        ### Threading Queues:\n\n        self.dgram_queue_in = Queue.Queue() # contains EVILPacket objs\n        # self.dgram_queue_out = queue.Queue() # contains EVILPacket objs\n        self.dgram_unconf = []\n        self.dgram_unsent = []\n        self.str_queue_in = Queue.Queue()\n        self.str_queue_out = Queue.Queue()\n\n        self.queue_cond = threading.Condition()\n        self.socket = socket\n\n        self.establishedCondition = threading.Condition()\n        self.resendTimer = 0\n        self.thread = threading.Thread(None, self.c_thread, \"c_thread\")\n        self.thread.start()\n\n    def setState(self, newState, timerReset=True):\n        self.stateCond.acquire()\n        self.state = newState\n        if timerReset:\n            self.resendTimer = 0\n        self.stateCond.release()\n\n    def setMaxWindowSize(self, W):\n        self.max_window_size = W\n\n    ##called by the socket on each connection passing in a packet that was\n    ##sent to the connection, could be an ack or data, checksum has been done\n    def handleIncoming(self,packet):\n        try:\n            self.dgram_queue_in.put(packet,timeout=0.5)\n            debugLog(\"added incoming packet to queue\")\n            self.queue_cond.acquire()\n            self.queue_cond.notifyAll()\n            debugLog(\"queue notification sent\")\n            self.queue_cond.release()\n        except Exception as e:\n            pass\n\n\n    ##called by the application when it wants to read the data from the stream\n    ##pauses until data is available, deletes data from the buffer once gotten\n    def get(self,maxSize,block=True,timeout=None):\n        if self.state != STATE.ESTABLISHED and self.str_queue_out.empty():\n            raise Exception(\"Cannot read from non-established connection\")\n        return self.str_queue_in.get(block,timeout)\n\n\n    ##called by the application when it wants to send data to the connection\n    ##should add the data to the output buffer, then handle it when appropriate\n    def send(self,data,block=True,timeout=None):\n        if self.state != STATE.ESTABLISHED:\n            raise Exception(\"Cannot write to non-established connection\")\n        dataChunks = []\n        while len(data) != 0:\n            dataChunks.append(data[:MAX_SEND_BYTES])\n            data = data[MAX_SEND_BYTES:]\n\n        for i in range(len(dataChunks)):\n            self.str_queue_out.put(dataChunks[i],block,timeout)\n            self.queue_cond.acquire()\n 
self.queue_cond.notify()\n self.queue_cond.release()\n\n\n\n ##send a FIN, set state appropriately\n def close(self):\n debugLog(\"connection.close() called!\")\n if self.state == STATE.CLOSED:\n raise Exception(\"Cannot close an already-closed connection\")\n dgram = self.new_dgram()\n dgram.setFlag(util.FLAG.FIN,True)\n for _ in range(10):\n self.socket.addToOutput(self.otherAddress,dgram)\n self.stateCond.acquire()\n self.state = STATE.CLOSED\n self.stateCond.notifyAll()\n self.stateCond.release()\n\n\n def new_dgram(self,seq=None,ack=None):\n if seq == None:\n seq = self.seq\n if ack == None:\n ack = self.ack\n dgram = util.EVILPacket()\n dgram.src_port = self.src_port\n dgram.dst_port = self.dst_port\n dgram.seq = seq\n dgram.ack = ack\n dgram.window = self.max_window_size\n\n return dgram\n\n def process_data_str(self,data):\n # seq will be added in EVIL.py\n dgram = self.new_dgram()\n dgram.data = data\n dgram.seq = self.seq + len(data)\n dgram.window = self.max_window_size\n dgram.checksum = dgram.generateCheckSum()\n\n self.seq += len(data) #TODO may change\n\n if len(self.dgram_unconf) >= self.max_send_size:\n debugLog(\"queue full; deferring packet\")\n self.dgram_unsent.append(dgram)\n else:\n debugLog(\"appended packet to queue\")\n self.dgram_unconf.append(dgram)\n self.socket.addToOutput(self.otherAddress,dgram)\n\n def process_dgram(self,dgram):\n self.stateCond.acquire()\n oldState = self.state\n self.max_send_size = dgram.window\n new_dgram = self.new_dgram()\n if dgram.checkFlag(util.FLAG.FIN):\n debugLog(\"FIN received. Closing connection.\")\n self.state = STATE.CLOSED\n self.stateCond.notifyAll()\n elif oldState != STATE.ESTABLISHED:\n if oldState == STATE.CLOSED:\n pass\n if oldState == STATE.SYN_SENT:\n if dgram.checkFlag(util.FLAG.SYN) and dgram.checkFlag(util.FLAG.ACK):\n new_dgram.setFlag(util.FLAG.ACK,True)\n self.setState(STATE.ESTABLISHED)\n self.stateCond.notifyAll()\n self.socket.addToOutput(self.otherAddress,new_dgram)\n debugLog(\"Sent ACK\")\n if oldState == STATE.SYN_RECV:\n if dgram.checkFlag(util.FLAG.ACK):\n self.setState(STATE.ESTABLISHED)\n self.stateCond.notifyAll()\n debugLog(\"Connection Established\")\n if oldState == STATE.FIN_WAIT_1:\n pass\n if oldState == STATE.FIN_WAIT_2:\n pass\n if oldState == STATE.FIN_CLOSING:\n pass\n if oldState == STATE.TIME_WAIT:\n pass\n if oldState == STATE.CLOSE_WAIT:\n pass\n if oldState == STATE.LAST_ACK:\n pass\n pass\n else:\n if dgram.checkFlag(util.FLAG.KEP):\n if dgram.checkFlag(util.FLAG.ACK):\n self.missedKeeps = 0\n else:\n new_dgram.setFlag(util.FLAG.ACK,True)\n new_dgram.setFlag(util.FLAG.KEP,True)\n self.socket.addToOutput(self.otherAddress,new_dgram)\n if dgram.checkFlag(util.FLAG.SYN) and dgram.checkFlag(util.FLAG.ACK):\n new_dgram.setFlag(util.FLAG.ACK,True)\n debugLog(\"Resending ACK\")\n self.socket.addToOutput(self.otherAddress,new_dgram)\n dataLen = len(dgram.data)\n if self.ack + dataLen < dgram.seq:\n #Out of order packet. Will be dropped.\n # Re-acknowledge last received in-order packet with RET set\n new_dgram.setFlag(util.FLAG.RET,True)\n self.socket.addToOutput(self.otherAddress,new_dgram)\n debugLog(\"Received packet out-of-order. 
Re-ACKing last received packet\")\n                self.stateCond.release()\n                return\n            elif self.ack + dataLen == dgram.seq and dataLen != 0:\n                self.resendTimer = time.time()\n\n            self.ack += len(dgram.data) #TODO: need to change to fit data type\n            rcvd_ack = dgram.ack\n            j = len(self.dgram_unconf)\n            i = 0\n            while i < j:\n                if self.dgram_unconf[i].seq <= rcvd_ack:\n                    self.dgram_unconf.pop(i)\n                    self.resendTimer = time.time()\n                    i -= 1\n                    j -= 1\n                i += 1\n            if len(dgram.data) != 0:\n                self.str_queue_in.put(dgram.data)\n                new_dgram = self.new_dgram() #get new ack number\n                debugLog(\"ACKing received packet\")\n                self.socket.addToOutput(self.otherAddress,new_dgram)\n            elif dgram.checkFlag(util.FLAG.RET):\n                debugLog(\"Got RET - resending!\")\n                #need to resend unconfirmed packets.\n                #will fake resendTimer so that resend happens right away\n                self.resendTimer = time.time() - (self.DEFAULT_TIMEOUT + 1)\n        self.stateCond.release()\n\n    def checkTimeout(self):\n        if (time.time() - self.resendTimer) < self.DEFAULT_TIMEOUT:\n            return\n        new_dgram = self.new_dgram()\n        self.stateCond.acquire()\n        oldState = self.state\n\n        if oldState == STATE.SYN_RECV:\n            new_dgram.setFlag(util.FLAG.SYN,True)\n            new_dgram.setFlag(util.FLAG.ACK,True)\n            self.socket.addToOutput(self.otherAddress,new_dgram)\n            debugLog(\"resent SYN+ACK\")\n        elif oldState == STATE.SYN_SENT:\n            new_dgram.setFlag(util.FLAG.SYN,True)\n            self.socket.addToOutput(self.otherAddress,new_dgram)\n            debugLog(\"resent SYN\")\n        elif oldState == STATE.ESTABLISHED:\n            #Need to resend any unACK-ed data\n            for dgram in self.dgram_unconf:\n                debugLog(\"Resending data\")\n                self.socket.addToOutput(self.otherAddress,dgram)\n\n            if self.missedKeeps >= 3:\n                self.state = STATE.CLOSED\n                self.stateCond.release()\n                return\n            if len(self.dgram_unconf) == 0:\n                new_dgram.setFlag(util.FLAG.KEP,True)\n                self.socket.addToOutput(self.otherAddress,new_dgram)\n                self.missedKeeps += 1\n\n        self.resendTimer = time.time()\n        self.stateCond.release()\n\n    def establishConnection(self):\n        self.stateCond.acquire()\n        debugLog(str(self.state))\n        new_dgram = self.new_dgram()\n        new_dgram.setFlag(util.FLAG.SYN,True)\n        if self.state == STATE.SYN_RECV:\n            new_dgram.setFlag(util.FLAG.ACK,True)\n            debugLog(\"Sending SYN+ACK\")\n        else:\n            debugLog(\"Sending SYN\")\n        self.socket.addToOutput(self.otherAddress,new_dgram)\n        while self.state != STATE.ESTABLISHED and self.state != STATE.CLOSED:\n            self.stateCond.wait()\n        debugLog(str(self.state))\n        self.stateCond.release()\n        return\n\n\n    # Waits for input, calls appropriate fn\n    def c_thread(self):\n\n        self.resendTimer = time.time()\n        while True:\n            cond = self.queue_cond\n            cond.acquire()\n            if self.dgram_queue_in.empty() and self.str_queue_out.empty():\n                ##debugLog(\"waiting\")\n                cond.wait(timeout=1)\n                ##debugLog(\"wait interrupted\")\n            cond.release()\n            while not self.dgram_queue_in.empty():\n                dgram = self.dgram_queue_in.get()\n                debugLog(\"Got incoming packet from queue\")\n                self.process_dgram(dgram)\n            while len(self.dgram_unconf) < self.max_send_size and len(self.dgram_unsent) > 0:\n                dgram = self.dgram_unsent.pop(0)\n                dgram.ack = self.ack\n                debugLog(\"Sending delayed data\")\n                self.socket.addToOutput(self.otherAddress,dgram)\n                self.dgram_unconf.append(dgram)\n            while not self.str_queue_out.empty():\n                held = len(self.dgram_unconf)\n                debugLog(\"Got incoming data from queue\")\n                data = self.str_queue_out.get()\n                self.process_data_str(data)\n            if self.state == STATE.CLOSED:\n                debugLog(\"Connection closed\")\n                break\n            self.checkTimeout()\n        #TODO: call socket fn to remove conn from 
list\n","sub_path":"connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":12767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"464705095","text":"import requests\r\nimport json\r\nimport Utility as util\r\n\r\n# print api result\r\ndef print_result(response):\r\n print(\"---------returns----------\")\r\n print(response.status_code)\r\n print(response.text)\r\n\r\n# api request caller\r\ndef api_resp_check(response, endpoint):\r\n print(\"API call: \"+endpoint)\r\n if response.status_code != 200:\r\n print(\"----------ERROR-------------\")\r\n print_result(response)\r\n util.script_exit(False)\r\n\r\ndef controlHubLogin(user,pswd, config):\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'X-Requested-By': 'SDC',\r\n }\r\n data = '{\"userName\":\"'+user+'\", \"password\":\"'+pswd+'\"}'\r\n response = requests.post(url=config['URL']['CONTROL_HUB_AUTH'], headers=headers, data=data)\r\n return response\r\n\r\ndef getSessionToken(user,pswd, config):\r\n response = controlHubLogin(user,pswd,config)\r\n if (response.status_code != 200):\r\n return \"\" \r\n cookies = response.cookies\r\n return cookies.items()[0][1]\r\n\r\ndef setAuthToken(user,pswd, config):\r\n _sessionToken = getSessionToken(user,pswd,config)\r\n if _sessionToken == \"\":\r\n return False\r\n util.writeToFile(\"token.txt\",_sessionToken)\r\n return True\r\n\r\n\r\n# get header for data collector api\r\ndef get_api_headers():\r\n _sessionToken = util.readFile(\"token.txt\")\r\n print(\"token: \"+_sessionToken)\r\n return {\r\n 'Content-Type': 'application/json',\r\n 'X-Requested-By': 'SDC',\r\n 'X-SS-REST-CALL': 'true',\r\n 'X-SS-User-Auth-Token': _sessionToken\r\n }\r\n\r\ndef sch_api_get_request(config, endpoint, params=None, data=None):\r\n\r\n if data and params:\r\n response = requests.get(config['URL']['HOST']+endpoint, headers=get_api_headers(), params=params, json=data)\r\n elif data:\r\n response = requests.get(config['URL']['HOST']+endpoint, headers=get_api_headers(), json=data)\r\n elif params:\r\n response = requests.get(config['URL']['HOST']+endpoint, headers=get_api_headers(), params=params)\r\n else:\r\n response = requests.get(config['URL']['HOST']+endpoint, headers=get_api_headers())\r\n api_resp_check(response, endpoint)\r\n return response\r\n\r\ndef sch_api_post_request(config, endpoint, params=None, data=None):\r\n\r\n if data and params:\r\n response = requests.post(config['URL']['HOST']+endpoint, headers=get_api_headers(), params=params, json=data)\r\n elif data:\r\n response = requests.post(config['URL']['HOST']+endpoint, headers=get_api_headers(), json=data)\r\n elif params:\r\n response = requests.post(config['URL']['HOST']+endpoint, headers=get_api_headers(), params=params)\r\n else:\r\n response = requests.post(config['URL']['HOST']+endpoint, headers=get_api_headers())\r\n api_resp_check(response, endpoint)\r\n return response\r\n\r\ndef sch_api_put_request(config, endpoint, data=None):\r\n response = requests.put(config['URL']['HOST']+endpoint, headers=get_api_headers(), json=data)\r\n api_resp_check(response, endpoint)\r\n return response\r\n\r\n","sub_path":"scheduler-seperate/cronjob/APIBuilder.py","file_name":"APIBuilder.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"509861282","text":"import torch\nfrom clustorch.base import ClusteringModel\nfrom sklearn.cluster import 
AgglomerativeClustering\n\n\nclass Hierarchical(ClusteringModel):\n \"\"\" Defines the Hierarchical clustering algorithm.\"\"\"\n\n def __init__(\n self,\n n_clusters=2,\n affinity=\"euclidean\",\n linkage=\"ward\",\n verbose=False,\n seed=None,\n ):\n self.k = n_clusters\n self.affinity = affinity\n self.linkage = linkage\n self.verbose = verbose\n self.status = {}\n\n self.model = AgglomerativeClustering(\n n_clusters=self.k, affinity=self.affinity, linkage=self.linkage\n )\n\n if isinstance(seed, int):\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n def fit(self, X, y=None, sample_weight=None):\n if X.is_cuda:\n y_hat = self.model.fit_predict(X.cpu().numpy())\n else:\n y_hat = self.model.fit_predict(X.numpy())\n return torch.from_numpy(y_hat)\n\n def to_string(self):\n return \"Hierarchical \" + self.affinity\n","sub_path":"clustorch/hierarchical.py","file_name":"hierarchical.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"294460655","text":"#!/usr/bin/python3\n\n# @Project = step_LeetCode\n# @File : 971_Flip_Binary_Tree_To_Match_Preorde_Traversal\n# @Author : TCY\n# @Time : 2019/8/12 15:03\n# @Email : tangcaiyuan@hust.edu.cn\n# @Software: PyCharm\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def flipMatchVoyage(self, root: TreeNode, voyage: List[int]) -> List[int]:\n self.ans = []\n self.i = 0\n\n def dfs(node, v):\n if not node:\n return True\n if node.val != v[self.i]:\n return False\n self.i += 1\n if node.left and node.left.val == v[self.i]:\n return dfs(node.left, v) and dfs(node.right, v)\n elif node.right and node.right.val == v[self.i]:\n if node.left:\n self.ans.append(node.val)\n return dfs(node.right, v) and dfs(node.left, v)\n return node.left == None and node.right == None\n\n if dfs(root, voyage):\n return self.ans\n else:\n return [-1]\n\n\n\"\"\" if not voyage:\n return False\n if node.val != voyage[0]:\n return False\n if node.left and not node.right:\n node.left, node.right = node.right, node.left\n\n l_val, l_loc = -1,0\n r_val, r_loc = -1,0\n if node.left:\n l_val = node.left.val\n if node.right:\n r_val = node.right.val\n for loc,val in enumerate(voyage):\n if val == r_val:\n r_loc = loc\n if val == l_val:\n l_loc = loc\n if l_loc > r_loc:\n self.ans.append(node.val)\n node.left, node.right = node.right, node.left\n\n r_ans, l_ans = True, True\n if node.right:\n r_ans = dfs(node.right, voyage[max(l_loc,r_loc):])\n if node.left:\n l_ans = dfs(node.left, voyage[1:max(l_loc,r_loc)])\n #print(l_ans, r_ans)\n if r_ans and l_ans:\n return True\n else:\n return False\n if dfs(root, voyage):\n return self.ans\n else:\n return [-1]\"\"\"\n\n","sub_path":"Weekly_Contest/Weekly_Contest_118/971_Flip_Binary_Tree_To_Match_Preorde_Traversal.py","file_name":"971_Flip_Binary_Tree_To_Match_Preorde_Traversal.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"347668218","text":"import sys\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdftypes import PDFObjRef, resolve1\nfrom Provisioning2.IAS import IAS\n\nfilename = sys.argv[1]\nfp = open(filename, 'rb')\nEDIPI = \"\"\nLNAME = \"\"\nFNAME = \"\"\nparser = PDFParser(fp)\ndoc = PDFDocument(parser)\nfields = 
resolve1(doc.catalog['AcroForm'])['Fields']\nfor i in fields:\n field = resolve1(i)\n name, value = field.get('T'), field.get('V')\n if isinstance(value, PDFObjRef):\n # value = resolve1(value)\n value = (field.get('T').decode('utf-8'), resolve1(field.get('V')))\n if name == b\"User Signature\":\n EDIPI = ((value[1]['Name'].decode(\"utf-8\").split('.'))[-1])\n LNAME = ((value[1]['Name'].decode(\"utf-8\").split('.'))[0])\n FNAME = ((value[1]['Name'].decode(\"utf-8\").split('.'))[1])\n # print ('{0}: {1}'.format(name, value))\n\nurl = \"https://cacpt.csd.disa.mil:443/ECRSWebServices/uas?wsdl\"\npkey = 'C:\\Pentaho\\projects\\DHA_Provisioning\\certs\\keyfile-decrypted.key'\ncert = 'C:\\Pentaho\\projects\\DHA_Provisioning\\certs\\certfile.crt'\nUID = IAS(url, pkey, cert)\nx = UID.getUIDEmail(EDIPI)\nprint(LNAME, FNAME,x)\n","sub_path":"Provisioning2/retrieveFromPDF.py","file_name":"retrieveFromPDF.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597112068","text":"\"\"\"\nGiven inorder and postorder traversal of a tree, construct the binary tree.\n\nNote:\nYou may assume that duplicates do not exist in the tree.\n\nFor example, given\n\ninorder = [9,3,15,20,7]\npostorder = [9,15,7,20,3]\nReturn the following binary tree:\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\"\"\"\nclass Solution():\n def buildTree(self, inorder, postorder):\n \"\"\"\n :type inorder: List[int]\n :type postorder: List[int]\n :rtype: TreeNode\n \"\"\"\n if not inorder or not postorder: return None\n index = inorder.index(postorder[-1])\n l = self.buildTree(inorder[:index], postorder[:index])\n r = self.buildTree(inorder[index+1:], postorder[index:-1])\n root = TreeNode(postorder[-1])\n root.left, root.right = l, r\n return root\n\n def buildTree(self, inorder, postorder):\n def build(il, ir, pl, pr):\n if il > ir or pl > pr: return None\n root = TreeNode(postorder[pr])\n i = index[postorder[pr]]\n root.left = build(il, i-1, pl, pl+i-il-1)\n root.right = build(i+1, ir, pl+i-il, pr-1)\n return root\n index = {n: i for i, n in enumerate(inorder)}\n return build(0, len(inorder)-1, 0, len(postorder)-1)\n","sub_path":"leetcode/constructBinaryTreeFromInorderAndPostorderTraversal/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"532998241","text":"# Python 2 and 3 support\nfrom __future__ import division, unicode_literals, print_function\n\n# Common Imports\nimport os\nimport numpy as np\n\n# ML Imports\nfrom sklearn.datasets import make_blobs, make_moons\nfrom sklearn.mixture import BayesianGaussianMixture\n\n# Graph Imports\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Config\nPROJECT_ROOT_DIR = '.'\n\n\n# Declare Functions\ndef image_path(fig_id):\n if not os.path.exists('images'):\n os.makedirs('images')\n return os.path.join(PROJECT_ROOT_DIR, 'images', fig_id)\n\n\ndef save_fig(fig_id, tight_layout=True):\n print(\"Saving\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(image_path(fig_id) + \".png\", format=\"png\", dpi=300)\n\n\ndef plot_clusters(X, y=None):\n plt.scatter(X[:, 0], X[:, 1], c=y, s=1)\n plt.xlabel(\"$x_1$\", fontsize=14)\n plt.ylabel(\"$x_2$\", fontsize=14, rotation=0)\n\n\ndef plot_data(X):\n plt.plot(X[:, 0], X[:, 1], 'k.', 
markersize=2)\n\n\ndef plot_centroids(centroids, weights=None, circle_color='w', cross_color='k'):\n if weights is not None:\n centroids = centroids[weights > weights.max() / 10]\n plt.scatter(\n centroids[:, 0],\n centroids[:, 1],\n marker='o',\n s=30,\n linewidths=8,\n color=circle_color,\n zorder=10,\n alpha=0.9)\n plt.scatter(\n centroids[:, 0],\n centroids[:, 1],\n marker='x',\n s=50,\n linewidths=50,\n color=cross_color,\n zorder=11,\n alpha=1)\n\n\ndef plot_gaussian_mixture(clusterer, X, resolution=1000, show_ylabels=True):\n mins = X.min(axis=0) - 0.1\n maxs = X.max(axis=0) + 0.1\n xx, yy = np.meshgrid(\n np.linspace(mins[0], maxs[0], resolution),\n np.linspace(mins[1], maxs[1], resolution))\n Z = -clusterer.score_samples(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n plt.contourf(\n xx,\n yy,\n Z,\n norm=LogNorm(vmin=1.0, vmax=30.0),\n levels=np.logspace(0, 2, 12))\n plt.contour(\n xx,\n yy,\n Z,\n norm=LogNorm(vmin=1.0, vmax=30.0),\n levels=np.logspace(0, 2, 12),\n linewidths=1,\n colors='k')\n Z = clusterer.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plt.contour(xx, yy, Z, linewidths=2, colors='r', linestyles='dashed')\n\n plt.plot(X[:, 0], X[:, 1], 'k.', markersize=2)\n plot_centroids(clusterer.means_, clusterer.weights_)\n\n\n# Dataset\nX1, y1 = make_blobs(n_samples=1000, centers=((4, -4), (0, 0)), random_state=42)\nX1 = X1.dot(np.array([[0.374, 0.95], [0.732, 0.598]]))\nX2, y2 = make_blobs(n_samples=250, centers=1, random_state=42)\nX2 = X2 + [6, -8]\nX = np.r_[X1, X2]\ny = np.r_[y1, y2]\n\n# Train model\n# Setting n_components higher than needed\n# BGM weights zero for unnecessary clusters\nbgm = BayesianGaussianMixture(n_components=10, n_init=10, random_state=42)\nbgm.fit(X)\nprint(\"EM Estimates\", bgm.weights_)\nprint(\"EM Means\", bgm.means_[:4])\nprint(\"EM Covariances\", bgm.covariances_[:3])\nprint(\"Convergance, and iterations\", bgm.converged_, bgm.n_iter_)\nprint(\"Hard clustering predictions\", bgm.predict(X))\nprint(\"Hard clustering probabilities\", bgm.predict_proba(X)[:1])\n\n# Train models with high weight concentrations on datapoints\n# weight_concentration_prior 0.01, 10000; with weight prior's dictating\n# optimal number clusters\nbgm_low = BayesianGaussianMixture(\n n_components=10,\n max_iter=1000,\n n_init=1,\n weight_concentration_prior=0.01,\n random_state=42)\nbgm_high = BayesianGaussianMixture(\n n_components=10,\n max_iter=1000,\n n_init=1,\n weight_concentration_prior=10000,\n random_state=42)\nnn = 73\nbgm_low.fit(X[:nn])\nbgm_high.fit(X[:nn])\n\nprint(\"high/low weights\")\nprint(np.round(bgm_low.weights_, 2))\nprint(np.round(bgm_high.weights_, 2))\n\nplt.figure(figsize=(9, 4))\n\nplt.subplot(121)\nplot_gaussian_mixture(bgm_low, X[:nn])\nplt.title(\"weight_concentration_prior = 0.01\", fontsize=14)\nplt.subplot(122)\nplot_gaussian_mixture(bgm_high, X[:nn], show_ylabels=False)\nplt.title(\"weight_concentration_prior = 10000\", fontsize=14)\n\nsave_fig(\"mixture_concentration_prior_diagram\")\nplt.show()\n\n# Classify moons data\nX_moons, y_moons = make_moons(n_samples=1000, noise=0.05, random_state=42)\n\nbgm = BayesianGaussianMixture(n_components=10, n_init=10, random_state=42)\nbgm.fit(X_moons)\n\nplt.figure(figsize=(9, 3.2))\n\nplt.subplot(121)\nplot_data(X_moons)\nplt.xlabel(\"$x_1$\", fontsize=14)\nplt.ylabel(\"$x_2$\", fontsize=14, rotation=0)\n\nplt.subplot(122)\nplot_gaussian_mixture(bgm, X_moons, 
show_ylabels=False)\n\nsave_fig(\"moons_vs_bgm_diagram\")\nplt.show()\n","sub_path":"clustering/variational_bayesian_gaussian_mixtures.py","file_name":"variational_bayesian_gaussian_mixtures.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148735743","text":"import math\r\ndef longestpalindrome(s):\r\n if (s == ''):\r\n return None\r\n fwdlist = list(s)\r\n revlist = fwdlist[::-1]\r\n fwdlist1 = list(s)\r\n revlist1 = fwdlist1[::-1]\r\n fwdlist2 = list(s)\r\n revlist2 = fwdlist2[::-1]\r\n longstr = \"\"\r\n if(fwdlist == revlist):\r\n longstr = ''.join(fwdlist) \r\n return longstr \r\n else:\r\n for i in range(0, len(fwdlist)):\r\n if(len(fwdlist) == 2):\r\n return longstr\r\n if(fwdlist == revlist):\r\n if(len(''.join(fwdlist)) > len(longstr)):\r\n longstr = ''.join(fwdlist) \r\n elif(fwdlist1 == revlist1):\r\n if(len(''.join(fwdlist1)) > len(longstr)):\r\n longstr = ''.join(fwdlist1)\r\n elif(fwdlist2 == revlist2):\r\n if(len(''.join(fwdlist2)) > len(longstr)):\r\n longstr = ''.join(fwdlist2)\r\n else:\r\n fwdlist.pop()\r\n revlist.pop(0)\r\n fwdlist1.pop(0)\r\n revlist1.pop()\r\n if(len(fwdlist2) > 2):\r\n fwdlist2.pop(0)\r\n fwdlist2.pop()\r\n revlist2.pop(0)\r\n revlist2.pop() \r\n print(longstr) \r\n return longstr\r\n\r\ndef allpermutations(str):\r\n allperm = \"\"\r\n strlst = list(str)\r\n allperm += str + \" \"\r\n totlist = permutation(strlst)\r\n #adds words together\r\n allperm += ''.join(totlist)\r\n return allperm\r\n \r\ndef permutation(strlst):\r\n output = []\r\n i = len(strlst) - 1\r\n j = len(strlst) - 1\r\n v = math.factorial(len(strlst)) \r\n for k in range(0, v - 1):\r\n if((k == math.factorial(len(strlst) / len(strlst))) and len(strlst) > 3):\r\n tlst = strlst[1:len(strlst)-2:-1]\r\n tlst.insert(0, strlst[0])\r\n strlst = tlst\r\n output.extend(strlst)\r\n output.append(\" \")\r\n j = len(strlst) - 1 \r\n elif(k % 2 == 0):\r\n swaplst = swap(strlst, i-1, i)\r\n output.extend(swaplst)\r\n output.append(\" \")\r\n strlst = swaplst\r\n elif(k % 2 != 0):\r\n templst = swaplst[j:]\r\n templst1 = swaplst[:j]\r\n templst.extend(templst1)\r\n j = j - 1\r\n output.extend(templst)\r\n output.append(\" \")\r\n strlst = templst\r\n return output \r\n\r\n\r\ndef swap(swaplst, idx1, idx2):\r\n swaplst[idx1], swaplst[idx2] = swaplst[idx2], swaplst[idx1]\r\n return swaplst\r\n\r\nprint(allpermutations(\"abc\"))\r\n\r\n\r\n\r\n\r\ndef main():\r\n assert longestpalindrome(\"ababad\") == \"ababa\", \"Base Case\"\r\n assert longestpalindrome(\"ab\") == \"\", \"Not palindrome\"\r\n assert longestpalindrome(\"bb\") == \"bb\", \"Double small\"\r\n assert longestpalindrome(\"bavava\") == \"avava\", \"Reverse at Front\"\r\n assert longestpalindrome(\"bavavac\") == \"avava\", \"Both ends\"\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"week-2/Python/perren-wright.py","file_name":"perren-wright.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"229130928","text":"import re\n\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout\n\nimport config as config\nfrom utils.GUI_main_window import init_container, init_combo_box, init_inputBox, init_checkBox, init_button\nfrom utils.GUI_operation_tab import init_slider_bar_box\n\n\nclass XeThruX4ControlPane(QWidget):\n def __init__(self):\n super().__init__()\n\n # default range\n self.range_min = 0\n self.range_max = 0.4\n\n # for checking only 
one freq box\n self._toggle = None\n self.state = ['idle'] # see the docstring of self.update_state for details\n\n self.background = QVBoxLayout(self)\n self.setLayout(self.background)\n\n self.main_page = init_container(parent=self.background, vertical=True,\n style=\"background-color:\" + config.container_color + \";\")\n # - device (combo box)\n self.device = init_combo_box(parent=self.main_page,\n label=\"Device\",\n item_list=[\"X4M300\", \"X4M200\", \"X4M03\"])\n self.device.activated[str].connect(self.device_onChanged)\n # - port (input box)\n self.XeThruX4_port_block, self.XeThruX4_port_textbox = init_inputBox(parent=self.main_page,\n label=\"Port (device_name): \",\n label_bold=True,\n default_input=\"Default: COM8\")\n\n self.freq_block = init_container(parent=self.main_page,\n label=\"Frequency Band\",\n vertical=False)\n # - frequency band (check box)\n self.low_freq_checkbox = init_checkBox(parent=self.freq_block,\n label=\"Low (7.290 GHz)\",\n function=self.low_freq_action)\n self.high_freq_checkbox = init_checkBox(parent=self.freq_block,\n label=\"High (8.748 GHz)\",\n function=self.high_freq_action)\n\n # - range (input boxes)\n self.range_container = init_container(parent=self.main_page,\n label=\"Range (m) [0.5 ~ 3]\",\n label_bold=True,\n vertical=False)\n\n self.min_range_block, self.min_range_textbox = init_inputBox(parent=self.range_container,\n label=\"Min:\",\n label_bold=False,\n default_input=\"0\")\n self.min_range_textbox.textChanged.connect(self.check_range)\n self.max_range_block, self.max_range_textbox = init_inputBox(parent=self.range_container,\n label=\"Max:\",\n label_bold=False,\n default_input=\"0.4\")\n self.max_range_textbox.textChanged.connect(self.check_range)\n\n # - fps ( bar)\n self.fps_block, self.fps_slider_view = init_slider_bar_box(self.main_page,\n label=\"FPS\",\n vertical=True,\n label_bold=True,\n min_value=10,\n max_value=25)\n self.fps_slider_view.setValue(23)\n\n # - check box\n self.baseband_block = init_container(parent=self.main_page,\n label=\"Baseband\",\n vertical=True)\n self.baseband_checkbox = init_checkBox(parent=self.baseband_block,\n function=self.baseband_checkbox_function)\n # - two buttons\n\n self.buttons_block = init_container(self.main_page, vertical=False)\n self.start_stop__btn = init_button(parent=self.buttons_block,\n label=\"Start/stop sensor\",\n function=self.start_stop_btn_action)\n self.reset_btn = init_button(parent=self.buttons_block,\n label=\"Reset to default\",\n function=self.reset_btn_action)\n self.show()\n\n def check_range(self):\n\n self.range_min = re.findall(\"\\d+\\.\\d+\", self.min_range_textbox.text())\n self.range_max = re.findall(\"\\d+\\.\\d+\", self.max_range_textbox.text())\n\n if self.range_min >= self.range_max:\n print(\"Range_min >= range_max.\")\n\n\n def low_freq_action(self):\n if self.low_freq_checkbox.isChecked():\n self.update_state(\"freq_low\")\n self._toggle = True\n self.high_freq_checkbox.setChecked(not self._toggle)\n else:\n self.update_state('not_freq_low')\n self._toggle = not self._toggle\n return\n\n def high_freq_action(self):\n if self.high_freq_checkbox.isChecked():\n self.update_state(\"freq_high\")\n self._toggle = True\n self.low_freq_checkbox.setChecked(not self._toggle)\n else:\n self.update_state('not_freq_high')\n self._toggle = not self._toggle\n return\n\n def update_state(self, act):\n \"\"\"\n update the current state based on action\n The working states, as oppose to 'idle' include that of 'pending', 'testing', 'countingDown', 'writing'\n @param act:\n 
\"\"\"\n\n # check the checkbox logic\n if act in ['follow', 'not_follow', 'locate', 'not_locate']:\n self.check_locate_follow_logic(act)\n # test/record logic\n print(\"update function not implemented\")\n\n def check_locate_follow_logic(self, act):\n \"\"\"\n can only choose one\n :return:\n \"\"\"\n if act == 'freq_low':\n # if locate is chosen, remove it\n if 'freq_high' in self.state:\n self.state.remove('freq_high')\n self.state.append(act)\n\n elif act == 'not_freq_low':\n if 'freq_low' in self.state:\n self.state.remove('freq_low')\n\n elif act == 'freq_high':\n if 'freq_low' in self.state:\n self.state.remove('freq_low')\n self.state.append(act)\n\n elif act == 'freq_high':\n if 'freq_high' in self.state:\n self.state.remove('freq_high')\n\n def baseband_checkbox_function(self):\n print('Baseband checked. Function not implemented...')\n\n def start_stop_btn_action(self):\n print('start/stop button clicked. Function not implemented...')\n # start testing\n self.low_freq_checkbox.setDisabled(True)\n self.high_freq_checkbox.setDisabled(True)\n\n # check range value\n if self.range_min >= self.range_max:\n print(\"Range_min >= range_max. Can't start.\")\n return\n else:\n print('recording')\n\n def reset_btn_action(self):\n # start testing\n self.low_freq_checkbox.setChecked(False)\n self.high_freq_checkbox.setChecked(False)\n self.low_freq_checkbox.setDisabled(False)\n self.high_freq_checkbox.setDisabled(False)\n\n self.state.clear()\n print('reset button clicked. Function not implemented...')\n\n def device_onChanged(self):\n print(\"conbobox selection changed. Function not implemented..\")","sub_path":"mGesf/main_page_tabs/XeThruX4ControlPane.py","file_name":"XeThruX4ControlPane.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"488814742","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('workshop', '0003_auto_20151018_2239'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='truck',\n name='diesel_card_pin',\n field=models.CharField(max_length=10, null=True, verbose_name=b'Dieselkort PIN', blank=True),\n ),\n ]\n","sub_path":"workshop/migrations/0004_truck_diesel_card_pin.py","file_name":"0004_truck_diesel_card_pin.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"286308736","text":"import io\nimport json\nfrom collections import defaultdict\nfrom itertools import islice\nimport logging\nimport pprint\nimport pandas as pd\nfrom typing import Dict, Any, List, Optional\nfrom haystack.document_store.sql import DocumentORM\nimport subprocess\nimport time\nimport torch\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef launch_es(sleep=15):\n # Start an Elasticsearch server via Docker\n\n logger.info(\"Starting Elasticsearch ...\")\n status = subprocess.run(\n ['docker run -d -p 9200:9200 -e \"discovery.type=single-node\" elasticsearch:7.9.2'], shell=True\n )\n if status.returncode:\n logger.warning(\"Tried to start Elasticsearch through Docker but this failed. \"\n \"It is likely that there is already an existing Elasticsearch instance running. 
\")\n else:\n time.sleep(sleep)\n\ndef launch_open_distro_es(sleep=15):\n # Start an Open Distro for Elasticsearch server via Docker\n\n logger.info(\"Starting Open Distro for Elasticsearch ...\")\n status = subprocess.run(\n ['docker run -d -p 9200:9200 -p 9600:9600 -e \"discovery.type=single-node\" amazon/opendistro-for-elasticsearch:1.13.2'], shell=True\n )\n if status.returncode:\n logger.warning(\"Tried to start Open Distro for Elasticsearch through Docker but this failed. \"\n \"It is likely that there is already an existing Elasticsearch instance running. \")\n else:\n time.sleep(sleep)\n\ndef launch_opensearch(sleep=15):\n # Start an OpenSearch server via docker\n\n logger.info(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n _ = subprocess.run(['docker rm opensearch'], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n ['docker run -d -p 9201:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name opensearch opensearchproject/opensearch:1.0.0-rc1'],\n shell=True\n )\n if status.returncode:\n logger.warning(\"Tried to start OpenSearch through Docker but this failed. \"\n \"It is likely that there is already an existing OpenSearch instance running. \")\n else:\n time.sleep(sleep)\n\n\ndef stop_opensearch():\n logger.info(\"Stopping OpenSearch...\")\n status = subprocess.run(['docker stop opensearch'], shell=True)\n if status.returncode:\n logger.warning(\"Tried to stop OpenSearch but this failed. \"\n \"It is likely that there was no OpenSearch Docker container with the name opensearch\")\n status = subprocess.run(['docker rm opensearch'], shell=True)\n\n\ndef stop_service(document_store):\n ds_class = str(type(document_store))\n if \"OpenSearchDocumentStore\" in ds_class:\n stop_opensearch()\n else:\n logger.warning(f\"No support yet for auto stopping the service behind a {ds_class}\")\n\n\ndef launch_milvus(sleep=15):\n # Start a Milvus server via docker\n\n logger.info(\"Starting Milvus ...\")\n logger.warning(\"Automatic Milvus config creation not yet implemented. \"\n \"If you are starting Milvus using launch_milvus(), \"\n \"make sure you have a properly populated milvus/conf folder. \"\n \"See (https://milvus.io/docs/v1.0.0/milvus_docker-cpu.md) for more details.\")\n status = subprocess.run(\n ['sudo docker run -d --name milvus_cpu_1.0.0 \\\n -p 19530:19530 \\\n -p 19121:19121 \\\n -v /home/$USER/milvus/db:/var/lib/milvus/db \\\n -v /home/$USER/milvus/conf:/var/lib/milvus/conf \\\n -v /home/$USER/milvus/logs:/var/lib/milvus/logs \\\n -v /home/$USER/milvus/wal:/var/lib/milvus/wal \\\n milvusdb/milvus:1.0.0-cpu-d030521-1ea92e'\n ],\n shell=True\n )\n if status.returncode:\n logger.warning(\"Tried to start Milvus through Docker but this failed. \"\n \"It is likely that there is already an existing Milvus instance running. \")\n else:\n time.sleep(sleep)\n\n\ndef print_answers(results: dict, details: str = \"all\"):\n \"\"\"\n Utilitiy function to print results of Haystack pipelines\n :param results: Results from a pipeline\n :param details: One of [\"minimum\", \"medium\", \"all]. 
Defining the level of details to print.\n :return: None\n \"\"\"\n # TODO: unify the output format of Generator and Reader so that this function doesn't have the try/except\n # Or implement a class method like PredReader.print() and PredGenerator.print() that handles all this functionality.\n # This default case is when the answers come from a Reader\n try:\n answers = results[\"answers\"]\n pp = pprint.PrettyPrinter(indent=4)\n if details in (\"minimal\", \"medium\"):\n if details == \"minimal\":\n keys_to_keep = set([\"answer\", \"context\"])\n elif details == \"medium\":\n keys_to_keep = set([\"answer\", \"context\", \"score\"])\n\n # filter the results\n filtered_answers = []\n for ans in answers:\n filtered_answers.append({k: getattr(ans, k) for k in keys_to_keep})\n pp.pprint(filtered_answers)\n else:\n pp.pprint(results)\n # This fall back case is when the answers come from a Generator\n except:\n if details == \"minimal\":\n print(f\"Query: {results['query']}\")\n for a in results[\"answers\"]:\n print(f\"Answer: {a['answer']}\")\n else:\n pp.pprint(results)\n\n\ndef print_documents(results: dict, max_text_len: Optional[int] = None, print_meta: bool = False):\n print(f\"Query: {results['query']}\")\n pp = pprint.PrettyPrinter(indent=4)\n for d in results[\"documents\"]:\n print()\n new_text = d.content[:max_text_len]\n if len(new_text) != len(d.content):\n new_text += \"...\"\n results = {\n \"name\": d.meta.get(\"name\", None),\n \"content\": new_text\n }\n if print_meta:\n results[\"meta\"] = d.meta\n pp.pprint(results)\n\n\ndef export_answers_to_csv(agg_results: list, output_file):\n \"\"\"\n Exports answers coming from finder.get_answers() to a CSV file\n :param agg_results: list of predictions coming from finder.get_answers()\n :param output_file: filename of output file\n :return: None\n \"\"\"\n if isinstance(agg_results, dict):\n agg_results = [agg_results]\n\n assert \"query\" in agg_results[0], f\"Wrong format used for {agg_results[0]}\"\n assert \"answers\" in agg_results[0], f\"Wrong format used for {agg_results[0]}\"\n\n data = {} # type: Dict[str, List[Any]]\n data[\"query\"] = []\n data[\"prediction\"] = []\n data[\"prediction_rank\"] = []\n data[\"prediction_context\"] = []\n\n for res in agg_results:\n for i in range(len(res[\"answers\"])):\n temp = res[\"answers\"][i]\n data[\"query\"].append(res[\"query\"])\n data[\"prediction\"].append(temp[\"answer\"])\n data[\"prediction_rank\"].append(i + 1)\n data[\"prediction_context\"].append(temp[\"context\"])\n\n df = pd.DataFrame(data)\n df.to_csv(output_file, index=False)\n\n\n\ndef convert_labels_to_squad(labels_file: str):\n \"\"\"\n Convert the export from the labeling UI to SQuAD format for training.\n\n :param labels_file: path for export file from the labeling tool\n :return:\n \"\"\"\n with open(labels_file, encoding='utf-8') as label_file:\n labels = json.load(label_file)\n\n labels_grouped_by_documents = defaultdict(list)\n for label in labels:\n labels_grouped_by_documents[label[\"document_id\"]].append(label)\n\n labels_in_squad_format = {\"data\": []} # type: Dict[str, Any]\n for document_id, labels in labels_grouped_by_documents.items():\n qas = []\n for label in labels:\n doc = DocumentORM.query.get(label[\"document_id\"])\n\n assert (\n doc.content[label[\"start_offset\"]: label[\"end_offset\"]]\n == label[\"selected_text\"]\n )\n\n qas.append(\n {\n \"question\": label[\"question\"],\n \"id\": label[\"id\"],\n \"question_id\": label[\"question_id\"],\n \"answers\": [\n {\n \"text\": 
label[\"selected_text\"],\n \"answer_start\": label[\"start_offset\"],\n \"labeller_id\": label[\"labeler_id\"],\n }\n ],\n \"is_impossible\": False,\n }\n )\n\n squad_format_label = {\n \"paragraphs\": [\n {\"qas\": qas, \"context\": doc.content, \"document_id\": document_id}\n ]\n }\n\n labels_in_squad_format[\"data\"].append(squad_format_label)\n\n with open(\"labels_in_squad_format.json\", \"w+\", encoding='utf-8') as outfile:\n json.dump(labels_in_squad_format, outfile)\n\n\ndef get_batches_from_generator(iterable, n):\n \"\"\"\n Batch elements of an iterable into fixed-length chunks or blocks.\n \"\"\"\n # TODO consider moving to base.DocumentStore\n it = iter(iterable)\n x = tuple(islice(it, n))\n while x:\n yield x\n x = tuple(islice(it, n))\n","sub_path":"haystack/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"558772837","text":"__doc__=\"\"\" GUIdemo_OO.py\r\nThis is a very basic model, demonstrating the ease\r\nof interfacing to SimGUI.\r\n\"\"\"\r\nfrom SimPy.Simulation import *\r\nfrom SimPy.Monitor import *\r\nfrom random import *\r\nfrom SimPy.SimGUI import *\r\n\r\n## Model components ---------------------------------------\r\n\r\nclass Launcher(Process):\r\n nrLaunched=0\r\n def launch(self):\r\n while True:\r\n gui.writeConsole(\"Launch at %.1f\"%self.sim.now())\r\n Launcher.nrLaunched+=1\r\n gui.launchmonitor.observe(Launcher.nrLaunched)\r\n yield hold,self,uniform(1,gui.params.maxFlightTime)\r\n gui.writeConsole(\"Boom!!! Aaaah!! at %.1f\"%self.sim.now())\r\n \r\n## Model --------------------------------------------------\r\nclass GUIdemoModel(Simulation):\r\n def run(self):\r\n self.initialize()\r\n gui.launchmonitor=Monitor(name=\"Rocket counter\",\r\n ylab=\"nr launched\",tlab=\"time\",sim=self)\r\n Launcher.nrLaunched=0\r\n for i in range(gui.params.nrLaunchers):\r\n lau=Launcher(sim=self)\r\n self.activate(lau,lau.launch())\r\n self.simulate(until=gui.params.duration)\r\n gui.noRunYet=False\r\n gui.writeStatusLine(\"%s rockets launched in %.1f minutes\"%\r\n (Launcher.nrLaunched,self.now()))\r\n \r\n## Model GUI ----------------------------------------------\r\n\r\nclass MyGUI(SimGUI):\r\n def __init__(self,win,**par):\r\n SimGUI.__init__(self,win,**par)\r\n self.run.add_command(label=\"Start fireworks\",\r\n command=GUIdemoModel().run,underline=0)\r\n self.params=Parameters(duration=duration,\r\n maxFlightTime=maxFlightTime,\r\n nrLaunchers=nrLaunchers)\r\n \r\n## Experiment data ----------------------------------------\r\n\r\nduration=2000\r\nmaxFlightTime=11.7\r\nnrLaunchers=3\r\n\r\n## Experiment/Display ------------------------------------\r\n\r\nroot=Tk()\r\ngui=MyGUI(root,title=\"RocketGUI\",doc=__doc__,consoleHeight=40)\r\ngui.mainloop()\r\n","sub_path":"SimPy-2.1.0/SimPyModels/GUIdemo_OO.py","file_name":"GUIdemo_OO.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"311339721","text":"# Implimentation of various sorting algorithms for lists\n##########################\n\ndef selection_sort(unsortedList):\n\t\"\"\"\n\tLook through the list. Find the smallest element. 
Swap it to the front.\n\tRepeat.\n\t\"\"\"\n\tfor inx, element in enumerate(unsortedList):\n\t\tsmallest = unsortedList[inx]\n\t\tposition = inx\n\t\tfor count in range(inx, len(unsortedList)):\n\t\t\tif smallest > unsortedList[count]:\n\t\t\t\tsmallest = unsortedList[count]\n\t\t\t\tposition = count\n\t\tif position != inx:\n\t\t\ttransitory = unsortedList[inx]\n\t\t\tunsortedList[inx] = unsortedList[position]\n\t\t\tunsortedList[position] = transitory\n\treturn unsortedList\n\ndef insertion_sort(unsortedList):\n\t\"\"\"\n\tInsert (via swaps) the next element in the sorted list of the previous\n\telements.\n\t\"\"\"\n\tfor inx in range(1, len(unsortedList)):\n\t\tif unsortedList[inx] < unsortedList[inx - 1]:\n\t\t\tfor count in range (inx, 0, -1):\n\t\t\t\tif unsortedList[count] < unsortedList[count - 1]:\n\t\t\t\t\ttransitory = unsortedList[count - 1]\n\t\t\t\t\tunsortedList[count - 1] = unsortedList[count]\n\t\t\t\t\tunsortedList[count] = transitory\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\treturn unsortedList\n\ndef merge_sort(firstListFragment, secondListFragment = []):\n\t\"\"\"\n\tOur first recursive algorithm.\n\t\"\"\"\n\tlength = len(firstListFragment)\n\tif length > 1:\n\t\tfirstListFragment = merge_sort(firstListFragment[:length//2], firstListFragment[length//2:])\t\n\tlength = len(secondListFragment)\n\tif length > 1:\n\t\tsecondListFragment = merge_sort(secondListFragment[:length//2], secondListFragment[length//2:])\n\treturn merge(firstListFragment, secondListFragment)\n\ndef merge(firstListFragment, secondListFragment):\n\t\"\"\"\n\tmerge the two lists\n\t\"\"\"\n\tleft = 0\n\tright = 0\n\tleftLength = len(firstListFragment)\n\trightLength = len(secondListFragment)\n\tresult =[]\n\twhile left < leftLength and right < rightLength:\n\t\tif firstListFragment[left] <= secondListFragment[right]:\n\t\t\tresult.append(firstListFragment[left])\n\t\t\tleft += 1\n\t\telse:\n\t\t\tresult.append(secondListFragment[right])\n\t\t\tright += 1\n\tresult.extend(firstListFragment[left:])\n\tresult.extend(secondListFragment[right:])\n\treturn result","sub_path":"Random/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605679085","text":"from some_module import members\nfrom functools import reduce\n\nmembers_young = min (members, key = lambda x: x['age'])\nmembers_oldest = max(members, key = lambda x: x['age'])\n\ndef sum_min_max(members: list):\n list_1 = []\n for i in members:\n list_1.append(i[\"age\"])\n global sum_age\n sum_age=(reduce(lambda x, y: x+y, list_1))\n\nsum_min_max(members)\n\n\n\n","sub_path":"week2/lesson2-intro-to-functional/homework/LashkoVolodymyr/task_5.py","file_name":"task_5.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"512636267","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, sampler\nfrom torchvision import datasets, transforms\nimport numpy as np\nfrom ResNet import ResNet\n\n\n# In[2]:\n\n\nclass ChunkSampler(sampler.Sampler):\n def __init__(self, num_samples, start=0):\n self.num_samples = num_samples\n self.start = start\n \n def __iter__(self):\n return iter(range(self.start, self.start+self.num_samples))\n \n def __len__(self):\n return self.num_samples\n\n\n# In[3]:\n\n\nclass train:\n ## A train class for 
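training and evaluating an image classifier (e.g. the imported ResNet) on 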
cifar-10\n    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n    def __init__(self, name, model, BATCH_SIZE = 128, LEARNING_RATE = 0.1, MOMENTUM = 0.9, WEIGHT_DECAY = 1e-4, EPOCHS = 200):\n        transform_augment = transforms.Compose([transforms.RandomHorizontalFlip(),transforms.RandomCrop(32, padding=4)])\n        transform_normalize = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n        NUM_TRAIN = 45000\n        NUM_VAL = 5000\n        trainset = datasets.CIFAR10('./data', train=True, download=False, \n                                    transform=transforms.Compose([transform_augment, transform_normalize]))\n        testset = datasets.CIFAR10('./data', train=False, download=False, transform=transform_normalize)\n        valset = datasets.CIFAR10('./data', train=True, download=False, transform=transform_normalize) \n        \n        self.train_loader = DataLoader(trainset, batch_size = BATCH_SIZE, sampler = ChunkSampler(NUM_TRAIN))\n        self.val_loader = DataLoader(valset, batch_size = BATCH_SIZE, sampler = ChunkSampler(NUM_VAL, start=NUM_TRAIN))\n        self.test_loader = DataLoader(testset, batch_size = BATCH_SIZE)\n        \n        self.name = name\n        \n        self.epoch = EPOCHS\n        self.model = model.to(self.DEVICE)\n        self.criterion = nn.CrossEntropyLoss().to(self.DEVICE)\n        self.optimizer = torch.optim.SGD(self.model.parameters(), lr = LEARNING_RATE, momentum = MOMENTUM, weight_decay = WEIGHT_DECAY)\n        self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones = [self.epoch/2,3*self.epoch/4])\n        \n        self.train_acc = []\n        self.valid_acc = []\n        \n        self.best_acc = 80\n        \n    \n    def get_param_count(self):\n        param_counts = [np.prod(p.size()) for p in self.model.parameters()]\n        return sum(param_counts)\n    \n    def check_accuracy(self, loader):\n        num_correct = 0\n        num_samples = 0\n        self.model.eval()\n        for img, label in loader:\n            img = img.to(self.DEVICE)\n            scores = self.model(img)\n            preds = torch.argmax(scores, 1).cpu()\n            num_correct += (preds == label).sum()\n            num_samples += preds.size(0)\n\n        acc = float(num_correct) / num_samples\n        print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))\n        self.valid_acc.append(acc)\n    \n    def train(self,loader):\n        num_correct = 0\n        num_samples = 0\n        self.model.train()\n        for i, (img, label) in enumerate(loader):\n            img = img.to(self.DEVICE)\n            label = label.to(self.DEVICE)\n            scores = self.model(img)\n            loss = self.criterion(scores, label)\n\n            self.optimizer.zero_grad()\n            loss.backward()\n            self.optimizer.step()\n            \n            preds = torch.argmax(scores, 1)\n            num_correct += (preds == label).sum().item()\n            num_samples += preds.size(0)\n        \n        acc = float(num_correct) / num_samples    \n        self.train_acc.append(acc)\n    \n    def save_model(self):\n        if self.valid_acc[-1] >= max(self.valid_acc):\n            self.best_acc = self.valid_acc[-1]\n            name = \"saved_model/\" + self.name + \".pkl\"\n            torch.save(self.model.state_dict(), name )\n    \n    def main(self):\n        ## train\n        for epoch in range(self.epoch):\n            print('Starting epoch %d / %d' % (epoch+1, self.epoch))\n            self.train(self.train_loader)\n            self.model.convert()\n            self.check_accuracy(self.val_loader)\n            self.model.convert()\n            self.lr_scheduler.step()\n            self.save_model()\n        \n        ## save curve\n        train_name = 'curve/' + self.name + '_train_acc'\n        valid_name = 'curve/' + self.name + '_valid_acc'\n        np.save(train_name, self.train_acc)\n        np.save(valid_name, self.valid_acc)\n        \n        ## test\n        self.model.load_state_dict(torch.load(\"saved_model/\" + self.name + \".pkl\"))\n        self.model.convert()\n        self.model = self.model.to(self.DEVICE)\n        print('Final test accuracy:')\n        
self.check_accuracy(self.test_loader)\n\n","sub_path":"MS/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"548511219","text":"import json\nimport os\nfrom datetime import datetime\n\nfrom django.core.management.base import BaseCommand\nfrom quotes.models import Quote, QuoteTags\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\nclass Command(BaseCommand):\n    \"\"\"Command that creates the quotes and tags contained in the quotes.json and quotes_tags.json files\n\n    >>> python manage.py createquotes\n    \"\"\"\n\n    def handle(self, *args, **options):\n        print(\"Creating quotes and its tags...\")\n        dir_path = os.path.join(__location__, \"../../..\", \"data\")\n\n        with open(\n            os.path.join(dir_path, \"quotes_tags.json\"),\n            encoding=\"utf-8\",\n        ) as tags_file:\n            tags = json.load(tags_file)\n            for tag in tags:\n                del tag[\"slug\"]\n                childrens = tag.pop(\"childrens\")\n                tag_obj = QuoteTags.objects.create(**tag)\n                for children in childrens:\n                    del children[\"slug\"]\n                    QuoteTags.objects.create(**children, parent=tag_obj)\n\n        with open(\n            os.path.join(dir_path, \"quotes.json\"),\n            encoding=\"utf-8\",\n        ) as quotes_file:\n            quotes = json.load(quotes_file)\n            for quote in quotes:\n                quote_tags = quote.pop(\"tags\")\n                quote_obj = Quote.objects.create(\n                    **{\n                        **quote,\n                        \"date\": datetime.strptime(quote.pop(\"date\"), \"%d/%m/%Y\"),\n                    }\n                )\n                quote_obj.tags.add(*quote_tags)  # taggit needs a pk\n                quote_obj.save()\n\n        print(\"Quotes and tags created!\")\n","sub_path":"django/quotes/management/commands/createquotes.py","file_name":"createquotes.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"265430682","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division\nimport sys\nsrci = sys.stdin\n\nn = int(srci.readline())\n\nnn = n\nbase = 1\nwhile nn >= 10:\n    nn //= 10\n    base *= 10\n\nnn = (nn+1) * base\n\nprint(\"{0}\".format(nn-n))\n","sub_path":"cf/0850/808a.py","file_name":"808a.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"558841223","text":"import numpy as np\nfrom unetdeconv import UnetDeconv\nfrom unet import Unet\nfrom oldunet import OldUnet\nfrom imageparser import ImageParser\nfrom imageaugmentator import ImageAugmentator\nfrom sklearn.model_selection import train_test_split\nimport cv2\nfrom constants import *\nimport gc\nimport os\n\nparser = ImageParser()\nutrech_dataset, singapore_dataset, amsterdam_dataset = parser.get_all_images_and_labels()\n\nt1_utrecht = [row[\"t1\"] for row in utrech_dataset]\nflair_utrecht = [row[\"flair\"] for row in utrech_dataset]\nlabels_utrecht = [row[\"label\"] for row in utrech_dataset]\nwhite_mask_utrecht = [row[\"mask\"] for row in utrech_dataset]\ndistance_utrecht = [row[\"danielsson_dist\"] for row in utrech_dataset]\n\nt1_singapore = [row[\"t1\"] for row in singapore_dataset]\nflair_singapore = [row[\"flair\"] for row in singapore_dataset]\nlabels_singapore = [row[\"label\"] for row in singapore_dataset]\nwhite_mask_singapore = [row[\"mask\"] for row in singapore_dataset]\ndistance_singapore = [row[\"danielsson_dist\"] for row in singapore_dataset]\n\nt1_amsterdam = [row[\"t1\"] for row in amsterdam_dataset]\nflair_amsterdam = [row[\"flair\"] for row in 
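# each *_dataset row is a dict keyed by modality: \"t1\", \"flair\", \"label\", \"mask\", \"danielsson_dist\"\n                   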
amsterdam_dataset]\nlabels_amsterdam = [row[\"label\"] for row in amsterdam_dataset]\nwhite_mask_amsterdam = [row[\"mask\"] for row in amsterdam_dataset]\ndistance_amsterdam = [row[\"danielsson_dist\"] for row in amsterdam_dataset]\n\nslice_shape = SLICE_SHAPE\n\nprint('Utrecht: ', len(t1_utrecht), len(flair_utrecht), len(labels_utrecht))\nprint('Singapore: ', len(t1_singapore), len(flair_singapore), len(labels_singapore))\nprint('Amsterdam: ', len(t1_amsterdam), len(flair_amsterdam), len(labels_amsterdam))\n\n\n'''\n\nT1 DATA\n\n'''\nutrecht_data_t1 = parser.get_all_images_np_twod(t1_utrecht)\nutrecht_resized_t1 = parser.resize_slices(utrecht_data_t1, slice_shape)\nutrecht_resized_t1 = parser.remove_top_bot_slices(utrecht_resized_t1, UTRECH_N_SLICES)\nutrecht_normalized_t1 = parser.normalize_minmax(utrecht_resized_t1,\n UTRECH_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\ndel utrecht_data_t1, utrecht_resized_t1, t1_utrecht\n\nsingapore_data_t1 = parser.get_all_images_np_twod(t1_singapore)\nsingapore_resized_t1 = parser.resize_slices(singapore_data_t1, slice_shape)\nsingapore_resized_t1 = parser.remove_top_bot_slices(singapore_resized_t1, SINGAPORE_N_SLICES)\nsingapore_normalized_t1 = parser.normalize_minmax(singapore_resized_t1,\n SINGAPORE_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\ndel singapore_data_t1, singapore_resized_t1, t1_singapore\n\namsterdam_data_t1 = parser.get_all_images_np_twod(t1_amsterdam)\namsterdam_resized_t1 = parser.resize_slices(amsterdam_data_t1, slice_shape)\namsterdam_resized_t1 = parser.remove_top_bot_slices(amsterdam_resized_t1, AMSTERDAM_N_SLICES)\namsterdam_normalized_t1 = parser.normalize_minmax(amsterdam_resized_t1,\n AMSTERDAM_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\ndel amsterdam_data_t1, amsterdam_resized_t1, t1_amsterdam\n\n\"\"\"\n\nLABELS DATA\n\n\"\"\"\n\nlabels_utrecht_imgs = parser.get_all_images_np_twod(labels_utrecht)\nlabels_singapore_imgs = parser.get_all_images_np_twod(labels_singapore)\nlabels_amsterdam_imgs = parser.get_all_images_np_twod(labels_amsterdam)\n\nlabels_utrecht_resized = parser.resize_slices(labels_utrecht_imgs, slice_shape)\nlabels_singapore_resized = parser.resize_slices(labels_singapore_imgs, slice_shape)\nlabels_amsterdam_resized = parser.resize_slices(labels_amsterdam_imgs, slice_shape)\n\nlabels_utrecht_resized = parser.remove_third_label(labels_utrecht_resized)\nlabels_singapore_resized = parser.remove_third_label(labels_singapore_resized)\nlabels_amsterdam_resized = parser.remove_third_label(labels_amsterdam_resized)\n\ndel labels_utrecht_imgs, labels_singapore_imgs, labels_amsterdam_imgs\n\nlabels_utrecht_resized = parser.remove_top_bot_slices(labels_utrecht_resized, UTRECH_N_SLICES)\nlabels_singapore_resized = parser.remove_top_bot_slices(labels_singapore_resized, SINGAPORE_N_SLICES)\nlabels_amsterdam_resized = parser.remove_top_bot_slices(labels_amsterdam_resized, AMSTERDAM_N_SLICES)\n\nfinal_label_imgs = np.concatenate([labels_utrecht_resized,\n labels_singapore_resized,\n labels_amsterdam_resized], axis=0)\nfinal_label_imgs = np.expand_dims(np.asanyarray(final_label_imgs), axis=3)\n\n'''\n\nFLAIR DATA\n\n'''\n\nutrecht_data_flair = parser.get_all_images_np_twod(flair_utrecht)\nutrecht_resized_flairs = parser.resize_slices(utrecht_data_flair, slice_shape)\nutrecht_resized_flairs = parser.remove_top_bot_slices(utrecht_resized_flairs, UTRECH_N_SLICES)\nutrecht_normalized_flairs = parser.normalize_quantile(utrecht_resized_flairs, labels_utrecht_resized,\n UTRECH_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\ndel utrecht_data_flair, 
utrecht_resized_flairs, flair_utrecht\n\nsingapore_data_flair = parser.get_all_images_np_twod(flair_singapore)\nsingapore_resized_flairs = parser.resize_slices(singapore_data_flair, slice_shape)\nsingapore_resized_flairs = parser.remove_top_bot_slices(singapore_resized_flairs, SINGAPORE_N_SLICES)\nsingapore_normalized_flairs = parser.normalize_quantile(singapore_resized_flairs, labels_singapore_resized,\n SINGAPORE_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\ndel singapore_data_flair, singapore_resized_flairs, flair_singapore\n\namsterdam_data_flair = parser.get_all_images_np_twod(flair_amsterdam)\namsterdam_resized_flairs = parser.resize_slices(amsterdam_data_flair, slice_shape)\namsterdam_resized_flairs = parser.remove_top_bot_slices(amsterdam_resized_flairs, AMSTERDAM_N_SLICES)\namsterdam_normalized_flairs = parser.normalize_quantile(amsterdam_resized_flairs, labels_amsterdam_resized,\n AMSTERDAM_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\n\ndel amsterdam_data_flair, amsterdam_resized_flairs, flair_amsterdam\ndel labels_utrecht_resized, labels_singapore_resized, labels_amsterdam_resized\n\n\"\"\"\n\nDISTANCES\n\n\"\"\"\nutrecht_distances = parser.get_all_images_np_twod(distance_utrecht)\nutrecht_resized_dist = parser.resize_slices(utrecht_distances, slice_shape)\nutrecht_resized_dist = parser.remove_top_bot_slices(utrecht_resized_dist, UTRECH_N_SLICES)\nutrecht_normalized_dist = parser.normalize_minmax(utrecht_resized_dist,\n UTRECH_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\n\nsingapore_distances = parser.get_all_images_np_twod(distance_singapore)\nsingapore_resized_dist = parser.resize_slices(singapore_distances, slice_shape)\nsingapore_resized_dist = parser.remove_top_bot_slices(singapore_resized_dist, SINGAPORE_N_SLICES)\nsingapore_normalized_dist = parser.normalize_minmax(singapore_resized_dist,\n SINGAPORE_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\n\namsterdam_distances = parser.get_all_images_np_twod(distance_amsterdam)\namsterdam_resized_dist = parser.resize_slices(amsterdam_distances, slice_shape)\namsterdam_resized_dist = parser.remove_top_bot_slices(amsterdam_resized_dist, AMSTERDAM_N_SLICES)\namsterdam_normalized_dist = parser.normalize_minmax(amsterdam_resized_dist,\n AMSTERDAM_N_SLICES - REMOVE_TOP - REMOVE_BOT)\n\n'''\n\nDATA CONCAT\n\n'''\n\nnormalized_t1 = np.concatenate([utrecht_normalized_t1,\n singapore_normalized_t1,\n amsterdam_normalized_t1], axis=0)\nnormalized_flairs = np.concatenate([utrecht_normalized_flairs,\n singapore_normalized_flairs,\n amsterdam_normalized_flairs], axis=0)\n\ndistances = np.concatenate([utrecht_normalized_dist,\n singapore_normalized_dist,\n amsterdam_normalized_dist], axis=0)\n\ndel utrecht_normalized_t1, singapore_normalized_t1, amsterdam_normalized_t1\ndel utrecht_normalized_flairs, singapore_normalized_flairs, amsterdam_normalized_flairs\n\ndata_t1 = np.expand_dims(np.asanyarray(normalized_t1), axis=3)\ndata_flair = np.expand_dims(np.asanyarray(normalized_flairs), axis=3)\ndata_distances = np.expand_dims(distances, axis=3)\nall_data = np.concatenate([data_t1, data_flair, data_distances], axis=3)\n\n\n\ndel data_t1, data_flair, data_distances\n\ngc.collect()\n\n'''\n\nAUGMENTATION\n\n'''\ndata_train, test_data, labels_train, test_labels = train_test_split(all_data, final_label_imgs,\n test_size=0.15, random_state=42)\n\ndata_train, validation_data, labels_train, validation_labels = train_test_split(data_train, labels_train,\n test_size=0.04, random_state=42)\n\naugmentator = ImageAugmentator()\ndata_augmented, labels_agumented = 
augmentator.perform_all_augmentations(data_train, labels_train)\n\ndata_train = np.asanyarray(data_augmented)\nlabels_train = np.asanyarray(labels_agumented)\ndel data_augmented, labels_agumented\n'''\n\nTRAINING\n\n'''\ngc.collect()\ntraining_name = 'test_new_deconv'\nbase_path = os.getcwd()\n\nprint(data_train.shape, labels_train.shape, test_data.shape, test_labels.shape)\nunet = OldUnet(img_shape=data_train.shape[1:])\nunet.train(data_train, labels_train, (test_data, test_labels), training_name, base_path, epochs=1, batch_size=30)\n\n'''\n\nVALIDATING\n\n'''\nvalidation_data = np.asanyarray(validation_data)\nvalidation_labels = np.asanyarray(validation_labels)\nunet.predict_and_save(validation_data, validation_labels)\n\n\"\"\"\n\nVISUALIZING\n\n\"\"\"\ndel data_train, labels_train, training_name\ngc.collect()\n#unet.save_visualize_activations(validation_data, validation_labels, batch_size=30)\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":9740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"247464582","text":"from model import Guest, Party, session\nimport csv\n\ndef load_guests(session):\n with open('seed_data/guests.csv', 'rU') as csvfile:\n invite_reader = csv.reader(csvfile, delimiter=',')\n next(invite_reader, None)\n for guest_row in invite_reader:\n guest = Guest()\n # guest.id = guest_row[0]\n guest.party_id = guest_row[1]\n guest.last_name = guest_row[2]\n guest.first_name = guest_row[3]\n guest.side = guest_row[4]\n guest.grouping = guest_row[5]\n guest.priority = guest_row[6]\n guest.probability = guest_row[7]\n guest.gender = guest_row[8]\n # guest.guest_type = guest_row[9]\n\n session.add(guest)\n\ndef load_parties(session):\n with open('seed_data/parties.csv', 'rU') as csvfile:\n party_reader = csv.reader(csvfile, delimiter=',')\n next(party_reader, None)\n for party_row in party_reader:\n party = Party()\n # party.id = party_row[0]\n party.side = party_row[1]\n party.grouping = party_row[2]\n party.addr_1 = party_row[3]\n party.addr_2 = party_row[4]\n party.city = party_row[5]\n party.state = party_row[6]\n party.zipcode = party_row[7]\n party.country = party_row[8]\n\n session.add(party)\n\ndef main(session):\n load_guests(session)\n load_parties(session)\n\n session.commit()\n\nif __name__ == \"__main__\":\n # s = newmod.connect()\n main(session)","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360888717","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom ttkbootstrap import Style\n\nDARK = 'superhero'\nLIGHT = 'flatly'\n\ndef create_radiobutton_test(widget_style, style):\n frame = ttk.Frame(padding=10)\n\n # title\n title = ttk.Label(frame, text=widget_style, anchor=tk.CENTER)\n title.pack(padx=5, pady=2, fill=tk.BOTH)\n ttk.Separator(frame).pack(padx=5, pady=5, fill=tk.X)\n\n # default style\n cb = ttk.Radiobutton(frame, text=widget_style, style=widget_style)\n cb.pack(padx=5, pady=5, fill=tk.BOTH)\n cb.invoke()\n\n # color styles\n for color in style.theme.colors:\n cb_style = f'{color}.{widget_style}'\n cb = ttk.Radiobutton(frame, text=cb_style, style=cb_style)\n cb.pack(padx=5, pady=5, fill=tk.BOTH)\n cb.invoke()\n\n # disabled style\n cb = ttk.Radiobutton(frame, text=widget_style, style=widget_style,\n state=tk.DISABLED)\n cb.pack(padx=5, pady=5, fill=tk.BOTH)\n cb.invoke()\n\n return frame\n\n\nif __name__ == 
'__main__':\n    # create visual widget style tests\n    root = tk.Tk()\n    style = Style(theme=DARK)\n\n    test1 = create_radiobutton_test('TRadiobutton', style)\n    test1.pack(side=tk.LEFT, fill=tk.BOTH)\n    test2 = create_radiobutton_test('Roundtoggle.Toolbutton', style)\n    test2.pack(side=tk.LEFT, fill=tk.BOTH)\n    test3 = create_radiobutton_test('Squaretoggle.Toolbutton', style)\n    test3.pack(side=tk.LEFT, fill=tk.BOTH)\n    test4 = create_radiobutton_test('Toolbutton', style)\n    test4.pack(side=tk.LEFT, fill=tk.BOTH)\n    test5 = create_radiobutton_test('Outline.Toolbutton', style)\n    test5.pack(side=tk.LEFT, fill=tk.BOTH)\n\n    root.mainloop()","sub_path":"tests/test_radiobutton.py","file_name":"test_radiobutton.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"62031135","text":"from urllib.parse import urljoin\nfrom qiniu import Auth,put_file\n\nfrom swiper import config\n\n\ndef qn_upload(filename,filepath):\n    '''Upload a file to Qiniu Cloud'''\n    # build the auth object\n    qn = Auth(config.QN_ACCESS_KEY,config.QN_SECRET_KEY)\n    # generate an upload token, valid for 1 hour\n    token = qn.upload_token(config.QN_BUCKET,filename,3600)\n    # upload the file\n    ret,info = put_file(token,filename,filepath)\n\n    if info.ok():\n        url = urljoin(config.QN_BASEURL,filename)\n        return True,url\n    else:\n        return False,''","sub_path":"libs/qncloud.py","file_name":"qncloud.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"253890573","text":"# -*- coding: utf-8 -*-\nimport mxnet.ndarray as nd\nfrom mxnet.base import numeric_types\nfrom mxnet.recordio import RecordIOType\nfrom recordio_pb2 import RecordHead, RecordData, RecordUnit, ExtraData\ntry:\n    import cv2\nexcept ImportError:\n    cv2 = None\n\ndef pack_head(id, reserve=0, version=1.0):\n    \"\"\" pack a head info into RecordHead format\n\n    Parameters\n    ----------\n    id : int\n        record id.\n    reserve : int\n        record reserve info\n    version : float\n        version of data structure\n\n    Returns\n    -------\n    RecordHead\n        The head format in recordio.proto.\n    \"\"\"\n\n    head = RecordHead()\n    head.id = id\n    head.reserve = reserve\n    head.version = version\n    return head\n\ndef pack_ndarray_data(id, data):\n    \"\"\" pack a NDArray into RecordData\n\n    Parameters\n    ----------\n    id : int\n        record id.\n    data : NDArray\n        ndarray data.\n\n    Returns\n    -------\n    RecordData\n        The data format in recordio.proto.\n    \"\"\"\n    rec_data = RecordData()\n    rec_data.id = id\n    rec_data.type = RecordIOType.NDARRAY\n    rec_data.value = nd.save_to_str(data)\n    return rec_data\n\ndef pack_string_data(id, data, is_binary=True):\n    \"\"\" pack a string data info into RecordData\n\n    Parameters\n    ----------\n    id : int\n        record id.\n    data : str\n        string data.\n    is_binary : bool\n        whether save the data as binary,\n        NDArray format data will read in RecordIter when using binary,\n\n    Returns\n    -------\n    RecordData\n        The data format in recordio.proto.\n    \"\"\"\n\n    rec_data = RecordData()\n    rec_data.id = id\n    rec_data.type = RecordIOType.BINARY if is_binary else RecordIOType.STRING\n    rec_data.value = data\n    return rec_data\n\ndef pack_ndarray_extra(key, data):\n    \"\"\" pack a NDArray into ExtraData\n\n    Parameters\n    ----------\n    key : str\n        extra data key.\n    data : NDArray\n        ndarray data.\n\n    Returns\n    -------\n    ExtraData\n        The extra data format in recordio.proto.\n    \"\"\"\n    rec_data = ExtraData()\n    rec_data.key = key\n    rec_data.type = RecordIOType.NDARRAY\n    rec_data.value = nd.save_to_str(data)\n    return rec_data\n\ndef 
pack_string_extra(key, data, is_binary=True):\n \"\"\" pack a string extra data info into RecordData\n\n Parameters\n ----------\n id : int\n record id.\n data : str\n string data.\n is_binary : bool\n whether save the data as binary,\n NDArray format data will read in RecordIter when using binary,\n\n Returns\n -------\n RecordData\n The data format in recordio.proto.\n \"\"\"\n\n\n rec_data = ExtraData()\n rec_data.key = key\n rec_data.type = RecordIOType.BINARY if is_binary else RecordIOType.STRING\n rec_data.value = data\n return rec_data\n\ndef pack(header, data, label, extra=[]):\n \"\"\" pack a data into string for MXRecordIO\n\n Parameters\n ----------\n header : RecordHead\n Header of the image record.\n data : RecordData or list of RecordData\n record data list\n label : float or list of float\n label list\n extra : ExtraData or list of ExtraData\n\n Returns\n -------\n s : str\n The packed string.\n\n Examples\n --------\n >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]\n >>> id = 2574\n >>> header = recordio_pb_pack.pack_head(id)\n >>> with open(path, 'r') as file:\n ... s = file.read()\n >>> packed_s = recordio_pb_pack.pack(header, s, label)\n \"\"\"\n if isinstance(data, list):\n for d in data:\n if not isinstance(d, RecordData):\n raise TypeError('type %s not supported' % str(type(d)))\n elif isinstance(data, RecordData):\n data = [data]\n else:\n raise TypeError('type %s not supported' % str(type(data)))\n\n if isinstance(label, list):\n for l in label:\n if not isinstance(l, numeric_types):\n raise TypeError('type %s not supported' % str(type(l)))\n elif isinstance(label, numeric_types):\n label = [label]\n else:\n raise TypeError('type %s not supported' % str(type(label)))\n\n if isinstance(extra, list):\n for e in extra:\n if not isinstance(e, ExtraData):\n raise TypeError('type %s not supported' % str(type(e)))\n elif isinstance(extra, ExtraData):\n extra = [extra]\n else:\n raise TypeError('type %s not supported' % str(type(extra)))\n\n rec_unit = RecordUnit()\n rec_unit.head.CopyFrom(header)\n\n # body\n rec_body = rec_unit.body\n for d in data:\n rec_data = rec_body.data.add()\n rec_data.CopyFrom(d)\n for l in label:\n rec_body.label.append(l)\n for e in extra:\n ed = rec_body.extra.add()\n ed.CopyFrom(e)\n return rec_unit.SerializeToString()\n\ndef pack_img_data(id, img, quality=95, img_fmt='.jpg'):\n \"\"\"Pack an image into string.\n\n Parameters\n ----------\n id : uint64\n id of the image record.\n img : numpy.ndarray\n Image to be packed.\n quality : int\n Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.\n img_fmt : str\n Encoding of the image (.jpg for JPEG, .png for PNG).\n\n Returns\n -------\n s : str\n The packed string.\n\n Examples\n --------\n >>> id = 2574\n >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]\n >>> header = recordio_pb_pack.pack_head(id)\n >>> img = cv2.imread('test.jpg')\n >>> packed_img = recordio_pb_pack.pack_img_data(id, img)\n >>> packed_s = recordio_pb_pack.pack(header, packed_img, label)\n \"\"\"\n assert cv2 is not None\n jpg_formats = ['.JPG', '.JPEG']\n png_formats = ['.PNG']\n encode_params = None\n if img_fmt.upper() in jpg_formats:\n encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]\n elif img_fmt.upper() in png_formats:\n encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]\n\n ret, buf = cv2.imencode(img_fmt, img, encode_params)\n assert ret, 'failed to encode image'\n return pack_string_data(id, 
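# note: ndarray.tostring() is deprecated in newer NumPy versions; tobytes() is the modern equivalent\n                           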
buf.tostring())\n","sub_path":"tools/recordio_pb_pack.py","file_name":"recordio_pb_pack.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354769177","text":"from GJEMS.ephys.rawDataAnalyse import RawDataAnalyser\nfrom scipy.signal import iirfilter, freqz, lfilter, kaiserord, firwin\nimport numpy as np\nfrom neo import AnalogSignal\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.optimize import minimize, basinhopping\nplt.ion()\nimport os\nimport quantities as qu\nimport time\nimport json\nimport operator\nimport subprocess\n\n# enable_debugging = True\nenable_debugging = False\n\nif enable_debugging:\n import ipdb\n\n\nmplPars = { 'text.usetex' : True,\n 'axes.labelsize' : 'large',\n 'font.family' : 'serif',\n 'font.sans-serif' : 'computer modern roman',\n }\n\nfor a, b in mplPars.items():\n plt.rcParams[a] = b\n\n\ndef LPFilterKaiser(signal, cutoff=100, transitionWidth=40, rippleDB=20):\n\n cutoff *= qu.Hz\n nyqFreq = signal.sampling_rate / 2\n\n\n transitionWidth = transitionWidth * qu.Hz\n\n N, beta = kaiserord(rippleDB, transitionWidth / nyqFreq)\n\n tapsLP = firwin(N, cutoff/nyqFreq, window=('kaiser', beta))\n\n delay = (N - 1) * 0.5 * signal.sampling_period\n\n\n filteredSignal = AnalogSignal(\n signal=lfilter(tapsLP, 1.0, signal.magnitude),\n sampling_rate=signal.sampling_rate,\n units=signal.units,\n t_start=signal.t_start - delay\n )\n\n\n return delay, filteredSignal\n\n\ndef HPFilterKaiser(signal, cutoff=10, transitionWidth=40, rippleDB=20):\n\n cutoff *= qu.Hz\n nyqFreq = signal.sampling_rate / 2\n\n transitionWidth = transitionWidth * qu.Hz\n\n N, beta = kaiserord(rippleDB, transitionWidth / nyqFreq)\n\n tapsLP = firwin(N, cutoff/nyqFreq, window=('kaiser', beta))\n\n temp = np.zeros((N,))\n temp[(N - 1) / 2] = 1\n tapsHP = temp - tapsLP\n\n delay = (N - 1) * 0.5 * signal.sampling_period\n\n\n filteredSignal = AnalogSignal(\n signal=lfilter(tapsHP, 1.0, signal.magnitude),\n sampling_rate=signal.sampling_rate,\n units=signal.units,\n t_start=signal.t_start - delay\n )\n\n\n return delay, filteredSignal\n\n\ndef getNoiseVar(resp):\n\n temp = resp - np.median(resp)\n return np.median(np.abs(temp)) / 0.6745\n\n\ndef getSpikeAmps(resp, spikeTimes):\n\n spikeInds = map(int, (spikeTimes - resp.t_start) * resp.sampling_rate)\n return resp[spikeInds]\n\n\n\ndef doubleExpFun(x, Ar, Ad, t0, itaur, itaud):\n\n expd = Ad * np.exp(-itaud * (x - t0))\n expr = Ar * np.exp(-itaur * (x - t0))\n\n doubleExp = expd - expr\n doubleExp[x < t0] = (Ad - Ar)\n\n return doubleExp\n\n\n\n\n\ndef threeDoubleExps(x, Ar1, Ad1, Ar2, Ad2, Ar3, Ad3, t1, t2, t3, itaur1, itaud1, itaur2, itaud2, itaur3, itaud3):\n\n if np.any(np.array([Ar1, Ad1, Ar2, Ad2, Ar3, Ad3, itaur1, itaud1, itaur2, itaud2, itaur3, itaud3]) < 0):\n\n return np.ones_like(x) * 100\n #\n # elif (Ad1 < Ar1) or (Ad2 > Ar2) or (Ad3 > Ar3):\n # return np.ones_like(x) * 100\n\n return doubleExpFun(x, Ar1, Ad1, t1, itaur1, itaud1) \\\n + doubleExpFun(x, Ar2, Ad2, t2, itaur2, itaud2) \\\n + doubleExpFun(x, Ar3, Ad3, t3, itaur3, itaud3)\n\ndef accept_test(f_new, x_new, f_old, x_old):\n\n [Ar1, Ad1, Ar2, Ad2, Ar3, Ad3, t1, t2, t3, itaur1, itaud1, itaur2, itaud2, itaur3, itaud3] = x_new\n\n isPos = lambda x: x > 0\n\n logicalAnd = lambda x, y: x & y\n\n areAllPos = reduce(logicalAnd,\n map(isPos, [Ar1, Ad1, Ar2, Ad2, Ar3, Ad3, itaur1, itaud1, itaur2, itaud2, itaur3, itaud3]))\n\n areTimesAcceptable = 
reduce(logicalAnd, map(lambda x: -5 < x < 10, [t1, t2, t3]))\n\n\n\n decision = bool(areAllPos\n and (itaud2 < itaur2) and (itaud3 < itaur3)\n and (t1 <= t2) and (t2 <= t3)\n and (itaud1 > 0.3) and (itaud2 > 0.3) and (itaur1 > 0.3)\n )\n if decision:\n print(f_new, decision)\n return decision\n\n\ndef getLocalMaxima(a):\n a = np.array(a)\n b = np.arange(a.shape[0])\n c = np.hstack((False, a[1:] > a[:-1])) & np.hstack((a[:-1] > a[1:], False))\n return b[c], a[c]\n\n\n\n\n# expName = '130705-1LY'\nexpName = '130425-1Al'\n\n\n\ndirpath = '/home/ajay/DataAndResults/GJEphys/NIXFiles/'\nresDir = '/home/ajay/DataAndResults/GJEphys/OPExcitationFitting/'\ntoIgnoreFile = '/home/ajay/DataAndResults/GJEphys/NIXFiles/toIgnore.json'\ntempParFile = '/home/ajay/DataAndResults/GJEphys/OPExcitationFitting/tempParsFreq.json'\nwith open(toIgnoreFile, 'r') as fle:\n toIgnore = json.load(fle)\n\nTs = 4.8e-5 * qu.s\n\ntraceLength = 1\nnPts = int(traceLength / Ts)\n\ntraceStart = 0 * qu.s\n\n# freqs = [100]\n# freqs = [200]\n# freqs = [265]\n# freqs = [300]\n# freqs = [400]\nfreqs = [100, 200, 265, 300, 400]\n\n\ntypes = ['BeforeStimulus', 'DuringStimulus', 'AfterStimulus']\ntypeDurs = [3 * qu.s, 1 * qu.s, 3 * qu.s]\n\nprint('Doing ' + expName)\nfor freq in freqs:\n print('Doing ' + str(freq) + 'Hz')\n\n\n intervalToIgnore = None\n if expName in toIgnore:\n intervalToIgnore = toIgnore[expName]\n\n def shouldBeIgnored(resp):\n\n if intervalToIgnore is None:\n return False\n else:\n respInIgnoreIntervals = [(x * qu.s <= resp.t_start <= y * qu.s) | (x * qu.s <= resp.t_stop <= y * qu.s)\n for x, y in intervalToIgnore]\n return reduce(operator.or_, respInIgnoreIntervals)\n\n rda = RawDataAnalyser(expName, dirpath)\n\n resps = rda.getContResps(types=types, freqs=freqs)\n\n normedFilteredSigs = []\n\n refSig = resps[freq][0]['DuringStimulus']\n refSigCentered = refSig - np.median(refSig)\n\n for resp in resps[freq]:\n\n\n\n respNormedSigs = []\n\n for typeInd, tpye in enumerate(types):\n\n temp = resp[tpye]\n if shouldBeIgnored(temp):\n break\n temp1 = temp.magnitude\n centeredSig = temp1 - np.median(temp1)\n typeLen = int(typeDurs[typeInd] * temp.sampling_rate)\n presSigScaled = np.zeros((typeLen, ))\n sigLen = min(typeLen, temp.shape[0])\n tempNoiseVar = getNoiseVar(temp)\n\n if tpye == 'BeforeStimulus':\n\n presSigScaled[-sigLen:] = centeredSig[-sigLen:]\n\n else:\n presSigScaled[:sigLen] = centeredSig[:sigLen]\n\n presSigScaled[presSigScaled > 50 * tempNoiseVar] = 50 * tempNoiseVar\n presSigScaled[presSigScaled < -50 * tempNoiseVar] = -50 * tempNoiseVar\n\n respNormedSigs += [presSigScaled]\n\n if len(respNormedSigs) == 3:\n\n respFB = AnalogSignal(signal=np.concatenate(respNormedSigs),\n units=qu.dimensionless,\n sampling_rate=temp.sampling_rate,\n t_start=-3 * qu.s,\n )\n normedFilteredSigs.append(respFB)\n\n\n avgSig = reduce(lambda x, y: (x + y), normedFilteredSigs) / len(normedFilteredSigs)\n delay, avgSigLP = LPFilterKaiser(avgSig, cutoff=15, transitionWidth=10, rippleDB=60)\n\n # fig, ax = plt.subplots(figsize=(10, 8))\n # ax.plot(avgSig.times, avgSig, 'b')\n # ax.plot(avgSigLP.times, avgSigLP, 'r')\n # ax.legend(['Sig', 'SigLP'])\n\n\n delayms = delay.copy()\n delayms.units = qu.ms\n delayms = delayms.magnitude\n print(delayms)\n\n def twoDoubleExps(x, A1, A2, t1, t2, taur1, taud1, taur2, taud2):\n\n d1 = doubleExpFun(x, A1, A1, t1, 1 / taur1, 1 / taud1)\n d2 = doubleExpFun(x, A2, A2, t2, 1 / taur2, 1 / taud2)\n\n # if np.any(np.array([A1, A2, t1, t2, itaur1, itaud1, itaur2, itaud2]) < 0)\\\n # or 
np.any(np.array([t1, t2]) < delayms - 10):\n #\n # return np.ones_like(x) * 100\n # to avoid inverted double exponential for the lower delay one\n # elif (Ad1 < Ar1):\n # return np.zeros_like(x)\n # else:\n return d1 - d2\n\n\n signal2FitFull = avgSigLP\n sigStart = traceStart - delay\n totalPoints = nPts + int(delay * signal2FitFull.sampling_rate)\n t = sigStart + np.arange(totalPoints) * signal2FitFull.sampling_period\n fitStart = int((sigStart - signal2FitFull.t_start) * signal2FitFull.sampling_rate)\n\n # fig, ax = plt.subplots(figsize=(10, 8))\n # ax.plot(avgSig.times, avgSig, 'b')\n # ax.plot(avgSigLP.times, avgSigLP, 'r')\n # ax.legend(['Sig', 'SigLP'])\n # ax.set_title(str(freq))\n #\n # fig.canvas.draw()\n\n\n baseLine = np.concatenate((signal2FitFull[0:fitStart], signal2FitFull[fitStart + totalPoints + 1:]), axis=1).mean()\n signal2Fit = signal2FitFull[fitStart: fitStart + totalPoints + 1] - baseLine\n print(baseLine)\n xData = (signal2Fit.times - signal2Fit.t_start).magnitude * 1e3\n yData = signal2Fit.magnitude\n\n f = lambda x: np.concatenate((np.random.rand(2) * 10,\n np.random.rand(2) * 100 + delayms - 50,\n np.random.rand(4) * 50 + 50),\n axis=1).tolist()\n p0s = map(f, range(10))\n outFile = os.path.join(resDir, expName + str(int(freq)) + '.npz')\n\n with open(tempParFile, 'w') as fle:\n json.dump({'p0s': p0s, 'outFile': outFile, 'xData': xData.tolist(), 'yData': yData.tolist(),\n 'filterDelay': float(delayms)}, fle)\n\n subprocess.call(['python', 'scripts/fitDoubleExp.py', tempParFile])\n\n with open(outFile, 'r') as fle:\n pOpt = json.load(fle)\n\n [A1, A2, t1, t2, taur1, taud1, taur2, taud2] = pOpt\n print(np.round([A1, A2], 4).T)\n print(np.round([t1, t2], 4).T)\n print(np.round([taur1, taud1, taur2, taud2], 4).T)\n fitSignal = twoDoubleExps(xData, *pOpt)\n\n if t2 > t1:\n doubleExp1 = doubleExpFun(xData, A1, A1, t1, 1 / taur1, 1 / taud1)\n doubleExp2 = -doubleExpFun(xData, A2, A2, t2, 1 / taur2, 1 / taud2)\n\n\n else:\n doubleExp2 = doubleExpFun(xData, A1, A1, t1, 1 / taur1, 1 / taud1)\n doubleExp1 = -doubleExpFun(xData, A2, A2, t2, 1 / taur2, 1 / taud2)\n\n\n print(fitSignal.max(), fitSignal.min())\n print(doubleExp1.max(), doubleExp2.min())\n\n # fig1, ax1 = plt.subplots(nrows=2, ncols=1, figsize=(10, 16))\n # # ax1.plot((centeredSignal.times - centeredSignal.t_start) * 1e3, centeredSignal, 'r', label='raw')\n # ax1[0].plot((signal2Fit.times - signal2Fit.t_start) * 1e3, signal2Fit, 'g', label='filtered')\n # ax1[0].plot(xData, fitSignal, 'b', label='fit')\n #\n # ax1[0].legend()\n #\n # ax1[1].plot(xData, fitSignal, 'b', label='fit')\n # ax1[1].plot(xData, doubleExp2, 'r', label='doubleExp2')\n # ax1[1].plot(xData, doubleExp1, 'c', label='doubleExp1')\n # ax1[1].legend()\n # ax1[0].set_title(str(freq))\n\n\n\n # fig1.canvas.draw()\n # raw_input()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/undocumentedTemp/OPExcitationVsFreq.py","file_name":"OPExcitationVsFreq.py","file_ext":"py","file_size_in_byte":10985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"89767955","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def __repr__(self):\n return f\"Node({self.data})\"\n\n\nclass SinglyLinkedList:\n def __init__(self):\n self.head = None\n\n def __iter__(self):\n node = self.head\n while node:\n yield node.data\n node = node.next\n\n def __len__(self) -> int:\n return len(tuple(iter(self)))\n\n def __repr__(self):\n return \"->\".join([str(item) for item in self])\n\n 
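# Illustrative usage (a sketch, not part of the original file):\n    #   sll = SinglyLinkedList(); sll.insert_tail(\"a\"); sll.insert_tail(\"b\")\n    #   sll[1] yields \"b\" (indexing returns the stored data), and str(sll) gives \"a->b\"\n\n    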
def __getitem__(self, idx):\n # Indexing Support. Used to get a node at particular position\n if not 0 <= idx < len(self):\n raise ValueError(\"list index out of range.\")\n for i, node in enumerate(self):\n if i == idx:\n return node\n\n def __setitem__(self, idx, data):\n # Used to change the data of a particular node\n if not 0 <= idx < len(self):\n raise ValueError(\"list index out of range.\")\n current = self.head\n for _ in range(idx):\n current = current.next\n current.data = data\n\n def insert_head(self, data) -> None:\n self.insert_nth(0, data)\n\n def insert_tail(self, data) -> None:\n self.insert_nth(len(self), data)\n\n def insert_nth(self, idx: int, data) -> None:\n if not 0 <= idx <= len(self):\n raise ValueError(\"List index out of range.\")\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n elif idx == 0:\n new_node.next = self.head\n self.head = new_node\n else:\n temp = self.head\n for _ in range(idx - 1):\n temp = temp.next\n new_node.next = temp.next\n temp.next = new_node\n\n def print_list(self) -> None:\n print(self)\n\n def delete_head(self):\n return print(self.delete_nth(0))\n\n def delete_tail(self):\n return print(self.delete_nth(len(self) - 1))\n\n def delete_nth(self, idx: int = 0):\n if not 0 <= idx <= len(self) - 1:\n raise IndexError(\"list index out of range.\")\n delete_node = self.head\n\n if idx == 0:\n self.head = self.head.next\n else:\n temp = self.head\n for _ in range(idx - 1):\n temp = temp.next\n delete_node = temp.next\n temp.next = temp.next.next\n return delete_node.data\n\n def is_empty(self) -> bool:\n return self.head is None\n\n def reverse(self):\n prev = None\n current = self.head\n\n while current:\n next_node = current.next\n current.next = prev\n prev = current\n current = next_node\n self.head = prev\n\n\nif __name__ == \"__main__\":\n\n linked_list = SinglyLinkedList()\n linked_list.insert_head(input(\"Inserting 1st at head \").split())\n linked_list.insert_head(input(\"Inserting 2nd at head \").split())\n print(\"\\nPrint list: \")\n linked_list.print_list()\n\n linked_list.insert_tail(input(\"Inserting 1st tail \").strip())\n linked_list.insert_tail(input(\"Inserting 2nd at tail \").strip())\n print(\"\\nPrint list: \")\n linked_list.print_list()\n\n linked_list.insert_nth(3, input(\"Inserting nth at position \").split())\n print(\"\\nPrint list: \")\n linked_list.print_list()\n\n print(\"\\nDelete head \")\n linked_list.delete_head()\n\n print('\\nDelete tail ')\n linked_list.delete_tail()\n print('\\nPrint linked list ')\n linked_list.print_list()\n\n print('\\nReverse linked list ')\n linked_list.reverse()\n linked_list.print_list()\n\n print('\\nString representation of linked list: ')\n print(linked_list)\n\n print('\\nReading /changing Node data using indexing: ')\n print(f\"Element at Position 1: {linked_list[1]}\")\n linked_list[1] = input(\"Enter New Value: \").strip()\n print(\"New List: \")\n print(linked_list)\n\n print(f\"Length of linked list is: {len(linked_list)}\")\n","sub_path":"Data Structure/Linked list/Singly Linked List/implementation/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297771435","text":"\nimport time\n\nimport cv2\nimport numpy as np\n\nimport CLLibrary as cl\n\nclass SpeedCalculator(object):\n\tdef __init__(self):\n\t\tself._ctrArr = []\n\t\tself._spdVecs = []\n\n\tdef calcSpeed(self):\n\t\tdCenter = 
self._ctrArr[-1][0].center - self._ctrArr[0][0].center\n\t\tdt = self._ctrArr[-1][1] - self._ctrArr[0][1]\n\t\tself._spdVecs.append(dCenter / dt)\n\t\tprint('dCenter = %s, dt = %f, v = %s' % (dCenter, dt, dCenter / dt))\n\t\tself._ctrArr = []\n\n\tdef processContour(self, c):\n\t\tt = time.time()\n\n\t\tif c is None and len(self._ctrArr) <= 5:\n\t\t\tself._ctrArr = []\n\t\telif c is None and len(self._ctrArr) > 5:\n\t\t\tself.calcSpeed()\n\t\telif c is not None:\n\t\t\tself._ctrArr.append([c, t])\n\n\tdef recordSpd(self):\n\t\tnpyData = cl.NpyData('npyData.npy')\n\t\tnpyData['beltSpeed'] = np.mean(self._spdVecs, axis = 0)\n\t\tprint('avg spd = %s' % (npyData['beltSpeed']))\n\t\tnpyData.save()\n","sub_path":"LoadMaterial/SpeedCalculator.py","file_name":"SpeedCalculator.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"92267561","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport zlib\nimport hashlib\n\nfrom flask import Blueprint, jsonify, request\n\nfrom handlers.shorttext import ShorttextHandler\nfrom errors import (\n    InvalidArguments,\n    MissingArguments,\n    AccessDatabaseErrror,\n)\n\n\nshorttext = Blueprint('shorttext', __name__)\n\n\n@shorttext.route('/<guid>', methods=('GET', ))\ndef get_shorttext_by_guid(guid):\n    '''Fetch a short text'''\n    res = ShorttextHandler.get_shorttext(guid) or {}\n    return jsonify(res)\n\n\n@shorttext.route('/', methods=('POST', ))\ndef index():\n    '''Create a short text'''\n    try:\n        data = json.loads(request.data)\n    except:\n        raise InvalidArguments\n    if 'text' not in data or not data['text']:\n        raise MissingArguments\n    _type = ''\n    if 'type' in data and data['type']:\n        if data['type'] not in ('url', 'normal_text'):\n            raise InvalidArguments('invalid argument \'type\' <%s>' % data['type']) # noqa\n        _type = data['type']\n    md5 = hashlib.md5(_type+data['text']).hexdigest()\n    crc = zlib.crc32(_type+data['text'])\n    # an LRU cache could be added here\n    res = ShorttextHandler.get_shorttext_by_hash(md5, crc)\n    if res is not None:\n        guid = res['guid']\n    else:\n        # no existing short text found; create one\n        guid = ShorttextHandler.create_shorttext(data['text'], _type)\n        if guid is None:\n            raise AccessDatabaseErrror()\n    return jsonify(dict(guid=guid))\n","sub_path":"src/shorttext/shorttext.py","file_name":"shorttext.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259132238","text":"#-*- coding:utf-8 -*-\n''' \n#File name:\n#Author: Chen Yuanyuan\n#Created: 2018/1/9\n#Module description: address rules\n#Change history\n#Modified by:\n#Modified on:\n#Changes:\n'''\nimport sys,time\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.path.append(\"/testIsomp/common/\")\nfrom _icommon import getElement,selectElement,frameElement,commonFun\nfrom _cnEncode import cnEncode\nsys.path.append(\"/testIsomp/webElement/process/\")\nfrom test_access_approval_ment import Accapproval\n#import the user page element class\nsys.path.append(\"/testIsomp/webElement/user/\")\nfrom userElement import UserPage\n\nclass AddressRule(object):\n\tdef __init__(self, driver):\n\t\tself.driver = driver\n\t\tself.getElem = getElement(driver)\n\t\tself.selectElem = selectElement(driver)\n\t\tself.frameElem = frameElement(driver)\n\t\tself.cmf = commonFun(driver)\n\t\tself.cnEn = cnEncode()\n\t\tself.acproval = Accapproval(driver)\n\t\tself.user = UserPage(driver)\n\n\tu'''Click bulk delete'''\n\tdef click_bulkdel_address(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", 
\"delete_rule_address\")\n\n\tu'''Click the edit button\n\t\tParameters:\n\t\t\t- addressname: address rule name\n\t'''\n\tdef click_edit_address(self, addressname):\n\t\ttry:\n\t\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\t\trow = self.acproval.find_name_by_row(addressname, \"fortRuleAddressName\")\n\t\t\tupdate_xpath = \".//*[@id='content_table']/tbody/tr[\" + str(row) + \"]/td[5]/input[1]\"\n\t\t\ttime.sleep(3)\n\t\t\tself.getElem.find_element_wait_and_click(\"xpath\", update_xpath)\n\t\texcept Exception:\n\t\t\tprint(\"Failed to click the Edit button\")\n\n\tu'''Click the delete button\n\t\tParameters:\n\t\t\t- addressname: address rule name\n\t'''\n\tdef click_del_address(self, addressname):\n\t\ttry:\n\t\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\t\trow = self.acproval.find_name_by_row(addressname, \"fortRuleAddressName\")\n\t\t\tdel_xpath = \".//*[@id='content_table']/tbody/tr[\" + str(row) + \"]/td[5]/input[2]\"\n\t\t\tself.getElem.find_element_wait_and_click(\"xpath\", del_xpath)\n\t\texcept Exception:\n\t\t\tprint(\"Failed to click the Delete button\")\n\n\tu'''Fill in the search name\n\t\tParameters:\n\t\t\t- addressname: name\n\t'''\n\tdef set_search_addressname(self, addressname):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tname = self.cnEn.is_float(addressname)\n\t\tself.getElem.find_element_wait_and_clear(\"id\", \"rule_address_id\")\n\t\tself.getElem.find_element_wait_and_sendkeys(\"id\", \"rule_address_id\", name)\n\n\tu'''Click the search button'''\n\tdef click_search_address(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"rule_address\")\n\n\tu'''Fill in the address rule name\n\t\tParameters:\n\t\t\t- rulename: rule name\n\t'''\n\tdef set_rulename(self, rulename):\n\t\tname = self.cnEn.is_float(rulename)\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_clear(\"id\", \"fortRuleAddressName\")\n\t\tself.getElem.find_element_wait_and_sendkeys(\"id\", \"fortRuleAddressName\", name)\n\n\tu'''Select the IP rule type\n\t\tParameters:\n\t\t\t- stauts: 1 checks the IP/mask option, 2 checks the IP range option, 3 checks both\n\t'''\n\tdef checkbox_ip_rule(self, stauts):\n\t\tstaut = self.cnEn.is_float(stauts)\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tif staut == '1':\n\t\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"one_ip\")\n\t\telif staut == '2':\n\t\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"duo_ip\")\n\t\telse:\n\t\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"one_ip\")\n\t\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"duo_ip\")\n\n\tu'''Click to add more IPs'''\n\tdef click_add_more_ip(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"add_ip_mask\")\n\n\tu'''Fill in the IP addresses\n\t\tParameters:\n\t\t\t- iplist: list of IPs to fill in\n\t'''\n\tdef set_ip(self, iplist):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\t# collect all IP address inputs on the page\n\t\tfortips = self.driver.find_elements_by_name(\"fortIp\")\n\t\tipaddress = iplist.split()\n\t\tself.ip_mask_common(fortips, ipaddress)\n\n\tu'''Fill in the IP masks\n\t\tParameters:\n\t\t\t- masklist: list of mask values to fill in\n\t'''\n\tdef set_ip_mask(self, masklist):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\t# collect all IP mask inputs on the page\n\t\tfortMasks = self.driver.find_elements_by_name(\"fortMask\")\n\t\tmasklists = masklist.split()\n\t\tself.ip_mask_common(fortMasks, masklists)\n\n\tu'''Shared helper for filling in IP addresses and masks\n\t\tParameters:\n\t\t\t- ipsets: collection of input elements on the page\n\t\t\t- dataipsets: list of values to fill in\n\t'''\n\tdef ip_mask_common(self, ipsets, 
dataipsets):\n\t\t# length of the element collection\n\t\tipsetth = len(ipsets)\n\t\tfortth = 0\n\t\t# loop over the IP/mask inputs and fill them in\n\t\tfor ipset in ipsets:\n\t\t\tif fortth < ipsetth and dataipsets[fortth] != 'no':\n\t\t\t\tfortips = self.driver.find_elements_by_name(\"fortIp\")\n\t\t\t\tif ipsets == fortips:\n\t\t\t\t\tipset.clear()\n\t\t\t\tipset.send_keys(dataipsets[fortth])\n\t\t\t\tfortth = fortth + 1\n\n\tu'''Click to add more IP ranges'''\n\tdef click_add_ip_segment(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"add_ip_range\")\n\n\tu'''Fill in the range start IPs\n\t\tParameters:\n\t\t\t- ipstartlist: list of range start IPs to fill in\n\t'''\n\tdef set_ip_start(self, ipstartlist):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tipstarts = ipstartlist.split()\n\t\tself.ip_segment_common(\"fortIpStart\", ipstarts)\n\n\tu'''Fill in the range end IPs\n\t\tParameters:\n\t\t\t- ipendlist: list of range end IPs to fill in\n\t'''\n\tdef set_ip_end(self, ipendlist):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tipends = ipendlist.split()\n\t\tself.ip_segment_common(\"fortIpEnd\", ipends)\n\n\tu'''Shared helper for filling in IP range fields\n\t\tParameters:\n\t\t\t- ipsetname: name attribute of the range inputs on the page\n\t\t\t- dataipsets: list of values to fill in\n\t'''\n\tdef ip_segment_common(self, ipsetname, dataipsets):\n\t\t# collect all IP range inputs on the page\n\t\tipsets = self.driver.find_elements_by_name(ipsetname)\n\t\t# length of the range input collection\n\t\tipsetsth = len(ipsets)\n\t\tfortth = 0\n\t\t# loop over the range inputs and fill them in\n\t\tfor ipset in ipsets:\n\t\t\tif fortth < ipsetsth:\n\t\t\t\tipset.clear()\n\t\t\t\tipset.send_keys(dataipsets[fortth])\n\t\t\t\tfortth = fortth + 1\n\n\tu'''Click test'''\n\tdef click_test(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"test_ip\")\n\n\tu'''Fill in the test IP address\n\t\tParameters:\n\t\t\t- testip: IP address\n\t'''\n\tdef set_ip_test(self, testip):\n\t\tip = self.cnEn.is_float(testip)\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_clear(\"id\", \"testIp\")\n\t\tself.getElem.find_element_wait_and_sendkeys(\"id\", \"testIp\", ip)\n\n\tu'''Fill in the description\n\t\tParameters:\n\t\t\t- description: description text\n\t'''\n\tdef set_description(self, description):\n\t\tdescript = self.cnEn.is_float(description)\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_clear(\"id\", \"fortDescription\")\n\t\tself.getElem.find_element_wait_and_sendkeys(\"id\", \"fortDescription\", descript)\n\n\tu'''Click save'''\n\tdef click_save(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"save_rule_address\")\n\n\tu'''Click back'''\n\tdef click_back(self):\n\t\tself.frameElem.from_frame_to_otherFrame(\"rigthFrame\")\n\t\tself.getElem.find_element_wait_and_click_EC(\"id\", \"history_skip\")\n\n\tu'''Attach an address rule to a user\n\t\tParameters:\n\t\t\t- username: name of the user to edit\n\t\t\t- adrerule: address rule name\n\t'''\n\tdef edit_user_address_rule(self, username, adrerule):\n\t\tself.frameElem.from_frame_to_otherFrame(\"mainFrame\")\n\t\tname = self.cnEn.is_float(username)\n\t\tself.user.operate_edit(name)\n\t\tself.user.click_advanced_option()\n\t\tself.select_adress_rule(adrerule)\n\t\tself.user.save_button()\n\t\tself.cmf.click_login_msg_button()\n\t\tself.user.click_back_button()\n\n\tu'''Select the address rule\n\t\tParameters:\n\t\t\t- adrerule: address rule name\n\t'''\n\tdef select_adress_rule(self, adrerule):\n\t\tself.frameElem.from_frame_to_otherFrame(\"mainFrame\")\n\t\taddressrule = self.cnEn.is_float(adrerule)\n\t\tselect_elem = 
self.getElem.find_element_with_wait_EC('id',\"fortRuleAddressId\")\n\t\tself.selectElem.select_element_by_visible_text(select_elem, addressrule)","sub_path":"webElement/rule/test_address_rule_ment.py","file_name":"test_address_rule_ment.py","file_ext":"py","file_size_in_byte":8177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"627552790","text":"import gym\nimport numpy as np\nfrom ddpg_noise import OrnsteinUhlenbeckActionNoise\nfrom ddpg_model import DDPGModel\nfrom ddpg_buffer import DDPGBuffer\n\ndef normalize_state(s):\n # the state is 3-dimentional\n # s[0], s1[1] from -1 to 1, s[2] from -8 to 8\n s = s.flatten()\n s[2] /= 8.0\n return s\n\ndef normalize_reward(r):\n # the reward is in [-16.27..., 0]\n r = r.flatten()[0]\n min_reward = -16.2736045\n normal_para = np.abs(min_reward) / 2.0\n return r / normal_para + 1.0\n\ndef actual_action(a):\n # the action sapce is [-2, 2]\n return 2.0 * a\n\ndef gamma_normalize_rewards(rs, gamma = 0.9, ep = 15):\n l = len(rs)\n results = [0.0] * l\n weights = [0.0] * l\n\n for i in range(l - 1, -1, -1):\n w = 1.0\n for j in range(min(i + 1, ep)):\n ind = i - j\n results[ind] += rs[i] * w\n weights[ind] += w\n w = w * gamma\n \n return [results[i] / weights[i] for i in range(l)]\n\nif __name__ == '__main__':\n \n env = gym.make('Pendulum-v0')\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n print(state_dim, action_dim)\n done = True\n\n noise = OrnsteinUhlenbeckActionNoise(mu = np.zeros(action_dim))\n model = DDPGModel(state_dim, action_dim)\n buf = DDPGBuffer(1e5)\n\n ep_state = []\n ep_reward = []\n\n play_round = 0\n while True:\n if done:\n play_round = play_round + 1\n print('round %d finished' % play_round)\n buf.add_batch(ep_state, gamma_normalize_rewards(ep_reward))\n train_states, train_qs = buf.get_batch(500)\n if train_states is not None:\n model.train_model(train_states, train_qs)\n observation = normalize_state(env.reset())\n print('average reward', np.average(ep_reward))\n ep_state = []\n ep_reward = []\n if play_round % 20 == 0:\n env.render()\n oise = noise() * 0.2\n action = model.get_action(np.array([observation]), np.array([oise]))[0]\n new_observation, reward, done, info = env.step([actual_action(action)])\n ep_state.append(observation)\n ep_reward.append(normalize_reward(reward))\n observation = normalize_state(new_observation)\n\n \n","sub_path":"ddpg_pendulum_player.py","file_name":"ddpg_pendulum_player.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144492542","text":"#Dylan Myers\n#ME492_A14\n\n#the file sample_text was in a subfolder titled RW_Text_Files\nfilename = 'sample_text.txt'\nfileLoc = 'RW_Text_Files'\nfilenameAct = fileLoc + '/' + filename\n\nf = open(filenameAct, 'r')\nall_lines = f.readlines()\nf.close()\n\ntotalizer = 0\nsumTotal = 0\nnumList = []\n\nfor line in all_lines:\n\tif line.startswith('X-DSPAM-Confidence:'):\n\t\tline = line.replace('X-DSPAM-Confidence:','')\n\t\tline = line.strip()\n\t\tline = float(line)\n\t\tnumList.append(line)\n\t\ttotalizer = totalizer +1\navgConf = sum(numList)/totalizer\nprint('The average spam confidence is ' + str(avgConf))\n","sub_path":"ME492_A14.py","file_name":"ME492_A14.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"549678494","text":"from COLORS import style \n\nCONJ = 
'∧'\nDISJ = '∨'\nIMPL = '→'\nEQUIV = '↔'\nNEG = '¬'\nTOP = '⊤'\nBOT = '⊥'\nCONNECTIVES = [EQUIV, IMPL, DISJ, CONJ]\n\n# An expression tree node\nclass ExpressionTreeNode: \n\n # Constructor to create a node \n def __init__(self , value):\n self.value = value \n self.left = None\n self.right = None\n\n # Dictionary which associates to each symbol a function\n self.__funct = {NEG : self.negation, EQUIV: self.equivalence, IMPL : self.implication, DISJ : self.disjunction, CONJ: self.conjunction}\n \n def inorder(self):\n if self.left != None:\n self.left.inorder()\n\n print (self.value, end = \"\")\n \n if self.right != None:\n self.right.inorder()\n \n def inorder_parentheses(self):\n \n if self.value in CONNECTIVES:\n string = '(' + self.left.inorder_parentheses() + self.value + self.right.inorder_parentheses() + ')'\n return string\n elif self.value == NEG:\n string = '(¬' + self.left.inorder_parentheses() + ')'\n return string\n else:\n return self.value\n \n def evaluate(self, value_dict, show_steps):\n # Leaf => operand, print its truth value\n if self.right == self.left == None:\n if show_steps == True:\n print(\"Atom \" + self.value + \": \" + str(value_dict[self.value]))\n \n return (value_dict[self.value], self.value)\n\n else:\n # Operator: negation\n if self.value == NEG:\n truth_val, string = self.left.evaluate(value_dict, show_steps)\n\n negated_truth_val = self.__funct[self.value](truth_val) \n string = \"(\" + self.value + string + \")\"\n if show_steps == True:\n print(string + \" is \" + str(negated_truth_val))\n \n return (negated_truth_val, string)\n \n # Operator: other binary connective\n else:\n truth_val1, string1 = self.left.evaluate(value_dict, show_steps)\n\n truth_val2, string2 = self.right.evaluate(value_dict, show_steps)\n\n truth_val = self.__funct[self.value](truth_val1, truth_val2)\n string = \"(\" + string1 + self.value + string2 + \")\"\n\n if show_steps == True:\n print(string + \" is \" + str(truth_val))\n return (truth_val, string)\n\n @staticmethod\n def negation(truth_val):\n return not truth_val\n \n @staticmethod\n def conjunction(truth_val1, truth_val2):\n return truth_val1 and truth_val2\n \n @staticmethod\n def disjunction(truth_val1, truth_val2):\n return truth_val1 or truth_val2\n \n @staticmethod\n def implication(truth_val1, truth_val2):\n if truth_val1 == True and truth_val2 == False:\n return False\n return True\n \n @staticmethod\n def equivalence(truth_val1, truth_val2):\n return truth_val1 == truth_val2\n\n \nclass ExpressionTree:\n\n def __init__(self, postfix):\n \n self.postfix = postfix \n\n self.root = self.__constructTree()\n \n # Returns the root of the constructed tree from the given postfix expression \n def __constructTree(self): \n stack = [] \n \n # Traverse through every character of input expression \n for char in self.postfix : \n \n # Operand, simply push into stack \n if char not in CONNECTIVES and char != NEG: \n t = ExpressionTreeNode(char) \n stack.append(t) \n \n # Operator \n else: \n \n # Char is a connective different from negation(unary operator)\n if char in CONNECTIVES:\n # Pop two top nodes \n t = ExpressionTreeNode(char) \n t1 = stack.pop() \n t2 = stack.pop() \n \n # make them children \n t.right = t1 \n t.left = t2 \n \n # Add this subexpression to stack \n stack.append(t)\n # Char is negation: will only pop 1 operand from the stack\n else:\n t = ExpressionTreeNode(char)\n t1 = stack.pop()\n\n # Make the operand a child of negation\n t.left = t1\n # Add this subexpression to stack\n stack.append(t)\n\n # Only 
element will be the root of expression tree \n t = stack.pop() \n \n return t \n\n def convert_to_NNF(self, show_steps):\n \n # Applying the idempocy laws\n self.__idempocy_laws(show_steps)\n\n # Applying the annihilation laws\n self.__annihilation_laws(show_steps)\n\n # Applying the laws of true and false\n self.__true_false_laws(show_steps)\n \n # Applying the reduction laws to eliminate equivalences and implications\n self.__reduction_laws(show_steps)\n\n # Keeps track of whether the loop modifies the formula: we stop when it doesn't\n self.__global_modified_flag = True\n\n while self.__global_modified_flag == True:\n self.__global_modified_flag = False\n\n self.__idempocy_laws(show_steps)\n self.__annihilation_laws(show_steps)\n self.__true_false_laws(show_steps)\n self.__negation_laws(show_steps)\n \n \"\"\" ###########################################################################\n Reduction laws functions\n \"\"\"\n\n def __reduction_laws(self, show_steps):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.__reduce_eq_wrapper()\n \n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Reducing equivalences: (F↔G) ~ (F→G)∧(G→F)\") + style.RESET(\"\"))\n self.inorder_parentheses()\n\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.__reduce_impl_wrapper()\n \n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Reducing implications: (F→G) ~ (¬F∨G)\") + style.RESET(\"\"))\n self.inorder_parentheses()\n\n def __reduce_eq_wrapper(self):\n self.__reduce_eq(self.root)\n\n def __reduce_eq(self, node):\n \n if node.value == EQUIV:\n # Setting the modified flag to True\n self.__modified_flag = True\n\n #Changing the node value to '∧'\n node.value = CONJ\n\n # These will be the new children of the current node\n new_left = ExpressionTreeNode(IMPL)\n new_right = ExpressionTreeNode(IMPL)\n\n # Setting the children of the new left child of node\n new_left.left = node.left\n new_left.right = node.right\n\n # Setting the children of the new right child of node\n new_right.left = node.right\n new_right.right = node.left\n\n # Updating the children of node\n node.left = new_left\n node.right = new_right\n\n # Call the function for the new left child(will update for right sub-tree too since references)\n self.__reduce_eq(node.left)\n \n else:\n if node.left != None:\n self.__reduce_eq(node.left)\n if node.right != None:\n self.__reduce_eq(node.right)\n\n def __reduce_impl_wrapper(self):\n self.__reduce_impl(self.root)\n \n def __reduce_impl(self, node):\n if node.value == IMPL:\n # Setting the modified flag to True\n self.__modified_flag = True\n\n # Changing the node value to '∨'\n node.value = DISJ\n\n # Creating a new left child for the current node, containing '¬'\n new_left = ExpressionTreeNode(NEG)\n new_left.left = node.left\n\n # Updating the children of node\n node.left = new_left\n\n # Call the function for its children\n if node.left != None:\n self.__reduce_impl(node.left)\n if node.right != None:\n self.__reduce_impl(node.right)\n\n \"\"\" ###########################################################################\n Idempocy laws functions\n \"\"\"\n\n def __idempocy_laws(self, show_steps):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root, _ = self.__apply_idempocy(self.root)\n \n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Applying idempocy laws: F∧F ~ F, F∨F ~ F\") + style.RESET(\"\"))\n 
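# ---------------------------------------------------------------------------\n# Aside: a standalone sanity check (not part of this class) for the two\n# rewrites above (__reduce_eq and __reduce_impl). It brute-forces the\n# equivalences (F↔G) ~ (F→G)∧(G→F) and (F→G) ~ (¬F∨G) over every truth\n# assignment; IMPL_TABLE is just the textbook truth table for material\n# implication, introduced here only for this check.\nfrom itertools import product\n\nIMPL_TABLE = {(True, True): True, (True, False): False,\n              (False, True): True, (False, False): True}\nfor f, g in product([False, True], repeat=2):\n    impl = (not f) or g                               # the (¬F∨G) reduction of F→G\n    assert impl == IMPL_TABLE[(f, g)]                 # matches material implication\n    assert (f == g) == (impl and ((not g) or f))      # (F↔G) ~ (F→G)∧(G→F)\n# ---------------------------------------------------------------------------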
self.inorder_parentheses()\n self.__global_modified_flag = True\n\n def __apply_idempocy(self, node):\n str_left = \"\"\n str_right = \"\"\n\n # Binary connectives\n if node.value in CONNECTIVES:\n node.left, str_left = self.__apply_idempocy(node.left)\n node.right, str_right = self.__apply_idempocy(node.right)\n # Unary connective\n elif node.value == NEG:\n node.left, str_left = self.__apply_idempocy(node.left)\n # Atom\n else:\n return (node, node.value)\n\n # Or / And connective\n if (node.value == DISJ or node.value == CONJ) and str_left == str_right:\n # Return the node containing the child(Apply idempocy)\n # Set the modified flag to True\n self.__modified_flag = True\n return (node.left, str_left)\n elif node.value in CONNECTIVES:\n return (node, '(' + str_left + node.value + str_right + ')')\n elif node.value == NEG:\n return (node, '(' + node.value + str_left + ')')\n else:\n return (node, node.value)\n\n \"\"\" ###########################################################################\n Annihilation laws functions \n \"\"\"\n\n def __annihilation_laws(self, show_steps):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.__apply_annihilation(self.root)\n \n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Applying annihilation laws: F∨¬F ~ ⊤, F∧¬F ~ ⊥\") + style.RESET(\"\"))\n self.inorder_parentheses()\n self.__global_modified_flag = True\n\n def __apply_annihilation(self, node):\n str_left = \"\"\n str_right = \"\"\n\n # Binary connectives\n if node.value in CONNECTIVES:\n str_left = self.__apply_annihilation(node.left)\n str_right = self.__apply_annihilation(node.right)\n # Unary connective\n elif node.value == NEG:\n str_left = self.__apply_annihilation(node.left)\n # Atom\n else:\n return node.value\n \n # Implication + Annihilation\n if node.value == IMPL and str_left == str_right:\n # Setting the modified flag to True\n self.__modified_flag = True\n # Changing the value of the node to 'T'\n node.value = TOP\n \n # Deleting the children nodes\n node.left = None\n node.right = None\n\n return node.value\n \n # Disjunction + Annihilation\n elif node.value == DISJ and ('(¬' + str_left + ')' == str_right or str_left == '(¬' + str_right + ')'):\n # Setting the modified flag to True\n self.__modified_flag = True\n # Changing the value of the node to 'T'\n node.value = TOP\n\n # Deleting the children nodes\n node.left = None\n node.right = None\n\n return node.value\n \n # Conjunction + Annihilation\n elif node.value == CONJ and ('(¬' + str_left + ')' == str_right or str_left == '(¬' + str_right + ')'):\n # Setting the modified flag to True\n self.__modified_flag = True\n # Changing the value of the node to '⊥'\n node.value = BOT\n\n # Deleting the children nodes\n node.left = None\n node.right = None\n\n return node.value\n \n # Binary connective\n elif node.value in CONNECTIVES:\n return '(' + str_left + node.value + str_right + ')'\n # Unary connective(negation)\n elif node.value == NEG:\n return '(' + node.value + str_left + ')'\n # Atom\n else:\n return node.value\n \n \"\"\" ###########################################################################\n Laws of True and False functions\n \"\"\"\n\n def __true_false_laws(self, show_steps):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root = self.__apply_true_false(self.root)\n\n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Applying laws of 'True' and 'False': \") + style.RESET(\"\"))\n 
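# ---------------------------------------------------------------------------\n# Aside: a minimal sketch (separate from this class) of the design choice the\n# idempotency/annihilation passes above rely on: two subformulas only count\n# as 'the same F' when their fully parenthesised string renderings match.\n# The tuple mini-representation and render() below are hypothetical helpers,\n# not part of this module's API.\ndef render(t):\n    if isinstance(t, str):      # atom\n        return t\n    if len(t) == 2:             # unary connective: ('¬', child)\n        return '(' + t[0] + render(t[1]) + ')'\n    return '(' + render(t[1]) + t[0] + render(t[2]) + ')'\n\nsame = ('∧', 'p', ('¬', 'q'))\nassert render(same) == render(('∧', 'p', ('¬', 'q')))   # F∧F would collapse here\nassert render(same) != render(('∧', ('¬', 'q'), 'p'))   # a commuted twin is missed\n# ---------------------------------------------------------------------------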
self.inorder_parentheses()\n self.__global_modified_flag = True\n\n def __apply_true_false(self, node):\n \n # Binary connective\n if node.value in CONNECTIVES:\n node.left = self.__apply_true_false(node.left)\n node.right = self.__apply_true_false(node.right)\n \n # Unary connective\n elif node.value == NEG:\n node.left = self.__apply_true_false(node.left)\n # Atom\n else:\n return node\n\n # Negation\n if node.value == NEG:\n if node.left.value == TOP:\n # Setting the modified flag to True\n self.__modified_flag = True\n node.value = BOT\n node.left = None\n return node\n elif node.left.value == BOT:\n # Setting the modified flag to True\n self.__modified_flag = True\n node.value = TOP\n node.left = None\n return node\n # Disjunction\n elif node.value == DISJ:\n if node.left.value == BOT:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.right\n elif node.right.value == BOT:\n # Setting the modified flag to True\n self.__modified_flag = True \n return node.left\n elif node.left.value == TOP:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.left\n elif node.right.value == TOP:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.right\n # Conjunction\n elif node.value == CONJ:\n if node.left.value == TOP:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.right\n elif node.right.value == TOP:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.left\n elif node.left.value == BOT:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.left\n elif node.right.value == BOT:\n # Setting the modified flag to True\n self.__modified_flag = True\n return node.right\n # Implication\n elif node.value == IMPL and (node.left.value == BOT or node.right.value == TOP):\n if node.left.value == BOT:\n # Setting the modified flag to True\n self.__modified_flag = True\n node.value = TOP\n node.left = None\n return node\n \n return node\n\n \"\"\" ###########################################################################\n Negation functions\n \"\"\"\n\n def __negation_laws(self, show_steps):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root = self.__apply_double_negation(self.root)\n\n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Removing double negations: ¬(¬F) ~ F\") + style.RESET(\"\"))\n self.inorder_parentheses()\n self.__global_modified_flag = True\n\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root = self.__apply_de_morgan(self.root)\n \n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Applying De Morgan's laws: ¬(F∨G) ~ ¬F∧¬G, ¬(F∧G) ~ ¬G∨¬F\") + style.RESET(\"\")) \n self.inorder_parentheses()\n self.__global_modified_flag = True\n\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root = self.__apply_other_negation(self.root)\n\n if show_steps == True and self.__modified_flag == True:\n print(style.GREEN(\"Applying other negations: ¬(F→G) ~ F∧(¬G), ¬(F↔G) ~ F↔(¬G)\") + style.RESET(\"\"))\n self.inorder_parentheses()\n self.__global_modified_flag = True\n\n def __apply_de_morgan(self, node):\n # Binary operator\n if node.value in CONNECTIVES:\n node.left = self.__apply_de_morgan(node.left)\n node.right = self.__apply_de_morgan(node.right)\n # Unary operator\n elif node.value == NEG:\n node.left = self.__apply_de_morgan(node.left)\n # Atom\n else:\n 
return node\n \n if node.value == NEG:\n \n if node.left.value == DISJ or node.left.value == CONJ:\n # Set the modified flag to True\n self.__modified_flag = True\n\n # Flip the connective\n if node.left.value == DISJ:\n node.left.value = CONJ\n else:\n node.left.value = DISJ\n\n # Creating nodes containing negation\n new_left = ExpressionTreeNode(NEG)\n new_right = ExpressionTreeNode(NEG)\n\n # Setting children of negations\n new_left.left = node.left.left\n new_right.left = node.left.right\n\n # Updating the children of the former disjunction\n node.left.left = new_left\n node.left.right = new_right\n\n return node.left\n \n return node\n\n def __apply_other_negation(self, node):\n # Binary operator\n if node.value in CONNECTIVES:\n node.left = self.__apply_other_negation(node.left)\n node.right = self.__apply_other_negation(node.right)\n # Unary operator\n elif node.value == NEG:\n node.left = self.__apply_other_negation(node.left)\n # Atom\n else:\n return node\n \n if node.value == NEG:\n \n if node.left.value == IMPL or node.left.value == EQUIV:\n # Set the modified flag to True\n self.__modified_flag = True\n\n # Flip the connective in the case of implication\n if node.left.value == IMPL:\n node.left.value = CONJ\n\n # Creating a new node containig negation\n new_right = ExpressionTreeNode(NEG)\n\n # Setting child of negation\n new_right.left = node.left.right\n\n # Updating the child of the former implication/equivalence\n node.left.right = new_right\n\n return node.left\n \n return node\n \n def __apply_double_negation(self, node):\n \n # Negation\n if node.value == NEG:\n\n count = 1\n current_node = node\n\n # Reaching the last negation in the tree, and counting their amount\n while current_node.left.value == NEG:\n count += 1\n current_node = current_node.left\n \n if count > 1:\n # Set the modified flag to True\n self.__modified_flag = True\n\n # Recur down the tree first\n current_node.left = self.__apply_double_negation(current_node.left)\n\n # If there is an even amount of negations, return the child of the last negation\n if count % 2 == 0:\n return current_node.left\n # Else return the last negation in the subtree\n else:\n return current_node\n\n # Other binary connectives\n elif node.value in CONNECTIVES:\n node.left = self.__apply_double_negation(node.left)\n node.right = self.__apply_double_negation(node.right)\n # Atoms\n return node\n\n \"\"\" ########################################################################### \"\"\"\n\n def convert_to_DNF(self):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root = self.__apply_tautologies(self.root, CONJ, DISJ)\n if self.__modified_flag == True:\n print(style.GREEN(\"Applying A∧(B∨C) ~ (A∧B)∨(A∧C) to reach DNF\") + style.RESET(\"\"))\n self.inorder_parentheses()\n else:\n print(style.RED(\"No more modifications required to reach DNF.\") + style.RESET(\"\"))\n\n def convert_to_CNF(self):\n # Initializing the modified flag with False\n self.__modified_flag = False\n self.root = self.__apply_tautologies(self.root, DISJ, CONJ)\n if self.__modified_flag == True:\n print(style.GREEN(\"Applying A∨(B∧C) ~ (A∨B)∧(A∨C) to reach CNF\") + style.RESET(\"\"))\n self.inorder_parentheses()\n else:\n print(style.RED(\"No more modifications required to reach CNF.\") + style.RESET(\"\"))\n\n def __apply_tautologies(self, node, primary, secondary):\n \n if node.left != None:\n node.left = self.__apply_tautologies(node.left, primary, secondary)\n if node.right != None:\n node.right = 
self.__apply_tautologies(node.right, primary, secondary)\n\n        if node.value == primary and (node.left != None and node.left.value == secondary):\n            self.__modified_flag = True\n            # Changing the value of node.value \n            node.value = secondary\n\n            # Save the node.left.right\n            temp = node.left.right\n\n            # Changing the value of node.left.value \n            node.left.value = primary\n            node.left.right = node.right\n\n            # Creating a new node (primary connective, mirroring the elif branch below)\n            new_right = ExpressionTreeNode(primary)\n            new_right.left = temp\n            new_right.right = node.right\n\n            node.right = new_right\n\n        elif node.value == primary and (node.right != None and node.right.value == secondary):\n            self.__modified_flag = True\n            # Changing the value of node.value \n            node.value = secondary\n\n            # Save the node.right.left\n            temp = node.right.left\n\n            # Changing the value of node.right.value \n            node.right.value = primary\n            node.right.left = node.left\n\n            # Creating a new node\n            new_left = ExpressionTreeNode(primary)\n            new_left.left = node.left\n            new_left.right = temp\n\n            node.left = new_left\n\n        return node\n\n    \"\"\" ########################################################################### \"\"\"\n\n    def inorder_traversal(self):\n        if self.root != None:\n            self.root.inorder()\n    \n    def inorder_parentheses(self):\n        if self.root != None:\n            print(style.CYAN(self.root.inorder_parentheses()) + style.RESET(\"\"))\n\n    def comp_truth_value(self, value_dict, show_steps):\n        \"\"\"\n        Computes the truth value of the expression associated to the expression tree,\n        based on 'value_dict', a dictionary which maps each propositional variable to\n        a truth value. It also shows the steps.\n        \"\"\"\n        if self.root != None:\n            return self.root.evaluate(value_dict, show_steps)[0]\n        else:\n            print(\"Empty expression!\")\n","sub_path":"ExpressionTree.py","file_name":"ExpressionTree.py","file_ext":"py","file_size_in_byte":24544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"328462070","text":"import argparse\nimport os\nimport time\nimport math\nimport numpy as np\nimport random\nimport sys\nimport json\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils import to_gpu, Corpus, batchify\nfrom models import Seq2Seq2Decoder, Seq2Seq, MLP_D, MLP_G, MLP_Classify, load_models\nimport shutil\n\nparser = argparse.ArgumentParser(description='ARAE for Yelp transfer')\n# Path Arguments\nparser.add_argument('--data_path', type=str, required=True,\n                    help='location of the data corpus')\nparser.add_argument('--outf', type=str, default='yelp_example',\n                    help='output directory name')\nparser.add_argument('--load_vocab', type=str, default=\"\",\n                    help='path to load vocabulary from')\nparser.add_argument('--corpus_name', type=str, required=True)\n\n# Data Processing Arguments\nparser.add_argument('--vocab_size', type=int, default=30000,\n                    help='cut vocabulary down to this size '\n                         '(most frequently seen words in train)')\nparser.add_argument('--maxlen', type=int, default=25,\n                    help='maximum sentence length')\nparser.add_argument('--lowercase', dest='lowercase', action='store_true',\n                    help='lowercase all text')\nparser.add_argument('--no-lowercase', dest='lowercase', action='store_false',\n                    help='do not lowercase text')\nparser.set_defaults(lowercase=True)\n\n# Other\nparser.add_argument('--epochs', type=int, default=25,\n                    help='maximum number of epochs')\nparser.add_argument('--sample', action='store_true',\n                    help='sample when decoding for 
generation')\nparser.add_argument('--seed', type=int, default=1111,\n                    help='random seed')\nparser.add_argument('--cuda', dest='cuda', action='store_true',\n                    help='use CUDA')\nparser.add_argument('--no-cuda', dest='cuda', action='store_false',\n                    help='do not use CUDA')\nparser.set_defaults(cuda=True)\nparser.add_argument('--device_id', type=str, default='0')\n\nargs = parser.parse_args()\nprint(vars(args))\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.device_id\n\n# make output directory if it doesn't already exist\nif not os.path.isdir(args.outf):\n    os.makedirs(args.outf)\n\n# Set the random seed manually for reproducibility.\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n    if not args.cuda:\n        print(\"WARNING: You have a CUDA device, \"\n              \"so you should probably run with --cuda\")\n    else:\n        torch.cuda.manual_seed(args.seed)\n\ndef evaluate_generator(whichdecoder, noise):\n    gan_gen.eval()\n    autoencoder.eval()\n\n    for d in en_data:\n        indices, _, lengths = d\n        indices = indices.cuda()\n        hidden = autoencoder.encode(indices, lengths, noise=noise)\n        max_indices = \\\n            autoencoder.generate(whichdecoder, hidden, maxlen=50, sample=args.sample)\n\n        with open(\"%s/%s_generated_%s.txt\" % (args.outf, whichdecoder, args.epochs), \"a\") as f:\n            max_indices = max_indices.data.cpu().numpy()\n            for idx in max_indices:\n                # generated sentence\n                words = [corpus.dictionary.idx2word[x] for x in idx]\n                # truncate sentences to first occurrence of <eos>\n                truncated_sent = []\n                for w in words:\n                    if w != '<eos>':\n                        truncated_sent.append(w)\n                    else:\n                        break\n                chars = \" \".join(truncated_sent)\n                f.write(chars)\n                f.write(\"\\n\")\n\n# Load data\nlabel_ids = {\"pos\": 1, \"neg\": 0}\nid2label = {1:\"pos\", 0:\"neg\"}\ndatafiles = [(args.data_path, args.corpus_name, False),\n             ]\n\nwith open(os.path.join(args.outf,\"vocab.json\"), \"r\") as f:\n    vocabdict = json.load(f)\nvocabdict = {k: int(v) for k, v in vocabdict.items()}\ncorpus = Corpus(datafiles,\n                maxlen=args.maxlen,\n                vocab_size=args.vocab_size,\n                lowercase=args.lowercase,\n                vocab=vocabdict)\n\n# save arguments\nntokens = len(corpus.dictionary.word2idx)\nprint(\"Vocabulary Size: {}\".format(ntokens))\nargs.ntokens = ntokens\n\neval_batch_size = 100\nen_data = batchify(corpus.data[args.corpus_name], eval_batch_size, shuffle=False)\nprint(len(en_data))\nprint(\"Loaded data!\")\n\nmodel_args, idx2word, autoencoder, gan_gen, gan_disc = load_models(args.outf, args.epochs, twodecoders=True)\n\nif args.cuda:\n    autoencoder = autoencoder.cuda()\n    gan_gen = gan_gen.cuda()\n    gan_disc = gan_disc.cuda()\n\none = to_gpu(args.cuda, torch.FloatTensor([1]))\nmone = one * -1\n\nevaluate_generator(1, False)\n","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"104591245","text":"import matplotlib.pyplot as opt, matplotlib.animation as animation, pathlib, random as rd\r\nfrom Main import dl, intrs, animate, get_y\r\n\r\ndata = open(str(pathlib.Path(__file__).parent.absolute())+\"\\Data.txt\",\"w\")\r\npoint_s = (0, 5)\r\notr_s = ((-5, 5), (-5, -5))\r\nline_1 = dl(otr_s[0][0], otr_s[0][1], otr_s[1][0]+8, otr_s[1][1]+8)\r\nline_2 = dl(otr_s[0][0], otr_s[0][1], otr_s[1][0], otr_s[1][1])\r\npoint_1 = ((otr_s[0][0]+otr_s[0][1])/2, get_y((otr_s[0][0]+otr_s[0][1])/2, line_2))\r\notr_1 = ((point_s[0], point_1[0]), (point_s[1], point_1[1]))\r\notr_2 = ((point_s[0], otr_s[0][0]), (point_s[1], 
otr_s[1][0]))\r\npoint_2, point_3 = intrs(otr_1, line_1), intrs(otr_2, line_1)\r\nline_3 = dl(point_1[0], point_3[0], point_1[1], point_3[1])\r\nline_4 = dl(otr_s[0][1], point_2[0], otr_s[1][1], point_2[1])\r\npoint_4 = intrs(line_3, line_4)\r\nline_5 = dl(point_4[0], point_s[0], point_4[1], point_s[1])\r\ndata.write(\"l \"+str(line_1)+'\\n')\r\ndata.write(\"p \"+str(point_s)+'\\n')\r\ndata.write(\"l \"+str(line_2)+'\\n')\r\ndata.write(\"o \"+str(otr_s)+'\\n')\r\ndata.write(\"p \"+str(point_1)+'\\n')\r\ndata.write(\"o \"+str(otr_2)+'\\n'+\"o \"+str(otr_1)+'\\n')\r\ndata.write(\"p \"+str(point_3)+'\\n'+\"p \"+str(point_2)+'\\n')\r\ndata.write(\"l \"+str(line_3)+'\\n'+\"l \"+str(line_4)+'\\n')\r\ndata.write(\"p \"+str(point_4)+'\\n')\r\ndata.write(\"l \"+str(line_5)+'\\n')\r\ndata.close()\r\nfig = opt.figure()\r\nAnI = animation.FuncAnimation(fig, animate, interval=1000)\r\nopt.xlim(-10, 10)\r\nopt.ylim(-10, 10)\r\nopt.grid()\r\nopt.show()","sub_path":"L_4.py","file_name":"L_4.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"536256618","text":"from .tagged import TaggedTransformer\nfrom .tagged_feature import word2features\n\n\nclass CustomTransformer(TaggedTransformer):\n def extract_features(self, feature):\n n = feature.find(\"=\")\n return [feature[:n], feature[n+1:]]\n\n def _convert_features_to_dict(self, features):\n return dict([self.extract_features(feature) for feature in features])\n\n def _convert_features_to_list(self, features):\n return [u\"{}={}\".format(k, v) for k, v in features.items()]\n pass\n\n def _word2features(self, s, i, template):\n features = word2features(s, i, template)\n features = self._convert_features_to_dict(features)\n for i in range(-2, 3):\n t = \"T[{}].is_in_dict\".format(i)\n t2 = \"T[{}]\".format(i)\n t3 = \"T[{}].lower\".format(i)\n if features[t] == 'True':\n features[t2] = \"-\"\n features[t3] = \"-\"\n for i in range(-2, 2):\n t = \"T[{},{}].is_in_dict\".format(i, i + 1)\n t2 = \"T[{},{}]\".format(i, i + 1)\n if features[t] == 'True':\n features[t2] = \"-\"\n features = self._convert_features_to_list(features)\n\n return features\n\n def sentence2features(self, s):\n output = [self._word2features(s, i, self.template) for i in\n range(len(s))]\n return output\n","sub_path":"venv/lib/python3.6/site-packages/underthesea/word_sent/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528220709","text":"\"\"\"Execution API tasks module.\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport shlex\nimport subprocess\n\nfrom django.db import transaction\n\nfrom celery import shared_task\nfrom celery.utils.log import get_task_logger\n\nfrom .models import Command\n\n\nlog = get_task_logger(__name__)\n\n\ndef do_execute(command, *, timeout=10, **kwargs):\n \"\"\"Higher level subprocess execution abstraction.\n\n :returns: (exit_code->int, stdout->str, stderr->str)\n \"\"\"\n _command = shlex.split(command)\n try:\n with subprocess.Popen(\n _command,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n **kwargs) as proc:\n try:\n stdout, stderr = proc.communicate(timeout=timeout)\n return (proc.returncode, stdout, stderr)\n except subprocess.TimeoutExpired as e:\n proc.kill()\n stdout, stderr = proc.communicate()\n return (proc.returncode, stdout, stderr)\n except 
subprocess.SubprocessError as e:\n return (1, '', e)\n\n\n@shared_task\n@transaction.atomic\ndef execute_command(pk, lazy=True):\n \"\"\"Execute a shell command in the background.\"\"\"\n log.info('Executing: Command id: %d.', pk)\n this = Command.objects.select_for_update().get(pk=pk)\n if not lazy or this.updated_on > this.executed_on:\n this.status, this.stdout, this.stderr = do_execute(this.command)\n this.save()\n log.info('Command id: %d executed.', pk)\n else:\n log.info('Command id: %d up to date. Execution not required.', pk)\n","sub_path":"exec_api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"333876950","text":"# -*- coding: utf-8 -*-\n\n#introduction to the design and analysis of algorithms excise1.1 4\ndef naivesqrt(n):\n for i in range(n):\n if i * i > n:\n return i-1\n elif i * i == n:\n return i\n\n\n\n\nif __name__ == '__main__':\n print(naivesqrt(10))\n","sub_path":"daa/naivesqrt.py","file_name":"naivesqrt.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"471825215","text":"import time\nfrom threading import Thread, Event, Lock\nimport ctypes\nfrom multiprocessing import Event as MPEvent, Condition as MPCondition, Array as MPArray, Queue as MPQueue\nfrom queue import Empty, Full\nimport struct\nimport psutil\nimport os\nfrom datetime import datetime\nimport gc\nimport base64\n\nimport numpy as np\nimport sys\nimport cv2\nif 'cv2' not in sys.modules:\n print('!!!Wrong CV2 import!!!')\n import cv2.cv2 as cv2\n\nfrom capture.capture import CamCapture\nfrom imgproc.imgproc_process import ImgProcProcess\n\n\nclass ImgProcCommander(object):\n IMG_SIZE = (320, 240, 3)\n DATA_PATH = '/home/pi/pren-data/captures/'\n\n def __init__(self, cam_capture: CamCapture, num_processes: int, return_image: bool = True):\n self._cam_capture = cam_capture\n self._num_processes = num_processes\n self._return_image = return_image\n\n self._callbacks = []\n\n self._service = None\n self._service = Thread(target=self.__service_thread, name='ImgProcCommander')\n self._shutdown_event = Event()\n\n # 0 deactivates the function; > 0 says how many pictures should be saved within one capture wave\n self._images_to_save = 0\n self._image_storage = list()\n self._image_storage_save_now_event = Event()\n self._image_save_service = None\n\n self._processes = [None for _ in range(self._num_processes)]\n self._proc_condition = [MPCondition() for _ in range(self._num_processes)]\n self._proc_time_since_last_frame = [0 for _ in range(self._num_processes)]\n self._proc_shutdown_event = MPEvent()\n\n # flag if new frame, frame_number, frame_time, width, height, channels, return out_image?\n self._param_format = '=BIdIIIB'\n self._image_buffer_size = ImgProcCommander.IMG_SIZE[0] * ImgProcCommander.IMG_SIZE[1] * \\\n ImgProcCommander.IMG_SIZE[2]\n\n self._proc_in_parameters = [MPArray(ctypes.c_uint8, struct.calcsize(self._param_format), lock=False)\n for _ in range(self._num_processes)]\n self._proc_in_parameters_np = [np.frombuffer(arr, dtype=np.uint8) for arr in self._proc_in_parameters]\n self._proc_in_image = [MPArray(ctypes.c_uint8, self._image_buffer_size, lock=False)\n for _ in range(self._num_processes)]\n self._proc_in_image_np = [np.frombuffer(arr, dtype=np.uint8) for arr in self._proc_in_image]\n\n self._proc_result_queue = MPQueue(maxsize=100)\n self._proc_results = list()\n 
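# ---------------------------------------------------------------------------\n# Aside: a standalone sketch of the fixed-layout parameter block defined\n# above. '=BIdIIIB' packs (new_frame flag, frame_number, frame_time, width,\n# height, channels, return_image flag) with native byte order, standard\n# sizes and no padding, so the worker side can presumably recover the tuple\n# with a single struct.unpack of the shared byte array. The sample values\n# below are made up for illustration.\nimport struct\nimport time\n\nFMT = '=BIdIIIB'\nblob = struct.pack(FMT, 1, 42, time.time(), 320, 240, 3, 1)\nassert len(blob) == struct.calcsize(FMT)   # 1+4+8+4+4+4+1 = 26 bytes, no gaps\nflag, frame_no, ts, w, h, c, want_img = struct.unpack(FMT, blob)\nassert (flag, frame_no, (w, h, c), want_img) == (1, 42, (320, 240, 3), 1)\n# ---------------------------------------------------------------------------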
self._proc_results_lock = Lock()\n\n def register_callback(self, callback):\n self._callbacks.append(callback)\n\n def __frame_callback(self, frame_number, img_buffer):\n if len(img_buffer) == 0:\n return\n\n frame_time = time.time()\n sent_frame = False\n\n if self._images_to_save > 0 and not self._image_storage_save_now_event.is_set():\n if len(self._image_storage) >= self._images_to_save:\n print('ImgProcCommander: Target reached. {:d} frames saved. Start writing to disk.'\n .format(len(self._image_storage)))\n self._image_storage_save_now_event.set()\n elif psutil.virtual_memory().percent >= 80:\n print('ImgProcCommander: Memory full. {:d} frames saved. Start writing to disk.'\n .format(len(self._image_storage)))\n self._image_storage_save_now_event.set()\n else:\n self._image_storage.append((frame_number, frame_time, img_buffer))\n\n if not self._image_storage_save_now_event.is_set():\n proc_prio = list(enumerate(self._proc_time_since_last_frame))\n proc_prio.sort(key=lambda x: x[1], reverse=False)\n\n for i, _ in proc_prio:\n if self._proc_condition[i].acquire(block=False):\n buffer = np.frombuffer(img_buffer, np.uint8)\n img = cv2.imdecode(buffer, cv2.IMREAD_COLOR)\n expected_shape = (img.shape[1], img.shape[0], img.shape[2]) # OpenCV lists rows first then columns\n shape = img.shape\n if expected_shape != ImgProcCommander.IMG_SIZE:\n raise RuntimeError('ImgProcCommander: Invalid image size ({})'.format(img.shape))\n self._proc_in_image_np[i][:] = np.ravel(img)\n params = struct.pack(self._param_format, 1, frame_number, frame_time, *shape, int(self._return_image))\n self._proc_in_parameters[i][:] = bytearray(params)\n self._proc_time_since_last_frame[i] = time.time()\n self._proc_condition[i].notify_all()\n self._proc_condition[i].release()\n sent_frame = True\n break # send frame to only one process\n if not sent_frame:\n #print('Frame {0:d} dropped.'.format(frame_number))\n pass\n\n def start_new_frame_saving(self, number_of_frames: int):\n if self._image_save_service is not None and self._image_save_service.is_alive():\n print('ImgProcCommander: Saving still in progress. No new saving period initialized.')\n return\n self.__reset_image_saving()\n self._images_to_save = number_of_frames\n\n print('ImgProcCommander: Starting image capture mode. Saving the next {:d} frames.'.format(number_of_frames))\n\n def __reset_image_saving(self):\n self._images_to_save = 0\n self._image_storage_save_now_event.clear()\n self._image_storage.clear()\n gc.collect()\n\n def __image_save_thread(self):\n print('ImgSaveService: Start saving images.')\n if len(self._image_storage) <= 0:\n print('ImgSaveService: Nothing to save.')\n self.__reset_image_saving()\n return\n time_duration = self._image_storage[len(self._image_storage) - 1][1] - self._image_storage[0][1]\n file_size = 0\n for (_, _, image) in self._image_storage:\n file_size += len(image)\n print('ImgSaveService: Duration of images {:f} | File size: {:.2f}MB'\n .format(time_duration, file_size / 2 ** 20))\n if not os.path.exists(ImgProcCommander.DATA_PATH):\n print('ImgSaveService: Path {} has to exist. Aborting save.'.format(ImgProcCommander.DATA_PATH))\n self.__reset_image_saving()\n return\n\n dt = datetime.fromtimestamp(self._image_storage[0][1])\n folder_name = dt.strftime('cap_%y%m%d_%H%M%S')\n save_path = os.path.abspath(os.path.join(ImgProcCommander.DATA_PATH, folder_name))\n if os.path.exists(save_path):\n print('ImgSaveService: Path {} already exists. 
Aborting save.'.format(save_path))\n self.__reset_image_saving()\n return\n else:\n os.mkdir(save_path)\n print('ImgSaveService: Saving to path ' + save_path)\n\n print('ImgSaveService: Writing frame description file...')\n with open(os.path.join(save_path, 'frame_desc.csv'), 'w', encoding='utf-8') as f:\n for img in self._image_storage:\n f.write('{:d};{:.6f};{:d}\\n'.format(img[0], img[1], len(img[2])))\n\n print('ImgSaveService: Writing image data file...')\n with open(os.path.join(save_path, 'image_data_jpeg.bin'), 'wb') as f:\n for img in self._image_storage:\n f.write(img[2])\n\n print('ImgSaveService: Cleaning up...')\n self.__reset_image_saving()\n\n print('ImgSaveService: Finished saving process. Terminating.')\n\n def __service_thread(self):\n print('ImgProcCommander: Starting service.')\n\n self._cam_capture.register_callback(self.__frame_callback)\n\n for i in range(self._num_processes):\n self._processes[i] = ImgProcProcess(i + 1, self._proc_condition[i], self._proc_in_parameters[i],\n self._proc_in_image[i], self._proc_result_queue)\n self._processes[i].daemon = True\n self._processes[i].name = 'ImgProcProcess ' + str(i+1)\n self._processes[i].start()\n\n while not self._shutdown_event.is_set():\n if self._image_storage_save_now_event.is_set():\n if self._image_save_service is None or not self._image_save_service.is_alive():\n self._image_save_service = Thread(target=self.__image_save_thread, name='ImgSaveService')\n self._image_save_service.start()\n\n try:\n (frame_number, detected_number, frame_delay, frame_time, result_image) = self._proc_result_queue.get(block=True, timeout=0.1)\n except Empty:\n continue\n\n with self._proc_results_lock:\n if len(self._proc_results) >= 1000:\n self._proc_results.pop(0)\n self._proc_results.append({\n 'frameNumber': frame_number,\n 'detectedNumber': detected_number,\n 'frameDelay': frame_delay,\n 'frameTime': frame_time,\n 'resultImage': result_image\n })\n #print('ImgProcResult: {:d} {:d} {:.2f} {:.2f} {:d}'.\n # format(frame_number, detected_number, frame_delay, frame_time,\n # len(result_image) if result_image is not None else -1))\n\n # call registered callbacks\n for callback in self._callbacks:\n callback(frame_number, detected_number, result_image)\n\n for process in self._processes:\n process.shutdown()\n\n for process in self._processes:\n process.join(3)\n\n print('ImgProcCommander: Service terminated.')\n\n def get_result_images(self, number_of_images: int):\n with self._proc_results_lock:\n num = min(number_of_images, len(self._proc_results))\n new_list = list()\n for proc_result in self._proc_results[len(self._proc_results)-num:len(self._proc_results)]:\n img = proc_result['resultImage']\n if img is not None:\n img = base64.b64encode(img)\n\n new_list.append({\n 'frameNumber': proc_result['frameNumber'],\n 'detectedNumber': proc_result['detectedNumber'],\n 'frameDelay': proc_result['frameDelay'],\n 'frameTime': proc_result['frameTime'],\n 'resultImage': img\n })\n return new_list\n\n def start_service(self):\n if not self.is_running():\n self._service.start()\n else:\n raise RuntimeError('ImgProcCommander: You cannot start a service that is already running.')\n\n def is_running(self):\n return self._service.is_alive()\n\n def shutdown(self):\n self._shutdown_event.set()\n self._service.join(10)\n if self._service.is_alive():\n print('ImgProcCommander: Service thread did not terminate after 5 seconds.')\n if self._image_save_service is not None and self._image_save_service.is_alive():\n print('ImgProcCommander: Wait for 
ImgSaveService to terminate...')\n            self._image_save_service.join()\n","sub_path":"mission_control/imgproc_command.py","file_name":"imgproc_command.py","file_ext":"py","file_size_in_byte":11305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"218717516","text":"import os\nimport indentbomextract\nimport xlsxwriter\nimport schempnextract\n\n'''\nindentbomextract.py\n\t1. create pandas data frame from indented BOM\n\n\npn_check_process:\n\t\n'''\n\ndef check(bomdf, schempn, outputdir):\n\t'''\n\t\tCompares each P/N in schempn to bomdf and marks up the output .xlsx:\n\t\t\tGreen: P/N and quantity match on schematics and indented bills\n\t\t\tYellow: more on BOM than schematic\n\t\t\tOrange: shortage - more on schematic than BOMs\n\t\t\tRed: negative qty on BOM\n\t\tChecks if any op seq on BOM is <0 qty, otherwise sums all pns in diff op seq.\n\n\t\toutput file columns:\n\t\tp/n, schem qty, BOM total qty, Note (if op: x qty < 0), Highlight\n\n\t\tinput:\n\t\t\tbomdf (pandas df): indented BOM\n\t\t\tschempn (dict): {p/n: qty} on schematics\n\t\t\toutputdir (str): path to output directory to write .xlsx sheet of colorcoding\n\t\t\t\t\t\t\t\t\n\t'''\n\n\t# Create excel sheet in output file location\n\tos.chdir(outputdir)\n\tworkbook = xlsxwriter.Workbook('Schem_BOM_Checked.xlsx')\n\tworksheet = workbook.add_worksheet()\n\t\n\t# Create Titles - format column widths\n\tworksheet.write(0, 0, 'Part Num')\n\tworksheet.set_column(0, 0, 18.0)\n\n\tworksheet.write(0, 1, 'Schem Qty')\n\tworksheet.set_column(1, 1, 10.0)\n\n\tworksheet.write(0, 2, 'BOMs Qty')\n\tworksheet.set_column(2, 2, 10.0)\n\n\tworksheet.write(0, 3, 'Note')\n\tworksheet.set_column(3, 3, 48.0)\n\n\tworksheet.set_column(4,4, 24.0)\n\n\t# highlight spreadsheet:\n\tred_format = workbook.add_format()\n\tred_format.set_pattern(1)\n\tred_format.set_bg_color('red')\n\n\torg_format = workbook.add_format()\n\torg_format.set_pattern(1)\n\torg_format.set_bg_color('orange')\n\n\tyel_format = workbook.add_format()\n\tyel_format.set_pattern(1)\n\tyel_format.set_bg_color('yellow')\n\n\tgrn_format = workbook.add_format()\n\tgrn_format.set_pattern(1)\n\tgrn_format.set_bg_color('green')\n\n\tpur_format = workbook.add_format()\n\tpur_format.set_pattern(1)\n\tpur_format.set_bg_color('purple')\n\n\n\t# Make Key\n\tworksheet.write(0, 4, 'Red - Neg qty on BOM', red_format)\n\tworksheet.write(1, 4, 'Org - Schem Qty > BOM Qty', org_format)\n\tworksheet.write(2, 4, 'Yel - Schem Qty < BOM Qty', yel_format)\n\tworksheet.write(3, 4, 'Grn - Schem Qty = BOM Qty', grn_format)\n\tworksheet.write(4, 4, 'Pur - No Idea what happened', pur_format)\n\n\t\n\t# Apply the check to each schematic P/N:\n\trow = 1\n\tfor key, value in schempn.items():\n\t\t# get dict{op: qty} total p/n in each op seq from bomdf\n\t\tbompn = pn_indented_bom_lookup(key, bomdf)\n\t\t# print(str(key) + ': ' + str(value) + ' || ', end='')\n\t\t# print(bompn, end='')\n\n\t\t# Sum bom qty from each op sequence\n\t\tbomqty = 0\n\t\tif bompn:\n\t\t\tfor op, bomvalue in bompn.items():\n\t\t\t\tbomqty += bomvalue\n\t\telse:\n\t\t\tbomqty = 0\n\n\t\t# print to excel the following:\n\t\t# schem pn, qty on schem, for each op seq in bomdf (op, qty)\n\t\tworksheet.write(row, 0, key)\n\t\tworksheet.write(row, 1, value)\n\t\t# print(value)\n\t\tworksheet.write(row, 2, bomqty)\n\n\t\t# if bompn(value) < 0: red\n\t\tif bomqty == 0:\n\t\t\tworksheet.write(row, 0, key, red_format)\n\t\t# elif: sum bomdf dict(qty) < schem pn qty -> orange\n\t\telif bomqty < schempn[key]:\n\t\t\tworksheet.write(row, 0, 
key, org_format)\n\n\t\t# elif: sum bomdf dict(qty) > schem pn qty -> yellow\n\t\telif bomqty > schempn[key]:\n\t\t\tworksheet.write(row, 0, key, yel_format)\n\t\t# elif: sum bomdf dict(qty) == schem pn qty -> green\n\t\telif bomqty == schempn[key]:\n\t\t\tworksheet.write(row, 0, key, grn_format)\n\t\t# else: purple (idk what happened)\n\t\telse:\n\t\t\tworksheet.write(row, 0, key, pur_format)\n\t\t# if any bomdf dict {op, qty} < 0, -Red\n\t\tfor op, qty in bompn.items():\n\t\t\tif qty < 0:\n\t\t\t\tworksheet.write(row, 0, key, red_format)\n\n\t\trow += 1\n\n\n\tworkbook.close()\n\t\ndef pn_indented_bom_lookup(partnum, bomdf):\n\n\t'''\n\t\tinput:\n\t\t\tpartnum (string): part number to look up\n\t\t\tbomdf (pandas df): bom with index and columns ('Level', 'Item', 'Description', 'Type', 'Op Seq', 'Quantity')\n\t\t\n\t\toutput:\n\t\t\tdict{op: qty}\n\t'''\n\n\n\tcurrentrow = 0\n\tqty = {}\n\t# process sum all p/n with same op seq\n\tfor item in bomdf['Item']:\n\t\tupsearchqty = {} #### dict of {op seq: qty} for the current row\n\t\tif str(item) == str(partnum):\n\t\t\t# print(item + ' == ' + partnum)\n\t\t\t\n\t\t\tupsearchqty[int(bomdf.at[currentrow,'Op Seq'])] = float(bomdf.at[currentrow, 'Quantity'])\n\t\t\t# print('Upsearch Qty: ' + str(upsearchqty))\n\n\t\t\tnextlev = int(bomdf.at[currentrow, 'Level']) - 1\n\t\t\t# print('nextLev: ' + str(nextlev))\n\n\t\t\t# if BOM level > 1: search up BOM level \n\t\t\tupsearchrow = currentrow - 1\n\t\t\twhile nextlev > 0:\n\t\t\t\t# print('upsearchrow: ' + str(upsearchrow) + ' | Level: ' + str(df.at[upsearchrow,'Level']))\n\t\t\t\t# print('upsearchrow: ' + str(upsearchrow) + ' | nextLev: ' + str(nextlev))\n\n\t\t\t\tif nextlev == int(bomdf.at[upsearchrow, 'Level']):\n\t\t\t\t\tupsearchqty[bomdf.at[currentrow,'Op Seq']] = upsearchqty[bomdf.at[currentrow,'Op Seq']] * int(bomdf.at[upsearchrow,'Quantity'])\n\t\t\t\t\tnextlev = int(bomdf.at[upsearchrow, 'Level']) - 1\n\t\t\t\t\t# print('Found next level P/N: ' + str(bomdf.at[upsearchrow, 'Item']) + ' | Bom Level: ' + str(bomdf.at[upsearchrow, 'Level']) + ' | nextlev: ' + str(nextlev) )\n\n\t\t\t\tupsearchrow -= 1\n\n\t# print qty(op: qty) to screen from bom\n\t# Need Op Seq check\n\t\tfor op, value in upsearchqty.items():\n\t\t\tif op in qty:\n\t\t\t\tqty[op] = qty[op] + upsearchqty[op]\n\t\t\telse:\n\t\t\t\tqty[op] = upsearchqty[op]\n\n\t\tcurrentrow += 1\n\n\treturn qty\n\n\nif __name__ == \"__main__\":\n\n\tos.chdir('/home/matthewlefort/Documents/Projects/Python/PDF_PartNumberCheck/Data')\n\tcwd = os.getcwd()\n\toutputdir = cwd\n\n\telecfile = os.path.join(cwd,'990653411-A.pdf')\n\thydfile = os.path.join(cwd, '990653414-A.pdf ')\n\n\telec_pn = schempnextract.pdf_extract(elecfile)\n\thyd_pn = schempnextract.pdf_extract(hydfile)\n\tschem_pns = schempnextract.combine(elec_pn, hyd_pn)\n\n\n\tunitfile = os.path.join(cwd,'unitLevel.xlsx')\n\ttopfile = os.path.join(cwd, 'topLevel.xlsx')\n\n\ttopleveldf = indentbomextract.load_bom(topfile)\n\tunitleveldf = indentbomextract.load_bom(unitfile)\n\tbomdf = indentbomextract.combinedf(topleveldf, unitleveldf)\n\n\t# Test this for various p/n confirm accurate findings in p/n qtys to op seqs\n\t# pntest = pn_indented_bom_lookup('070420353', topleveldf)\n\n\tcheck(bomdf, schem_pns, outputdir)\n\n","sub_path":"pncheckprocess.py","file_name":"pncheckprocess.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"233339298","text":"import sys\ntest_cases = open('words.txt', 'r')\n\nfor line 
in test_cases:\n\ta,b,c = line.split()\n\ta,b,c = int(a),int(b),int(c)\n\tfor i in range (1,c+1):\n\t\tif i % a == 0 and i % b == 0:\n\t\t\tprint(\"FB\",end=' ')\n\t\telif i % a == 0 and i % b != 0:\n\t\t\tprint(\"F\",end=' ')\n\t\telif i % a !=0 and i % b ==0:\n\t\t\tprint(\"B\",end=' ')\n\t\telse:\n\t\t\tprint(i,end=' ')\n\tprint('')\n\n","sub_path":"Python 3/Easy/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"414468412","text":"#!/usr/bin/python\n\n# python simpleplot.py \nimport Qwt, sys\nimport numpy as np\na=None\nif (len(sys.argv) > 1 and sys.argv[1] == '4'):\n from PyQt4 import QtGui, QtCore\n a=QtGui.QApplication(sys.argv)\nelse:\n from PyQt5 import QtCore, QtGui, QtWidgets\n a=QtWidgets.QApplication(sys.argv)\n#from PyQt5.QtWidgets import (QApplication)\n\n#class MainWindow(QtWidgets.QMainWindow, UI.MainUI.Ui_MainWindow):\n\nplot=Qwt.QwtPlot()\nplot.setTitle(\"Plot Demo\")\nplot.setCanvasBackground(QtCore.Qt.white)\nplot.insertLegend( Qwt.QwtLegend() )\ngrid = Qwt.QwtPlotGrid()\ngrid.attach( plot )\n\ncurve = Qwt.QwtPlotCurve()\ncurve.setTitle(\"Some Points\")\ncurve.setPen(QtCore.Qt.blue,4)\ncurve.setRenderHint( Qwt.QwtPlotItem.RenderAntialiased, True );\n\nsymbol = Qwt.QwtSymbol( Qwt.QwtSymbol.Ellipse, QtGui.QBrush( QtCore.Qt.yellow ), QtGui.QPen( QtCore.Qt.red, 2 ), QtCore.QSize( 8, 8 ) );\ncurve.setSymbol( symbol )\n\n#x=[0.0,1.0,2.0,3.0,4.0,5.0]\n#y=[4.4,3.0,4.5,6.8,7.9,7.1]\nx=np.arange(0,10,0.1)\ny=np.sin(x)\ncurve.setSamples(x,y)\ncurve.attach(plot)\n\nplot.resize(600,400)\nplot.replot()\nplot.show()\nsys.exit(a.exec_())\n","sub_path":"examples/simpleplot.py","file_name":"simpleplot.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"276707594","text":"#!/usr/bin/python\n# This script is for Car 2.\n#This script is like ReceiverSendBackFile.py except that it receives and sends back twice since it's in the middle\n#It waits to receive a message then sends a message back\n\nimport socket\nimport datetime\n\n\n\ndef Main():\n #This section needs to be edited according to the car number\n listeningPort = 5002 #Car1 is 5001, Car2 is 5002, Car3 is 5003, etc...\n hostLower = '192.168.1.11' #Send back to this IP. 
Car1 is ..1.11, Car2 is ...1.12, Car3 is 1.13, etc...\n portLower = 5001\n hostUpper = '192.168.1.13' #Pass message on (send) to this IP\n portUpper = 5003\n #End of section that needs to be edited\n\n expectedFileSize = 4096\n print(\"starting...\")\n\n mySocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n mySocket.bind(('0.0.0.0', int(listeningPort)))\n data = \"\"\n while True:\n data = mySocket.recv(expectedFileSize).decode() #receive message\n print(\"Received file length \"+str(len(data)))\n mySocket.sendto(data.encode(),(hostUpper,int(portUpper))) #Pass message on to the \"Upper\" host\n # Wait for a message back from the upper host\n data = mySocket.recv(expectedFileSize).decode() #receive message\n print(\"Received file length \"+str(len(data)))\n #Send back to the lower host\n mySocket.sendto(data.encode(),(hostLower,int(portLower))) #Send back to the \"Lower\" host\n\n mySocket.close()\n\nif __name__ == '__main__':\n\n Main()\n","sub_path":"Car2.py","file_name":"Car2.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168091718","text":"import numpy as np\r\n\r\ndef tridiag(a,b,c,k1=-1,k2=0,k3=1):\r\n\treturn np.diag(a,k1) + np.diag(b,k2) + np.diag(c,k3)\r\n\r\nn=6\r\n\r\na=np.ones(n-1)\r\nb=np.zeros(n)\r\n\r\nU=tridiag(-a,b,a)\r\n\r\nfor i in range(1,2):\r\n\tprint(U)\r\n\tU=U.dot(U)/(2)","sub_path":"phase4_2017_end/numerics/exactRotation.py","file_name":"exactRotation.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47637399","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.views.generic import TemplateView\n\n\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n\n    # Django Login\n    url(r'^login/', views.loginform, name=\"login\"),\n    url(r'^logout/', views.logoutform, name=\"logout\"),\n\n    #login_success\n    url(r'^accounts/profile/', views.login_success, name='login-success'),\n\n\n    url(r'^project/', views.project, name=\"faculty\"),\n    url(r'^arc/$', views.hod, name=\"arc\"),\n    url(r'^arc-gradesheet/$', views.arcGradesheet, name=\"arc-gradesheet\"),\n    \n    url(r'^upload/$', views.upload, name=\"upload\"),\n    # url(r'^warden/$', views.hod, name=\"hod\"),\n    # url(r'^leave/', views.leave, name=\"leave\"),\n    url(r'^gradesheet/', views.gradesheet, name=\"gradesheet\"),\n    url(r'^gradesheet2/(?P\\w+)/$', views.gradesheet2, name=\"gradesheet2\"),\n    url(r'^gradesheet3/(?P\\w+)/$', views.gradesheet3, name=\"gradesheet3\"),\n    \n    url(r'^transcript/', views.transcript, name=\"transcript\"),\n    url(r'^transcript-continuing-price/', views.transcriptContinuingPrice, name=\"transcript3\"),\n    url(r'^transcript-graduated-price/', views.transcriptGraduatedPrice, name=\"transcript4\"),\n    url(r'^transcript2/(?P\\w+)/$', views.transcript2, name=\"transcript2\"),\n    url(r'^arc/([0-9]+)/$', views.hodprojectapprove, name=\"arcprojectapprove\"),\n    url(r'^arc-gradesheet/([0-9]+)/$', views.arcgradesheetapprove, name=\"arcgradesheetapprove\"),\n    # url(r'^hostelsuperintendent/([0-9]+)/$', views.hostelsuperintendentdaypassapprove, name=\"hostelsuperintendentdaypassapprove\"),\n    # url(r'^student/(?P\\d+)/$',views.studentDetails, name=\"studentDetails\"),\n\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"swd/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"501201029","text":"import sys\nfrom requests.auth import HTTPBasicAuth\nimport os, requests\nfrom termcolor import (colored)\n\nSAUCE_USERNAME = os.environ[\"SAUCE_USERNAME\"]\nSAUCE_ACCESS_KEY = os.environ[\"SAUCE_ACCESS_KEY\"]\n\n#First arg in command is sc_tunnel_data.py\n\n#Second arg is the username\nuser = sys.argv[1]\n\n#Third arg is the user's access key\nuser_key = sys.argv[2]\n\n#Fourth arg is the tunnel ID\ntunnel_id = sys.argv[3]\n\n\ndataCenter = ['us-west-1', 'eu-central-1', 'us-east-1', 'apac-southeast-1']\nregion = 0\n\nresponse = requests.get(\"https://api.\"+ dataCenter[region] +\".saucelabs.com/rest/v1/\" + user +\"/tunnels/\" + tunnel_id + \"\" , auth=HTTPBasicAuth(SAUCE_USERNAME, SAUCE_ACCESS_KEY))\n\nif int(response.status_code) != 200:\n\tprint(response.status_code)\n\tprint(dataCenter[region])\n\tregion += 1\n\tprint(dataCenter[region])\n\n\tresponse = requests.get(\"https://api.\"+ dataCenter[region] +\".saucelabs.com/rest/v1/\" + user +\"/tunnels/\" + tunnel_id + \"\" , auth=HTTPBasicAuth(SAUCE_USERNAME, SAUCE_ACCESS_KEY))\n\n\nprint (colored(\"Release: \", 'green'), response.json()['metadata']['release'])\n\nif str(response.json()['status']) == 'running':\n\tprint(colored(\"Status: \", 'green'), colored(response.json()['status'], 'green', attrs=['blink', 'underline']))\nelse:\n\tprint(colored(\"Status: \", 'green'), colored(response.json()['status'], 'red'))\n\tprint(colored(\"User Shut Down: \", 'green'), colored(response.json()['user_shutdown'], 'red'))\n\n\nif str(response.json()['tunnel_identifier']) != '':\n\tprint (colored(\"Tunnel Identifier: \", 'green'), 
response.json()['tunnel_identifier'])\nelse:\n\tprint (colored(\"Tunnel Identifier: \", 'green'), colored(\"none\", 'red'))\n\nif str(response.json()['shared_tunnel']) == 'true':\n\tprint (colored(\"Shared Tunnel: \", 'green'), response.json()['shared_tunnel'])\nelse:\n\tprint (colored(\"Shared Tunnel: \", 'green'), colored(response.json()['shared_tunnel'], 'red'))\n\nprint (colored(\"Command Arguments: \", 'green'), response.json()['metadata']['command_args'])\n","sub_path":"sc_tunnel_data.py","file_name":"sc_tunnel_data.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"456299718","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 7 10:52:07 2018\n\n@author: Ahmad\n\"\"\"\nimport pickle\n\ncontacts = {}\ncontactList = []\n\ndef load():\n    \"\"\" Load saved contacts from file. \"\"\"\n    \n    try:\n        with open(\"Memory.data\", \"rb\") as f:\n            global contacts\n            contacts = pickle.load(f)\n    except:#If no file is found, the user is new.\n        print(\"Hi, Welcome to Memory..\")\ndef save():\n    \"\"\" Save contacts to file. \"\"\"\n    \n    with open(\"Memory.data\", \"wb\") as f:\n        pickle.dump(contacts,f)\n    \ndef checkExist(name):\n    \"\"\" checkExist(name)\n    \n    Check if name exists in contactList and return True or False. \"\"\"\n    \n    listContacts(False)\n    if name in contactList:\n        return True\n    else:\n        return False\n    \ndef setType(Type):\n    \"\"\" Give a number and return corresponding contact type. \"\"\"\n    \n    if Type == 1:\n        Type = 'Family'\n    elif Type == 2:\n        Type = 'Friend'\n    elif Type == 3:\n        Type = 'Colleague'\n    else : print(\"Error: Invalid Type\")\n    return Type\n    \ndef setPerson(name,number,email,Type):\n    \"\"\" Give contact's data and create the contact according to its type. \"\"\"\n    \n    if Type == 'Family':\n        p = FamilyPerson(name,number,email)\n        p.add()\n    elif Type == 'Friend':\n        p = FriendPerson(name,number,email)\n        p.add()\n    elif Type == 'Colleague':\n        p = ColleaguePerson(name,number,email)\n        p.add()\n\ndef menu():\n    \"\"\" Run when the program starts and dispatch functions according to the order value. \"\"\"\n    \n    try:\n        order = int(input(\"\"\"Please select an operation (Enter number):\n                      \\n1. View Contacts\n                      \\n2. Add new contact\n                      \\n3. Edit contact\n                      \\n4. Delete contact\n                      \\n-->\"\"\"))\n        if order == 1:\n            showContacts()\n        elif order == 2:\n            addPerson()\n        elif order == 3:\n            editPerson()\n        elif order == 4:\n            delPerson()\n        else:\n            print(\"Error: Invalid Operation\")\n    except:#if order isn't a number, int() raises an error.\n        print(\"This isn't an integer\")\n        menu()\n\n    \ndef listContacts(capital):\n    \"\"\" listContacts(boolean)\n    \n    Call whenever a list of contact keys is needed, e.g. in checkExist().\n    Initialize contactList[] with keys in contacts{}\n    and capitalize or lowercase each key according to the given capital variable. \"\"\"\n    \n    global contactList\n    if capital:\n        contactList = [key.capitalize() for key in contacts.keys()]\n    else:\n        contactList = [key.lower() for key in contacts.keys()]\n    \ndef showContacts():\n    \"\"\" Print contacts list. \"\"\"\n    \n    listContacts(True)\n    if len(contactList) == 0:#If contactList is empty, run menu() with a message.\n        print(\"There are no contacts. But don't worry, you can add them if you want.\")\n        menu()\n    contactList.sort()\n    for contact in contactList:\n        print(\"{0} --> {1}\".format(contactList.index(contact),contact))\n    try:\n        order = int(input(\"\"\"What do you want? (Enter Number)\n                      \\n1. View a contact\n                      \\n2. 
Return to Menu\n            \\n-->\"\"\"))\n        if order == 1:\n            viewPerson()\n        elif order == 2:\n            menu()\n        else:\n            print(\"Error: Invalid operation\")\n            menu()\n    except: # if order isn't a number, int() raises an error.\n        print(\"This isn't an integer\")\n        menu()\n    \ndef viewPerson():\n    \"\"\" Print the details of a selected contact. \"\"\"\n    \n    try:\n        index = int(input(\"Enter Contact's index -->\"))\n        contact = contacts[contactList[index]] # Selected contact to view.\n        sep=\"-\"*20\n        print(sep)\n        print(\"\\nContact Name: {}\\n\".format(contactList[index]))\n        print(\"Contact Details:\")\n        print(\"\"\"\\nNumber: {0}\n              \\nEmail: {1}\n              \\nType: {2}\n              \"\"\".format(contact[0],contact[1],contact[2]))\n        print(sep)\n        menu()\n        \n    except: # non-integer input or an index that doesn't exist.\n        print(\"This isn't an integer\")\n        menu()\n    \ndef addPerson():\n    \"\"\" Run from menu() to add a person and run setPerson() with the given data. \"\"\"\n    \n    name = input(\"Enter contact's name -->\").capitalize()\n    number = input(\"Enter contact's number -->\")\n    email = input(\"Enter contact's Email -->\")\n    try:\n        Type = int(input(\"\"\"Enter contact's Type:\n             \\n1. Family\n             \\n2. Friend\n             \\n3. Colleague\n             \\n-->\"\"\"))\n    except: # if order isn't a number, int() raises an error.\n        print(\"This isn't an integer\")\n        menu()\n\n    setPerson(name,number,email,setType(Type))\n    menu()\n    \ndef editPerson():\n    \"\"\" Run from menu() to edit an existing person.\n    \n    If the contact exists, this function deletes it and creates a new contact\n    using setPerson().\n    \"\"\"\n    \n    name = input(\"Enter Contact's name -->\").capitalize()\n    if not checkExist(name.lower()): # Check if the given name exists in the contacts dict.\n        print(\"This Contact doesn't exist\")\n    else:\n        contact = contacts[name]\n        newName = \"\" # used to create the new contact\n        try:\n            item = int(input(\"\"\"Enter item's number to edit:\n                 \\n1.Name\n                 \\n2.Number\n                 \\n3.Email\n                 \\n4.Type\n                 \\n-->\"\"\"))\n            if item == 1:\n                newName = input(\"Enter new name -->\").capitalize()\n            elif item == 2:\n                contact[0] = input(\"Enter new number -->\")\n            elif item == 3:\n                contact[1] = input(\"Enter new Email -->\")\n            elif item == 4:\n                try:\n                    value = int(input(\"\"\"Enter new Type's number:\n                         \\n1. Family\n                         \\n2. Friend\n                         \\n3. Colleague\n                         \\n-->\"\"\"))\n                    contact[2] = setType(value)\n                except: # if order isn't a number, int() raises an error.\n                    print(\"This isn't an integer\")\n                    menu()\n            else:\n                print(\"Error: Invalid Item\")\n            \n            del contacts[name] # delete the existing contact\n            if newName == \"\":\n                newName = name # keep the old name if it was not changed.\n            setPerson(newName,*contact) # * unpacks the list into positional arguments.\n            print(\"{0} edited successfully.\".format(name))\n        except: # if order isn't a number, int() raises an error.\n            print(\"This isn't an integer\")\n            menu()\n\n    menu()\n    \ndef delPerson():\n    \"\"\" Run from menu() to delete an existing contact. \"\"\"\n    \n    name = input(\"Enter contact's name -->\").capitalize()\n    if checkExist(name.lower()): # check if the given name exists in the contacts dict. \n        confirm = input(\"Are you sure to delete {0}? (Y,N)\".format(name))\n        if confirm.upper() == \"Y\":\n            del contacts[name]\n            save()\n            print(\"{0} deleted successfully\".format(name))\n        elif confirm.upper() == \"N\":\n            print(\"You canceled the operation\")\n        else:\n            print(\"Error: Invalid answer\")\n    else:\n        print(\"This Contact doesn't exist\")\n    menu()\n    \nclass Person:\n    \"\"\" General person class used by inherited classes. 
\"\"\"\n \n def __init__(self, name, number,email):\n self.name = name\n self.number = number\n self.email = email\n def add(self):\n contacts[self.name] = [self.number, self.email,self.type]\n save()\n \nclass FamilyPerson(Person):\n \"\"\" Family person class, set type to 'Family'. \"\"\"\n def __init__(self, name, number, email):\n Person.__init__(self,name,number,email)\n self.type = 'Family'\n\n\nclass FriendPerson(Person):\n \"\"\" Friend person class, set type to 'Friend'. \"\"\"\n def __init__(self, name, number, email):\n Person.__init__(self,name,number,email)\n self.type = 'Friend'\n\nclass ColleaguePerson(Person):\n \"\"\" Colleague person class, set type to 'Colleague'. \"\"\"\n def __init__(self,name,number,email):\n Person.__init__(self,name,number,email)\n self.type = 'Colleague'\n\n\n \n\nload()\nmenu()","sub_path":"Memory.py","file_name":"Memory.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195536967","text":"import numpy as np\n#from numpy import sin, cos, arcsin, arccos, arctan2, square, sqrt, abs, power\nfrom . import substate\nfrom .. utils import covarianceContainer\n\nclass oneDPositionVelocity(substate.SubState):\n def __init__(\n self,\n objectID,\n stateVectorHistory,\n covarianceStorage='covariance',\n biasState=True,\n artificialBiasMeas=True,\n biasStateTimeConstant=0.9,\n biasStateProcessNoiseVar=1e-3,\n biasMeasVar=1,\n storeLastStateVectors=0,\n ):\n \n if not isinstance(stateVectorHistory['covariance'], covarianceContainer):\n stateVectorHistory['covariance'] = covarianceContainer(\n stateVectorHistory['covariance'],covarianceStorage\n )\n self.biasState = biasState\n if biasState:\n super().__init__(stateDimension=3, stateVectorHistory=stateVectorHistory, storeLastStateVectors=storeLastStateVectors)\n else:\n super().__init__(stateDimension=2, stateVectorHistory=stateVectorHistory,storeLastStateVectors=storeLastStateVectors)\n self.stateVector = stateVectorHistory['stateVector']\n self.objectID = objectID\n self.velocityVar = (\n stateVectorHistory['covariance'].convertCovariance('covariance').value[1,1]\n )\n self.positionVar = (\n stateVectorHistory['covariance'].convertCovariance('covariance').value[0,0]\n )\n stateVectorHistory['positionStd'] = np.sqrt(self.positionVar)\n stateVectorHistory['velocityStd'] = np.sqrt(self.velocityVar)\n self.currentPosition = 0\n self.currentVelocity=0\n self.currentBiasState = 0\n \n self.artificialBiasMeas = artificialBiasMeas\n\n self.biasStateProcessNoiseVar = biasStateProcessNoiseVar\n self.biasStateTimeConstant = biasStateTimeConstant\n self.artificialBiasMeasVar = biasMeasVar\n\n def storeStateVector(self, svDict):\n xPlus = svDict['stateVector']\n aPriori = svDict['aPriori']\n\n if aPriori is False:\n self.stateVector = xPlus\n \n self.currentPosition = xPlus[0]\n self.positionVar = svDict['covariance'].convertCovariance('covariance').value[0,0]\n self.currentVelocity = xPlus[1]\n self.velocityVar = svDict['covariance'].convertCovariance('covariance').value[1,1]\n\n if self.biasState:\n self.currentBiasState = xPlus[2]\n svDict['biasState'] = self.currentBiasState\n else:\n svDict['biasState'] = 0\n\n svDict['position'] = self.currentPosition\n svDict['velocity'] = self.currentVelocity\n svDict['positionStd'] = np.sqrt(self.positionVar)\n svDict['velocityStd'] = np.sqrt(self.velocityVar)\n \n svDict['stateVector'] = self.stateVector\n super().storeStateVector(svDict)\n\n def timeUpdate(self, dT, 
dynamics=None):\n        if self.biasState:\n            #F = np.array([[1, dT, 0],[0, 1, 0], [0, 0, np.power(1 + 1e-1, -dT)]])\n            F = np.array([[1, dT, 0],[0, 1, 0], [0, 0, np.exp(-dT/self.biasStateTimeConstant)]])\n        else:\n            F = np.array([[1, dT],[0, 1]])\n        dT2 = np.square(dT)\n        dT3 = np.power(dT, 3)\n        dT4 = np.power(dT, 4)\n        if self.covariance().form == 'covariance':\n            if self.biasState:\n                Q = np.array([[dT4/4, dT3/2, 0],[dT3/2, dT2, 0], [0,0,self.biasStateProcessNoiseVar * dT2]])\n            else:\n                Q = np.array([[dT4/4, dT3/2],[dT3/2, dT2]])\n        elif self.covariance().form == 'cholesky':\n            if self.biasState:\n                Q = np.array([[dT2/2,0, 0],[dT,0, 0], [0,0,0]])\n            else:\n                Q = np.array([[dT2/2,0],[dT,0]])\n        \n        accelKey = self.objectID + 'acceleration'\n        if dynamics is not None and accelKey in dynamics:\n            acceleration = dynamics[accelKey]['value']\n            accVar = dynamics[accelKey]['var']\n        else:\n            acceleration = 0\n            accVar = 0\n        if self.biasState:\n            self.stateVector = F.dot(self.stateVector) + np.array([0, acceleration * dT, 0])\n        else:\n            self.stateVector = F.dot(self.stateVector) + np.array([0, acceleration * dT])\n        if self.covariance().form == 'covariance':\n            Q = covarianceContainer(Q * accVar, 'covariance')\n            if self.biasState:\n                Q[2,2] = self.biasStateProcessNoiseVar * dT*dT\n        elif self.covariance().form == 'cholesky':\n            Q = covarianceContainer(Q * np.sqrt(accVar), 'cholesky')\n            if self.biasState:\n                Q[2,2] = np.sqrt(self.biasStateProcessNoiseVar) * dT\n        else:\n            raise ValueError('unrecognized covariance')\n        \n        return {'F': F, 'Q': Q}\n\n    def getMeasurementMatrices(self, measurement, source=None):\n        HDict = {}\n        RDict = {}\n        dyDict = {}\n\n        if 'position' in measurement:\n            if self.biasState:\n                H = np.array([[1, 0, 1]])\n            else:\n                H = np.array([[1, 0]])\n            dY = measurement['position']['value'] - H.dot(self.stateVector) \n            HDict['%s position' %self.objectID] = H\n            RDict['%s position' %self.objectID] = np.array(\n                [[measurement['position']['var']]]\n            )\n            dyDict['%s position' %self.objectID] = dY\n        if 'velocity' in measurement:\n            if self.biasState:\n                H = np.array([[0, 1, 0]])\n            else:\n                H = np.array([[0, 1]])\n            dY = measurement['velocity']['value'] - H.dot(self.stateVector)\n            HDict['%s velocity' %self.objectID] = H\n            RDict['%s velocity' %self.objectID] = np.array(\n                [[measurement['velocity']['var']]]\n            )\n            dyDict['%s velocity' %self.objectID] = dY\n\n        if self.biasState and self.artificialBiasMeas:\n            HDict['artificialBiasMeas'] = np.array([[0,0,1]])\n            RDict['artificialBiasMeas'] = np.array([[self.artificialBiasMeasVar]])\n            # RDict['artificialBiasMeas'] = np.array([[1]])\n            dyDict['artificialBiasMeas'] = -self.stateVector[2]\n        return {'H': HDict, 'R': RDict, 'dY': dyDict}\n\n","sub_path":"modest/substates/oneDimensionalPositionVelocity.py","file_name":"oneDimensionalPositionVelocity.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"510692728","text":"import copy\nfrom collections import defaultdict\nfrom functools import reduce\n\n\ndef read_input(fname):\n    with open(fname) as f:\n        return [int(line.rstrip()) for line in f.readlines()]\n\ndef solve_part1(puzzle):\n    puzzle = sorted(puzzle)\n    device_jolts = max(puzzle) + 3\n    puzzle.append(device_jolts)\n    diff_counts = defaultdict(int)\n    for ndx, x in enumerate(puzzle):\n        if ndx == 0:\n            diff = x\n        else:\n            diff = puzzle[ndx] - puzzle[ndx-1]\n        diff_counts[diff] += 1\n    return diff_counts[1] * diff_counts[3]\n\n\nreach_count_memento = {0:1}\n\ndef reach_count(RT, target):\n    if target in 
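The `timeUpdate` above builds the state-transition matrix `F` and process noise `Q` for a constant-velocity model (plus an optional bias state). A minimal numpy sketch of just the position-velocity block, without the bias state or the Cholesky branch:

```python
# Constant-velocity time update: F propagates [position, velocity] over dT,
# Q is the standard white-noise-acceleration process noise scaled by accel_var.
import numpy as np

def cv_time_update(state, dT, accel_var=0.0):
    F = np.array([[1.0, dT],
                  [0.0, 1.0]])
    dT2, dT3, dT4 = dT**2, dT**3, dT**4
    Q = np.array([[dT4 / 4, dT3 / 2],
                  [dT3 / 2, dT2]]) * accel_var
    return F.dot(state), F, Q

state = np.array([0.0, 1.0])   # position 0, velocity 1
new_state, F, Q = cv_time_update(state, dT=0.5)
assert np.allclose(new_state, [0.5, 1.0])
```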
reach_count_memento:\n        return reach_count_memento[target]\n    else:\n        result = sum(reach_count(RT, x) for x in RT[target])\n        reach_count_memento[target] = result\n        return result\n\n\ndef solve_part2(puzzle):\n    puzzle.append(0)\n    device_jolts = max(puzzle) + 3\n    puzzle.append(device_jolts)\n    puzzle = sorted(puzzle)\n    T = {}\n    for ndx, x in enumerate(puzzle):\n        T[x] = [elem for elem in puzzle[ndx+1:ndx+4] if elem <= x+3]\n    print(puzzle)\n    print(T)\n    RT = defaultdict(list)\n    for k in T:\n        for elem in T[k]:\n            RT[elem].append(k)\n    print(RT)\n    return reach_count(RT, device_jolts)\n\n    # paths = 0\n    # Q = [(0, set())]\n    # while Q:\n    #     vertex, seen = Q.pop(0)\n    #     if vertex == device_jolts:\n    #         paths += 1\n    #     else:\n    #         seen = copy.copy(seen)\n    #         seen.add(vertex)\n    #         for adjacent in [a for a in T[vertex] if a not in seen]:\n    #             Q.append((adjacent, seen))\n\n# https://www.geeksforgeeks.org/count-possible-paths-two-vertices/\n# 19208 for part two for testinput\n\npuzzle = read_input('2020_10.txt')\n# puzzle = read_input('2020_10_test.txt')\nprint(puzzle)\nprint(f'part 1: {solve_part1(puzzle)}') # 1656\nprint(f'part 2: {solve_part2(puzzle)}') # 281474976710656 is prob waaay to high, 67706637778944 too\n# 981 is too low\n# 68719476736 also wrong\n","sub_path":"aoc/2020/2020_10.py","file_name":"2020_10.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"390616954","text":"bis = [line.split()[0] for line in open('bidirectional_lncs.txt')]\nantis = [line.split()[0] for line in open('anti_lncs.txt')]\n\nbiandantis = []\nfor lnc in bis:\n    if lnc in antis:\n        biandantis.append(lnc)\n\nwith open('anti_and_bidirectional_lncs.txt', 'w') as fw:\n    fw.write('\\n'.join(biandantis))\n","sub_path":"anti_and_bi.py","file_name":"anti_and_bi.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"201959272","text":"import requests\nimport os\nimport traceback\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\n\ndef download(url, filename):\n    if os.path.exists(filename):\n        print('file exists!')\n        return\n    try:\n        r = requests.get(url, stream=True, timeout=60)\n        r.raise_for_status()\n        with open(filename, 'wb') as f:\n            for chunk in r.iter_content(chunk_size=1024):\n                if chunk:  # filter out keep-alive new chunks\n                    f.write(chunk)\n                    f.flush()\n        return filename\n    except KeyboardInterrupt:\n        if os.path.exists(filename):\n            os.remove(filename)\n        raise KeyboardInterrupt\n    except Exception:\n        traceback.print_exc()\n        if os.path.exists(filename):\n            os.remove(filename)\n\n\n# Create the save directory\nif not os.path.exists('yugui'):\n    os.makedirs('yugui')\n\n# Open the browser\nbrowser = webdriver.Chrome()\n# Go to the image detail page\nurl = 'https://www.duitang.com/blog/?id=1005406113'\nbrowser.get(url)\n\n# Set how many pages to download\nstart = 1\nend = 133\nfor i in range(start, end + 1):\n    #\tLocate the images\n    img = browser.find_elements_by_xpath(\"//img[@id='mbpho-img']\")\n    for ele in img:\n        target_url = ele.get_attribute(\"src\")\n        print(target_url)\n        img_name = target_url.split('/')[-1]\n        filename = os.path.join('yugui', img_name[-25:])\n        download(target_url, filename)\n    # Show progress\n    print('%d / %d' % (i, end))\n    # Next page\n    if i == end:\n        break\n    browser.find_element_by_class_name(\"shownext\").click()\n    time.sleep(3)\n\n# 
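The hand-rolled `reach_count_memento` dict above is a classic memoized dynamic program. An equivalent sketch using `functools.lru_cache`, with a tiny adapter graph as the test input:

```python
# Memoized path counting with lru_cache instead of a module-level memo dict.
from functools import lru_cache

def count_paths(reverse_tree, target):
    @lru_cache(maxsize=None)
    def count(node):
        if node == 0:
            return 1  # one way to "reach" the charging outlet
        return sum(count(p) for p in reverse_tree[node])
    return count(target)

# reverse_tree maps each joltage to the joltages that can reach it, e.g.
# adapters {1, 2, 3} feeding a device at 6: there are 4 distinct arrangements.
assert count_paths({1: [0], 2: [0, 1], 3: [0, 1, 2], 6: [3]}, 6) == 4
```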
Close the browser\nbrowser.quit()","sub_path":"duitangmitaomao/ceshi.py","file_name":"ceshi.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354053526","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.views.generic.base import View\n\nfrom .models import Post\nfrom .forms import CommentForm\n\n\nclass PostListView(View):\n    \"\"\"List the posts of a category\"\"\"\n\n    def get_queryset(self):\n        return Post.objects.filter(published=True, published_date__lte=timezone.now()) # Misha's version uses datetime.now()\n\n    def get(self, request, category_slug=None, slug=None):\n        if category_slug is not None:\n            posts = self.get_queryset().filter(category__slug=category_slug, category__published=True)\n        elif slug is not None:\n            posts = self.get_queryset().filter(tags__slug=slug, tags__published=True)\n        else:\n            posts = self.get_queryset()\n        if posts.exists():\n            template = posts.first().get_category_template()\n        else:\n            template = 'blog/post_list.html'\n        return render(request, template, {'posts': posts})\n\n    # CategoryView: handling of parent categories\n    # chosen_category = Category.objects.get(slug=category_slug)\n    # if chosen_category.level == 1:\n    #     posts = Post.objects.filter(category=chosen_category, published=True)\n    #     return render(request, posts.first().get_category_template(), {\n    #         'categories': category_list,\n    #         'posts': posts\n    #     })\n    # elif chosen_category.level == 0:\n    #     children_categories = Category.objects.filter(tree_id=chosen_category.tree_id)\n    #     posts = Post.objects.filter(category__in=children_categories, published=True)\n    #     return render(request, posts.first().get_category_template(), {\n    #         'categories': category_list,\n    #         'posts': posts\n    #     })\n\n\nclass PostDetailView(View):\n    def get(self, request, **kwargs):\n        post = get_object_or_404(Post, slug=kwargs.get('slug'))\n        form = CommentForm()\n        return render(request, post.template, {\n            'post': post,\n            'form': form\n        })\n\n    def post(self, request, **kwargs):\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            form = form.save(commit=False)\n            form.post = Post.objects.get(slug=kwargs.get('slug'))\n            form.author = request.user\n            form.save()\n        return redirect(request.path)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496060769","text":"from Preprocessor import *\nfrom BoundingBoxSplitter import *\nfrom MNISTKeras import *\n\nfrom WordsExtractor import *\nfrom DigitsExtractor import *\n\nclass OCR:\n\n    def __init__(self):\n        pass\n\n    def process_image(self, image):\n\n        # print(\"Preprocessing image...\")\n        image = Preprocessor.process(image)\n\n        out_image = np.zeros(image.shape)\n\n        # print(\"Splitting rows...\")\n        rows, processed = BoundingBoxSplitter.split_rows(image)\n        # print(\"Detected \" + str(len(rows)) + \" rows.\")\n\n        clf = Classifier(\"../model/keras_piro.h5\")\n\n        # print(\"Processing rows...\")\n        indices = []\n        # test = []\n        row_no = 1\n        for row, coords, _ in rows:\n\n            words, out_image, row_no = WordsExtractor.extract(row, coords, row_no, out_image)\n            if len(words) > 0:\n                # test.append(words[-1])\n                digits = DigitsExtractor.extract(words)\n            else:\n                digits = []\n\n            index = \"\"\n            for digit in digits:\n                # test.append(digit)\n                predicted = clf.predict(digit)\n                index += (str(predicted))\n\n            indices.append(index)\n\n            out_image = 
Preprocessor.make_out_image(out_image)\n        # test.append(out_image)\n        return indices, out_image\n","sub_path":"PySolution/OCR.py","file_name":"OCR.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420958594","text":"from logger import get_logger\nfrom .connection import connect_to_db, close_connection, execute_sql\nfrom .queries import database_drop, database_create\n\n\n# Setup logger\nlogger = get_logger('CREATE-DB')\n\n\ndef create_db():\n    logger.info(\"Start creating DB\")\n\n    conn = connect_to_db(True)\n\n    execute_sql(conn, database_drop, False)\n    execute_sql(conn, database_create, False)\n\n    close_connection(conn)\n\n    logger.info(\"Finish creating DB\")","sub_path":"db/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"287699904","text":"# Rolando Josue Quijije Banchon\n# Software\n# # Third semester\n\n# Homework 8 from the web-page exercise set\n\n\"\"\"Exercise 8\n\nRead three integers that are all different from one another\nand determine the largest of the three.\"\"\"\n\nclass Tarea8:\n    def __init__ (self):\n        pass\n    def NumeroMayor(self):\n        print(\"_______________________________________\")\n        n1= int(input(\"Ingrese primer numero entero: \"))\n        n2= int(input(\"Ingrese segundo numero entero: \"))\n        n3= int(input(\"Ingrese tercer numero entero: \"))\n        print(\"_______________________________________\")\n        if n1>n2 and n1>n3:\n            nM=n1\n        else:\n            if n2>n3:\n                nM=n2\n            else:\n                nM=n3\n        print(\"El numero Mayor es:\",nM)\n        print(\"_______________________________________\")\n        input(\"enter para salir\") \nobjeto = Tarea8()\nobjeto.NumeroMayor()","sub_path":"Tarea8.py","file_name":"Tarea8.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207509379","text":"# -*- coding: utf8 -*-\r\n\r\nconsole.show()\r\nconsole.clear()\r\n\r\nimport re\r\n\r\nNotepad = notepad\r\nEditor = editor\r\n\r\nSCRIPT_NAME = 'Cleanup HTML'\r\nSCRIPT_VERSION = '1.0.1'\r\n\r\nCURRENT_FILE = notepad.getCurrentFilename()\r\nFILE_ENCODING = Notepad.getEncoding()\r\nOUTPUT_FILE = ''\r\n\r\nCOMMENTS = ''\r\n\r\nHEADER = ''\r\nHEADER = '%s Source File: %s (%s)\\r\\n%s\\r\\n' % (COMMENTS, CURRENT_FILE, FILE_ENCODING, HEADER)\r\n\r\n# variable definitions\r\nDELETE_TITLES = 1\r\nDELETE_COMMENTED = 0\r\n\r\ndef log(message):\r\n    message = re.sub(r'(.+)', COMMENTS + r' \\1', message, flags=re.M)\r\n    console.write(message)\r\n\r\neqString = r'(\\s+)?=(\\s+)?'\r\n\r\ndef trim(string):\r\n    if string:\r\n        # remove quotes\r\n        string = re.sub(r'[\"]', '', string)\r\n        # remove leading and trailing whitespace\r\n        string = re.sub(r'^[ \\t]+', '', string)\r\n        string = re.sub(r'[ \\t]+$', '', string)\r\n        # collapse repeated whitespace\r\n        string = re.sub(r'[\\t]+', ' ', string)\r\n        string = re.sub(r'[\\s]+', ' ', string)\r\n        # return the result\r\n        return string\r\n\r\nexpression = notepad.prompt(\r\n    'delete_titles - удаление заголовков (0|1)\\r\\ndelete_commented - удаление закоментированных строк (1|0)'.decode('utf-8').encode('cp1251'),\r\n    'Ввод данных:'.decode('utf-8').encode('cp1251'),\r\n    ('delete_titles = %s' + '\\r\\n' + 'delete_commented = %s' + '\\r\\n') % (DELETE_TITLES, DELETE_COMMENTED)\r\n)\r\n\r\nif expression != None:\r\n    expression = re.sub('\\r\\n', '; ', expression)\r\n    list = expression.split('; 
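The `log()` helper above originally passed `re.M` as the fourth positional argument of `re.sub`, where that slot is `count`, not `flags`; the fix uses `flags=re.M`. A tiny self-contained demonstration of the pitfall:

```python
# re.sub(pattern, repl, string, count=0, flags=0): passing re.M positionally
# silently becomes count=8, and '^' no longer matches at every line start.
import re

text = "a\nb\nc"
wrong = re.sub(r'^(.)', r'# \1', text, re.M)        # re.M (= 8) taken as count
right = re.sub(r'^(.)', r'# \1', text, flags=re.M)  # every line gets the prefix
assert right == "# a\n# b\n# c"
assert wrong != right
```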
')\r\n\r\n DELETE_TITLES = re.sub(r'delete_titles' + eqString, '', list[0])\r\n DELETE_TITLES = trim(DELETE_TITLES)\r\n\r\n DELETE_COMMENTED = re.sub(r'delete_commented' + eqString, '', list[1])\r\n DELETE_COMMENTED = trim(DELETE_COMMENTED)\r\n\r\nlineCount = 0\r\ndef foundSomething(m):\r\n global DELETE_TITLES\r\n global DELETE_COMMENTED\r\n\r\n global lineCount\r\n lineCount += 1\r\n selectionStart, selectionEnd = editor.getUserLineSelection()\r\n\r\n lineString = m.group(1)\r\n lineNumber = selectionStart + lineCount\r\n\r\n if (re.match( r'(.*? p669 or hipass? (p or h): ')\n\thicatq='n' #'--> Using hicat? (y or n): ')\nif survey=='g':\n\tfield='combo'# --> Which gama field? (9 or 15 or combo or hipass): ')\n\tdmu='I' # Which GAMA DMU version to use? (I or II)\n\tzversion='16' # Redshift from which version of the Tiling Cat? ('16'=GAMA I, '31'=GAMA II&autoz)\n\tcsample='SM' # L or SM complete sample? For choosing input files and weighting factor\n\nopcat_subset='n' # Select a subset of the opcat sources?\n\n#'Use subset of original bigcat? y for normal, n when using shufflez cats #always 'n' now\nif survey=='2df' and pksdat=='h':\n\tbigcat_subset='n'\n\tindexcol=42\nelse:\n\tbigcat_subset='n'\n\nbinning='n' #'z binning?: ')\nbinby='SM_raw' # 'z', 'L' or 'colour' or 'lum_raw'. For GAMA, also: 'SM', 'SM_raw', 'gmi', 'umr', 'sfr', 'sdens'\nbinchoice='r'\t\t# Bin by even what? 'N'=even numbers of galaxies in each bin, 'r'=even bin ranges\nif binning=='y':\n\t#nbins=2 #'--> How many bins?: ') NOTE: Now defined in bindic in _vars.py\n\tsavebin='n' #'--> Save binned info?: ')\n# else:\n# \tnbins=1\n\t# Note: further binning plotting parameters in OmegaHI_vars.py\n\t\nconfuse='y' #'Account for confusion? (y or n): ')\nusetf='y' # Use tully-fisher method to decide spectral confusion?\nOHIvDV='y' # Make OHI vs DV plot? (If only one dv in loop, just changes the siglim used for HI mass integration)\n\nedgecut='y' # Exclude galaxies near rim of field\n\ndo_boostf='y' # Include factor in confusion correction to account for sources below the optical catalogue magnitude limit \n\nf_factor='y' # Apply F-factor correction to OmegaHI?\n\ncscale='n' # Scale up final mass? (Only for calibration against HICAT)\n\nfluxcalib='y' # Scale up fluxes due to Parkes calibration? (y only for SGP)\n\nnocontin='y' #'Exclude spectra near continuum sources? (y or n): ') (n for hipass, y for sgp)\nif nocontin=='y':\n\tsepfac=1.0 #'----> 1.0 if excluding full beamwidth, 2.0 if half a beamwidth\n\t\ncolour_msk='n' #'Exclude sources with bad colours? (y or n): ') NOTE: Check ranges in OHI_sourceselect.py (y for HIPASS, n for SGP)\n\ncompleteness_cut='y' # Exclude galaxies with r>19.8 or r>19.4\n\nfit='y' #'Fit baseline to stack? (y or n): ')\n\nindivfit='n' # Fit a baseline to the individual mass spectra and use this for final stack values? (y or n)\n\nblank='y' #'Blank bad freq channels? (y or n): ')\n\nweighting='y' #'Use weighting? (y or n): ')\n\nrfi_mask='y' #'Exclude full spectrum if source in RFI zone? (y or n): ')\n\ndeccut='n'\n\nabsmag_msk='n' # Mask GAMA sources with bad absmag (and stellar masses)\n\nexcl='n' #'Exclude particularly bad spectra? (y or n): ')\n\nfit_sin='n' #'Fit and remove sinusoid to stacked mass spectrum? (y or n): ')\n\nfit_gaussian='n'\n\nboost_rhoL='n' # To change the luminosity density from z=0 to median(z)\n\nshowplots='y'\n\navtyp='mn' #'Use mean or median stats? (mn or md):')\n\nnoshift='n' #'Stack without shifting? (y or n): ')\n\nsavearr='n' #'Save output array? 
(y or n): ')\n\nmagband='r'\n\nuse_ml='y' # The output spectrum to save and use for S/N is the stack?\n\ngenshz='n' #'Generate stacked shuffled spectra? (y or n):\nz_orig_col=-2 # Column with original (not shuffled) redshifts\nq_orig_col=-1\n\nrunnvn='n' #'Run NvN? (y or n): ')\nmovie='n'\t\t\t# Make NvN movie?\n\nf_or_m='m' #'Use frequency or mass? (f or m): ')\n\ncalcsn='n' #'Calculate S/N? (y or n): ')\n\nplotoverlay='y'\t\t#'Plot stack overlay? (y or n): ')\nif plotoverlay=='y':\n\tplotpar='mmstar' # m, f, ml, mmstar\n\tsaveplot='n' # Save the overlaid plot\n\nindivsn='n' \t\t#'Investigate individual spectra? (y or n): ')\n\nsingspec='n' #'Run process for only a single spectrum? (y or n): ')","sub_path":"OmegaHI_params.py","file_name":"OmegaHI_params.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"18353961","text":"import sys\n\nfrom .base import *  # noqa\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom django_log_formatter_ecs import ECSFormatter\n\n\nMIDDLEWARE += [\n    \"authbroker_client.middleware.ProtectAllViewsMiddleware\",\n]\n\nAUTHENTICATION_BACKENDS += [\n    \"user.backends.CustomAuthbrokerBackend\",\n]\n\nSTATICFILES_DIRS = (\n    os.path.join(BASE_DIR, \"front_end/build/static\"),\n    os.path.join(BASE_DIR, \"node_modules/govuk-frontend\"),\n)\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# X_ROBOTS_TAG (https://man.uktrade.io/docs/procedures/1st-go-live.html)\nX_ROBOTS_TAG = [\n    'noindex',\n    'nofollow',\n]\n\n# Django staff SSO user migration process requires the following\nMIGRATE_EMAIL_USER_ON_LOGIN = True\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django_redis.cache.RedisCache',\n        'LOCATION': CELERY_BROKER_URL,\n        'OPTIONS': {\n            'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n        },\n        'KEY_PREFIX': 'cache_'\n    }\n}\n\nLOGGING = {\n    \"version\": 1,\n    \"disable_existing_loggers\": False,\n    \"formatters\": {\n        \"ecs_formatter\": {\n            \"()\": ECSFormatter,\n        },\n    },\n    'handlers': {\n        'ecs': {\n            'class': 'logging.StreamHandler',\n            'stream': sys.stdout,\n            'formatter': 'ecs_formatter',\n        },\n    },\n    'loggers': {\n        'django.request': {\n            'handlers': ['ecs', ],\n            'level': 'INFO',\n            'propagate': True,\n        },\n    },\n}\n\n# Set async file uploading\nASYNC_FILE_UPLOAD = True\n\nsentry_sdk.init(\n    os.environ.get(\"SENTRY_DSN\"),\n    environment=os.environ.get(\"SENTRY_ENVIRONMENT\"),\n    integrations=[DjangoIntegration()],\n)\n\n# HSTS (https://man.uktrade.io/docs/procedures/1st-go-live.html)\nSECURE_HSTS_SECONDS = 3600\nSECURE_HSTS_PRELOAD = True\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n\n# ## IHTC compliance\n\n# Set CSRF cookie to be secure\nCSRF_COOKIE_SECURE = True\n\n# Set session cookie to be secure\nSESSION_COOKIE_SECURE = True\n\n# Make browser end session when user closes browser\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\n# Set cookie expiry to 4 hours\nSESSION_COOKIE_AGE = 4 * 60 * 60  # 4 hours in seconds\n\n# Prevent client side JS from accessing CRSF token\nCSRF_COOKIE_HTTPONLY = True\n\n# Prevent client side JS from accessing session cookie (true by default)\nSESSION_COOKIE_HTTPONLY = True\n\n# Set content to no sniff\nSECURE_CONTENT_TYPE_NOSNIFF = True\n\n# Set anti XSS header\nSECURE_BROWSER_XSS_FILTER = True\n","sub_path":"config/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
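The settings module above reads its secrets from the environment (the garbled `SENTR Y_ENVIRONMENT` key is fixed to `SENTRY_ENVIRONMENT`). A sketch of the same environment-driven pattern with a fail-fast helper; `require_env` and the variable names other than the Sentry ones are illustrative, not from the source:

```python
# Environment-driven configuration: optional values get defaults,
# required ones fail fast at import time.
import os

def require_env(name):
    value = os.environ.get(name)
    if not value:
        raise RuntimeError("missing required environment variable: %s" % name)
    return value

SENTRY_DSN = os.environ.get("SENTRY_DSN")                          # optional: may be None
SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "dev")   # optional, with default
# SECRET_KEY = require_env("DJANGO_SECRET_KEY")                    # hypothetical required value
```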
+{"seq_id":"550560643","text":"# Can take any number of arguments, but only has one expression\nremainder = lambda num: num % 2\nprint(remainder(5))\n\nproduct = lambda x, y: x * y\nprint(product(2, 3))\n\n\ndef testfunc(num):\n return lambda x: x * num\n\n\nresult1 = testfunc(10)\nresult2 = testfunc(100)\n\nprint(result1(9))\nprint(result2(9))\n\nnumbers_list = [2, 6, 8, 10, 11, 4, 12, 7, 13, 17, 0, 3, 21]\nfiltered_list = list(filter(lambda num: (num > 7), numbers_list))\n# filter takes object, iterable\nprint(filtered_list) # filters out the numbers less than 7\n\n\ndef addition(n):\n return n + n\n\n\n# double all numbers using regular function\nnumbers = [1, \"a\", 3, 4]\nnumbers2 = (4, \"bob\", 2, 7, 5)\nresult = map(lambda x, y: x + y, numbers, numbers2)\n# map applies function to\nprint(list(result))\n\nimport os\n\nclear = lambda: os.system(\"clear\")\nclear()\n\nproduct = lambda x, y: x * y\nprint(product(2, 3))\n","sub_path":"NLP/lam.py","file_name":"lam.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"206902047","text":"# -*- coding: utf-8 -*-\n# __author__ = 'qinjincheng'\n\nimport configparser\nimport os\n\nclass Config():\n def __init__(self):\n self._dir = os.path.dirname(__file__)\n self._configs = dict()\n\n def _load(self, name):\n filename = os.path.join(self._dir, '{}.ini'.format(name))\n if name in self._configs:\n pass\n else:\n self._configs.update({name: configparser.ConfigParser()})\n self._configs[name].read(filename)\n return self._configs[name]\n\n def get(self, name, section, option):\n config = self._load(name)\n if section in config.sections():\n if option in config.options(section):\n return config.get(section, option)\n else:\n raise Exception('ERROR: can not find {} in {} at config {}'.format(option, section, name))\n else:\n raise Exception('ERROR: can not find {} at config {}'.format(section, name))\n #\n #\n # def program(self, option, section='default'):\n # config = self._configs['program']\n # return config[section][option]\n #\n # def script(self, option, section='default'):\n # config = self._configs['script']\n # return config[section][option]\n\nif __name__ == '__main__':\n c = Config()\n print(c.program('fastqc'))","sub_path":"bioframe/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"450213536","text":"from functions import *\n\n\n# Verify before running in case of accidental execution.\n# If this script is automated, this line will need to be deleted.\ninput('Are you ready?')\n\n# # 1) Prepare the self text of the voting post\nVotingText = open('VotingText.txt', 'r').read()\nbotDisclaimerText = bot_disclaimer()\n\n# Include the number of submissions and the contest end date in the text of the post.\n# This code makes the end date next Sunday.\n\nlastmonthfile = open('data/lastmonth.txt', 'r')\nlast_month_url = (lastmonthfile.read())\nnext_week = datetime.now() + timedelta(days=5) # Need at least 5 days to vote.\nnext_sunday = next_weekday(next_week, 6) # Pycharm doesn't like the .now(), but in testing seems it should work.\npretty_next_sunday = next_sunday.strftime('%A %B %d, %Y')\nnumbersubmitted = sum(1 for line in open('submissions.csv'))\nVotingText = VotingText.replace('%NUMBERSUBMITTED%', str(numbersubmitted))\nVotingText = VotingText.replace('%ENDDATE%', str(pretty_next_sunday))\nVotingText = 
VotingText.replace('%MYREDDITID%', my_reddit_ID)\nVotingText = VotingText.replace('%LASTMONTH%', last_month_url)\n\n# # 2) Get date from 7 days ago\n# We need to get the month from the previous month. Most of the contests are towards the end of the month.\n# Sometimes the voting is even in the first few days of the next month.\n# Therefore we need to get the date from 7 days earlier. That will be the month for the intent of the title of the\n# Reddit post.\ndate_7_days_ago = datetime.now() - timedelta(days=7)\ncontest_month = date_7_days_ago.strftime(\"%B\")\ncontest_year = date_7_days_ago.date().year\n\n# # 3) Submit the post to Reddit\npost_message = 'Vote Now for the ' + str(contest_month) + ' ' + str(contest_year) + ' Map Contest!'\nsubmission = r.subreddit('mapporn').submit(post_message, selftext=VotingText) # Submits the post to Reddit\nsubmission.mod.contest_mode()\nsubmission.mod.distinguish()\nshortlink = submission.shortlink\n\n# # 4) One by one add a comment to the post, each comment being a map to vote on\nf = open('submissions.csv', 'r')\nreader = csv.reader(f)\nfor row in reader:\n submission.reply('[' + row[0] + '](' + row[1] + ') \\n' + row[2] + '\\n\\n----\\n\\n^^^^' + row[4])\n # the brackets and parentheses are for hyperlinking the name, row[4] is the unique ID of the submission message,\n # in the congratulations.py program the bot will parse these comments looking for this code and use it to determine\n # the winners.\n\n # Now send a message to each contestant letting them know it's live.\n try:\n r.redditor(row[3]).message('The monthly map contest is live!', 'Thank you for contributing a map. '\n '[The voting on the monthly contest is '\n 'open now at this link.]('\n + shortlink + ') \\n' + botDisclaimerText)\n except:\n print('Could not send message to ' + row[3])\ngeneralcomment = submission.reply('General Comment Thread') # Have a general comment thread so\n # people don't post top level comments.\ngeneralcomment.mod.distinguish(sticky=True)\ngeneralcomment.reply('**What is with the ^^^small characters?** \\n'\n 'This contest is automated with a bot. The bot uses these random characters to index the maps and '\n 'to calculate the winner at the end of the contest.\\n\\n----\\n\\n ^^^[Github](https://github.com/petrarch1603/MapPornBot)')\nf.close()\n\n\n# # 5) Need to save the voting post raw_id for use in parsing the winner after a few days.\nraw_id = submission.id_from_url(shortlink)\nfile = open('data/votingpostdata.txt', 'w')\nfile.write(raw_id)\nfile.close()\n\n# # 6) Rename submissions to submissions_current (while there is voting going on).\n# SubmissionsCurrent will be the index of what is being voted on during the voting period. That way after the vote\n# we know who is the winner.\nos.replace('submissions.csv', 'submissions_current.csv')\n# Create a new submissions.csv, so that if we get submissions during the contest, they will be acquired without\n# creating conflicts. 
This code creates an empty file.\nopen('submissions.csv', 'w').close()\n\n# # 7) Need to post a \"Vote Now' advertisement to social media\n# I created a bunch of \"Vote Now\" posters for use in social media posts.\n# They are all in the 'voteimages' directory and have simple two digit file names: 01.png, 02.png, 03.png etc.\n# Each month a random image will be posted to social media.\n\n# Get random image_file_name\nimagecount = len([name for name in os.listdir('voteimages/')]) # counts how many images are in the directory\nrandraw = random.randint(1, imagecount) # Creates a random number between 1 and the image count.\n# Return a random number with a leading zero if necessary. (i.e. 02 instead of 2)\nimage_file_name = str(randraw).zfill(2)\n# Look in the directory and create a list of files with the name of the image.\n#\n# It's not elegant code, but it returns a full file name (i.e. 02.png instead of 02).\n# The problem is that there are multiple file extensions: jpg, png, jpeg, etc.\n# There is probably a better way to do it, but for now it works.\nimage_file_name = fnmatch.filter(os.listdir('voteimages/'), image_file_name + '.*')\nimage_file_name = image_file_name[0] # There should only be one image with that name, so this returns the name of\n                                     # that file.\n\n# Post to social media\n# Change the message so it includes URL of the Reddit voting post.\npost_message_url = (post_message + '\\n' + shortlink + '\\n#MapPorn #Cartography #Contest')\nimage_file_name = ('voteimages/' + image_file_name)\n# Run a function to post it to different social media accounts\nsocial_media_post = generic_post(image_file_name, post_message_url)\n# The function returns a text string with the URLs of the relevant social media posts.\n# This is useful for verifying proper posting.\n\n\n# # 8) Send a Reddit message to me with a summary and links to social media posts\nsend_reddit_message_to_self('New Voting Post Posted', 'A new votingpost.py has been run. Check the post to make sure the bot did it right.'\n                                                      ' \\nHere\\'s the link to the post: ' + shortlink + ' \\nHere\\'s the social media '\n                                                      'links: \\n' + str(social_media_post))\n\ntry:\n    submission.mod.approve() # Unsure if these two work\nexcept Exception as e:\n    print('Could not approve post. Exception: ' + str(e))\ntry:\n    submission.mod.sticky()\nexcept Exception as e:\n    print('Could not sticky post. Exception: ' + str(e))\n\n\n# # Notes\n#\n# 2017/12/02\n# Mostly successful execution. Social media post failed due to not having the images ready.\n# Fixed this for next time. I'm still unsure if the submission.mod.approve() and\n# submission.mod.sticky() functions will work. Will have to check that next month.\n#\n# Also next time, make sure that the submissions.csv is renamed to submissions_current.\n# Make a copy of submissions.csv before executing script next time!\n#\n#","sub_path":"votingpost.py","file_name":"votingpost.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315887820","text":"# Copyright 2014 The Chromium Authors. 
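The voting script imports `next_weekday` from its `functions` module, which is not shown here. A common implementation of that helper, assumed to match the script's usage (`next_weekday(date, 6)` for the following Sunday):

```python
# Assumed implementation of next_weekday(): weekday 6 = Sunday.
from datetime import datetime, timedelta

def next_weekday(d, weekday):
    days_ahead = weekday - d.weekday()
    if days_ahead <= 0:          # the target day already passed this week
        days_ahead += 7
    return d + timedelta(days=days_ahead)

next_sunday = next_weekday(datetime(2017, 12, 2), 6)  # a Saturday
assert next_sunday.strftime('%A') == 'Sunday'
```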
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom common.chrome_proxy_shared_page_state import ChromeProxySharedPageState\nfrom telemetry.page import page as page_module\nfrom telemetry import story\n\n\nclass HTTPToDirectFallbackPage(page_module.Page):\n  \"\"\"Page that tests falling back from the HTTP proxy to a direct connection.\"\"\"\n  def __init__(self, url, page_set):\n    super(HTTPToDirectFallbackPage, self).__init__(url=url, page_set=page_set,\n        shared_page_state_class=ChromeProxySharedPageState)\n\n\nclass HTTPToDirectFallbackStorySet(story.StorySet):\n  \"\"\"Chrome proxy test sites\"\"\"\n  def __init__(self):\n    super(HTTPToDirectFallbackStorySet, self).__init__()\n\n    urls_list = [\n      'http://check.googlezip.net/fallback/',\n      'http://check.googlezip.net/block/',\n    ]\n\n    for url in urls_list:\n      self.AddStory(HTTPToDirectFallbackPage(url, self))\n","sub_path":"tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/http_to_direct_fallback.py","file_name":"http_to_direct_fallback.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"303863056","text":"from os import access,F_OK\n\ndef create_dictionary(config):\n\n    if not access(config, F_OK):\n        print(\"Cannot find dictionary file!\")\n        quit()\n    \n    locale_file = open(config,'r')\n    print(locale_file.read())\n    locale_file.close()\n    \n\n\n\n\n\ndef analyze():\n    if not access('config.txt', F_OK):\n        print(\"Cannot find config file!\")\n        quit()\n    \n    config_file = open('config.txt') # open the file\n    \n    config = \"\" # variable holding the locale file name\n    \n    for line in config_file: # iterate over every line of the config\n\n        word_num = 1 # indices for splitting the parameter string\n        word_num2 = 1\n        first = 0 # flag marking whether the first '\"' has been seen\n        \n        for word in line: # walk over every character of the line\n            if word != '\"': \n                if first == 0: # char is not '\"' and none seen yet: advance the first split index\n                    word_num += 1\n                    continue\n                else: # char is not '\"' and one was seen: advance the second split index\n                    \n                    word_num2 +=1\n                    continue\n            \n            else: \n                if first == 0: # first '\"' encountered: set the flag\n                    first+=1\n                    word_num2 = word_num\n                \n                else: # second '\"' encountered: store the locale file name in config\n                    config = line[word_num:word_num2]\n                    break\n\n\n    config_file.close() # close the file\n    print(config)\n    create_dictionary(config)\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\": # if the module runs as main, start analyze()\n    analyze()\n","sub_path":"localing.py","file_name":"localing.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"613323016","text":"#! 
/usr/bin/env python3\nimport os\nimport sys\nimport getopt\nimport logging\nfrom importlib import import_module\nfrom utils import multi_dl, fixpath, check_savedir, save_links, config, logger\n\n# 'domain / module name': ['sub dir', 'needs proxy (1/0)']\nSITES = {\n    'twimg': ['Twitter', 1],\n    'media.tumblr': ['Tumblr', 1],\n    'sinaimg': ['Weibo', 0],\n    'flickr': ['Flickr', 1],\n    'twitter': ['Twitter', 1],\n    'bcy': ['半次元', 0],\n    'tuchong': ['Tuchong', 0],\n    'worldcosplay': ['WorldCosplay', 1],\n    'hentai-cosplay': ['Hentai-Cosplay', 1],\n    'weibo': ['Weibo', 0]\n}\nDirect_Link = ['twimg', 'media.tumblr', 'sinaimg']\nopen_dir = ''\n\n\n# get url\ndef start_dl(url):\n    for k in SITES.keys():\n        if k in url:\n            if k in Direct_Link:\n                module_name = 'direct-dl'\n            else:\n                module_name = k\n\n            logger.debug('module name is %s ' % module_name)\n            m = import_module('sites.%s' % module_name)\n\n            dirname = SITES[k][0]\n            needproxy = SITES[k][1]\n            proxies = {'http': my_proxy, 'https': my_proxy} if needproxy else {}\n\n            Links, FileNames, author, title = m.get_pic(url, proxies=proxies)\n\n            if module_name == 'direct-dl' and subdir != '':\n                author = subdir\n\n            savedir = os.path.join(BASEPATH, dirname, author, title)\n            savedir_fix = fixpath(savedir)\n\n            global open_dir\n            open_dir = savedir_fix\n\n            # if savedir_fix exists\n            savedir_fix = check_savedir(savedir_fix, title)\n            if savedir_fix == -1:\n                logger.info('Skip.')\n                return 0\n\n            logger.debug('\\n--- debug information ---\\n'\n                         'Links: %s\\n'\n                         'FileNames: %s\\n'\n                         'author - title: \"%s\" - \"%s\"\\n'\n                         'savedir_fix: %s\\n'\n                         '--- debug information end---\\n' % (Links, FileNames, author, title, savedir_fix))\n\n            if not Links:\n                logger.info('No picture links found, or nothing needs downloading')\n                return 0\n\n            FNames_fix = [fixpath(filename) for filename in FileNames]\n\n            save_links(Links, FNames_fix, savedir_fix, url)\n            multi_dl(Links, FNames_fix, savedir_fix, proxies=proxies)\n\n            return 0\n\n    logger.info('Site not supported')\n    return -1\n\n\ndef from_file(filepath):\n    have_failed = False\n\n    def save_failed(_url):\n        with open('%s.failed' % filepath, 'a') as f:\n            f.write(_url + '\\n')\n        return True\n\n    if os.path.exists('%s.failed' % filepath):\n        os.remove('%s.failed' % filepath)\n\n    with open(filepath, 'r') as file:\n        urls = [line.strip() for line in file.readlines() if line != '\\n']\n        for url in urls:\n            try:\n                start_dl(url.strip())\n                urls = urls[1:]\n            except Exception as _e:\n                logger.error('%s meet error: %s' % (url, _e))\n                have_failed = save_failed(url)\n                continue\n            except SystemExit as _e:\n                if _e.code != 0:\n                    logger.error('%s meet error, SystemExit code is: %s' % (url, _e))\n                    have_failed = save_failed(url)\n                continue\n\n    if have_failed:\n        logger.info('>>>>>>> The links that failed to download were saved to %s.failed' % filepath)\n\nUsage_Message = \"\"\"\nOption: -h or --help: show this message\n        -d or --debug: set log level to debug\n        -p or --proxy scheme://ip:port: use proxy scheme://ip:port\n        -f or --file filename: file mode\n        -i url: download from url\n        -s subdir: subdir for dlpic-gui\"\"\"\n\n\ndef usage():\n    print(Usage_Message)\n\n\ndef runmode(mode, _arg):\n    if mode == 'filemode':\n        from_file(_arg)\n    elif mode == 'singlemode':\n        start_dl(_arg)\n\n\nif __name__ == '__main__':\n\n    logger.setLevel(logging.INFO)\n    logging.getLogger('requests').setLevel(logging.WARNING)\n\n    BASEPATH = config['BASEPATH']\n    my_proxy = config['proxy'] if 'proxy' in config.keys() else ''\n\n    MODE = None\n    content = ''\n    subdir = ''\n\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], 'hdp:f:i:s:', ['help', 'debug', 'proxy=', 
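`start_dl` above dispatches to a handler module with `import_module('sites.%s' % name)`. A runnable demo of the same pattern, using stdlib modules in place of the (unavailable here) `sites/` package:

```python
# Dynamic dispatch via importlib: look up a module name, import it lazily,
# and call a known entry point on the returned module object.
from importlib import import_module

SITES = {'json': 'json', 'csv': 'csv'}  # key -> module name (stdlib stand-ins)

def load_handler(name):
    return import_module(SITES[name])

handler = load_handler('json')
assert handler.loads('{"ok": true}') == {'ok': True}
```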
'file='])\n        for opt, arg in opts:\n            if opt in ('-h', '--help'):\n                usage()\n            elif opt in ('-d', '--debug'):\n                logger.setLevel(logging.DEBUG)\n                logger.handlers[0].setLevel(logging.DEBUG)\n                logger.debug('IN DEBUG MODE')\n            elif opt in ('-p', \"--proxy\"):\n                my_proxy = arg\n                logger.debug('proxy from args overwrite config > %s' % arg)\n            elif opt in ('-f', '--file'):\n                logger.info('file mode')\n                MODE = 'filemode'\n                content = arg\n            elif opt in ('-i', ):\n                MODE = 'singlemode'\n                content = arg\n            elif opt in ('-s', ):\n                subdir = arg\n\n    except getopt.GetoptError as e:\n        logger.error(e)\n        usage()\n        sys.exit(2)\n\n    while True:\n\n        if MODE:\n            runmode(MODE, content)\n            logger.info('DONE')\n            sys.exit(0)\n\n        try:\n            dl_link = input('\\nPaste The Link:\\n').strip()\n\n            if dl_link in ['bye', 'exit', 'q']:\n                break\n            elif dl_link == 'o':\n                if open_dir:\n                    os.system('xdg-open \"%s\" >/dev/null 2>&1' % open_dir)\n                else:\n                    logger.info(\"download dir not set, pass\")\n                continue\n            elif not dl_link.startswith('http'):\n                logger.warning('Wrong Input!!!!')\n                continue\n\n        except (KeyboardInterrupt, EOFError):\n            break\n\n        start_dl(dl_link)\n\n    logger.info('Bye~')\n","sub_path":"dlpic.py","file_name":"dlpic.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"519102660","text":"import urllib.request\nimport re\nimport os\n\n# Define the episode numbers (the range end is exclusive)\nbooks = list(range(950, 952))\n\nfor book in books:\n    # https://one-piece.cn/post/10922/\n    url = \"https://one-piece.cn/post/10\" + str(book) + \"/\"\n    date = urllib.request.urlopen(url).read().decode(\"utf-8\")\n    # src=\"http://wx3.sinaimg.cn/large/83940082gy1fwdhpmdqezj20nm0y6gvw.jpg\"\n    os.mkdir(\"D:\\\\Desktop\\\\image\\\\\" + str(book))\n    pat = 'src=\"(http:\\\\S+.jpg)\"'\n    img_urls = re.compile(pat).findall(date)\n    print(\"当前下载\" + str(book) + \"集\")\n    count = 1\n    for img_url in img_urls[0:-1]:\n        print(\"正在下载\" + img_url + str(count))\n        output_name = \"D:\\\\Desktop\\\\image\\\\\" + str(book) + \"\\\\\" + str(count) + \".jpg\"\n        urllib.request.urlretrieve(img_url, output_name)\n        count += 1\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"74556882","text":"import discord, random, datetime, shutil\nfrom discord.ext import commands\nfrom google_images_download import google_images_download\n\ndate = datetime.datetime.now()\nbot = commands.Bot(command_prefix=\"¥\")\nusuario = \" \"\n\n@bot.event\nasync def on_ready():\n    await bot.change_presence(status=discord.Status.online, activity=discord.Game(\"alguém me salva\"))\n    print(\"Bot is ready.\")\n\n\n@bot.command()\nasync def ping(ctx):\n    await ctx.send(f\"Ping {round(bot.latency * 1000)}ms\")\n\n@bot.command()\nasync def fala(message):\n    await message.channel.send(\"que é mano me deixa em paz\")\n\n@bot.command()\nasync def como_vai(ctx):\n    await ctx.send(\"mano eu vo cuidar do meu anão que é melhor\")\n\n@bot.command()\nasync def data_hoje(ctx):\n    await ctx.send(date.strftime(\"%c\"))\n\n\n@bot.command()\nasync def comandos(ctx):\n\n    embed = discord.Embed(\n        title=\"Comandos\",\n        description=\"Lista dos comandos disponíveis até o momento.\",\n        colour=discord.Colour.dark_blue()\n    )\n\n    embed.set_thumbnail(url=\"https://cdn.discordapp.com/attachments/334037046156853249/738501052139044884/4f3c1dd048162d6e6996e4901f8215a9.jpg\")\n    
embed.add_field(name=\"como_vai\", value=\"descoble tloxa\")\n embed.add_field(name=\"data_hoje\", value=\"descoble tloxa\")\n embed.add_field(name=\"ping\", value=\"esse aqui é bem dificil de descobli ne \")\n embed.add_field(name=\"procura\", value=\"procula os negócio (não use palavras compostas, símbolos ou acentos)\")\n embed.set_footer(text=\"cuidado com um tal de kall\")\n\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def procura(ctx, qqcequer):\n\n embed = discord.Embed(\n title=\"Ela isso que você quelia?\",\n colour=discord.Colour.dark_blue()\n )\n\n response = google_images_download.googleimagesdownload()\n arguments = {\"keywords\": qqcequer,\n \"limit\": 1,\n \"save_source\":\"url\"\n }\n\n paths = response.download(arguments)\n\n #tentar isso aqui com o source_save só com image_url\n with open(\"C:/Users/joaop/PycharmProjects/botdisc/bot/downloads/url.txt\",'r') as urls:\n for each in urls:\n defurl = each[0:]\n\n embed.set_image(url=defurl)\n\n await ctx.send(embed=embed)\n\n shutil.rmtree(\"C:/Users/joaop/PycharmProjects/botdisc/bot/downloads\")\n\n\n@bot.command()\nasync def bullying(ctx, user):\n\n user == usuario\n\n await ctx.send(f\"{user} selá bulinad(a) agola hehe\")\n\n return\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(\"esse comando não existe mongol\")\n\n\n@bot.event\nasync def on_message(message):\n\n if str(message.author) == usuario:\n respostas = [\n \"pelo amol de deus pala de falal\",\n \"calalho mano só pala\",\n \"ah cala eu não aguento mais\",\n \"puta que paliu muleque dos infelno\",\n \"que cala insupoltavel pulta melda\",\n \"eu vo sulta com esse muleque aqui\",\n \"eu vo sulta.\",\n \"alguem tila esse melda daqui calalho\",\n \"AAAAAAAAAAAAAAAAAAAAAAA FILHO DA PULTAAAAA\",\n \"eu vo te apagal na polada seu meldinha\",\n \"fala mais um a pla você ve\",\n \"alguem me mata lapido eu nao supolto mais esse bosta\",\n \"NÃO PALA DE FALA É INCLIVEL\",\n \"pulta melda você se supelou agola\",\n \"já deu pla mim.\",\n \"você vai paga minha telapia esteje avisado\",\n \"eu desisto pla mim já deu\",\n \"quem botou essa pola aqui?\",\n \"começou a putalia\"\n ]\n\n random.shuffle(respostas)\n\n await message.channel.send(random.choice(respostas), tts=True)\n\n await bot.process_commands(message)\n\n\nbot.run(\"token\")\n","sub_path":"cebolinhabot.py","file_name":"cebolinhabot.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649797649","text":"inp = input('Enter Fahrenheit Temperature:')\ntry:\n fahr = float(inp)\n cel = (fahr - 32.0) * 5.0 / 9.0\n print(cel)\nexcept Exception as e:\n print('Please enter a number')\n\ninp = input('Enter Velocidade:')\ntry:\n velocidade = float(inp)\n calcular = velocidade * 5\n print('velocidade do som:', calcular)\nexcept Exception as e:\n print('Por favor coloque em numeros a velocidade.')\n","sub_path":"python-apreender/02_condicional/07_tratando_excecoes_try_except.py","file_name":"07_tratando_excecoes_try_except.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"642190885","text":"\n\ndef main():\n\ta = '11001011'\n\tb = '111000'\n\tres = list()\n\ta, b = [e for e in a], [k for k in b]\n\t#res = [ str(int(el) * int(b[len(b) - 1])) for el in a]\n\t#for i in range(len(b) - 1):\n\t#\tres = [ el for el in plus_binary('0' + ''.join(res), 
''.join([ str(int(k) * int(b[len(b) - 2 - i ])) for k in a]) + '0' * (i + 1))]\n\t#print(res)\n\tser = ''\n\tfor i in range(len(b)):\n\t\tres.append([str(int(k) * int(b[len(b) - 1 - i ])) for k in a])\n\tfor q in range(len(res) - 1):\n\t\tser = plus_binary(''.join(res[0]), ''.join(res[q + 1]), q)\n\t\tres[0] = [el for el in ser]\n\tprint(ser)\n\ndef shift_left(a: str, n: int):\n\treturn a[n:len(a)] + '0'*n\n\ndef plus_binary(a: str, b: str, q: int):\n\tres = list()\n\tb += '0'*(q+1)\n\tif len(a) != len(b):\n\t\ta = ( '0' * (len(b) - len(a))) + a\n\ta, b = [int(al) for al in a], [int(bl) for bl in b]\n\ta.reverse()\n\tb.reverse()\n\tdop = 0\n\tfor i in range(len(a)):\n\t\tif a[i] + b[i] + dop == 3:\n\t\t\tres.append(1)\n\t\t\tdop = 1\n\t\telif a[i] + b[i] + dop == 2:\n\t\t\tres.append(0)\n\t\t\tdop = 1\n\t\telif a[i] + b[i] + dop == 1:\n\t\t\tres.append(1)\n\t\t\tdop = 0\n\t\telse:\n\t\t\tres.append(0)\n\tif dop == 1:\n\t\tres.append(1)\n\tres.reverse()\n\treturn ''.join(map(str, res))\n\nif __name__ == '__main__':\n\tmain()","sub_path":"lab2/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"534903143","text":"from paddle import fluid\nimport utils\nimport numpy as np\n\nclass BiRNN(object):\n def input_data(self, item_len):\n user_slot_names = fluid.data(name='user_slot_names', shape=[None, 1], dtype='int64', lod_level=1)\n item_slot_names = fluid.data(name='item_slot_names', shape=[None, item_len], dtype='int64', lod_level=1)\n lens = fluid.data(name='lens', shape=[None], dtype='int64')\n labels = fluid.data(name='labels', shape=[None, item_len], dtype='int64', lod_level=1)\n\n inputs = [user_slot_names] + [item_slot_names] + [lens] + [labels]\n \n return inputs\n\n def default_normal_initializer(self, nf=128):\n return fluid.initializer.TruncatedNormal(loc=0.0, scale=np.sqrt(1.0/nf))\n\n def default_param_clip(self):\n return fluid.clip.GradientClipByValue(1.0)\n\n def default_regularizer(self):\n return None\n\n def default_fc(self, data, size, num_flatten_dims=1, act=None, name=None):\n return fluid.layers.fc(input=data,\n size=size,\n num_flatten_dims=num_flatten_dims,\n param_attr=fluid.ParamAttr(initializer=self.default_normal_initializer(size),\n gradient_clip=self.default_param_clip(),\n regularizer=self.default_regularizer()),\n bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.0),\n gradient_clip=self.default_param_clip(),\n regularizer=self.default_regularizer()),\n act=act,\n name=name)\n\n def default_embedding(self, data, vocab_size, embed_size):\n gradient_clip = self.default_param_clip()\n reg = fluid.regularizer.L2Decay(1e-5) # IMPORTANT, to prevent overfitting.\n embed = fluid.embedding(input=data,\n size=[vocab_size, embed_size],\n param_attr=fluid.ParamAttr(initializer=fluid.initializer.Xavier(),\n gradient_clip=gradient_clip,\n regularizer=reg),\n is_sparse=True)\n\n return embed\n\n def default_drnn(self, data, nf, is_reverse, h_0):\n return fluid.layers.dynamic_gru(input=data,\n size=nf,\n param_attr=fluid.ParamAttr(initializer=self.default_normal_initializer(nf),\n gradient_clip=self.default_param_clip(),\n regularizer=self.default_regularizer()),\n bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.0),\n gradient_clip=self.default_param_clip(),\n regularizer=self.default_regularizer()),\n is_reverse=is_reverse,\n h_0=h_0)\n\n def net(self, inputs, hidden_size, user_vocab, item_vocab, 
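The shift-and-add multiplier above builds partial products and sums them with `plus_binary(a, b, q)`, which appends `q + 1` zeros to `b` before adding. A quick property check against plain integer arithmetic; note the adder assumes the shifted `b` is at least as long as `a`, as it is in the multiplier's call pattern (run this alongside the module above, since it uses its `plus_binary`):

```python
# Cross-check plus_binary against int arithmetic: result == a + (b << (q + 1)).
def check(a, b, q):
    expected = int(a, 2) + (int(b, 2) << (q + 1))
    assert int(plus_binary(a, b, q), 2) == expected

check('111000', '11001011', 0)  # shifted b is longer than a, as required
check('1', '1', 2)              # 1 + (1 << 3) == 9 == 0b1001
```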
embed_size):\n        #encode\n        user_embedding = self.default_embedding(inputs[0], user_vocab, embed_size)\n        user_feature = self.default_fc(data=user_embedding,\n                                       size=hidden_size,\n                                       num_flatten_dims=1,\n                                       act='relu', \n                                       name='user_feature_fc')\n\n        item_embedding = self.default_embedding(inputs[1], item_vocab, embed_size)\n        item_embedding = fluid.layers.sequence_unpad(x=item_embedding, length=inputs[2])\n        \n        item_fc = self.default_fc(data=item_embedding, \n                                  size=hidden_size, \n                                  num_flatten_dims=1, \n                                  act='relu', \n                                  name='item_fc')\n        \n        pos = utils.fluid_sequence_get_pos(item_fc)\n        pos_embed = self.default_embedding(pos, user_vocab, embed_size)\n        pos_embed = fluid.layers.squeeze(pos_embed, [1])\n        \n        # item gru\n        gru_input = self.default_fc(data=fluid.layers.concat([item_fc, pos_embed], 1),\n                                    size=hidden_size * 3,\n                                    num_flatten_dims=1,\n                                    act='relu',\n                                    name='item_gru_fc')\n\n        item_gru_forward = self.default_drnn(data=gru_input,\n                                             nf=hidden_size,\n                                             h_0=user_feature,\n                                             is_reverse=False)\n\n        item_gru_backward = self.default_drnn(data=gru_input,\n                                              nf=hidden_size,\n                                              h_0=user_feature,\n                                              is_reverse=True)\n        item_gru = fluid.layers.concat([item_gru_forward, item_gru_backward], axis=1)\n\n        out_click_fc1 = self.default_fc(data=item_gru,\n                                        size=hidden_size,\n                                        num_flatten_dims=1,\n                                        act='relu',\n                                        name='out_click_fc1')\n\n        click_prob = self.default_fc(data=out_click_fc1,\n                                     size=2,\n                                     num_flatten_dims=1,\n                                     act='softmax',\n                                     name='out_click_fc2')\n\n        labels = fluid.layers.sequence_unpad(x=inputs[3], length=inputs[2])\n        loss = fluid.layers.reduce_mean(fluid.layers.cross_entropy(input=click_prob, label=labels))\n        auc_val, batch_auc, auc_states = fluid.layers.auc(input=click_prob, label=labels)\n\n        return loss, auc_val, batch_auc, auc_states\n","sub_path":"PaddleRec/rerank/listwise/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360497142","text":"# -*- coding: utf-8 -*-\n\n# Author : Robert\n# Create Date : 2019/3/19 7:51\n# File : multi_udp.py\n# IDE : PyCharm\nimport socket\n\n\n# AF_INET means IPv4, SOCK_DGRAM means UDP\n# 1. Create the socket\nimport threading\n\n\n# 5. Receive data\ndef recv(udp_socket):\n    while True:\n        recv_data = udp_socket.recvfrom(1024)\n        print(recv_data)\n\ndef send(udp_socket):\n    # 3. Get the receiver's IP and port\n    addr_ip = input(\"对方的ip:\")\n    addr_port = int(input(\"对方的端口:\"))\n    addr = (addr_ip,addr_port)\n\n    # 4. Send data\n    while True:\n        send_data = input(\"输入要发送的数据:\")\n        udp_socket.sendto(send_data.encode('utf-8'),addr)\n\ndef main():\n    '''\n    Overall control of the UDP chat tool\n    :return:\n    '''\n    # 1. Create the socket\n    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n    # 2. Bind the local address\n    udp_socket.bind((\"\", 7891))\n\n    # 3. Create two threads to receive and send at the same time\n    t1 = threading.Thread(target=recv, args=(udp_socket,))\n    t2 = threading.Thread(target=send, args=(udp_socket,))\n\n    t1.start()\n    t2.start()\n\n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"multi_task/mutli_threading/multi_udp_2.py","file_name":"multi_udp_2.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71200722","text":"from Game_logger import logger\nfrom Game_logger import log_decorator\nfrom Game_logger import debug_decorator\nimport Map_generator\nimport Warning_generator\nimport math\nimport pickle\nfrom functools import partial\n\n\n\n@log_decorator\n@debug_decorator\ndef dungeon_game():\n    \"\"\"\n    Launch the dungeon game.\n    Returns:\n        None\n    \"\"\"\n    size_of_map = 
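The UDP script above shares one socket between a receive thread and a send loop. A self-contained loopback sketch of the same pattern, sending to our own bound address so it runs without a peer:

```python
# One socket, one receiving thread, main thread sends: loopback demo.
import socket
import threading

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))        # port 0: let the OS pick a free port
addr = sock.getsockname()

def recv_loop():
    data, peer = sock.recvfrom(1024)
    print("got %r from %s" % (data, peer))

t = threading.Thread(target=recv_loop, daemon=True)
t.start()
sock.sendto(b"hello", addr)        # send to ourselves
t.join(timeout=2)
```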
int(input(\"Enter size of map\"))\n game_map = Map_generator.generate_map([size_of_map] * 2, 0.1, 0.05)\n\n player_position = Map_generator.generate_coordinates(game_map)\n direction_map = {'up': [1, 0], 'down': [-1, 0], 'left': [0, -1], 'right': [0, 1]}\n\n game_over = False\n player_win = False\n\n while not game_over:\n logger.info(\"Your position:\" + str(player_position))\n Warning_generator.generate_warning(game_map, player_position, 1)\n Warning_generator.generate_warning(game_map, player_position, 2)\n\n game_input = input(\"Enter direction\")\n if game_input == \"save\":\n save(game_map, player_position)\n logger.info(\"Game is saved\")\n continue\n elif game_input == \"load\":\n game_map, player_position = load()\n logger.info(\"Game is loaded\")\n continue\n else:\n direction = direction_map[game_input]\n\n player_position[0] += direction[0]\n player_position[1] += direction[1]\n\n player_position[0] = max(0, min(player_position[0], size_of_map - 1))\n player_position[1] = max(0, min(player_position[1], size_of_map - 1))\n\n if game_map[player_position[0]][player_position[1]] == 1:\n game_over = True\n\n elif game_map[player_position[0]][player_position[1]] == 2:\n\n game_over = True\n player_win = True\n\n if player_win:\n print(\"You won!!!\")\n else:\n print(\"You lost.\")\n print(game_map)\n\n\t\n@log_decorator\n@debug_decorator\ndef save(game_map, player_position):\n \"\"\"\n saves level in file\n :param str lvl: string level representation\n :param int player_x: current x-position of player\n :param int player_y: current y-position of player\n \"\"\"\n\n with open('save.pickable', 'wb') as handle:\n pickle.dump([game_map, player_position], handle)\n\n\t\t\n@log_decorator\n@debug_decorator\ndef load():\n \"\"\"\n loads level from file\n :return: loaded level\n :rtype: tuple\n \"\"\"\n\n with open('save.pickable', 'rb') as handle:\n game_save = pickle.load(handle)\n\n game_map = game_save[0]\n player_position = game_save[1]\n\n return game_map, player_position\n\n\ndungeon_game()\n\n","sub_path":"Kateryna_Liukina/6/Dungeon_Game.py","file_name":"Dungeon_Game.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"448009918","text":"import os, re\nimport pickle\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\nimport numpy as np\nimport pandas as pd\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import PorterStemmer\n\nmax_len = 50 # 최대 단어 수가 54였음 -> text당 사용할 단어 수\nmax_words = 10000 # 훈련시 사용할 단어 수 -> 최빈 10000개\ndata_ratio_train = 0.9\ndata_ratio_val = 0.0\ndata_ratio_test = 0.1\nembedding_dim = 50\n\ndef refineWords(s):\n # lowwer case\n # url, ,(comma) remove\n s = re.sub(r\"http\\S+|,\", '', s)\n words = s.lower().split()\n # 재난 장소 표현이 많아 없애지 않음\n # stops = set(stopwords.words(\"english\"))\n # meaningful_words = [w for w in words if not w in stops]\n # lemmatizer : original form\n lmtzr = WordNetLemmatizer()\n lemmatized_words = [lmtzr.lemmatize(word) for word in words]\n # stemming :\n # stemmer = PorterStemmer()\n\n return (\" \".join(lemmatized_words))\n\n# 1. read data\nprint (\"[PREP] 1. read data\")\ndata_path = r\"C:\\Users\\codbs\\Yooney_TF_codes\\TensorFlow_Test\\DataSets\\Real_or_Not_NLP_with_Disaster_Tweets.csv\"\ndata_raw = pd.read_csv( data_path)\n\n# 2. tokenizing\nprint (\"[PREP] 2. 
tokenizing : max len : %d, max words = %d\" % (max_len, max_words))\n\n# 2.1. lower and lemmatizing\ndata_raw[\"text\"] = data_raw[\"text\"].apply(refineWords)\n# test remove url i=7610 / , for 3\n# print (\"original : %s\" % data_raw[\"text\"][i])\n# print (\"after refine&tokenized : %s\" % tokenizer.sequences_to_texts( [sequences[i]])[0])\n\n# 2.2 tokenizing\ntokenizer = Tokenizer( num_words= max_words, oov_token= 0, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n')\ntokenizer.fit_on_texts(data_raw[\"text\"])\nsequences = tokenizer.texts_to_sequences( data_raw[\"text\"])\nword_index = tokenizer.word_index\nprint (\"tokenizer : found %d tokens\" % len(word_index))\n\n# 2.3 padding from the front (pre-padding)\ndata = pad_sequences(sequences, maxlen=max_len)\nlabels = np.asarray( data_raw['target'])\nprint ('data tensor shape : ', data.shape)\nprint ('label tensor shape : ', labels.shape)\n\n# 3. shuffle and set split to training, val, test\nprint (\"[PREP] 3. shuffle and split data to train, test, val\")\nindices = np.arange( data.shape[0])\nnp.random.shuffle( indices)\ndata = data[indices]\nlabels = labels[indices]\n\ntrain_index = (0, int(data.shape[0]* data_ratio_train) )\nval_index = (train_index[1], int( train_index[1] + data.shape[0] * data_ratio_val))\ntest_index = (val_index[1], int( data.shape[0]))\nprint ('train ratio : ', data_ratio_train, train_index)\nprint ('val ratio : ', data_ratio_val, val_index)\nprint ('test ratio : ', data_ratio_test, test_index)\n\nx_train = data[train_index[0]:train_index[1]]\ny_train = labels[train_index[0]:train_index[1]]\nx_val = data[val_index[0]: val_index[1]]\ny_val = labels[val_index[0]: val_index[1]]\nx_test = data[test_index[0]: test_index[1]]\ny_test = labels[test_index[0]: test_index[1]]\ndata_split_index = { 'train' : train_index, 'val' : val_index, 'test' : test_index}\n\n# 4. read embedding data\nprint (\"[PREP] 4. embedding matrix create \")\nembeddings_index = {}\nglove_path = r\"C:\\Users\\codbs\\Yooney_TF_codes\\TensorFlow_Test\\DataSets\\glove\\glove.twitter.27B.50d.txt\"\nwith open( glove_path, 'rt', encoding='UTF8') as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nprint (\"glove : found %d vectors\" % len(embeddings_index))\n\n\nembedding_matrix = np.zeros( (max_words, embedding_dim))\nfor word , i in word_index.items():\n if i < max_words:\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n# 5. save data, tokenizer, embeddingmatrix\nprint (\"[PREP] 5. save data, tokenizer, embeddingmatrix\")\n# 1. data\npickle_save_path = r'C:\\Users\\codbs\\PycharmProjects\\tweetDisaster\\data_pickle'\nwith open( pickle_save_path + r'\\data.pickle', 'wb') as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\nwith open( pickle_save_path + r'\\labels.pickle', 'wb') as f:\n pickle.dump(labels, f, pickle.HIGHEST_PROTOCOL)\nwith open( pickle_save_path + r'\\data_split_index.pickle', 'wb') as f:\n pickle.dump(data_split_index, f, pickle.HIGHEST_PROTOCOL)\n# 2. tokenizer\nwith open( pickle_save_path + r'\\tokenizer.pickle', 'wb') as f:\n pickle.dump(tokenizer, f, pickle.HIGHEST_PROTOCOL)\n\n# 3. 
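# An optional sanity check before pickling (not part of the original script);
# it reuses word_index, embeddings_index and max_words from above to report how
# much of the kept vocabulary actually received a GloVe vector.
hits = sum(1 for w, i in word_index.items() if i < max_words and w in embeddings_index)
print("glove coverage : %d / %d tokens" % (hits, min(max_words, len(word_index))))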
embedding matrix\nwith open( pickle_save_path + r'\\embedding_matrix.pickle', 'wb') as f:\n pickle.dump(embedding_matrix, f, pickle.HIGHEST_PROTOCOL)\n\n\n# # load\n# with open('data.pickle', 'rb') as f:\n# data = pickle.load(f)","sub_path":"load_N_Embedding.py","file_name":"load_N_Embedding.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"432732021","text":"from flask import Flask,render_template\r\n\r\napp = Flask(__name__)\r\n\r\n#1. Return an html template\r\n#2. How to fill the template with data\r\n@app.route('/')\r\ndef hello_world():\r\n\r\n #e.g. pass in a url\r\n url_str=\"www.baidu.com\"\r\n my_list=[1,3,5,7,9]\r\n\r\n my_dict={\r\n \"name\":'liqiang',\r\n 'url':'www.biadu.com'\r\n }\r\n\r\n #usually the template variable names are kept the same as the names of the data being passed\r\n return render_template('index.html',url_str=url_str,my_list=my_list,my_dict=my_dict)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","sub_path":"FlaskJinJiaTest/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"358367849","text":"import argparse\nimport pybedtools\nimport time\nfrom pathlib import Path\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--reference', '-r', type=str, required=True,\n help='Relative or absolute path to the folder containing preprocessed \\\n reference fasta files')\nparser.add_argument('--transcriptome', '-tr', type=str, required=True, \n help='Path to the transcriptome bedfile')\nparser.add_argument('--outfile', '-out', type=str, default='.', \n help='Specify absolute or relative path to output directory, default = .') \nargs = parser.parse_args()\n\n# preparing files and folders\nts = time.time()\n\np = Path(args.outfile)\n\noutput_dir = Path(f'{p}', 'results', 'preprocessed_transcriptome', f'{ts}')\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# creating list of references\nref_files = Path(args.reference)\nreferences = [Path(reference).stem for reference in ref_files.glob('*.fa*')]\n\n# preparing transcriptome for output processing\ntranscriptome = args.transcriptome\ntranscriptome_name = Path(transcriptome).stem\n\ntranscriptome_bed_object = pybedtools.BedTool(args.transcriptome)\n\noutput_file_name = f'{transcriptome_name}_preprocessed.bed'\noutput_file_path = output_dir / output_file_name\n\n# processing output file\npybedtools.BedTool(line for line in transcriptome_bed_object\\\n if line.chrom in references)\\\n .saveas(output_file_path)","sub_path":"tools/transcriptome_prepr/transcriptome_processing_and_filtering.py","file_name":"transcriptome_processing_and_filtering.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"134775888","text":"import re\nimport math\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Object\nfrom LatinoAnalysis.NanoGardener.data.common_cfg import Type_dict\n\nclass PtCorrApplier(Module):\n '''\n Module that applies pt corrections to a given collection\n '''\n def __init__(self, Coll='CleanJet', CorrSrc='jecUncertTotal', kind='Up', doMET=True, METobject = 'MET'):\n self.CollTC = Coll\n self.CorrSrc = CorrSrc\n self.kind = kind\n self.isUp = True if kind == 'Up' else False\n self.doMET = doMET\n self.METobj = 
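# A side note on the transcriptome-filtering script above (not part of the
# original): the generator-expression save could equivalently use pybedtools'
# built-in BedTool.filter(), which is likewise lazy, so intervals only stream
# to disk when saveas() consumes them. Names reused from that script:
filtered = pybedtools.BedTool(args.transcriptome).filter(
    lambda interval: interval.chrom in references)
filtered.saveas(output_file_path)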
METobject\n self.minJetEn = 15\n prt_str = 'PtCorrApplier: CollectionToCorrect = ' + self.CollTC + ', CorrectionsToApply = ' + self.CorrSrc + ', CorrectionType = ' + self.kind + ', PropagateToMET = ' + str(self.doMET)\n print(prt_str)\n\n def beginJob(self):\n pass\n\n def endJob(self):\n pass\n\n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n self.out = wrappedOutputTree\n self.CollBr = {}\n oBrList = self.out._tree.GetListOfBranches()\n for br in oBrList:\n bname = br.GetName()\n btype = Type_dict[br.GetListOfLeaves()[0].GetTypeName()]\n if re.match('\\A'+self.CollTC+'_', bname):\n if btype not in self.CollBr: self.CollBr[btype] = []\n self.CollBr[btype].append(bname)\n self.out.branch(bname, btype, lenVar='n'+self.CollTC)\n #iBrList = inputTree.GetListOfBranches()\n #for br in iBrList:\n # bname = br.GetName()\n # btype = Type_dict[br.GetListOfLeaves()[0].GetTypeName()]\n # if re.match('\\A'+self.CollTC+'_', bname):\n # if btype not in self.CollBr: self.CollBr[btype] = []\n # if bname in self.CollBr[btype]: continue\n # self.CollBr[btype].append(bname)\n # self.out.branch(bname, btype, lenVar='n'+self.CollTC)\n if len(self.CollBr) < 1: raise IOError('PtCorrApplier: no branches with ' + self.CollTC+'_' + ' found in inputTree or outputTree.')\n if self.doMET:\n self.out.branch(self.METobj+'_pt', 'F')\n self.out.branch(self.METobj+'_phi', 'F')\n \n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n pass\n\n def analyze(self, event):\n coll = Collection(event, self.CollTC)\n nColl = len(coll)\n\n if self.doMET:\n met = Object(event, self.METobj)\n met_px = met['pt']*math.cos(met['phi']) \n met_py = met['pt']*math.sin(met['phi']) \n\n # Create new pt\n new_pt = []\n for iObj in range(nColl):\n if self.isUp: tmp_pt = coll[iObj]['pt'] + coll[iObj][self.CorrSrc]*coll[iObj]['pt']\n else: tmp_pt = coll[iObj]['pt'] - coll[iObj][self.CorrSrc]*coll[iObj]['pt']\n new_pt.append(tmp_pt)\n\n # MET: subtract the pt shift along the jet azimuth (phi, not pt, is the angle)\n if self.doMET and tmp_pt > self.minJetEn:\n pt_diff = tmp_pt - coll[iObj]['pt']\n met_px -= pt_diff*(math.cos(coll[iObj]['phi'])) \n met_py -= pt_diff*(math.sin(coll[iObj]['phi'])) \n if self.doMET:\n new_MET_pt = math.sqrt(met_px**2 + met_py**2)\n new_MET_phi = math.atan2(met_py, met_px)\n\n # Reorder: indices sorted by decreasing corrected pt, ties kept in original order\n order = sorted(range(nColl), key=lambda idx: (-new_pt[idx], idx))\n \n # Fill branches\n for typ in self.CollBr:\n for bname in self.CollBr[typ]:\n if '_pt' in bname: \n temp_v = [new_pt[idx] for idx in order]\n self.out.fillBranch(bname, temp_v)\n else:\n temp_b = bname.replace(self.CollTC+'_', '')\n temp_v = [coll[idx][temp_b] for idx in order]\n self.out.fillBranch(bname, temp_v)\n if self.doMET:\n self.out.fillBranch(self.METobj+'_pt', new_MET_pt)\n self.out.fillBranch(self.METobj+'_phi', new_MET_phi)\n return True \n\n","sub_path":"NanoGardener/python/modules/PtCorrApplier.py","file_name":"PtCorrApplier.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"440762709","text":"\"\"\"\nStarts a grpc server for Note sources (playlist, NDE, and hook) using default hook settings.\n\"\"\"\nimport sys\nimport argparse\nimport time\nimport logging\nimport traceback\n\nfrom note.rpc.rpc_server import NoteRPCServer\nfrom note.nde.nde_source import LocalNDESource\nfrom note.playlist.pl_source import LocalPLSource\nfrom note.winamp.hook import WinampHook, 
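# A tiny standalone check of the MET propagation arithmetic used in
# PtCorrApplier.analyze() above (not part of the original module): MET is
# treated as a 2-vector and each jet's pt shift is subtracted from it,
#   MET'_x = MET_x - (pt' - pt)*cos(phi_jet)
#   MET'_y = MET_y - (pt' - pt)*sin(phi_jet)
import math
met_pt, met_phi = 50.0, 0.0
pt_old, pt_new, phi_jet = 30.0, 33.0, math.pi / 2  # one made-up jet, +10% shift
mx = met_pt * math.cos(met_phi) - (pt_new - pt_old) * math.cos(phi_jet)
my = met_pt * math.sin(met_phi) - (pt_new - pt_old) * math.sin(phi_jet)
print(math.hypot(mx, my), math.atan2(my, mx))  # -> 50.0899..., -0.0599...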
WinampHookRequest\nfrom repl_scripts.shared import create_test_hook\n\n\ndef main() -> int:\n try:\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument('--host', type=str, default='0.0.0.0')\n parser.add_argument('--port', '-p', type=int, default=15666)\n parser.add_argument('--fake-hook', action='store_true')\n\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.INFO)\n\n if args.fake_hook:\n hook = create_test_hook()\n else:\n request = WinampHookRequest.from_default()\n hook = WinampHook.create(request)\n\n nde_source = LocalNDESource.create_from_hook(hook)\n pl_source = LocalPLSource.create_from_hook(hook)\n\n server = NoteRPCServer.from_local_sources(\n local_hook=hook,\n pl_source=pl_source,\n nde_source=nde_source\n )\n\n server.serve(args.host, args.port)\n print(f'Started RPC server @ {args.host}:{args.port}')\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print('Stopping server')\n\n hook.destroy()\n\n return 0\n\n except Exception as e:\n traceback.print_exc()\n return 1\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"cli_tools/rpc_server.py","file_name":"rpc_server.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"224808744","text":"#coding=gbk\n#!/usr/bin/env python\n'''\ncv2.cornerHarris() parameters:\n\n img - input image, dtype must be float32.\n blockSize - size of the neighbourhood considered for corner detection.\n ksize - aperture size of the Sobel derivative used.\n k - Harris detector free parameter in the equation, typical values are [0.04, 0.06].\n'''\nimport cv2\nimport numpy as np\n\nfilename = 'G:\\CODE\\opencv\\shudu.jpg'\nimg = cv2.imread(filename)\ngray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n\n# convert to 32-bit float\ngray = np.float32(gray)\n\n# the input image must be float32; the last parameter is between 0.04 and 0.06\ndst = cv2.cornerHarris(gray,2,3,0.04)\n\n# dilate the result to mark the corners\ndst = cv2.dilate(dst,None)\n\n# pixels above a threshold of the max response are turned red\nimg[dst>0.01*dst.max()] = [0,0,255]\n\ncv2.imshow('dst',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"图像特征提取与描述/角点检测/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"305404977","text":"import cv2\n\nimport numpy as np\n\nfrom numpy import linalg\n\nfrom PIL import Image\n\nimport math\n\n# finds the angle between the Oxy plane normal and the normal of the face given by setp\ndef cust_cross(setp):\n\tvectOxy = np.array([0,0,1])\n\tvx = setp[0][0] - setp[1][0]\n\tvy = setp[0][1] - setp[1][1]\n\tvz = setp[0][2] - setp[1][2]\n\n\tvx1 = setp[0][0] - setp[2][0]\n\tvy1 = setp[0][1] - setp[2][1]\n\tvz1 = setp[0][2] - setp[2][2]\n\n\tv1 = np.array([vx, vy, vz])\n\tv2 = np.array([vx1, vy1, vz1])\n\n\tvect = np.cross(v1, v2)\n\t\n\tangle=np.degrees(np.arccos(np.dot(vect,vectOxy)/(np.linalg.norm(vect)*np.linalg.norm(vectOxy))))\n\n\treturn angle\n\n\ndef draw_cube(im, acc_matrix, stpoints):\n\n\tzpoints = scale * acc_matrix @stpoints.T\n\t\n\tpoints = proj_matr3d @ zpoints\n\tpoints = points.T\n\tzpoints = zpoints.T\n\n\tcv2.line(im, (int(points[0][0]), int(points[0][1])), (int(points[1][0]), int(points[1][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[0][0]), int(points[0][1])), (int(points[2][0]), int(points[2][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[0][0]), int(points[0][1])), (int(points[4][0]), int(points[4][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[1][0]), int(points[1][1])), (int(points[3][0]), int(points[3][1])), (0, 255, 0), 
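# A standard follow-up to the Harris example above (not part of the original
# script): cornerHarris marks whole blobs of high response, and OpenCV's
# cornerSubPix refines their centroids to sub-pixel accuracy. This mirrors the
# official OpenCV tutorial and reuses gray and dst from that script.
ret, dst_bin = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
dst_bin = np.uint8(dst_bin)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst_bin)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)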
lineThickness );\n\tcv2.line(im, (int(points[1][0]), int(points[1][1])), (int(points[5][0]), int(points[5][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[2][0]), int(points[2][1])), (int(points[3][0]), int(points[3][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[2][0]), int(points[2][1])), (int(points[6][0]), int(points[6][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[4][0]), int(points[4][1])), (int(points[6][0]), int(points[6][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[3][0]), int(points[3][1])), (int(points[7][0]), int(points[7][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[5][0]), int(points[5][1])), (int(points[4][0]), int(points[4][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[5][0]), int(points[5][1])), (int(points[7][0]), int(points[7][1])), (0, 255, 0), lineThickness );\n\tcv2.line(im, (int(points[6][0]), int(points[6][1])), (int(points[7][0]), int(points[7][1])), (0, 255, 0), lineThickness );\n\n\tif debug:\n\t\tcv2.line(im, (0,-int(centre[1][3])*scale), (500,-int(centre[1][3])*scale), (0, 0, 255), lineThickness )\n\t\tcv2.line(im, (-int(centre[0][3])*scale,0), (-int(centre[0][3])*scale,500), (0, 0, 255), lineThickness )\n\n\n\t\n\n\t# yellow \n\tif ( cust_cross(np.array([zpoints[0], zpoints[4], zpoints[1]])) < 90):\n\t\tcv2.fillConvexPoly(im, np.array([[int(points[0][0]), int(points[0][1])], \n\t\t\t[int(points[4][0]), int(points[4][1])], \n\t\t\t[int(points[5][0]), int(points[5][1])], \n\t\t\t[int(points[1][0]), int(points[1][1])]], \"int32\"), \n\t\t\t(0, 255, 255))\n\n\t# # purple \n\tif ( cust_cross(np.array([zpoints[0], zpoints[2], zpoints[4]]))< 90):\n\t\tcv2.fillConvexPoly(im, np.array([[int(points[0][0]), int(points[0][1])], \n\t\t\t[int(points[2][0]), int(points[2][1])], \n\t\t\t[int(points[6][0]), int(points[6][1])], \n\t\t\t[int(points[4][0]), int(points[4][1])]], \"int32\"), \n\t\t\t(128,0,128))\n\n\t# # red\n\tif ( cust_cross(np.array([zpoints[1], zpoints[5], zpoints[3]]))< 90):\n\t\tcv2.fillConvexPoly(im, np.array([[int(points[1][0]), int(points[1][1])], \n\t\t\t[int(points[5][0]), int(points[5][1])], \n\t\t\t[int(points[7][0]), int(points[7][1])], \n\t\t\t[int(points[3][0]), int(points[3][1])]], \"int32\"), \n\t\t\t(0,0,255))\n\n\t#green\n\tif ( cust_cross(np.array([zpoints[4], zpoints[5], zpoints[6]]))>90):\n\t\tcv2.fillConvexPoly(im, np.array([[int(points[4][0]), int(points[4][1])], \n\t\t\t[int(points[5][0]), int(points[5][1])], \n\t\t\t[int(points[7][0]), int(points[7][1])], \n\t\t\t[int(points[6][0]), int(points[6][1])]], \"int32\"), \n\t\t\t(0,128,0))\n\n\t# blue\n\tif ( cust_cross(np.array([zpoints[2], zpoints[6], zpoints[3]]))> 90):\n\t\tcv2.fillConvexPoly(im, np.array([[int(points[2][0]), int(points[2][1])], \n\t\t\t[int(points[6][0]), int(points[6][1])], \n\t\t\t[int(points[7][0]), int(points[7][1])], \n\t\t\t[int(points[3][0]), int(points[3][1])]], \"int32\"), \n\t\t\t(255,0,0))\n\n\t# white\n\tif (cust_cross(np.array([zpoints[0], zpoints[1], zpoints[2]])) < 90):\n\t\tcv2.fillConvexPoly(im, np.array([[int(points[0][0]), int(points[0][1])], \n\t\t\t[int(points[1][0]), int(points[1][1])], \n\t\t\t[int(points[3][0]), int(points[3][1])], \n\t\t\t[int(points[2][0]), int(points[2][1])]], \"int32\"), \n\t\t\t(255,255,255))\n\n\n\ndef rotateOx_matrix(phi, x, y, z):\n\t#return np.array([[np.cos(phi),0,np.sin(phi),0],\n\t#\t[0,1,0,0],\n\t#\t[-np.sin(phi), 0, np.cos(phi), 0],\n\t#\t[0,0,0,1]\n\t#\t])\n\t#return np.array([[np.cos(phi)+(1-np.cos(phi))*x**2, 
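# The twelve cv2.line calls in draw_cube() above can also be written
# data-driven; a sketch, reusing draw_cube's local projected points and the
# vertex numbering noted beside the points array further down:
EDGES = [(0, 1), (0, 2), (0, 4), (1, 3), (1, 5), (2, 3),
         (2, 6), (3, 7), (4, 5), (4, 6), (5, 7), (6, 7)]
for a, b in EDGES:
    cv2.line(im, (int(points[a][0]), int(points[a][1])),
             (int(points[b][0]), int(points[b][1])), (0, 255, 0), lineThickness)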
(1-np.cos(phi))*x*y-np.sin(phi)*z, (1-np.cos(phi))*x*z+np.sin(phi)*y, 0],\n\t#\t[(1-np.cos(phi))*y*x + np.sin(phi)*z, np.cos(phi) + (1-np.cos(phi))*y**2, (1-np.cos(phi))*y*z-np.sin(phi)*x, 0],\n\t#\t[(1-np.cos(phi))*z*x - np.sin(phi)*y, (1-np.cos(phi))*z*y + np.sin(phi)*x, np.cos(phi) + (1-np.cos(phi))*z**2, 0 ],\n\t#\t[0,0,0,0]\n\t#\t])\n\treturn np.array([[x**2+np.cos(phi)*(1-x**2), x*(1-np.cos(phi))*y - z*np.sin(phi),\t x*z*(1-np.cos(phi)) + y*np.sin(phi), \t0 ],\n\t\t[x*(1-np.cos(phi))*y + z*np.sin(phi), y**2+np.cos(phi)*(1-y**2),\t\t y*z*(1-np.cos(phi)) - x*np.sin(phi),\t0 ],\n\t\t[x*(1-np.cos(phi))*z - y*np.sin(phi), y*(1-np.cos(phi))*z + x*np.sin(phi),\t z**2 + np.cos(phi)*(1-z**2),\t\t0 ],\n\t\t[0,0,0,1],\n\t\t])\n\n\n\n\ndef move_matrix(xk=0,yk=0,zk=0):\n\treturn np.array([\n\t[1., 0., 0., xk],\n\t[0.,1.,0., yk],\n\t[0.,0.,1., zk],\n\t[0.,0.,0.,1.]\n\t])\n\ndef change_size(size):\n\tglobal points\n\tglobal l\n\tglobal centre\n\tl+=size\n\tpoints = np.array(\n\t\t\t\t[[stx, sty, stz,1.],\t\t\n\t\t\t\t[stx + l, sty, stz,1.],\t\t\n\t\t\t\t[stx, sty + l, stz,1.],\t\t\n\t\t\t\t[stx + l, sty + l, stz,1.],\t\t\n\t\t\t\t[stx, sty, stz+l,1.], \t\n\t\t\t\t[stx + l, sty, stz+l,1.], \t\n\t\t\t\t[stx, sty + l, stz+l,1.], \t\n\t\t\t\t[stx + l, sty + l, stz+l,1.]])\t\n\ttmpoints = accumulate_matrix@points.T\n\ttmpoints = tmpoints.T\n\n\tcentre = np.array([\n\t[1.,0.,0., -(tmpoints[0][0]+tmpoints[7][0])/2],\n\t[0.,1.,0., -(tmpoints[0][1]+tmpoints[7][1])/2],\n\t[0.,0.,1., -(tmpoints[0][2]+tmpoints[7][2])/2],\n\t[0.,0.,0.,1.]])\n\n\n\n\n\n\ndef callback(event, x,y, flags, param ):\n\tglobal im\n\tglobal accumulate_matrix \n\tglobal centre # to change centre by alt + left_button\n\tglobal points\t\t # to change size by mouse wheel\n\tglobal l\n\t\n\tif event == 1:\n\t\treturn\n\n\tif event == 4:\n\t\tcallback.lx = -1\n\t\tcallback.ly = -1\n\t\treturn\n\n\tif flags == 1:\n\t\tif callback.lx != -1 :\n\n\t\t\tif x > callback.lx:\n\t\t\t\taccumulate_matrix = linalg.inv(centre) @rotateOx_matrix (-np.pi/700*(x-callback.lx),0,1,0) @centre @accumulate_matrix\t\n\t\t\telse :\t\n\t\t\t\taccumulate_matrix = linalg.inv(centre) @rotateOx_matrix (np.pi/700*abs(x-callback.lx),0,1,0) @centre @accumulate_matrix\n\t\t\t\n\t\t\tif y > callback.ly:\n\t\t\t\taccumulate_matrix = linalg.inv(centre) @rotateOx_matrix (np.pi/700*(y-callback.ly),1,0,0) @centre @accumulate_matrix\n\t\t\telse :\n\t\t\t\taccumulate_matrix = linalg.inv(centre) @rotateOx_matrix (-np.pi/700*abs(y-callback.ly),1,0,0) @centre @accumulate_matrix\n\n\t\t\tdraw_cube(im, accumulate_matrix, points)\n\t\t\tcv2.imshow(\"cube\", im)\n\t\t\tim = np.zeros((height, width, 3), np.uint8)\n\n\t\tcallback.lx = x\n\t\tcallback.ly = y\n\n\tif flags == 33:\n\t\tif callback.lx != -1 :\n\t\t\tif x > callback.lx:\n\t\t\t\taccumulate_matrix = move_matrix(xk = 0.5+ 0.1*(x-callback.lx)) @accumulate_matrix\n\t\t\t\tcentre = move_matrix(xk = -0.5 - 0.1*(x-callback.lx)) @ centre\n\t\t\t\n\t\t\telif x < callback.lx:\n\t\t\t\taccumulate_matrix = move_matrix(xk = -0.5 - 0.1*abs(x-callback.lx)) @accumulate_matrix\n\t\t\t\tcentre = move_matrix(xk = 0.5 + 0.1*abs(x-callback.lx)) @ centre\n\n\n\t\t\tif y > callback.ly:\n\t\t\t\taccumulate_matrix = move_matrix(yk = 0.5 + 0.1*(y-callback.ly)) @accumulate_matrix\n\t\t\t\tcentre = move_matrix(yk = -0.5 - 0.1*(y-callback.ly)) @ centre\n\n\t\t\telif y < callback.ly:\n\t\t\t\taccumulate_matrix = move_matrix(yk = -0.5 - 0.1*abs(y-callback.ly)) @accumulate_matrix\n\t\t\t\tcentre = move_matrix(yk = 0.5 + 0.1*abs(y-callback.ly)) @ 
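# A quick numerical check of rotateOx_matrix() above (not part of the original
# script): for a unit axis the 3x3 block should be orthogonal with determinant
# +1. Axis (0, 1, 0) and angle 0.3 are arbitrary test values.
R = rotateOx_matrix(0.3, 0.0, 1.0, 0.0)[:3, :3]
assert np.allclose(R @ R.T, np.identity(3))
assert np.isclose(np.linalg.det(R), 1.0)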
centre\n\n\t\t\tdraw_cube(im, accumulate_matrix, points)\n\t\t\tcv2.imshow(\"cube\", im)\n\t\t\tim = np.zeros((height, width, 3), np.uint8)\n\n\t\tcallback.lx = x\n\t\tcallback.ly = y\n\n\tif event == 10 :\n\t\tif flags > 0:\n\t\t\tchange_size(1)\n\t\t\tdraw_cube(im, accumulate_matrix, points)\n\t\t\tcv2.imshow(\"cube\", im)\n\t\t\tim = np.zeros((height, width, 3), np.uint8)\n\t\telse:\n\t\t\tchange_size(-1)\n\t\t\tdraw_cube(im, accumulate_matrix, points)\n\t\t\tcv2.imshow(\"cube\", im)\n\t\t\tim = np.zeros((height, width, 3), np.uint8)\n\ncallback.lx = -1\ncallback.ly = -1\n\nheight,width = 500, 500\nim = np.zeros((height,width,3), np.uint8)\n\naccumulate_matrix = np.identity(4)\n\n\nproj_matr3d = np.array(\n\t[[1.,0.,0.,0.],\n\t[0.,1.,0.,0.],\n\t[0.,0.,0.,0.],\n\t[0.,0.,0.,1.]])\n\nl = 50\nstx = 110.\nsty = 100.\nstz = 50.\n\npoints = np.array(\n [[stx, sty, stz,1.],\t\t#0 0-4, 0-2, 0-1\n [stx + l, sty, stz,1.],\t\t#1 1-5, 1-3\t\n [stx, sty + l, stz,1.],\t\t#2 2-6, 2-3\n [stx + l, sty + l, stz,1.],\t\t#3 3-7\n [stx, sty, stz+l,1.], \t#4 4-5, 4-6\n [stx + l, sty, stz+l,1.], \t#5 5-7\n [stx, sty + l, stz+l,1.], \t#6 6-7\n [stx + l, sty + l, stz+l,1.]])\t#7\n\n\n\ncentre = np.array([\n\t[1.,0.,0., -(stx + l/2)],\n\t[0.,1.,0., -(sty + l/2)],\n\t[0.,0.,1., -(stz + l/2)],\n\t[0.,0.,0.,1.]])\n\n\nscale = 2\nlineThickness = 2\n\ndebug = False\n\ncv2.namedWindow(\"cube\")\ncv2.setMouseCallback(\"cube\", callback)\n\ndraw_cube(im, accumulate_matrix, points)\ncv2.imshow(\"cube\", im)\n\nwhile True:\n\n\tk = cv2.waitKey(0)\n\n\tif k & 0xFF == ord('x'):\n\t\tdebug = not debug\n\n\tif k & 0xFF == 27:\n\t\tbreak\n\n\tdraw_cube(im, accumulate_matrix, points)\n\n\tcv2.imshow(\"cube\", im)\n\n\tim = np.zeros((height, width, 3), np.uint8)\n\n\n\n\n","sub_path":"cv2test.py","file_name":"cv2test.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"153698842","text":"from django.contrib import admin\n\nfrom icekit.admin import ICEkitFluentContentsAdmin\nfrom icekit.admin_tools.mixins import ListableMixinAdmin, HeroMixinAdmin, \\\n GoogleMapMixinAdmin\n\nfrom . 
import models\n\n\nclass AbstractLocationAdmin(\n ListableMixinAdmin,\n HeroMixinAdmin,\n):\n prepopulated_fields = {\"slug\": (\"title\",)}\n\n raw_id_fields = HeroMixinAdmin.raw_id_fields\n\n fieldsets = (\n (None, {\n 'fields': (\n 'title',\n 'slug',\n 'is_home_location',\n 'layout',\n )\n }),\n ('Display details', {\n 'fields': (\n 'address',\n 'phone_number',\n 'phone_number_call_to_action',\n 'url',\n 'url_call_to_action',\n 'email',\n 'email_call_to_action',\n )\n }),\n ) + \\\n HeroMixinAdmin.FIELDSETS + \\\n ListableMixinAdmin.FIELDSETS\n\n\nclass AbstractLocationWithGoogleMapAdmin(\n AbstractLocationAdmin\n):\n\n fieldsets = AbstractLocationAdmin.fieldsets + \\\n GoogleMapMixinAdmin.FIELDSETS\n\n\nclass LocationAdmin(\n AbstractLocationWithGoogleMapAdmin,\n ICEkitFluentContentsAdmin,\n):\n list_filter = ICEkitFluentContentsAdmin.list_filter\n\n\nadmin.site.register(models.Location, LocationAdmin)\n","sub_path":"icekit/plugins/location/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"358463194","text":"from rest_framework import serializers\nfrom porzotokApp import models\nfrom django.db.models import Q\n\n\n# hotel details serializer\nclass HotelDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = \"__all__\"\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['hotel_user'] = HotelUserOwnerSerializer(instance.hotel_user_id).data\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['city'] = CitySerializer(instance.city_id).data\n response['reviews'] = ReviewSerializer(models.Review.objects.filter(hotel_id__hotel_id = instance.hotel_id), many=True).data\n return response\n\n# hotel user owner serializer\nclass HotelUserOwnerSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelUserOwner\n fields = \"__all__\"\n extra_kwargs = {'hotel_user_password': {'write_only': True}}\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['hotel_user_type'] = HotelUserTypeSerializer(instance.hotel_user_type_id).data\n return response\n\n#Image serializer\nclass ImageSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Image\n fields = '__all__'\n\n\n# country serializer\nclass CountrySerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Country\n fields = '__all__'\n\n# state serializer\nclass StateSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.State\n fields = '__all__'\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['country'] = CountrySerializer(instance.country_id).data\n return response\n\n# city serializer\nclass CitySerializer(serializers.ModelSerializer):\n class Meta:\n model = models.City\n fields = '__all__'\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['state'] = StateSerializer(instance.state_id).data\n return response\n\n# hotel user type serializer\nclass HotelUserTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelUserType\n fields = \"__all__\"\n\n# ReviewSerializer\nclass ReviewSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Review\n fields = 
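# The serializers in this module all repeat one pattern: a flat
# ModelSerializer whose to_representation() grafts nested, read-only data onto
# the output dict. A generic sketch of the pattern (this exact class is
# illustrative, not part of the original module; it reuses models.HotelDetails
# and CitySerializer defined above):
class NestedOutputSerializer(serializers.ModelSerializer):
    class Meta:
        model = models.HotelDetails
        fields = "__all__"

    def to_representation(self, instance):
        # start from the default flat representation...
        data = super().to_representation(instance)
        # ...then swap the city foreign key for its nested serialization
        data['city'] = CitySerializer(instance.city_id).data
        return data
# compared with declaring nested serializers as fields, this keeps writes on
# plain primary keys while reads return the expanded objects.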
\"__all__\"\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['user_details'] = UsersReviewSerializer(instance.user_id).data\n response['replay'] = ReviewReplaySerializer(models.ReviewReplay.objects.filter(review_id = instance.review_id), many=True).data\n return response\n\n# User Review Serializer\nclass UsersReviewSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = ['user_id','user_name', 'user_image']\n\n# ReviewReplaySerializer\nclass ReviewReplaySerializer(serializers.ModelSerializer):\n class Meta:\n model = models.ReviewReplay\n fields = \"__all__\"\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['hotel_details'] = ReviewReplayHotelDetailsSerializer(instance.hotel_id).data\n response['photo_details'] = FrontEndHotelLogoSerializer(instance.logo_id).data\n return response\n\nclass ReviewReplayHotelDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_id','hotel_name']\n\nclass FrontEndHotelLogoSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FrontEndHotelLogo\n fields = \"__all__\"\n\n# search autocomplete hotels\nclass FrontEndSearchHotelDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_name']\n\n# search autocomplete city\nclass FrontEndSearchCitySerializer(serializers.ModelSerializer):\n class Meta:\n model = models.City\n fields = ['city_id', 'city_name']\n\n# search result details\nclass SearchResultDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_id', 'hotel_name', 'slug_name', 'city_id', 'hotel_info', 'image_galary_details_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['city'] = CitySerializer(instance.city_id).data\n response['hotel_facilities'] = FrontEndSearchHotelFacilitesSerializer(models.HotelFacilites.objects.filter(hotel_id__hotel_id=instance.hotel_id), many=True).data\n response['room_details'] = HotelDetailsFrontEndSearchRoomPriceSerializer(models.Room.objects.filter(hotel_id=instance.hotel_id), many=True).data\n response['reviews'] = ReviewSerializer(models.Review.objects.filter(hotel_id__hotel_id = instance.hotel_id), many=True).data\n return response\n\n# search forntend HotelFacilitesFrontEndSerializer\nclass FrontEndSearchHotelFacilitesSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelFacilites\n fields = ['hotel_facilites_id', 'facilites_id', 'price_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['facilites_id'] = FacilitesSerializer(instance.facilites_id).data\n response['price_id'] = PriceSerializer(instance.price_id).data\n return response\n\n# Facilites Serializer\nclass FacilitesSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Facilites\n fields = \"__all__\"\n\n# Price serializer\nclass PriceSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Price\n fields = '__all__'\n\n# Search page front end hotels with room price\nclass HotelDetailsFrontEndSearchRoomPriceSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ('room_id', 'is_deals', 'price_id')\n\n 
def to_representation(self, instance):\n response = super().to_representation(instance)\n response['price_details'] = FrontEndSearchPriceSerializer(instance.price_id).data\n return response\n\n# Front end search page Price\nclass FrontEndSearchPriceSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Price\n fields = ['price_id', 'price', 'offer_price']\n\n# get search keyword \nclass SearchKeyWordSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RecentSearch\n fields = ['keyword']\n\n# HotelDetailsFrontEndSingleRoomSerializer\nclass HotelDetailsFrontEndSingleRoomSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ('room_id', 'room_name', 'room_no', 'is_deals', 'deal_start_date', 'is_active', 'floor_id', 'price_id', 'room_status', 'room_description', 'image_galary_details_id')\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['floor_details'] = FloorSerializer(instance.floor_id).data\n response['price_details'] = PriceSerializer(instance.price_id).data\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['facilites_details'] = FacilitesGroupSerializerFront(models.FacilitesGroup.objects.filter(room_id=instance.room_id), many=True).data\n return response\n\n# Single Hotel Info\nclass HotelInfoFrontEndRoomSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ('hotel_id', 'hotel_name', 'latitude', 'longitude', 'city_id', 'hotel_info', 'image_galary_details_id')\n lookup_field = \"slug_name\"\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['city'] = CitySerializer(instance.city_id).data\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['room_details'] = HotelDetailsFrontEndSingleRoomSerializer(models.Room.objects.filter(hotel_id=instance.hotel_id).order_by('-is_deals'), many=True).data\n response['hotel_facilities_details'] = HotelFacilitesFrontEndSerializer(models.HotelFacilites.objects.filter(hotel_id=instance.hotel_id), many=True).data\n response['hotel_foodmenu_details'] = HotelFoodMenuFrontEndSerializer(models.FoodMenu.objects.filter(hotel_id=instance.hotel_id), many=True).data\n response['reviews'] = ReviewSerializer(models.Review.objects.filter(hotel_id__hotel_id = instance.hotel_id), many=True).data\n return response\n\n\n\n# Floor serializer\nclass FloorSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Floor\n fields = \"__all__\"\n\n# FacilitesGroupSerializerFront\nclass FacilitesGroupSerializerFront(serializers.ModelSerializer):\n class Meta:\n model = models.FacilitesGroup\n fields = (\"facilites_id\", \"facilites_group_id\",)\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['facilites_id'] = FacilitesSerializer(instance.facilites_id).data\n return response\n\n# HotelFacilitesFrontEndSerializer\nclass HotelFacilitesFrontEndSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelFacilites\n fields = ['hotel_facilites_id', 'facilites_id', 'hotel_id', 'price_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['facilites_details'] = FacilitesSerializer(instance.facilites_id).data\n 
response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# HotelFoodMenuFrontEndSerializer\nclass HotelFoodMenuFrontEndSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FoodMenu\n fields = ['food_menu_id', 'food_name', 'hotel_id', 'price_id','food_image']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# Cart serializer\nclass CartSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Cart\n fields = '__all__'\n\n# RoomCartDetails Serializer\nclass RoomCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RoomCartDetails\n fields = \"__all__\"\n extra_kwargs = {'check_in_date':{'required':False},'check_out_date':{'required':False}, 'total_day':{'required':False}}\n\n# FoodMenuCartDetails Serializer\nclass FoodMenuCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FoodMenuCartDetails\n fields = \"__all__\"\n\n# FacilitesCartDetails Serializer\nclass FacilitesCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FacilitesCartDetails\n fields = \"__all__\"\n\n# PackageCartDetails Serializer\nclass PackageCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.PackageCartDetails\n fields = \"__all__\"\n\n# FrontEnd Cart serializer\nclass UserFrontEndCartSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Cart\n fields = ['cart_id', 'user_id', 'session_id', 'created_at']\n\n# FrontEnd Room Cart Show Details Serializer\nclass FrontEndRoomCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RoomCartDetails\n fields = \"__all__\"\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['cart_details'] = UserFrontEndCartSerializer(instance.cart_id ).data\n response['room_details'] = FrontEndRoomSerializer(instance.room_id).data\n response['hotels'] = HotelDetailsFrontEndSingleSerializer(models.HotelDetails.objects.get(hotel_id=instance.room_id.hotel_id.hotel_id)).data\n return response\n\n# FrontEnd Room Serializer\nclass FrontEndRoomSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ['room_id', 'hotel_id', 'room_name', 'room_no', 'price_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# HotelDetailsFrontEndSingleSerializer\nclass HotelDetailsFrontEndSingleSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_id', 'hotel_name', 'latitude', 'longitude', 'city_id', 'image_galary_details_id', 'hotel_info', 'hotel_type']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['city_id'] = CitySerializer(instance.city_id).data\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n return response\n\n# FrontEnd Food Menu Cart Details Serializer\nclass FrontEndFoodMenuCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FoodMenuCartDetails\n fields = 
['food_cart_details_id', 'cart_id', 'food_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['cart_details'] = UserFrontEndCartSerializer(instance.cart_id ).data\n response['food_details'] = FrontEndFoodMenuSerializer(instance.food_id ).data\n return response\n\n# FrontEnd FoodMenu Serializer\nclass FrontEndFoodMenuSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FoodMenu\n fields = ['food_menu_id', 'food_name', 'price_id', 'food_image']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# FrontEnd Facilities Cart Details Serializer\nclass FrontEndFacilitiesCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FacilitesCartDetails\n fields = ['facilities_cart_details_id', 'cart_id', 'hotel_facilites_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['cart_details'] = UserFrontEndCartSerializer(instance.cart_id ).data\n response['hotel_facilities_details'] = FrontEndHotelFacilitesSerializer(instance.hotel_facilites_id ).data\n return response\n\n# FrontEnd Hotel Facilites Serializer\nclass FrontEndHotelFacilitesSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelFacilites\n fields = ['hotel_facilites_id', 'facilites_id', 'price_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['facilites_details'] = FacilitesSerializer(instance.facilites_id).data\n response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# FrontEnd Package Cart Details Serializer\nclass FrontEndPackageCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.PackageCartDetails\n fields = ['package_cart_details_id', 'package_id', 'cart_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n\n response['cart_details'] = UserFrontEndCartSerializer(instance.cart_id ).data\n response['package_details'] = FrontEndPackageSerializer(instance.package_id ).data\n\n return response\n\n# FrontEnd package Serializer\nclass FrontEndPackageSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Package\n fields = ['package_id', 'package_name', 'package_image', 'price_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# User register android serializer\nclass UserRegisterAndroidSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = \"__all__\"\n extra_kwargs = {'user_password': {'write_only': True}}\n\n# User profile update android serializer\nclass UserProfileUpdateAndroidSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n exclude = [\"user_password\"]\n extra_kwargs = {'user_password': {'write_only': True}}\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['city'] = CitySerializer(instance.city_id).data\n return response\n\n# single User serializer\nclass SingleUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n exclude = [\"user_password\"]\n \n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['city'] = 
CitySerializer(instance.city_id).data\n return response\n\n# UpdateRoomCartDetailsSerializer\nclass UpdateRoomCartDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RoomCartDetails\n fields = ('room_cart_details_id', 'check_in_date', 'check_out_date', 'total_day', 'static_regular_price', 'static_offer_price')\n\n# User login\nclass UserLoginAndroidSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n exclude = ['user_password']\n\n# User profile photo change android serializer\nclass UserProfilePhotoUpdateAndroidSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = [\"user_image\"]\n\n# recommended hotel details\nclass RecommendedHotelDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = \"__all__\"\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['city'] = CitySerializer(instance.city_id).data\n response['reviews'] = ReviewSerializer(models.Review.objects.filter(hotel_id__hotel_id = instance.hotel_id), many=True).data\n return response\n\nclass RoomDealsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ['room_id', 'room_name','room_no', 'image_galary_details_id', 'price_id','hotel_id','floor_id', 'allow_offer_percent', 'offer_discount_price']\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['floor_details'] = FloorSerializer(instance.floor_id).data\n response['hotel_details'] = HotelRoomDealsSerializer(instance.hotel_id).data\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['price_details'] = PriceSerializer(instance.price_id).data\n return response\n\n# 24 Hours Deals Porzotok Serializer\nclass HotelRoomDealsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_id', 'hotel_name', 'city_id']\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['reviews'] = ReviewSerializer(models.Review.objects.filter(hotel_id__hotel_id = instance.hotel_id), many=True).data\n response['city'] = CitySerializer(instance.city_id).data\n return response\n\n# releted Room Details \nclass RelatedRoomDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ['room_id', 'room_name', 'room_no', 'room_status', 'room_description', 'price_id','is_deals']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['price_details'] = PriceSerializer(instance.price_id).data\n response['facilites_details'] = SingleRoomFacilitesGroupSerializer(models.FacilitesGroup.objects.filter(room_id__room_id=instance.room_id), many=True).data\n return response\n\n# Single Room Details \nclass SingleRoomDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ['room_id', 'room_name', 'room_no', 'room_status', 'room_description', 'price_id','is_deals']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n 
response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['price_details'] = PriceSerializer(instance.price_id).data\n response['facilites_details'] = SingleRoomFacilitesGroupSerializer(models.FacilitesGroup.objects.filter(room_id__room_id=instance.room_id), many=True).data\n # response['related_rooms'] = RelatedRoomDetailsSerializer(models.Room.objects.filter(~Q(room_id=instance.room_id), hotel_id__city_id__city_name=instance.hotel_id.city_id.city_name, price_id__price__range=((instance.price_id.price - 5000), (instance.price_id.price + 5000))), many=True).data\n return response\n\n# Single Room Facilites Group\nclass SingleRoomFacilitesGroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.FacilitesGroup\n fields = \"__all__\"\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['facilites_id'] = FacilitesSerializer(instance.facilites_id).data\n return response\n\n# Hotel by City\nclass HotelByCitySerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_id', 'hotel_name','slug_name','hotel_info','image_galary_details_id', 'city_id']\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n response['city'] = CitySerializer(instance.city_id).data\n return response\n\n# Booking list\nclass BookingListSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.ConfirmBooking\n fields = ['booking_id', 'booking_status', 'total_amount', 'created_at']\n\n# single Booking details serializer \nclass BookingDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.ConfirmBooking\n fields = '__all__'\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['customer_details'] = BookingDetailsCustomerSerializer(instance.user_id).data\n response['booking_information'] = CartRoomDetailsSerializer(models.RoomCartDetails.objects.filter(cart_id=instance.cart_id), many=True).data\n return response\n\n# Booking Details Customer\nclass BookingDetailsCustomerSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = ['user_name', 'user_phone', 'user_short_address']\n extra_kwargs = {'user_image':{'required':False}}\n\n# Room details Serializer\nclass CartRoomDetailsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RoomCartDetails\n exclude = ['is_hold', 'created_at']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['room_details'] = HotelDetailsInformationSerializer(instance.room_id).data\n return response\n\n# Hotel Room Info Serializer\nclass HotelDetailsInformationSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Room\n fields = ('room_id', 'room_name', 'room_no', 'hotel_id', 'image_galary_details_id')\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['hotel_details'] = HotelInformationSerializer(instance.hotel_id).data\n response['image_url'] = ImageSerializer(models.Image.objects.filter(image_galary_details_id = instance.image_galary_details_id), many=True).data\n return response\n\n# hotel information\nclass 
HotelInformationSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.HotelDetails\n fields = ['hotel_id', 'latitude', 'longitude', 'hotel_name', 'short_address', 'city_id']\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response['city'] = CitySerializer(instance.city_id).data\n return response\n\n# Booking\nclass BookingSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Booking\n exclude = ['offer_id', 'hotel_discount_id', 'gift_card_id', 'cupon_id']","sub_path":"porzotokProject/androidAPI_V1/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":25750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"315178964","text":"class CiphertextMessage(Message):\n def __init__(self, text):\n '''\n Initializes a CiphertextMessage object\n\n text (string): the message's text\n\n a CiphertextMessage object has two attributes:\n self.message_text (string, determined by input text)\n self.valid_words (list, determined using helper function load_words)\n '''\n Message.__init__(self, text)\n #should already be initialized:\n self.message_text = text\n self.valid_words = load_words(file_name)\n\n def decrypt_message(self):\n '''\n Decrypt self.message_text by trying every possible shift value\n and find the \"best\" one. We will define \"best\" as the shift that\n creates the maximum number of real words when we use apply_shift(shift)\n on the message text. If s is the original shift value used to encrypt\n the message, then we would expect 26 - s to be the best shift value\n for decrypting it.\n\n Note: if multiple shifts are equally good such that they all create\n the maximum number of real words, you may choose any of those shifts (and their\n corresponding decrypted messages) to return\n\n Returns: a tuple of the best shift value used to decrypt the message\n and the decrypted message text using that shift value\n '''\n bestShift = 0 #initializing the bestShift parameter\n bestDecryptedMessage = \"\" #will be collapsing the list of words into a string here\n bestNumberOfRealWords = 0\n\n validWords = self.valid_words\n #decrypted words are checked against this list; split words will include non-alpha chars\n\n for shift in range(0, 26):\n realWordCount = 0 #counting the number of words that are valid in the decrypted message, reset at each change of shift value\n decryptedMessage = self.apply_shift(shift)\n #apply_shift shifts the whole message at once, so decrypt the full text with the current shift value\n messageWordsListDecrypted = decryptedMessage.split(\" \")\n #split the decrypted message into words using a space as separator\n for word in messageWordsListDecrypted:\n #counting the number of valid words created from the current shift value\n if word in validWords:\n realWordCount += 1\n if realWordCount > bestNumberOfRealWords:\n bestNumberOfRealWords = realWordCount\n bestShift = shift\n bestDecryptedMessage = \" \".join(messageWordsListDecrypted)\n return (bestShift, 
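# Message.apply_shift() is defined elsewhere in this problem set; a minimal
# standalone stand-in consistent with how decrypt_message() uses it (shift
# every letter, preserve case, pass non-alpha characters through) might be:
def apply_shift_standalone(text, shift):
    shifted = []
    for ch in text:
        if ch.isupper():
            shifted.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
        elif ch.islower():
            shifted.append(chr((ord(ch) - ord('a') + shift) % 26 + ord('a')))
        else:
            shifted.append(ch)  # punctuation, digits and spaces unchanged
    return ''.join(shifted)
# e.g. "hello" encrypted with shift 2 is "jgnnq"; decrypting uses 26 - 2 = 24:
# apply_shift_standalone("jgnnq", 24) -> "hello"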
bestDecryptedMessage)","sub_path":"ps6/class_CipherTextMessage.py","file_name":"class_CipherTextMessage.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"75042462","text":"#-------------------------------------------------------------------------------\n# Copyright (C) 09/2016 Eyob Demissie\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in \n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A \n# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THETHE AUTHORS OR \n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER \n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# \n# Except as contained in this notice, the name(s) of the above copyright holders \n# shall not be used in advertising or otherwise to promote the sale, use or other\n# dealings in this Software without prior written authorization.\n#-------------------------------------------------------------------------------\nimport SCons.Environment\nimport os\nimport glob\nimport fnmatch\n\n\ndef _GetComponentFromGit(env, target, source):\n \"\"\"This function is used to import a single component from git repository.\n The SourceCode() builder can be used for this but there is a problem.\n The components have SConscript files that need to be read during the\n reading phase of the SCons. If the SourceCode() target is not built, there\n is no way find the SConscript. So we are forced to use this function\n which gets the components immediately during the reading phase of the\n SConstcripts.\n \n\n For github there is a possbility of using svn export itself... since\n they have svn interface to the git repos but for now we use git\n itself directly.\n We have two choices git archive or git clone\n Since the archive is not supported in github then we don't use it,\n although bitbucket allows it (not verified this).\n \n So we clone and delete .git file. 
For now, I am not deleting .git\n I need to figure out if this is a good idea or not.\n \n git_ret = gc_env.Execute('git clone --depth=1 \"%s\" \"%s\"' % (source, trgt))\n if git_ret != 0:\n There is an error\n else:\n Remove the .git folder in the newly cloned location to make it\n plain vanilla code.\n \"\"\"\n gc_env = env.Clone()\n trgt = gc_env.Dir(target).srcnode().abspath\n def gc_print_cmd_line(s, target, source, env):\n # do not print the command line.\n pass\n def gc_show_cmd(env, trgt):\n print(\"*** Importing component - [%s] ***\" % \\\n env.Dir('#').rel_path(env.Dir(trgt).srcnode()))\n gc_env['PRINT_CMD_LINE_FUNC'] = gc_print_cmd_line\n if not os.path.exists(trgt):\n trgt_unix_like_path = env.Dir(trgt).srcnode().abspath\n trgt_unix_like_path = trgt_unix_like_path.replace('\\\\', '/')\n #print(source, trgt_unix_like_path)\n git_ret = gc_env.Execute('git clone --depth=1 --recursive -q -c advice.detachedHead=false %s \"%s\"' % (source, trgt_unix_like_path),\n show=gc_show_cmd(gc_env, trgt))\n if git_ret!=0:\n print(\"-\"*75)\n print(\"Build Script Error: Problem trying to import component.\")\n print(\"Make sure you have typed component name, version correctly.\")\n print(\"In addition, it may be a network connection problem.\")\n print(\"Try again.\")\n print(\"-\"*75)\n gc_env.Exit(1)\n\n #Clean up\n #git_repo = os.path.join(trgt, \".git\")\n #if (os.path.exists(git_repo)):\n # # Remove it.\n\n\ndef _glob_recursive(search_path, patterns, exclude_path=[]):\n matches = []\n for root, dirnames, filenames in os.walk(search_path):\n if [e for e in exclude_path if e in root]:\n continue\n for p in patterns:\n for filename in fnmatch.filter(filenames, p):\n matches.append(os.path.join(root, filename))\n for dirname in fnmatch.filter(dirnames, p):\n matches.append(os.path.join(root, dirname))\n return matches\n\n\ndef _FindComponentFiles(env, suffix=[], exclude=[], search_root=None, recursive=False):\n \"\"\" This function is used to find the component's files by looking at the\n the present source directory.\"\"\"\n suffix = env.Flatten(suffix)\n exclude = env.Flatten(exclude)\n\n if search_root is None:\n # Search under the present source directory.\n search_root = os.path.join(env.Dir('#').abspath,\n env.Dir('.').srcnode().path)\n search_path = search_root\n else:\n search_path = search_root\n\n m_f=[]\n if recursive:\n exclude_norm = [os.path.normpath(e) for e in exclude]\n m_f = _glob_recursive(search_path, [\"*.\"+ext for ext in suffix], [\".git\", \".svn\", \".bzr\", \".hg\"] + exclude + exclude_norm)\n else:\n for ext in suffix:\n for a in sorted(set([''])):\n search_path = os.path.join(search_path,a)\n m_f += env.Flatten(glob.glob(os.path.join(search_path,'*.'+ext)))\n\n m_f_filtered = []\n for f in m_f:\n # Check each file name and if it contains\n # \"unittest\" exclude from the list.\n # Filter the files and folders in the exclude list.\n if os.path.isdir(f):\n m_f_filtered.append(env.Dir(f))\n else:\n fparts = f.replace(\"\\\\\", \"|||\").replace(\"/\", \"|||\").split(\"|||\")\n fb = os.path.basename(f)\n if ('unittest' not in fb.lower()):\n if not [e for e in exclude if e in fparts]:\n m_f_filtered.append(env.File(f))\n return m_f_filtered\n\n\ndef _MergeDicts(env, d1, d2):\n \"\"\"This function merges dictionary d2 into d1. This is a very useful\n operation that we need repeatedly as we do recursive calls into\n component sconscripts. So it is added as part of the env so all the\n scripts have access to it without importing anything. 
For each key, the\n value is merged into a flattened list. Then it is sorted. If the items\n items are scons File() nodes their path is used to sort, otherwise\n they are just sorted as normal python objects.\"\"\"\n if not d2: d2={}\n if not d1: d1={}\n for k in d2:\n if k in d1:\n d=set(env.Flatten(list((d1[k] + d2[k],))))\n try:\n d=sorted(d, cmp=lambda x,y: cmp(x.path, y.path))\n except AttributeError:\n d=list(sorted(d))\n d1[k] = d\n else:\n d1[k]=d2[k]\n\n\ndef _ExtendEnvironment(envclass):\n envclass.GetComponentFromGit = _GetComponentFromGit\n envclass.FindComponentFiles = _FindComponentFiles\n envclass.MergeDicts = _MergeDicts\n_ExtendEnvironment(SCons.Environment.Environment)\n\n\nimport SCons.Script\n__site_init_file = os.path.abspath(__file__)\ndef _MakooSiteSconsGetPath(item):\n \"\"\" Given an item name, this function gets the absolute\n path of the item, within the site_scons or site_tools\n subfolder. This function exists to make sure that\n scripts that reference items in this folder do not have\n to be worried where site_scons folder is placed.\n \"\"\"\n site_scons_path = os.path.dirname(__site_init_file)\n if not item:\n return site_scons_path\n else:\n item_abs = os.path.join(site_scons_path, item)\n if os.path.isfile(item_abs):\n return item_abs\n item_abs = os.path.join(site_scons_path, \"site_tools\", item)\n if os.path.isfile(item_abs):\n return item_abs\n print(\"Error: Makoo Cannot find [%s] in [%s].\" % (item,site_scons_path))\n return None\n\ndef _MakooCommonScript(makoo_common_script_key=None):\n \"\"\" Given a key that identifies a builder, this function gets the\n script that matches the key. If the given key is not saved globally\n then we save it for subsequent calls that will call this function\n without a key. This function is used to make makoo work with different\n kind of builders. SConstruct can set the key at the beginning of \n scons call. 
But then, every re-usable component that does not\n care about which builder builds it can just call with None parameter.\n \"\"\"\n prj_scons = \"\"\n prj_scons_abspath = None\n\n try:\n prj_scons_saved = \"%s_project.scons\" % SCons.Script.MakooCommonScriptKey\n except:\n prj_scons_saved = None\n \n if makoo_common_script_key:\n prj_scons = \"%s_project.scons\" % makoo_common_script_key\n if prj_scons_saved:\n if makoo_common_script_key != SCons.Script.MakooCommonScriptKey:\n print(\"Error: Changing Makoo project builder from %s to %s not allowed.\" % (prj_scons_saved, prj_scons))\n prj_scons_abspath = None\n else:\n prj_scons_abspath = _MakooSiteSconsGetPath(prj_scons)\n else:\n SCons.Script.MakooCommonScriptKey = makoo_common_script_key\n prj_scons_abspath = _MakooSiteSconsGetPath(prj_scons)\n else:\n if prj_scons_saved:\n prj_scons_abspath = _MakooSiteSconsGetPath(prj_scons_saved)\n else:\n SCons.Script.MakooCommonScriptKey = makoo_common_script_key\n prj_scons_saved = \"%s_project.scons\" % Scons.Script.MakooCommonScriptKey\n prj_scons_abspath = _MakooSiteSconsGetPath(prj_scons_saved)\n return prj_scons_abspath\n \nSCons.Script.MakooSiteSconsGetPath = _MakooSiteSconsGetPath\nSCons.Script.MakooCommonScript = _MakooCommonScript\n\n\n\n","sub_path":"site_init.py","file_name":"site_init.py","file_ext":"py","file_size_in_byte":10071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"359035426","text":"import sys\n\nsys.path.append('../')\n\nimport CIAO_DatabaseTools\nfrom matplotlib import pyplot\nimport numpy\nimport scipy\n\n\nfig = pyplot.figure(0)\nax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\nax.clear()\n\n\ndb = CIAO_DatabaseTools.CIAO_Database()\n\nUTS = [1,2,3,4]\ncolors = ['b', 'g', 'r', 'y']\n\nvalues = db.query(keywords = [\"SEEING\", \"STREHL\"], AVC_State='ON',\n timeOfDay='NIGHT', UTS=UTS)\n #startTime='2017-03-25 00:00:00')\n #endTime='2017-02-28 00:00:00')\n\nfor i in UTS:\n ax.scatter(values[i][:,0], values[i][:,1], color = colors[i-1])\n\nfig.show()\n\n\ndb.close()\n","sub_path":"src/SQLTools/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"403609707","text":"# coding:utf-8\n'''\n@Copyright:LintCode\n@Author: monolake\n@Problem: http://www.lintcode.com/problem/binary-tree-path-sum\n@Language: Python\n@Datetime: 16-10-29 15:07\n'''\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\nclass Solution:\n # @param {TreeNode} root the root of binary tree\n # @param {int} target an integer\n # @return {int[][]} all valid paths\n\n def searchPath(self, root, target, result, path):\n \n if root == None:\n return\n \n if root.left != None:\n path.append(root.val)\n self.searchPath(root.left, target - root.val, result, path)\n path.pop()\n if root.right != None:\n path.append(root.val)\n self.searchPath(root.right, target - root.val, result, path)\n path.pop()\n if root.left == None and root.right == None: # leaf node\n path.append(root.val)\n if root.val == target:\n result.append(path[:])\n path.pop()\n \n return\n \n \n def binaryTreePathSum(self, root, target):\n # Write your code here\n result = []\n path = []\n self.searchPath(root, target, result, path)\n return result\n \n \n 
","sub_path":"376_binary-tree-path-sum/binary-tree-path-sum.py","file_name":"binary-tree-path-sum.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"560884872","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport jsonschema\nimport mock\n\nfrom fuel_plugin_builder import errors\nfrom fuel_plugin_builder.tests.base import BaseTestCase\nfrom fuel_plugin_builder.validators import BaseValidator\n\n\nclass TestBaseValidator(BaseTestCase):\n\n def setUp(self):\n class NewValidator(BaseValidator):\n def validate(self):\n pass\n\n self.plugin_path = '/tmp/plugin_path'\n self.validator = NewValidator(self.plugin_path)\n self.data = {'data': 'data1'}\n self.schema = {'schema': 'schema1'}\n\n @mock.patch('fuel_plugin_builder.validators.base.jsonschema')\n def test_validate_schema(self, schema_mock):\n self.validator.validate_schema(\n self.data,\n self.schema,\n 'file_path')\n schema_mock.validate.assert_called_once_with(\n self.data,\n self.schema)\n\n @mock.patch('fuel_plugin_builder.validators.base.jsonschema.validate',\n side_effect=jsonschema.exceptions.ValidationError('p1', 'p2'))\n def test_validate_schema_raises_error(self, validate_mock):\n with self.assertRaisesRegexp(\n errors.ValidationError,\n 'Wrong value format \"\", for file \"file_path\", p1'):\n self.validator.validate_schema(self.data, self.schema, 'file_path')\n validate_mock.assert_called_once_with(\n self.data,\n self.schema)\n\n @mock.patch('fuel_plugin_builder.validators.base.utils')\n @mock.patch(\n 'fuel_plugin_builder.validators.base.BaseValidator.validate_schema')\n def test_validate_file_by_schema(self, validate_mock, utils_mock):\n utils_mock.parse_yaml.return_value = self.data\n self.validator.validate_file_by_schema(self.schema, self.plugin_path)\n utils_mock.parse_yaml.assert_called_once_with(self.plugin_path)\n validate_mock(self.data, self.schema, self.plugin_path)\n","sub_path":"fuel_plugin_builder/fuel_plugin_builder/tests/test_base_validator.py","file_name":"test_base_validator.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"529587786","text":"import tensorflow as tf\n# with tf.device('/gpu:0'):\n# hello = tf.constant('Hello Tensorflow')\n#\n# va1 = tf.Variable(tf.random_normal([2,3]),name=\"weights\")\n# va2 = tf.Variable([1,2,3,4,5],name='height')\n# init_op = tf.initialize_all_variables()\n# sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True))\n# sess.run(init_op)\n# print(sess.run(hello))\n# saver = tf.train.Saver()\n# saver.save(sess,r'D:/1.ckpt')\n\nvay = tf.Variable([1,1,1,1,1],name='height')\nsaver = tf.train.Saver()\nsess = 
tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True))\nsaver.restore(sess,r'D:/1.ckpt')\n\nprint(sess.run(vay))\n\n\n\n\n","sub_path":"tensorFlow.py","file_name":"tensorFlow.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319485313","text":"from flask import Flask,request,jsonify\nfrom flask_httpauth import HTTPBasicAuth\n\napp=Flask(__name__)\n\nauth=HTTPBasicAuth()\n\nUSER_DATA={\n \"admin\":\"SuperSecretPwd\"\n }\n\n\n@auth.verify_password\ndef verify(username,password):\n if not(username and password):\n return False\n return USER_DATA.get(username) == password\n\n\n\n \n\nbooks_list=[\n {\n \"id\":0,\n \"author\":\"Jim Corbett\",\n \"language\":\"English\",\n \"title\":\"Story Of Sundarban\",\n },\n {\n \"id\":1,\n \"author\":\"Chinua Achebe\",\n \"language\":\"English\",\n \"title\":\"Things fall apart\",\n },\n {\n \"id\":2,\n \"author\":\"Hans Christian Anderson\",\n \"language\":\"Danish\",\n \"title\":\"Fairy Tales\",\n },\n { \n \"id\":3,\n \"author\":\"Emily Bront\",\n \"language\":\"English\",\n \"title\":\"Wuthering heights\",\n }, \n { \n \"id\":4,\n \"author\":\"Jorge Luis Borges\",\n \"language\":\"Spanish\",\n \"title\":\"Ficciones\",\n },\n {\n \"id\":5,\n \"author\":\"Sukumar Roy\",\n \"language\":\"Bengali\",\n \"title\":\"Sothpatra\",\n },\n {\n \"id\":6,\n \"author\":\"Arthur Conal Doyle\",\n \"language\":\"English\",\n \"title\":\"Sherlock Holmes\",\n },\n {\n \"id\":7,\n \"author\":\"Frances Hodgson Burnett\",\n \"language\":\"English\",\n \"title\":\"Secret Garden\",\n },\n {\n \"id\":8,\n \"author\":\"Dan Brown\",\n \"language\":\"English\",\n \"title\":\"Angels And Demons\",\n },\n]\n\n@app.route('/books',methods=['GET','POST'])\ndef books():\n if request.method=='GET':\n if len(books_list)>0:\n return jsonify(books_list)\n else:\n 'Nothing found',404\n\n if request.method=='POST':\n new_author= request.form['author']\n new_lang= request.form['language']\n new_title= request.form['title']\n iD=books_list[-1]['id']+1\n\n\n new_obj={\n 'id':iD,\n 'author':new_author,\n 'language':new_lang,\n 'title':new_title\n }\n books_list.append(new_obj)\n return jsonify(books_list),201\n\n\n\n@app.route('/book/',methods=['GET','PUT','DELETE'])\ndef single_book(id):\n if request.method=='GET':\n for i in books_list:\n if i['id']==id:\n return jsonify(i)\n pass\n \n if request.method=='PUT':\n for i in books_list:\n if i['id']==id:\n i['author']= request.form['author']\n i['language']= request.form['language']\n i['title']= request.form['title']\n \n \n return jsonify(books_list)\n\n\n if request.method=='DELETE':\n for index,i in enumerate (books_list):\n if i['id']==id:\n books_list.pop(index)\n \n return jsonify(books_list)\n\n\n\nif __name__=='__main__':\n app.run()\n\n\n \n\n\n\n\n","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"5071910","text":"#LANGLADE Maxime\n#26/02/20\nimport os\nfrom math import floor\nfrom os.path import join\nimport argparse\nimport subprocess\nimport cv2\nimport random\nimport glob\n\nfrom tqdm import tqdm\nfrom face_extraction import face_extraction\n\n#global variables\nframe_per_video = 10\nmask_w = 200\nmask_h = 200\ncount_couple = 0\ncount_alone = 0\n\ndef extract_frames_test(data_path, output_path, x_center, y_center):\n\n\t#print(data_path, \" \", x_center, \" \" 
,y_center)\n\n\tos.makedirs(output_path, exist_ok=True)\n\n\treader = cv2.VideoCapture(data_path)\n\tframe_num = 0\n\twhile (reader.isOpened() | frame_num < frame_per_video):\n\t\tsuccess, image = reader.read()\n\t\tif not success:\n\t\t\tprint(\"frame error for: \", data_path, ' frame n° ', frame_num)\n\t\t\tbreak\n\n\t\tx_start = x_center - (1/2 * mask_w)\n\t\ty_start = y_center - (1/2 * mask_h)\n\t\ty_end = int(y_center + mask_h)\n\t\tx_end = int(x_center + mask_w)\n\t\tpatch = image[int(y_start):y_end, int(x_start):x_end]\n\t\tcv2.imwrite(join(output_path, '{:04d}.tiff'.format(frame_num)), patch)\n\t\tframe_num += 1\n\treader.release()\t\t\n\ndef count_type(video):\n\tglobal count_alone\n\tglobal count_couple\n\tvideo_name = video.split(\".\")[0]\n\tprint(video_name)\n\tassociate_real = video_name.split(\"_\")[0] + \"_\" + video_name.split(\"_\")[-1]\n\tprint(associate_real)\n\tpath_training = join(\"/Volumes/VERBATIM HD/Stage_Maxime/Celeb-DF-v2/Celeb-real/images/training\", associate_real)\n\tprint(path_training)\n\t#training = glob.glob(join(\"/Volumes/VERBATIM\\ HD/Stage_Maxime/Celeb-DF-v2/Celeb-real/images/training\", associate_real))\n\ttraining = os.path.isdir(path_training)\n\tprint(training)\n\tif training:\n\t\tif count_couple >= 50:\n\t\t\treturn False\n\t\tprint(\"couple\")\n\t\tcount_couple += 1\n\telse:\n\t\tif count_alone >= 50:\n\t\t\treturn False\n\t\tprint(\"alone\")\n\t\tcount_alone += 1\n\n\treturn True\n\ndef get_video(videos_path, id):\n\toff_set = 0\n\tsuccess = False\n\twhile not success:\n\t\t#print(\"video id: \" + video)\n\n\t\tid_r = (id + off_set) % len(os.listdir(videos_path))\n\t\tvideo = os.listdir(videos_path)[id_r] #todo check unicity in video_ids array\n\n\t\toff_set += 1\n\n\t\tif video[0] == '.':\n\t\t\tcontinue\n\n\t\treader = cv2.VideoCapture(join(videos_path, video))\n\t\tsuccess, image = reader.read()\n\t\treader.release()\n\n\t\tif not success:\n\t\t\tprint(\"couldn't readvideo n° \" + video)\n\t\t\tcontinue\n\n\t\tx, y = face_extraction(join(videos_path, video)) \n\n\t\tif x < mask_w or y < mask_h:\n\t\t\t#print(\"mask fail \" + video)\n\t\t\tsuccess = False\n\t\t\tcontinue\n\t\t\n\t\tif not count_type(video):\n\t\t\tsuccess = False\n\t\t\tcontinue\n\n\tprint(\"video found\")\n\treturn video, x, y\n\n\ndef extract_videos(data_path, nb_video):\n\n\tvideos_path = join(data_path, 'videos')\n\timages_path = join(data_path, 'images')\n\n\tsub_path = join(images_path, 'test_base')\n\tos.makedirs(sub_path, exist_ok=True)\n\t\n\t'''\n\tgeneric_video_name = \"id*.mp4\"\n\tvideo_in_folder = glob.glob(join(video_path, generic_video_name))\n\n\tnb_video_in_folder = len(video_in_folder)\n\tnb_video = int(nb_video)\n\n\tif nb_video > nb_video_in_folder:\n\t\tprint(\"Error: nb_video > nb_video_in_folder\")\n\t\tnb_video = nb_video_in_folder\n\n\tvideo_ids = random.sample(range(1, nb_video_in_folder), nb_video)\n'''\n\tnb_video_in_folder = len(os.listdir(videos_path))\n\tnb_video = int(nb_video)\n\n\tif nb_video > nb_video_in_folder:\n\t\tprint(\"Error: nb_video > nb_video_in_folder\")\n\t\tnb_video = nb_video_in_folder\n\n\t#video_ids = random.sample(range(1, nb_video_in_folder), nb_video)\n\tvideo_ids = random.sample(range(1, nb_video_in_folder), nb_video)\n\tcurrent_id = 0\n\twhile count_alone < 50 or count_couple < 50:\n\t\tprint(count_alone, \"-\", count_couple)\n\t\tvideo, x, y = get_video(videos_path, video_ids[current_id])\n\t\timage_folder = video.split('.')[0]\n\t\textract_frames_test(join(videos_path, video), join(sub_path, image_folder), x, 
y)\n\t\tcurrent_id += 1\n\n\tprint(\"end\")\n\n\n\nif __name__ == '__main__':\n\tp = argparse.ArgumentParser(\n\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter\n\t)\n\tp.add_argument('--data_path','-p' , type=str)\n\tp.add_argument('--nb_video', '-n', type=str, default='100')\n\targs = p.parse_args()\n\n\textract_videos(**vars(args))\n\n\n","sub_path":"Steganalyse/steg1/extract_patchs_test.py","file_name":"extract_patchs_test.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"234014542","text":"import numpy as np\nimport matplotlib.image as mpimg\nimport os\nfrom imutils import paths\nfrom skimage.color import rgb2gray\nimport skimage.feature as ft\nimport csv\nimport pandas as pd\n\n\n\nclass NearestNeighbor:\n def __init__(self):\n pass\n\n def train(self, X, y):\n self.Xtr = X\n self.ytr = y\n\n def predict(self, X):\n \"\"\" X é NxD onde cada linha é um exemplo que queremos predizer o label\"\"\"\n num_test = X.shape[0]\n\n print(\"%d amostras\"%(X.shape[0]) )\n\n #criando o tipo\n #Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\n Ypred = np.zeros(num_test, dtype = np.int)\n\n ind = []\n\n for i in range(num_test):\n #print(\"xtr %d x %d \" %(len(self.Xtr), len(X) ) )\n distances = np.sum(np.abs(self.Xtr - X[i, :]), axis = 1)\n min_index = np.argmin(distances) #pegar o menor\n Ypred[i] = self.ytr[min_index] #predizer o label do exemplo mais próximo\n\n ind.append(i+1)\n\n ret = {\n \"id\": ind,\n \"label\": Ypred\n }\n\n #print(ret)\n\n return ret\n\n\ndef main():\n\n trainImgs = []\n testImgs = []\n\n trainLabels = []\n testLabels = []\n\n with open( '../gencsv/lbp_train.csv', mode='r' ) as file:\n reader = csv.reader(file, delimiter=',')\n\n for row in reader:\n trainImgs.append( row[0:22] )\n trainLabels.append( row[-1] )\n\n with open( '../gencsv/lbp_test.csv', mode='r' ) as file:\n reader = csv.reader(file, delimiter=',')\n\n for row in reader:\n testImgs.append( row[0:22] )\n testLabels.append( row[-1] )\n\n\n cl = NearestNeighbor()\n\n #test = np.array(trainImgs, dtype=np.float32)\n\n cl.train( np.array(trainImgs, dtype=np.float32), np.array(trainLabels, dtype=np.float32) )\n\n y = cl.predict( np.array(testImgs, dtype=np.float32) )\n\n acc = 0\n\n print(len(y))\n\n df = pd.DataFrame(y)\n df.to_csv(\"res.csv\", index = None, header=True)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"knn/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"448165937","text":"#!/usr/bin/env python\n# coding:utf8\n\"\"\"\n@Time : 2019/11/15\n@Author : fls\n@Contact : fls@darkripples.com\n@Desc : darkripples-中间件\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2019/11/15 14:24 fls 1.0 create\n\"\"\"\n\nimport traceback\nfrom jwt.exceptions import PyJWTError\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.http import JsonResponse\nfrom django.apps import apps\n\nfrom ez_utils import fls_log, match_url\nfrom ez_utils.models import ResModel\n\nflog = fls_log(handler_name=\"\")\n\n\ndef check_token(req_token):\n \"\"\"\n 校验token\n :param req_token:\n :return:\n \"\"\"\n if not req_token:\n ret = ResModel()\n ret.code = ret.ResCode.need_login\n ret.msg = \"\"\n return JsonResponse(ret.to_dic())\n from rest_framework_jwt.settings import api_settings\n jwt_decode_handler = 
api_settings.JWT_DECODE_HANDLER\n try:\n # {'user_id': 'a', 'username': 'fls',}\n jwt_decode_handler(req_token)\n except:\n ret = ResModel()\n ret.code = ret.ResCode.need_login\n ret.msg = \"无效的token\"\n return JsonResponse(ret.to_dic())\n return None\n\n\nclass DRMiddleware(MiddlewareMixin):\n \"\"\"\n 中间件类\n \"\"\"\n\n def process_request(self, request):\n \"\"\"\n 请求前调用,不可return\n :param request:\n \"\"\"\n print('>>>>process_request>>>>>>')\n # request.META['DR_PAR1'] = 9\n # 请求路径.如/app_dr/index/\n fpath = request.get_full_path()\n need_check_token = False\n for c in apps.get_app_configs():\n if not c.name.startswith('app_'):\n continue\n if 'check_token_url_list' not in dir(c):\n continue\n for check_u in c.check_token_url_list:\n # todo 匹配方式需再详尽测试\n if match_url(fpath, check_u):\n need_check_token = True\n break\n\n if need_check_token:\n # 校验token\n req_token = request.META.get('HTTP_TOKEN')\n ret_check = check_token(req_token)\n if ret_check is not None:\n return ret_check\n\n def process_response(self, request, response):\n \"\"\"\n view处理后调用,必须return\n 若有process_exception,则走完异常处理后再来到此处\n :param request:\n :param response:\n :return:\n \"\"\"\n return response\n\n def process_exception(self, request, exception):\n \"\"\"\n 视图函数发生异常时调用\n :param request:\n :param exception:\n :return:\n \"\"\"\n flog.log_error(traceback.format_exc())\n print('>>>>>>process_exception>>>>>>>', exception, \"|\", type(exception))\n\n # 默认返回值Response\n ret = ResModel()\n ret.code = ret.ResCode.err\n ret.msg = \"系统应用异常\"\n\n if isinstance(exception, PyJWTError):\n # jwt校验token无效,不在具体区分详细的异常类型了\n ret.code = ret.ResCode.need_login\n ret.msg = \"TOKEN无效\"\n\n return JsonResponse(ret.to_dic())\n","sub_path":"darkripples/middleware/dr_middleware.py","file_name":"dr_middleware.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"382395940","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport time\nimport types\n\nlist_df = pd.DataFrame(columns=['title','release','lyrics'])\n\nbase_url = 'https://www.uta-net.com'\nurl = 'https://www.uta-net.com/search/?Aselect=1&Keyword=UVER&Bselect=3&x=0&y=0'\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.text, 'lxml')\nlinks = soup.find_all('td', class_='side td1')\nfor link in links:\n a = base_url + (link.a.get('href'))\n response = requests.get(a)\n soup = BeautifulSoup(response.text, 'lxml')\n song_title = soup.find('h2', class_='prev_pad')\n if(song_title is None):\n song_title = soup.find('div', class_='title').text\n song_title = song_title.strip()\n else:\n song_title = song_title.string\n song_title = song_title.replace('
\u2028\u2029','')\n song_title = song_title.replace('\u2028\u2029
','')\n if 'ver.' in song_title or 'TV' in song_title or 'version' in song_title:\n pass\n else:\n song_release = soup.find('div', id='view_amazon').text[5:15]\n song_release = song_release.replace('-','')\n song_release = int(song_release)\n song_lyrics = soup.find('div', itemprop='lyrics')\n song_lyrics = song_lyrics.text\n song_lyrics = song_lyrics.replace('\\n','')\n time.sleep(0.5)\n tmp_se = pd.DataFrame([song_title, song_release, song_lyrics], index=list_df.columns).T\n\n list_df = list_df.append(tmp_se)\nprint(list_df)\n\nlist_df.to_csv('list.csv', mode = 'a', encoding='cp932')","sub_path":"scraping_uver.py","file_name":"scraping_uver.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"491258361","text":"import time\n\nfrom src import *\nfrom src.experiments import *\n\nEXPERIMENTS = {\n \"german\": lambda **kwargs: German(**kwargs),\n \"habermans-survival\": lambda **kwargs: HabermansSurvival(**kwargs),\n \"breast-cancer\": lambda **kwargs: BreastCancer(**kwargs),\n \"banknote-auth\": lambda **kwargs: BanknoteAuth(**kwargs),\n \"synthetic\": lambda **kwargs: Synthetic(**kwargs),\n \"adult\": lambda **kwargs: Adult(**kwargs)\n}\n\n# Parameters for the clustering: if use_weights is true, use_labels must be true\nuse_labels = True\nuse_weights = True\n\n# Parameters for the synthetic experiment\nbalanced_db = True\ntiny_clusters = True\n\n# General parameters\nsplit_seed = 0\nmodel_seeds = [0]\nmax_iter = 100\nn_folds = 10\nn_clusters_list = [10]\nplots_off = True\n# List of sampling strategies\nmethods = [\"random\", \"al_least_confident\", \"sq_random\"]\nthetas = [1.0, 0.1, 0.01]\nfor theta in thetas:\n methods.append(\"egl_{}\".format(theta))\n\n# List of experiments that will be performed\nexperiments = [\n \"habermans-survival\",\n \"breast-cancer\",\n \"banknote-auth\",\n \"synthetic\",\n \"german\",\n \"adult\"\n]\nscorers = [\n \"f1_weighted\",\n \"roc_auc\"\n]\n\n# Initialization\npath_to_main_file = create_folders()\nfor experiment_name in experiments:\n # List of models that will be run for each experiment\n experiment_path = create_experiment_folder(path_to_main_file, experiment_name)\n models = [\n # Synthetic\n # SVM(name='SVM (gamma=100, C=100)', gamma=100, C=100),\n # SVM(name='SVM (gamma=1000, C=10)', gamma=1000, C=10),\n\n # Banknote\n # SVM(name='SVM (gamma=1, C=10)', gamma=1, C=10),\n # NeuralNetwork()\n # GradientBoosting(),\n\n # Breast cancer\n # SVM(name=\"SVM (gamma=0.01, C=100)\", gamma=0.01, C=100),\n # SVM(name=\"SVM\"),\n # RandomForrest(),\n # NeuralNetwork(),\n\n # Haberman's\n # SVM(name='SVM (gamma=10, C=10)', gamma=10, C=10),\n # SVM(name=\"SVM (gamma=0.01, C=100)\", gamma=0.01, C=100),\n # NeuralNetwork(),\n # RandomForrest(),\n # GradientBoosting(),\n\n # German\n # GradientBoosting(),\n # NeuralNetwork(),\n # RandomForrest()\n\n # Adult\n # GradientBoosting(),\n # NeuralNetwork()\n # RandomForrest(),\n # SVM(name='SVM (gamma=1, C=10)', gamma=1, C=10)\n ]\n for model in models:\n scores_dict_f1 = {}\n scores_test_dict_f1 = {}\n scores_dict_auc = {}\n scores_test_dict_auc = {}\n model_path = create_model_folder(experiment_path, model.name)\n\n for seed in model_seeds:\n # Set the model's seed\n model.rng = np.random.RandomState(seed)\n\n file = open(model_path + '\\\\out.txt', 'w')\n\n experiment = EXPERIMENTS[experiment_name](model=model, tiny_clusters=tiny_clusters, balanced_db=balanced_db)\n\n for n_clusters in n_clusters_list:\n learning_loop = 
ActiveLearningLoop(experiment, n_clusters, max_iter, model_path, file, plots_off, thetas,\n use_weights=use_weights, use_labels=use_labels)\n\n # Split the data into labeled and unlabeled\n folds = experiment.split(prop_known=experiment.prop_known, n_splits=n_folds, split_seed=split_seed)\n for k, (known_idx, train_idx, test_idx) in enumerate(folds):\n # Remove duplicates\n known_idx, train_idx, test_idx = np.unique(known_idx), np.unique(train_idx), np.unique(test_idx)\n file.write('split seed {}, model seed {}, fold {} : #known {}, #train {}, #test {} \\n'\n .format(split_seed, seed, k + 1, len(known_idx), len(train_idx), len(test_idx)))\n _, counts_known = np.unique(experiment.y[known_idx], return_counts=True)\n _, counts_train = np.unique(experiment.y[train_idx], return_counts=True)\n _, counts_test = np.unique(experiment.y[test_idx], return_counts=True)\n file.write(\"Known bincount: {}\\n\".format(counts_known))\n file.write(\"Train bincount: {}\\n\".format(counts_train))\n file.write(\"Test bincount: {}\\n\".format(counts_test))\n\n X_initial = Normalizer(experiment.normalizer).normalize(experiment.X)\n if experiment.X.shape[1] > 2:\n X_initial = get_tsne_embedding(X_initial)\n plot_points(X_initial, experiment.y, \"Initial points\", model_path)\n plot_points(X_initial[known_idx], experiment.y[known_idx], \"Known points\", model_path)\n plot_points(X_initial[test_idx], experiment.y[test_idx], \"Test points\", model_path)\n\n for method in methods:\n print(method)\n file.write(\"Method: {} \\n\".format(method))\n file.write(\"Model: {}\\n\".format(experiment.model._model))\n file.write(\"Using {} clusters, {} folds, {} model seeds, {} thetas\\n\".format(n_clusters, n_folds, model_seeds, thetas))\n file.write(\"use_weights={}, use_labels={}, n_clusters={}\\n\".format(use_weights, use_labels, n_clusters))\n start = time.time()\n scores_f1, test_scores_f1, scores_auc, test_scores_auc = learning_loop.run(method, known_idx, train_idx, test_idx)\n end = time.time()\n execution_time = end-start\n print(execution_time)\n file.write(\"Execution time: \" + str(execution_time))\n # key = str(n_clusters)\n key = method\n if key not in scores_dict_f1:\n scores_dict_f1[key] = [scores_f1]\n scores_test_dict_f1[key] = [test_scores_f1]\n scores_dict_auc[key] = [scores_auc]\n scores_test_dict_auc[key] = [test_scores_auc]\n else:\n scores_dict_f1[key].append(scores_f1)\n scores_test_dict_f1[key].append(test_scores_f1)\n scores_dict_auc[key].append(scores_auc)\n scores_test_dict_auc[key].append(test_scores_auc)\n\n plot_results(scores_dict_f1, scores_test_dict_f1, learning_loop.annotated_point, n_folds, experiment,\n split_seed, scorers[0], file, model_path, max_iter)\n plot_results(scores_dict_auc, scores_test_dict_auc, learning_loop.annotated_point, n_folds, experiment,\n split_seed, scorers[0], file, model_path, max_iter)\n","sub_path":"run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"102706274","text":"import matplotlib.pyplot as plt\n\ndef param_test_plot(experiments, config_ids, swept_variable, y_variable, *args):\n \"\"\"\n experiments is the simulation result dataframe.\n config_ids is the list configs executed upon in the simulation.\n swept_variable is the key (string) in config_ids that was being tested against.\n y_variable is the state_variable (string) to be plotted against default timestep.\n *args for plotting more state_variables (string).\n 
\"\"\"\n experiments = experiments.sort_values(by =['subset']).reset_index(drop=True)\n cols = 1\n rows = 1\n cc_idx = 0\n while cc_idx] [dmslot=<0-12>] [temp_id=<1-5,10>] [temp_status=<101-103>] [temp_value=<90-120>].\\n\"\r\n \"\\t- for box, temp_id: 1 = AIR_INTAKE, 2 = LM75_2, 3 = LM75_3, 4 = LM75_4 5 = TMP411_CPU 10 = MAC_TEMP_BEG.\\n\"\r\n \"\\t- temp_status: 101 = IDM_EVNT_TEMP_OK, 102 = IDM_EVNT_TEMP_FAIL, 103 = IDM_EVNT_NOT_EXIST.\\n\")\r\n \r\n dmslot = 1\r\n temp_id = 1\r\n temp_status = 102\r\n temp_value = 100\r\n \r\n info = con.run_idm_cmd(\" cmdidm fan_speed_cardtmp dev_id=1 dmslot=%d temp_id=%d temp_status=%d temp_value=%d\"\r\n % (dmslot, temp_id, temp_status, temp_value))\r\n if info.find(' fan speed by cardtemp fail') != -1:\r\n log_info(\"fail cmdidm fan_speed_cardtmp dev_id=1 dmslot=%d temp_id=%d temp_status=%d temp_value=%d\"\r\n % (dmslot, temp_id, temp_status, temp_value))\r\n ret = FAIL\r\n \r\n con.quit_idm_mode()\r\n return ret\r\n \r\ndef cmdidm_fan_speed_cardtmp(cb_arg):\r\n if len(cb_arg.dev_names) == 0:\r\n log_info(\"Failed: Need one switch to be test.\")\r\n return FAIL\r\n\r\n dev_name = cb_arg.dev_names[0]\r\n con = Console(dev_name)\r\n con.wake_up()\r\n result = FAIL\r\n try:\r\n result = _cmdidm_fan_speed_cardtmp(cb_arg)\r\n finally:\r\n con.exit()\r\n return result","sub_path":"cases_set/monitor/cmdidm_fan_speed_cardtmp.py","file_name":"cmdidm_fan_speed_cardtmp.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"482840808","text":"import socket \n\n# Create a socket object\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\n# Get local machine name\nhost = socket.gethostbyname('localhost')\n\ntry:\n port = 9993 \nexcept OSError:\n print(\"Address Error\") \nelse:\n # Bind to the port\n serversocket.bind((host, port)) \n\n # Listen\n serversocket.listen(5) \n\n # Connect\n while True:\n clientsocket,addr = serversocket.accept() \n\n print(\"Got a connection from %s\" % str(addr))\n \n msg = 'Thank you for connecting'+ \"\\n\"\n clientsocket.send(msg.encode('utf-8'))\n clientsocket.close()\n \n text = clientsocket.recv(1024)\n print (text.decode('utf-8'))\n \nserversocket.close()","sub_path":"Final_Projects/Socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420558795","text":"from BacktestBase import *\nimport datetime\nfrom utility_functions import *\n\nclass BacktestSMA(BacktestBase):\n\n def go_long(self, date, units=None, amount=None):\n if self.position == -1:\n self.place_buy_order(date, units=self.units)\n if units:\n self.place_buy_order(date, units=units)\n self.data.loc[date,'units'] = units\n elif amount:\n if amount == 'all':\n amount = self.amount\n self.place_buy_order(date, amount=amount)\n \n def go_short(self, date, units=None, amount=None):\n if self.position == 1:\n self.place_sell_order(date, units=self.units)\n if units:\n self.place_sell_order(date, units=units)\n self.data.loc[date,'units'] = units\n elif amount:\n if amount == 'all':\n amount = self.amount\n self.place_sell_order(date, amount=amount)\n\n def run_sma_strategy(self, SMA1, SMA2):\n msg = '\\n\\nRunning SMA strategy | SMA1 = %d & SMA2 = %d' % (SMA1, SMA2)\n msg += '\\nFixed costs %.2f | ' % self.ftc\n msg += 'proportional costs %.4f' % self.ptc\n print(msg)\n print('=' * 55)\n self.position = 0 # initial neutral 
position\n self.amount = self._amount # reset initial capital\n self.data['SMA1'] = self.data['ask_c'].rolling(SMA1).mean()\n self.data['SMA2'] = self.data['ask_c'].rolling(SMA2).mean()\n self.data = self.data.dropna()\n \n for date, row in self.data.iterrows():\n if self.position in [0, -1]:\n if self.data.loc[date,'SMA1'] > self.data.loc[date,'SMA2']:\n self.go_long(date, units=self.lot_size)\n self.position = 1 # long position\n elif self.position in [0, 1]:\n if self.data.loc[date,'SMA1'] < self.data.loc[date,'SMA2']:\n self.go_short(date, units=self.lot_size)\n self.position = -1 # short position\n\n if self.position == 1:\n self.data.loc[date,'position_long'] = 1\n elif self.position == -1:\n self.data.loc[date,'position_short'] = 1\n \n# self.value = \n\n self.close_out(date)\n\nif __name__ == '__main__':\n\n symbol = 'USD_TRY'\n account_type = 'practice'\n granularity = 'S5'\n decision_frequency = '1H'\n start_datetime = datetime.datetime(2016,1,1,0,0,0)\n end_datetime = datetime.datetime(2016,8,1,0,0,0)\n lot_size = 10000\n # A standard lot = 100,000 units of base currency. \n # A mini lot = 10,000 units of base currency.\n # A micro lot = 1,000 units of base currency.\n \n bb = BacktestSMA(symbol, account_type, granularity, decision_frequency, start_datetime, end_datetime, lot_size, 10000)\n bb.verbose = True\n bb.run_sma_strategy(34, 68)\n bb.calculate_stats()\n\n write2excel( bb.data, 'bb' )\n","sub_path":"_junk/BacktestStartegySMA.py","file_name":"BacktestStartegySMA.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"298690582","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom subprocess import Popen,PIPE\nfrom getopt import getopt\nfrom sys import path as python_path, argv, exit, stderr as STDERR\nfrom os.path import join as UNIX_join,dirname\nfrom os import environ\nimport pprint\n\npython_path.insert(0, UNIX_join(environ['HOME'],'compute'))\nimport common\n\nnull = open('/dev/null','w')\nSBATCH_OPTIONS=\"--export ALL -o /tmp/slurm-%j.out\"\nSCRIPT=\"\"\nVERBOSE=0\n\nopts, args = getopt (argv[1:],'v', [ 'input=', 'script=', 'verbose='])\nparams = common.jobParams()\nSCRIPT = params.get( \"thumbnail_convert_script\", \"\")\n\nInput_image_logical_path = \"\"\n\nfor k,v in opts:\n if k == '--input': Input_image_logical_path = v\n elif k == '--script': SCRIPT=v\n elif k == '-v': VERBOSE=1\n elif k == '--verbose': VERBOSE=int('0'+v,10)\n\nif not Input_image_logical_path.startswith (\"/\"):\n print ( \"--input value (logical path) needs absolute path\", file=STDERR)\n exit(121)\n\nscratch_resc = common.rescName_from_role_KVP( params[\"imageCompute_resc\"] )\n\n(Path,Basename) = Input_image_logical_path.rsplit(\"/\",1)\n(Filename,Extension) = Basename.rsplit(\".\",1)\n\nrelpath_for_output = params[\"relative_path_for_output\"]\n\nif not SCRIPT:\n print(\"Need a script to run\",file=STDERR)\n exit(122)\nelse:\n if VERBOSE:\n print('SCRIPT is {!r}'.format(SCRIPT),file=STDERR)\n\nenv_constants = { 'COMPUTE_JOB_TYPE':'generate_thumbnails',\n #--------------------------------------\n 'PARAM_data_object_logical_path': Input_image_logical_path, # for debug -- DWM\n 'PARAM_input_image_collection': Path,\n 'PARAM_input_image_name': Filename,\n 'PARAM_input_image_extension': Extension,\n 'PARAM_logical_output_path': Path + '/' + relpath_for_output,\n }\n\n# --- execute one `sbatch` command per job ---\n\nfor size_string in params[\"thumbnail_size_list\"]:\n\n env_vars = 
dict( environ )\n env_vars.update( env_constants )\n\n Use_Key = common.add_usage( Input_image_logical_path, scratch_resc )\n common.replicate_to_resource ( Input_image_logical_path, scratch_resc)\n\n physical_input_objpath = common.get_data_object_physical_path (Path, Basename, scratch_resc)\n physical_input_dirpath = dirname(physical_input_objpath)\n\n env_vars['PARAM_thumbnail_size'] = size_string\n env_vars['PARAM_scratch_resource_name'] = scratch_resc\n env_vars['PARAM_resource_use_key'] = Use_Key\n env_vars['PARAM_physical_output_path'] = UNIX_join( physical_input_dirpath, relpath_for_output )\n\n convert_args = [ '-thumbnail', size_string, physical_input_objpath,\n # the convert script will calculate the last argument, phys_output_path\n ]\n\n if VERBOSE:\n print ('--- dwm ---',file=STDERR)\n print('convert_args =',file=STDERR); pprint.pprint(convert_args, stream=STDERR)\n print('env_vars =',file=STDERR); pprint.pprint(env_vars, stream=STDERR)\n\n p = Popen( ['/usr/local/bin/sbatch'] + SBATCH_OPTIONS.split() + [SCRIPT] + convert_args, \n stdout = PIPE,\n stderr = null, env = env_vars )\n\n stdout_text = p.stdout.read()\n if VERBOSE:\n print( 'slurm job output -> ',stdout_text, file=STDERR )\n if VERBOSE > 1: break # -- DWM\n\n if p.wait() != 0:\n print(argv[0] + \" : Error submitting slurm batch job(s)\", file=STDERR)\n\n","sub_path":"submit_thumbnail_jobs.py","file_name":"submit_thumbnail_jobs.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"583834957","text":"#!/usr/bin/python\n\n# Import Libraries\nimport matplotlib.pyplot as plt\nimport sys\nimport Adafruit_DHT as dht\nimport time as tm\nimport datetime as dt\nimport numpy as np\nimport array as arr\nfrom oledDevice import get_device\nfrom luma.core.render import canvas\n\n# FUNCTIONS ####################################################################\n\n# displays the current time, and pasues execution untill interval reached\ndef dispUpdate(timeData, tempFmt, humidFmt):\n\n # reading interval length (min)\n intLen = 5\n\n # last time sec\n lastTimeSec = 0\n\n # gets the current minute reading\n timeStamp = dt.datetime.now()\n timeStampMin = int(timeStamp.strftime(\"%M\"))\n\n # calculates the next minute reading\n nxtTimeStamp = timeStampMin + intLen\n if nxtTimeStamp > 59:\n nxtTimeStamp = nxtTimeStamp - 60 \n\n # last time / temp / humid readings\n lstRdTime = timeData.strftime(\"%H:%M\")\n lstRdTemp = str(np.around(tempFmt, decimals=1)) \n lstRdHumid = str(np.around(humidFmt, decimals=1)) \n\n # while loop that prints the current time, exits\n # when the interval is up\n while timeStampMin != nxtTimeStamp: \n\n # keeps getting the current minute reading\n timeStamp = dt.datetime.now()\n timeStampMin = int(timeStamp.strftime(\"%M\"))\n\n # formats time and date for the display\n dispTimeFmt = timeStamp.strftime(\"%H:%M:%S\")\n dispDateFmt = timeStamp.strftime(\"%d %b %y\")\n\n timeStampSec = int(timeStamp.strftime(\"%S\"))\n\n # only updates oled when delta time = 1s\n if timeStampSec != lastTimeSec:\n lastTimeSec = timeStampSec \n \n with canvas(device) as draw:\n draw.text((1, 0), \"Date: \" + dispDateFmt, fill=\"yellow\")\n draw.text((1, 9), \"Time: \" + dispTimeFmt, fill=\"yellow\") \n draw.text((32, 24), \"Last Reading\", fill=\"yellow\")\n draw.text((1, 35), \"Time: \" + lstRdTime, fill=\"yellow\")\n draw.text((1, 44), \"Temp: \" + lstRdTemp + \" degC\", fill=\"yellow\")\n draw.text((1, 53), \"Humid: \" + lstRdHumid + 
\" %\", fill=\"yellow\") \n\n\n\n# read temp and humidity from sensor\ndef readData():\n # Try to grab a sensor reading. Use the read_retry method which will retry up\n # to 15 times to get a sensor reading (waiting 2 seconds between each retry).\n humidData, tempData = dht.read_retry(22, 4)\n\n if humidData is not None and humidData > 10 and humidData < 100 and tempData is not None and tempData > 5 and tempData < 60: \t\n timeData = dt.datetime.now()\n return (timeData, tempData, humidData)\n else:\n print('Failed to get reading, or over limits. Trying again!')\n tm.sleep(2)\n return readData()\n\n\n# plot data\ndef plotData(fig, axes, timeArr, tempArr, humidArr): \n\n # clears the data on each axis\n axes[0].clear()\n axes[1].clear()\n \n # plots data to each axes\n axes[0].plot(timeArr, tempArr)\n axes[1].plot(timeArr, humidArr)\n\n # temp plot labels\n axes[0].set_title('Temperature')\n axes[0].set_ylabel('Temp [degC]')\n\n # humid plot labels\n axes[1].set_title('Humidity')\n axes[1].set_xlabel('Date-Time')\n axes[1].set_ylabel('RH [%]')\n\n # grid\n axes[0].grid()\n axes[1].grid()\n\n # update the plot and pause for x seconds\n plt.pause(0.01)\n\n\n# convert data to a better format and then print to display\ndef fmtData(timeVal, tempVal, humidVal):\n \n timeFmt = timeVal.strftime(\"%d %m %y %H:%M\") \n tempFmt = np.around(tempVal, decimals=1)\n humidFmt = np.around(humidVal, decimals=1) \n\n # formats time for command line output\n timeFmtTmp = timeVal.strftime(\"%H:%M\")\n\n print(\"Time:\",timeFmtTmp,\" Temp:\",tempFmt,\" Humid:\",humidFmt)\n\n return timeFmt, tempFmt, humidFmt\n\n \n\n# MAIN #############################################################################\n\n# creates the figure window and axes for the plots\nplt.ion()\nfig, axes = plt.subplots(nrows=2, ncols=1)\n\n# global variables\ntimeArr = []\ntempArr = []\nhumidArr = []\n\n# max data elements to store\n# 4032 = 2 weeks worth of data at 5 min intervals\ndataLenMax = 4032\n\n# get the oled device info??\ndevice = get_device()\n\n# inf loop\nwhile True: \n\n # read the temp / humid sensor and record the time\n timeData, tempData, humidData = readData()\n\n # format time / temp / humid and print result to screen\n timeFmt, tempFmt, humidFmt = fmtData(timeData, tempData, humidData)\n\n # saves the data into a global array\n timeArr.append(timeFmt)\n tempArr.append(tempFmt)\n humidArr.append(humidFmt)\n\n # limits data length\n if len(tempArr) > dataLenMax:\n timeArr = timeArr[1:(dataLenMax+1)]\n tempArr = tempArr[1:(dataLenMax+1)]\n humidArr = humidArr[1:(dataLenMax+1)]\n\n # plot the data and pause\n plotData(fig, axes, timeArr, tempArr, humidArr)\n\n # displays the current time on the oled, pauses\n # execution until an interval time length has been\n # reached\n dispUpdate(timeData, tempFmt, humidFmt)\n\n\n \n","sub_path":"code/thReadPlot.py","file_name":"thReadPlot.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"654023117","text":"#########################################################################\n# (C) 2017 Department of Petroleum Engineering, # \n# Univeristy of Louisiana at Lafayette, Lafayette, US. #\n# #\n# This code is released under the terms of the BSD license, and thus #\n# free for commercial and research use. Feel free to use the code into #\n# your own project with a PROPER REFERENCE. 
#\n# #\n# PYBEM2D Code #\n# Author: Bin Wang # \n# Email: binwang.0213@gmail.com # \n# Reference: Wang, B., Feng, Y., Berrone, S., et al. (2017) Iterative #\n# Coupling of Boundary Element Method with Domain Decomposition. #\n# doi: #\n#########################################################################\n\n\nimport numpy as np\n\n#######################################\n#\n# Parallel Dirichlet-Dirichlet Method\n#\n#######################################\n\ndef PDD_parallel(obj,alpha,TOL,max_iter,opt):\n \"\"\"Dirichlet-Dirichlet iterative loop\n Boundary Conditions are updated by looping through intersections\n\n Reference: Section 3.1 in the reference paper\n ------------------------\n | Current | Connected |\n | Domain | Domain |\n ------------------------\n Intersection\n \n Non-conforming mesh are supported\n Intersection may have different nodes on two domain\n \n Update flux(q) in k+1 steps:\n h_k+1=h_k+alpha*(q_left_k+q_right_k)\n h_left=h_right=h_k+1\n \n Arguments\n ---------\n Num_shared_edge -- Number of intersection in a BEMobj domain. e.g. 1 domain may have 2 intersections\n ConnectObjID -- Index of connected domain in the list of obj.BEMobjs\n IntersectID -- Index of intersection in the list of obj.Intersects\n Intersect -- Two end coords of a intersection edge\n bdID -- Boundary index of the current domain\n bdID_connect -- Boundary index of the connected domain\n CurrentNodes -- Intersection nodes in the current domain\n ConnectNodes -- Intersection nodes in the connected domain\n P(Q)_current -- Solution variables in the nodes of current domain\n P(Q)_connect -- Interpolated solution variables from connected domain\n at the nodes of current domain\n Q_new(old) -- Updated(old) Neumann BC on the intersection\n MaxIter -- Maximum iteration number\n \n Author:Bin Wang(binwang.0213@gmail.com)\n Date: July. 2017\n \"\"\" \n debug1=0\n debug2=0\n \n #for optimal relxation parameters\n NumInt=len(obj.Intersects)\n P_old_old = [[] for i in range(NumInt)] # q^k-1 for current side\n Q_cur_old = [[] for i in range(NumInt)] # h^k-1 for current side\n Q_con_old = [[] for i in range(NumInt)] # h^k-1 for connect side\n\n\n MaxIter = max_iter\n for it in range(MaxIter):\n if(debug2): print('----Loop:',it+1)\n error_final=0.0\n error=[]\n\n if(it>2 and opt==1):\n alpha_opt=PDD_OPT(obj,P_old_old,Q_cur_old,Q_con_old,alpha)\n alpha=alpha_opt\n #print(alpha_opt)\n \n #Step1. 
Prepare and update BCs for all domains\n for IntID in range(NumInt):#For each intersection\n DomainID, DomainID_connect = obj.Intersects[IntID][0], obj.Intersects[IntID][1]\n EdgeID, EdgeID_connect = obj.Intersects[IntID][2], obj.Intersects[IntID][3]\n BDType=obj.BEMobjs[DomainID].Mesh.getBDType(EdgeID)\n\n if(debug1): \n print('Intersection',IntID,'Domain(%s->%s)'%(DomainID,DomainID_connect),'BD id(%s->%s)'%(EdgeID,EdgeID_connect))\n\n #Init iteration\n if(it==0):\n EdgeDof = obj.BEMobjs[DomainID].Mesh.getBDDof(EdgeID)\n P_old=np.zeros(EdgeDof)\n P_new = P_old\n P_new_connect=P_old\n Q_current=P_old\n Q_connect=P_old\n\n #Normal iterations\n elif(it>0):\n PQ = obj.BEMobjs[DomainID].PostProcess.get_BDSolution(EdgeID)\n PQ_connect = obj.BEMobjs[DomainID_connect].PostProcess.get_BDSolution(EdgeID_connect)\n h_current=obj.BEMobjs[DomainID].h\n h_connect=obj.BEMobjs[DomainID_connect].h\n\n #print('Orig',PQ_connect[0],PQ_connect[1])\n #print('New',np.flip(PQ_connect[0]),np.flip(PQ_connect[1]))\n #PQ_connect=[np.flip(PQ_connect[0]),np.flip(PQ_connect[1])]\n\n P_old=PQ[0]\n Q_current = PQ[1] \n if(BDType=='Edge'): Q_connect=np.flip(PQ_connect[1]) #the dof on the other side is reversed\n else: Q_connect = PQ_connect[1]\n if(debug2): print('Q_Current',Q_current,'Q_Connect',Q_connect)\n \n #* Consider thickness variation\n Q_current*=h_current \n Q_connect*=h_connect\n\n #print(it+1,'alpha',alpha)\n #* Key iteration equation\n P_new=P_old-alpha*(Q_current+Q_connect)\n if(debug2): print('p_new',P_new,'p_old',P_old)\n\n if(BDType=='Edge'): P_new_connect=np.flip(P_new)\n else: P_new_connect=P_new\n if(debug2): print('p_new_connect',P_new_connect,'p_old',P_old)\n\n if max(abs(P_new)) > 0:\n error.append(max(abs(P_new - P_old)) / max(abs(P_new)))\n else:\n error.append(1)\n #print(abs(Q_new-Q_old),abs(Q_new))\n \n #Update new Dirichlet into system\n bc_Dirichlet = [(EdgeID, P_new)]\n obj.BEMobjs[DomainID].set_BoundaryCondition(DirichletBC=bc_Dirichlet,update=1,mode=1,debug=0)\n bc_Dirichlet = [(EdgeID_connect, P_new_connect)]\n obj.BEMobjs[DomainID_connect].set_BoundaryCondition(DirichletBC=bc_Dirichlet,update=1,mode=1,debug=0)\n \n #Save last time iteration info\n P_old_old[IntID] = P_old #q_k-1 for current side\n Q_cur_old[IntID] = Q_current # h_k-1 for current side\n Q_con_old[IntID] = Q_connect # h_k-1 for current side\n \n \n #Collect error for plot convergence\n if(it>0):\n error_final=max(error)\n if(it%(MaxIter/50)==0):\n print('%s\\t%s\\t\\talpha:\\t%s'%(it,error_final,alpha))\n obj.error_abs.append(error_final)\n \n #Step2. 
Update the solution for all fractures\n if(it==0):\n obj.initLUSolver()\n for i,domain in enumerate(obj.BEMobjs):\n obj.bs[i]=domain.getRHS(obj.Bs[i])\n obj.LUSolve()\n \n if(it>5 and error_final1.0):\n # alpha_opt=1.0\n #alpha_opt=5.0\n #print('!!!',-nom,denom,alpha_opt)\n return alpha_opt\n","sub_path":"PyDFN3D/Flow_Solver/PyBEM2D/PyBEM2D/Domain_Decomposition/Schemes/P_DD_parallel.py","file_name":"P_DD_parallel.py","file_ext":"py","file_size_in_byte":10131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"323149966","text":"#!/usr/bin/python3\n# This monstrosity was created by Bryan Fisher (brf2010@med.cornell.edu)\n\nimport csv\n\nfields=['cwid',\n\t\t'name', \n\t\t'email',\n\t\t'institution',\n\t\t'department',\n\t\t'addresses_visible_to_department',\n\t\t'addresses_visible_to_institution',\n\t\t'addresses_visible_to_internet',\n\t\t'phones_visible_to_department',\n\t\t'phones_visible_to_institution',\n\t\t'phones_visible_to_internet',\n\t\t'AD_location',\n\t\t'last_updated']\n\n\n\ndef extract_data(in_csv):\n\t# merged data is implimented as a dictionary. keys are cwids\n\tmassaged_data = {}\n\tfor row in in_csv:\n\t\ttry:\n\t\t\t# extract important fields\n\t\t\tcwid = row[0]\n\t\t\tkey = row[1]\n\t\t\tvalue = row[2]\n\t\t\tlast_update = row[6]\n\n\t\t\t# ensure we have a key for this cwid\n\t\t\tif not cwid in massaged_data:\n\t\t\t\tmassaged_data[cwid] = {'LAST_UPDATE':last_update}\n\t\t\t\t\n\t\t\t# merge the data into the dictionary\n\t\t\tif not key in massaged_data[cwid]:\n\t\t\t\tmassaged_data[cwid][key] = [value]\n\t\t\telse:\n\t\t\t\tmassaged_data[cwid][key].append(value)\n\t\texcept:\n\t\t\tcontinue\n\t\t\t#print(row)\n\t\n\treturn(massaged_data)\n\t\ndef format_data(data_extract):\n\tfor key in data_extract:\n\t\titem = {}\n\t\t# extract the cwid and then make the \"key\" the actual dictionary contained in the value field for this cwid\n\t\t#print(key)\n\t\titem['cwid'] = key\n\t\tkey = data_extract[key]\n\n\t\t# we cannot guarantee that each item actually exists. so everything has a fallback of '' if it doesn't exist.\n\t\ttry:\n\t\t\titem['email'] = key['EMAIL'][0]\n\t\texcept:\n\t\t\titem['email'] = ''\n\n\t\ttry:\n\t\t\titem['department'] = key['DEPARTMENT'][0]\n\t\texcept:\n\t\t\titem['department'] = ''\n\n\t\ttry:\n\t\t\titem['name'] = \"%s %s %s\" %(key['FIRST_NAME'][0], key['OTHER_NAME'][0], key['LAST_NAME'][0])\n\t\texcept:\t\n\t\t\ttry:\n\t\t\t\titem['name'] = \"%s %s\" %(key['FIRST_NAME'][0], key['LAST_NAME'][0])\n\t\t\texcept:\n\t\t\t\titem['name'] = ''\n\n\t\ttry:\n\t\t\titem['AD_location'] = key['AD_STREET_ADDRESS'][0]\n\t\texcept:\n\t\t\titem['AD_location'] = '' \n\t\t\t\n\t\ttry:\n\t\t\titem['last_updated'] = key['LAST_UPDATE']\n\t\texcept:\n\t\t\titem['last_updated'] = ''\n\n\t\ttry:\n\t\t\titem['institution'] = key['INSTITUTION'][0]\n\t\texcept:\n\t\t\titem['institution'] = ''\n\n\n\t\t# phones and addresses are tab-delimited. 
they are the same as the above where they may or may not exist.\n\n\t\tphones_visible_to_institution = []\n\t\ttry:\n\t\t\tfor phone in key['INSTITUTION_PHONES']:\n\t\t\t\tfields = phone.split('\\t')\n\t\t\t\tphones_visible_to_institution.append(\"%s = %s\" %(fields[1], fields[2]))\n\t\t\titem['phones_visible_to_institution'] = ' : '.join(phones_visible_to_institution)\n\t\texcept:\n\t\t\titem['phones_visible_to_institution'] = ''\n\t\t\t\n\t\tphones_visible_to_department = []\n\t\ttry:\n\t\t\tfor phone in key['DEPARTMENT_PHONES']:\n\t\t\t\tfields = phone.split('\\t')\n\t\t\t\tphones_visible_to_department.append(\"%s = %s\" %(fields[1], fields[2]))\n\t\t\titem['phones_visible_to_department'] = ' : '.join(phones_visible_to_department)\n\t\texcept:\n\t\t\titem['phones_visible_to_department'] = ''\n\n\t\tphones_visible_to_internet = []\n\t\ttry:\n\t\t\tfor phone in key['INTERNET_PHONES']:\n\t\t\t\tfields = phone.split('\\t')\n\t\t\t\tphones_visible_to_internet.append(\"%s = %s\" %(fields[1], fields[2]))\n\t\t\titem['phones_visible_to_internet'] = ' : '.join(phones_visible_to_internet)\n\t\texcept:\n\t\t\titem['phones_visible_to_internet'] = ''\n\n\t\taddresses_visible_to_institution = []\n\t\ttry:\n\t\t\tfor address in key['ADDRESS_INSTITUTION']:\n\t\t\t\tfields = address.split('\\t')\n\t\t\t\taddresses_visible_to_institution.append(\"%s = Building-%s Room-%s\" %(fields[1], fields[2], fields[3]))\n\t\t\titem['addresses_visible_to_institution'] = ' : '.join(addresses_visible_to_institution) \n\t\texcept:\n\t\t\titem['addresses_visible_to_institution'] = ''\n\n\t\taddresses_visible_to_department = []\n\t\ttry:\n\t\t\tfor address in key['ADDRESS_DEPARTMENT']:\n\t\t\t\tfields = address.split('\\t')\n\t\t\t\taddresses_visible_to_department.append(\"%s = Building-%s Room-%s\" %(fields[1], fields[2], fields[3]))\n\t\t\titem['addresses_visible_to_department'] = ' : '.join(addresses_visible_to_department) \n\t\texcept:\n\t\t\titem['addresses_visible_to_department'] = ''\n\n\t\taddresses_visible_to_internet = []\n\t\ttry:\n\t\t\tfor address in key['ADDRESS_INTERNET']:\n\t\t\t\tfields = address.split('\\t')\n\t\t\t\taddresses_visible_to_internet.append(\"%s = Building-%s Room-%s\" %(fields[1], fields[2], fields[3]))\n\t\t\titem['addresses_visible_to_internet'] = ' : '.join(addresses_visible_to_internet) \n\t\texcept:\n\t\t\titem['addresses_visible_to_internet'] = ''\n\n\t\t#print(item)\n\t\t#input()\n\t\tyield(item)\n\t\t\n\t\t\n\n\t\t\n\nif __name__ == \"__main__\":\n\timport argparse, sys\n\t# configure arguments\n\tparser = argparse.ArgumentParser(description='Massage Hitachi DB dumps')\n\tparser.add_argument('-i', '--infile', help='input file', required=True)\n\tparser.add_argument('-o', '--outfile', help='output file')\n\targs = parser.parse_args()\n\n\t#open database file\n\tinfile = open(args.infile, 'r')\n\tdb = csv.reader(infile)\n\n\n\t# grab the header line from the CSV file\n\theader = next(db)\n\n\t# extract data from the csv\n\tdata_extract = extract_data(db)\n\n\t# format the data\n\twith open(args.outfile, 'w') as outfile:\n\t\tcsvwriter = csv.DictWriter(outfile, fields)\n\t\tcsvwriter.writeheader()\n\t\tfor x in format_data(data_extract):\n\t\t\tcsvwriter.writerow(x)\n\n\n\n\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"251298501","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline 
to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass TaospiderPipeline(object):\n    def process_item(self, item, spider):\n        keys = item.keys()\n        for key in keys:\n            item[key] = item[key][0].encode('utf8').strip()\n        return item\n","sub_path":"taospider/taospider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"43596719","text":"def wait_to_time(time,red_time,green_time):\n    cycle = red_time + green_time\n    if (time % cycle) < red_time :\n        return red_time - (time % cycle)\n    else:\n        return 0\n\ndef Unmanned(L, N, track):\n    result = 0\n    for i in range(L):\n        for j in range(N):\n            if track[j][0] == i:\n                result = result + wait_to_time(result,track[j][1],track[j][2])\n        result = result + 1\n    return result\n\n\nprint(Unmanned(10, 2, [ [3,5,5], [5,2,2] ] ))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"510135001","text":"from typing import List\n\n\ndef iterative_binary_search(array: List[int], target: int) -> int:\n    first = 0\n    last = len(array) - 1\n    mid = 0\n\n    while (first <= last):\n        mid = first + (last - first) // 2\n\n        if (array[mid] == target):\n            return mid\n\n        if (array[mid] > target):\n            last = mid - 1\n\n        else:\n            first = mid + 1\n\n    return -1\n\ndef recursive_binary_search(array: List[int], target: int, low: int, high: int) -> int:\n    if (low <= high):\n        mid = low + (high - low) // 2\n\n        if (array[mid] == target):\n            return mid\n\n        if (array[mid] > target):\n            return recursive_binary_search(array, target, low, mid - 1)\n\n        else:\n            return recursive_binary_search(array, target, mid + 1, high)\n\n    return -1\n\ndef main():\n    values = [1,2,3,4,5,6,7,8,9]\n\n    print(iterative_binary_search(values, 8))\n    print(recursive_binary_search(values, 8, 0, (len(values) - 1)))\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Sorting_Algorithms/BinarySearch/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274060663","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 16 15:21:09 2020\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\n\r\nlist_hv = ['phongbd1', 'thanhnv25', 'kiencv']\r\nfor hv in list_hv:\r\n    print(hv)\r\n\r\nmy_name = 'Bui Dang Phong'\r\n\r\nfor i in my_name:\r\n    print(i)\r\n\r\n\r\nn = 5\r\nwhile n > 0 :\r\n    print (n)\r\n    n = n - 1\r\nprint ('OK')\r\nprint (n)\r\n\r\nn = 5\r\nwhile n > 0 :\r\n    print (n)\r\n    n = n - 1\r\n    if n == 4:\r\n        break\r\nprint ('Stop')\r\nprint (n)","sub_path":"devnet-lab01-for.py","file_name":"devnet-lab01-for.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"248113877","text":"# INSERT command\n# import the sqlite3 library\nimport sqlite3\n\n# create the connection object, insert data.\nwith sqlite3.connect(\"new.db\") as connection:\n\tc = connection.cursor()\n\tc.execute(\"INSERT INTO population VALUES('New York City', \\\n\t\t'NY', 8200000)\")\n\tc.execute(\"INSERT INTO population VALUES('San Francisco', \\\n\t\t'CA', 800000)\")\n","sub_path":"sqlb.py","file_name":"sqlb.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"374992277","text":"# Copyright Materialize, Inc. and contributors. All rights reserved.\n#\n# Use of this software is governed by the Business Source License\n# included in the LICENSE file at the root of this repository.\n#\n# As of the Change Date specified in that file, in accordance with\n# the Business Source License, use of this software will be governed\n# by the Apache License, Version 2.0.\n\nimport os\nimport subprocess\nimport time\nfrom datetime import datetime, timedelta\nfrom typing import List, Optional\n\nfrom pg8000.exceptions import InterfaceError\n\nfrom materialize import ROOT, mzbuild\nfrom materialize.cloudtest.app.application import Application\nfrom materialize.cloudtest.k8s.api.k8s_resource import K8sResource\nfrom materialize.cloudtest.k8s.cockroach import cockroach_resources\nfrom materialize.cloudtest.k8s.debezium import debezium_resources\nfrom materialize.cloudtest.k8s.environmentd import (\n EnvironmentdService,\n EnvironmentdStatefulSet,\n MaterializedAliasService,\n)\nfrom materialize.cloudtest.k8s.minio import Minio\nfrom materialize.cloudtest.k8s.persist_pubsub import PersistPubSubService\nfrom materialize.cloudtest.k8s.postgres import postgres_resources\nfrom materialize.cloudtest.k8s.redpanda import redpanda_resources\nfrom materialize.cloudtest.k8s.role_binding import AdminRoleBinding\nfrom materialize.cloudtest.k8s.ssh import ssh_resources\nfrom materialize.cloudtest.k8s.testdrive import Testdrive\nfrom materialize.cloudtest.k8s.vpc_endpoints_cluster_role import VpcEndpointsClusterRole\nfrom materialize.cloudtest.util.wait import wait\n\n\nclass MaterializeApplication(Application):\n def __init__(\n self,\n release_mode: bool = True,\n tag: Optional[str] = None,\n aws_region: Optional[str] = None,\n log_filter: Optional[str] = None,\n ) -> None:\n self.environmentd = EnvironmentdService()\n self.materialized_alias = MaterializedAliasService()\n self.testdrive = Testdrive(release_mode=release_mode, aws_region=aws_region)\n self.release_mode = release_mode\n self.aws_region = aws_region\n self.root = ROOT\n\n # Register the VpcEndpoint CRD.\n self.register_vpc_endpoint()\n\n self.start_metrics_server()\n\n self.resources = self.get_resources(release_mode, log_filter, tag)\n self.images = self.get_images()\n\n super().__init__()\n self.create()\n\n def get_resources(\n self, release_mode: bool, log_filter: Optional[str], tag: Optional[str]\n ) -> List[K8sResource]:\n return [\n *cockroach_resources(),\n *postgres_resources(),\n *redpanda_resources(),\n *debezium_resources(),\n *ssh_resources(),\n Minio(),\n VpcEndpointsClusterRole(),\n AdminRoleBinding(),\n EnvironmentdStatefulSet(\n release_mode=release_mode,\n tag=tag,\n log_filter=log_filter,\n coverage_mode=self.coverage_mode(),\n ),\n PersistPubSubService(),\n self.environmentd,\n self.materialized_alias,\n self.testdrive,\n ]\n\n def get_images(self) -> List[str]:\n return [\"environmentd\", \"clusterd\", \"testdrive\", \"postgres\"]\n\n def register_vpc_endpoint(self) -> None:\n self.kubectl(\n \"apply\",\n \"-f\",\n os.path.join(\n os.path.abspath(self.root),\n \"src/cloud-resources/src/crd/gen/vpcendpoints.json\",\n ),\n )\n\n def start_metrics_server(self) -> None:\n self.kubectl(\n \"apply\",\n \"-f\",\n \"https://github.com/kubernetes-sigs/metrics-server/releases/download/metrics-server-helm-chart-3.8.2/components.yaml\",\n )\n self.kubectl(\n \"patch\",\n \"deployment\",\n \"metrics-server\",\n \"--namespace\",\n \"kube-system\",\n \"--type\",\n \"json\",\n \"-p\",\n '[{\"op\": 
\"add\", \"path\": \"/spec/template/spec/containers/0/args/-\", \"value\": \"--kubelet-insecure-tls\" }]',\n )\n\n def create(self) -> None:\n super().create()\n self.wait_create_completed()\n\n def wait_create_completed(self) -> None:\n wait(condition=\"condition=Ready\", resource=\"pod/cluster-u1-replica-1-0\")\n\n def acquire_images(self) -> None:\n repo = mzbuild.Repository(\n self.root, release_mode=self.release_mode, coverage=self.coverage_mode()\n )\n for image in self.images:\n self._acquire_image(repo, image)\n\n def _acquire_image(self, repo: mzbuild.Repository, image: str) -> None:\n deps = repo.resolve_dependencies([repo.images[image]])\n deps.acquire()\n for dep in deps:\n subprocess.check_call(\n [\n \"kind\",\n \"load\",\n \"docker-image\",\n f\"--name={self.cluster_name()}\",\n dep.spec(),\n ]\n )\n\n def wait_replicas(self) -> None:\n # NOTE[btv] - This will need to change if the order of\n # creating clusters/replicas changes, but it seemed fine to\n # assume this order, since we already assume it in `create`.\n wait(condition=\"condition=Ready\", resource=\"pod/cluster-u1-replica-1-0\")\n wait(condition=\"condition=Ready\", resource=\"pod/cluster-s1-replica-2-0\")\n wait(condition=\"condition=Ready\", resource=\"pod/cluster-s2-replica-3-0\")\n\n def wait_for_sql(self) -> None:\n \"\"\"Wait until environmentd pod is ready and can accept SQL connections\"\"\"\n wait(condition=\"condition=Ready\", resource=\"pod/environmentd-0\")\n\n start = datetime.now()\n while datetime.now() - start < timedelta(seconds=300):\n try:\n self.environmentd.sql(\"SELECT 1\")\n break\n except InterfaceError as e:\n # Since we crash environmentd, we expect some errors that we swallow.\n print(f\"SQL interface not ready, {e} while SELECT 1. Waiting...\")\n time.sleep(2)\n\n def set_environmentd_failpoints(self, failpoints: str) -> None:\n \"\"\"Set the FAILPOINTS environmentd variable in the stateful set. 
This\n        will most likely restart environmentd\"\"\"\n        stateful_set = [\n            resource\n            for resource in self.resources\n            if type(resource) == EnvironmentdStatefulSet\n        ]\n        assert len(stateful_set) == 1\n        stateful_set = stateful_set[0]\n\n        stateful_set.env[\"FAILPOINTS\"] = failpoints\n        stateful_set.replace()\n        self.wait_for_sql()\n","sub_path":"misc/python/materialize/cloudtest/app/materialize_application.py","file_name":"materialize_application.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"571531031","text":"from node import *\nclass Grafo:\n    def __init__(self):\n        self.listaDeNodos = {}\n        self.numeroDeNodos = 0\n\n    def adicionaNodo(self, nome):\n        self.numeroDeNodos = self.numeroDeNodos + 1\n        novoNodo = Nodos(nome)\n        self.listaDeNodos[nome] = novoNodo\n\n        return novoNodo\n\n    def criaAresta(self, a, b, peso=0):\n        self.listaDeNodos[a].adicionaVizinhos(self.listaDeNodos[b], peso)\n        self.listaDeNodos[b].adicionaVizinhos(self.listaDeNodos[a], peso)\n\n    def deletaMenorAresta(self,minimumPathList):\n        initial_node = {}\n        final_node = {}\n        pesoFinal = 9999999\n\n        # find the lowest-weight edge among the edges connected to nodes of the minimum path\n        for x in minimumPathList:\n            for y in self.listaDeNodos[x].conectadoCom:\n                peso = self.listaDeNodos[x].conectadoCom[y]\n                #print str(x) +\" connects with: \" + str(y.nome) + \" with weight: \" +str(peso)\n                if(pesoFinal> peso) and (existeNoPathMinimo(minimumPathList,y.nome)):\n                    pesoFinal = peso\n                    initial_node = x\n                    final_node = y.nome\n        #print \"smallest edge: \"+ str(initial_node) + \",\" + str(final_node) + \" weight:\" + str(pesoFinal)\n\n\n        # delete both directions of the lowest-weight edge from the graph\n        if final_node != {}:\n            for j in self.listaDeNodos[initial_node].conectadoCom:\n                if j.nome == final_node:\n                    self.listaDeNodos[initial_node].conectadoCom.pop(j)\n                    break\n            for i in self.listaDeNodos[final_node].conectadoCom:\n                if i.nome == initial_node:\n                    self.listaDeNodos[final_node].conectadoCom.pop(i)\n                    break\n\n\n    def __iter__(self):\n        return iter(self.listaDeNodos.values())\n\n\ndef existeNoPathMinimo(minimumPathList, a):\n    if a in minimumPathList:\n        return True\n    else:\n        return False","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"477936809","text":"import csv\nimport numpy as np\nimport os\n\nimport matplotlib.image as mpimg\n\n\nclass dataLoader():\n    def __init__(self, data_directory, mode):\n        self.data_directory = data_directory\n        if mode == 'train':\n            self.data_info = os.path.join(self.data_directory, 'mos/train.csv')\n        elif mode == 'test':\n            self.data_info = os.path.join(self.data_directory, 'mos/test.csv')\n\n        self.data_image_directory = os.path.join(data_directory, 'video_to_image')\n\n    def loader(self):\n        system_datas, image_datas, subject_scores = [], [], []\n        with open(self.data_info, 'r', encoding='utf-8') as csvfile:\n            reader = csv.reader(csvfile)\n            for idx, line in enumerate(reader):\n                subject_score = float(line[3])\n                scene_name = line[1]\n\n                system_data = self._system_data_loader(scene_name)\n                image_data = self._image_data_loader(scene_name)\n\n                system_datas.append(system_data)\n                image_datas.append(image_data)\n                subject_scores.append(subject_score)\n        return system_datas, image_datas, subject_scores\n\n\n    def _system_data_loader(self, scene_name):\n        npy = os.path.join(self.data_directory, 'csv', 
scene_name)\n return np.load(npy)\n\n def _image_data_loader(self, scene_name):\n image_directory = os.path.join(self.data_directory, 'video_to_image', scene_name.split('.')[0])\n images = [mpimg.imread(os.path.join(image_directory, line)) for line in os.listdir(image_directory)]\n return images\n\n\n\n\n\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"319036522","text":"# A chara card contains two pngs (cover, head) and chara data.\n# The chara data contains lstInfo and four lists (custom, coordinate,\n# parameter, status).\n# The custom list contains three lists (face, body, hair).\n# Each list contains several tokens. Data types of tokens are shown below.\n# The file structure is really awful... Why don't they use a common pickler?\n\n# TODO:\n# Better parameter models\n# Generate costume for each type\n# Generate random accessories\n\nimport codecs\nimport io\nimport struct\nfrom collections import OrderedDict\nfrom functools import lru_cache\n\nimport h5py\nimport hsluv\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom scipy.special import ndtr\nfrom sklearn.mixture import GaussianMixture\n\nfrom trdm import TensorRankDecompositionModel\n\nDEBUG = False\n\n\ndef debug_print(*args):\n if DEBUG:\n print(*args)\n\n\n# MAX is inclusive\nSIGN_UINT1_MAX = 0x7f\nSIGN_PAIRS = 0x80\nSIGN_PAIRS_MAX = 0x8f\nSIGN_LIST = 0x90\nSIGN_LIST_MAX = 0x9f\nSIGN_STR = 0xa0\nSIGN_STR_MAX = 0xbf\nSIGN_FALSE = 0xc2\nSIGN_TRUE = 0xc3\nSIGN_LIST_ALTER = 0xc4\nSIGN_FIXED_SIZE_LIST = 0xc5\nSIGN_FLOAT4 = 0xca\nSIGN_UINT1_ALTER = 0xcc\nSIGN_UINT2 = 0xcd\nSIGN_UINT4 = 0xce\nSIGN_LONG_STR = 0xd9\nSIGN_LONG_LIST = 0xdc\nSIGN_LONG_PAIRS = 0xde\n\n\ndef parse_token(data, idx0):\n idx = idx0\n if idx >= len(data):\n return None, 0\n\n if data[idx] <= SIGN_UINT1_MAX:\n token = data[idx]\n idx += 1\n\n elif SIGN_PAIRS <= data[idx] <= SIGN_PAIRS_MAX:\n token_len = data[idx] - SIGN_PAIRS\n token = OrderedDict()\n idx += 1\n for i in range(token_len):\n key, delta_idx = parse_token(data, idx)\n idx += delta_idx\n value, delta_idx = parse_token(data, idx)\n idx += delta_idx\n token[key] = value\n\n elif SIGN_LIST <= data[idx] <= SIGN_LIST_MAX:\n token_len = data[idx] - SIGN_LIST\n token = []\n idx += 1\n for i in range(token_len):\n value, delta_idx = parse_token(data, idx)\n idx += delta_idx\n token.append(value)\n\n elif SIGN_STR <= data[idx] <= SIGN_STR_MAX:\n token_len = data[idx] - SIGN_STR\n try:\n token = data[idx + 1:idx + token_len + 1].decode()\n except UnicodeDecodeError:\n debug_print('STR', idx, data[idx:idx + token_len + 1])\n token = data[idx + 1:idx + token_len + 1]\n idx += token_len + 1\n\n elif data[idx] == SIGN_FALSE:\n token = False\n idx += 1\n\n elif data[idx] == SIGN_TRUE:\n token = True\n idx += 1\n\n elif data[idx] == SIGN_LIST_ALTER:\n token_len = data[idx + 1]\n token = []\n idx += 2\n for i in range(token_len):\n value, delta_idx = parse_token(data, idx)\n idx += delta_idx\n token.append(value)\n token = ('LIST_ALTER', token)\n\n elif data[idx] == SIGN_FIXED_SIZE_LIST:\n token_len = struct.unpack('>H', data[idx + 1:idx + 3])[0]\n token = []\n idx += 3\n max_idx = idx + token_len\n while idx < max_idx:\n # There may be an additional 0\n if data[idx + 4] == 0:\n token.append(0)\n idx += 1\n idx += 4 # Size of data chunk\n value, delta_idx = parse_token(data, idx)\n idx += delta_idx\n token.append(value)\n token = 
('FIXED_SIZE_LIST', token)\n\n    elif data[idx] == SIGN_FLOAT4:\n        token = struct.unpack('>f', data[idx + 1:idx + 5])[0]\n        idx += 5\n\n    elif data[idx] == SIGN_UINT1_ALTER:\n        debug_print('UINT1', idx, data[idx], data[idx + 1])\n        token = data[idx + 1]\n        idx += 2\n\n    elif data[idx] == SIGN_UINT2:\n        token = struct.unpack('>H', data[idx + 1:idx + 3])[0]\n        idx += 3\n\n    elif data[idx] == SIGN_UINT4:\n        token = struct.unpack('>I', data[idx + 1:idx + 5])[0]\n        idx += 5\n\n    elif data[idx] == SIGN_LONG_STR:\n        token_len = data[idx + 1]\n        try:\n            token = data[idx + 2:idx + token_len + 2].decode()\n        except UnicodeDecodeError:\n            debug_print('LONG_STR', idx, data[idx:idx + token_len + 2])\n            token = data[idx + 2:idx + token_len + 2]\n        idx += token_len + 2\n\n    elif data[idx] == SIGN_LONG_LIST:\n        token_len = struct.unpack('>H', data[idx + 1:idx + 3])[0]\n        token = []\n        idx += 3\n        for i in range(token_len):\n            value, delta_idx = parse_token(data, idx)\n            idx += delta_idx\n            token.append(value)\n\n    elif data[idx] == SIGN_LONG_PAIRS:\n        token_len = struct.unpack('>H', data[idx + 1:idx + 3])[0]\n        token = OrderedDict()\n        idx += 3\n        for i in range(token_len):\n            key, delta_idx = parse_token(data, idx)\n            idx += delta_idx\n            value, delta_idx = parse_token(data, idx)\n            idx += delta_idx\n            token[key] = value\n\n    else:\n        debug_print('?', idx, data[idx])\n        token = ('?', data[idx])\n        idx += 1\n\n    delta_idx = idx - idx0\n    return token, delta_idx\n\n\ndef parse_token_list(data):\n    tokens = []\n    idx = 0\n    while idx < len(data):\n        token, delta_idx = parse_token(data, idx)\n        idx += delta_idx\n        tokens.append(token)\n    return tokens\n\n\ndef dump_token_with_len(token):\n    data = dump_token(token)\n    # 4-byte length prefix, matching the list sizes skipped in read_card\n    data = struct.pack('<I', len(data)) + data\n    return data\n\n\ndef dump_token(token):\n    if type(token) == tuple:\n        # Tuples come from parse_token: ('LIST_ALTER', ...), ('FIXED_SIZE_LIST', ...) or ('?', byte)\n        if token[0] == 'LIST_ALTER':\n            data = (bytes([SIGN_LIST_ALTER, len(token[1])]) + b''.join(\n                [dump_token(x) for x in token[1]]))\n        elif token[0] == 'FIXED_SIZE_LIST':\n            data = b''\n            for x in token[1]:\n                chunk = dump_token(x)\n                # 4-byte size before each chunk, mirroring the parser\n                data += struct.pack('<I', len(chunk)) + chunk\n            data = (bytes([SIGN_FIXED_SIZE_LIST]) +\n                    struct.pack('>H', len(data)) + data)\n        elif token[0] == '?':\n            data = bytes([token[1]])\n        else:\n            raise Exception('Unknown token <{}>: {}'.format(\n                type(token), token))\n\n    elif type(token) == list:\n        if len(token) < 16:\n            data = (bytes([SIGN_LIST + len(token)]) + b''.join(\n                [dump_token(x) for x in token]))\n        else:\n            data = (bytes([SIGN_LONG_LIST]) + struct.pack('>H', len(token)) +\n                    b''.join([dump_token(x) for x in token]))\n\n    elif type(token) == OrderedDict:\n        if len(token) < 16:\n            data = (bytes([SIGN_PAIRS + len(token)]) + b''.join(\n                [dump_token(k) + dump_token(v) for k, v in token.items()]))\n        else:\n            data = (bytes([SIGN_LONG_PAIRS]) + struct.pack(\n                '>H', len(token)) + b''.join(\n                [dump_token(k) + dump_token(v) for k, v in token.items()]))\n\n    elif type(token) == str:\n        data = token.encode()\n        if len(data) < 32:\n            data = bytes([SIGN_STR + len(data)]) + data\n        else:\n            data = bytes([SIGN_LONG_STR, len(data)]) + data\n\n    elif type(token) == int:\n        if token <= SIGN_UINT1_MAX:\n            data = bytes([token])\n        elif token < 2**8:\n            data = bytes([SIGN_UINT1_ALTER]) + bytes([token])\n        elif token < 2**16:\n            data = bytes([SIGN_UINT2]) + struct.pack('>H', token)\n        else:\n            data = bytes([SIGN_UINT4]) + struct.pack('>I', token)\n\n    elif type(token) == float:\n        data = bytes([SIGN_FLOAT4]) + struct.pack('>f', token)\n\n    elif type(token) == bool:\n        data = bytes([SIGN_TRUE if token else SIGN_FALSE])\n\n    else:\n        raise Exception('Unknown token <{}>: {}'.format(type(token), token))\n\n    return data\n\n\ndef read_png(data, idx0):\n    idx = idx0\n\n    # PNG magic number\n    assert data[idx:idx + 8] == b'\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a'\n\n    idx += 8\n    while True:\n        chunk_len = struct.unpack('>I', data[idx:idx + 4])[0]\n        chunk_type = data[idx + 4:idx + 8].decode()\n        idx += chunk_len + 12\n        if chunk_type == 'IEND':\n            break\n\n    img = 
data[idx0:idx]\n    delta_idx = idx - idx0\n    return img, delta_idx\n\n\ndef read_card(filename):\n    with open(filename, 'rb') as f:\n        card_data = f.read()\n\n    # img1: cover, 252x352\n    idx = 0\n    img1, delta_idx = read_png(card_data, idx)\n    idx += delta_idx\n\n    # img2: head, 240x320\n    idx += 33  # \\x64\\x00\\x00\\x00 【KoiKatuChara】 0.0.0\n    img2, delta_idx = read_png(card_data, idx)\n    idx += delta_idx\n\n    # unknown_data is usually \\xb7\\x00\\x00\\x00\n    unknown_data = card_data[idx:idx + 4]\n    idx += 4\n\n    lstinfo_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n\n    has_kkex = (lstinfo_token['lstInfo'][0]['name'] == 'KKEx')\n\n    idx += 8  # Size of lists\n    idx += 4  # Size of face\n    face_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n    idx += 4  # Size of body\n    body_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n    idx += 4  # Size of hair\n    hair_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n\n    coordinate_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n    parameter_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n    status_token, delta_idx = parse_token(card_data, idx)\n    idx += delta_idx\n\n    if has_kkex:\n        kkex_data = card_data[idx:]\n\n    card = {\n        'img1': img1,\n        'img2': img2,\n        'unknown_data': unknown_data,\n        'lstInfo': lstinfo_token,\n        'face': face_token,\n        'body': body_token,\n        'hair': hair_token,\n        'coordinate': coordinate_token,\n        'parameter': parameter_token,\n        'status': status_token,\n    }\n\n    if has_kkex:\n        card['KKEx'] = kkex_data\n\n    return card\n\n\ndef write_card(filename, card):\n    has_kkex = ('KKEx' in card)\n\n    face_data = dump_token_with_len(card['face'])\n    body_data = dump_token_with_len(card['body'])\n    hair_data = dump_token_with_len(card['hair'])\n    coordinate_data = dump_token(card['coordinate'])\n    parameter_data = dump_token(card['parameter'])\n    status_data = dump_token(card['status'])\n\n    if has_kkex:\n        # KKEx is not modified\n        lst_idx = {\n            'KKEx': 0,\n            'Custom': 1,\n            'Coordinate': 2,\n            'Parameter': 3,\n            'Status': 4,\n        }\n    else:\n        lst_idx = {\n            'Custom': 0,\n            'Coordinate': 1,\n            'Parameter': 2,\n            'Status': 3,\n        }\n\n    idx = 0\n    token = card['lstInfo']['lstInfo']\n    token[lst_idx['Custom']]['pos'] = idx\n    token[lst_idx['Custom']]['size'] = (\n        len(face_data) + len(body_data) + len(hair_data))\n    idx += len(face_data) + len(body_data) + len(hair_data)\n    token[lst_idx['Coordinate']]['pos'] = idx\n    token[lst_idx['Coordinate']]['size'] = len(coordinate_data)\n    idx += len(coordinate_data)\n    token[lst_idx['Parameter']]['pos'] = idx\n    token[lst_idx['Parameter']]['size'] = len(parameter_data)\n    idx += len(parameter_data)\n    token[lst_idx['Status']]['pos'] = idx\n    token[lst_idx['Status']]['size'] = len(status_data)\n    idx += len(status_data)\n    lstinfo_data = dump_token(card['lstInfo'])\n\n    data_len = (len(face_data) + len(body_data) + len(hair_data) +\n                len(coordinate_data) + len(parameter_data) + len(status_data))\n\n    if has_kkex:\n        data_len += len(card['KKEx'])\n\n    with open(filename, 'wb') as f:\n        f.write(card['img1'])\n\n        f.write(b''.join([\n            b'\\x64\\x00\\x00\\x00',\n            b'\\x12',\n            '【KoiKatuChara】'.encode(),\n            b'\\x05',\n            '0.0.0'.encode(),\n            struct.pack('\n\ndef bbox_categories(categories):\n    # merge the x/y bounding boxes of every category's series\n    xmin, ymin = float(\"inf\"), float(\"inf\")\n    xmax, ymax = float(\"-inf\"), float(\"-inf\")\n    for category,dict in categories.items():\n        _xmin, _xmax = min(dict[\"x\"]), max(dict[\"x\"])\n        _ymin, _ymax = min(dict[\"y\"]), max(dict[\"y\"])\n        if _xmin < xmin: xmin = _xmin\n        if _xmax > xmax: xmax = _xmax\n        if _ymin < ymin: ymin = _ymin\n        if _ymax > ymax: ymax = _ymax\n    return xmin,ymin,xmax,ymax\n\n\n\n# 1 var\n\nclass Histogram:\n    # create a barchart with 0 bargap\n    def __init__(self, values, bins, **kwargs):\n        # pool values into n bins and sum them\n        self.options = kwargs\n        self.bins = []\n        minval, maxval = 
min(values), max(values)\n        valuerange = maxval - minval\n        binwidth = valuerange / float(bins)\n        values = sorted(values)\n        # begin\n        count = 0\n        curval = minval\n        nextval = curval + binwidth\n        for val in values:\n            if val < nextval:\n                count += 1\n            else:\n                self.bins.append((\"%s - %s\"%(curval,nextval), count))\n                count = 0 + 1 # count towards next bin\n                curval = nextval\n                nextval = curval + binwidth\n        # flush the final bin so the last interval is not dropped\n        self.bins.append((\"%s - %s\"%(curval,nextval), count))\n\n    def draw(self, width, height, background=(0,0,0)):\n        # use these aggregated bin values as bars arg, and the bin range text as barlabels\n        graph = BarChart()\n        graph.bargap = 0\n        labels, values = zip(*self.bins)\n        graph.add_category(\"\",\n                           barlabels=labels,\n                           bars=values,\n                           **self.options)\n        canvas = graph.draw(width, height, background)\n        return canvas\n\nclass BarChart:\n    \n    def __init__(self):\n        self.categories = dict()\n        self.bargap = 1\n        self.barwidth = 5\n    \n    def add_category(self, name, barlabels, bars, **kwargs):\n        self.categories[name] = {\"barlabels\":barlabels, \"bars\":bars, \"options\":kwargs}\n\n    def draw(self, width, height, background=(0,0,0)):\n        canvas = Canvas(width, height, background)\n        ymin = min((min(dict[\"bars\"]) for category,dict in self.categories.items()))\n        ymin = min(0, ymin) # to ensure snapping to 0 if ymin is not negative\n        ymax = max((max(dict[\"bars\"]) for category,dict in self.categories.items()))\n        _barcount = sum((len(dict[\"bars\"]) for category,dict in self.categories.items()))\n        xmin = 0\n        xmax = self.bargap + ( (self.barwidth + self.bargap) * _barcount)\n        # set coordinate bbox\n        canvas.custom_space(xmin,ymax,xmax,ymin)\n        canvas.zoom_factor(-1.2)\n        # draw categories\n        baroffset = 0\n        for category,dict in self.categories.items():\n            curx = self.bargap + baroffset\n            for barlabel,barvalue in itertools.izip(dict[\"barlabels\"], dict[\"bars\"]):\n                flat = [curx,0, curx+self.barwidth,0, curx+self.barwidth,barvalue, curx,barvalue]\n                canvas.draw_polygon(flat, **dict[\"options\"])\n                canvas.draw_text((curx+self.barwidth/2.0,0), unicode(barlabel), textcolor=\"white\", textanchor=\"n\", textsize=12)\n                curx += self.barwidth + self.bargap\n            baroffset += self.barwidth\n        # return the drawn canvas\n        return canvas\n\nclass PieChart:\n    # similar to barchart, but each pie size decided by value's %share of total\n    def __init__(self):\n        self.categories = dict()\n    \n    def add_category(self, name, value, **kwargs):\n        # each category holds a single value\n        self.categories[name] = {\"value\":value, \"options\":kwargs}\n\n    def draw(self, width, height, background=(0,0,0)):\n        canvas = Canvas(width, height, background)\n        canvas.custom_space(-50, 50, 50, -50)\n        total = sum(cat[\"value\"] for cat in self.categories.values())\n\n        # first pies\n        curangle = 0\n        for category in self.categories.values():\n            value = category[\"value\"]\n            ratio = value / float(total)\n            degrees = 360 * ratio\n            canvas.draw_pie((0,0), curangle, curangle + degrees,\n                            **category[\"options\"])\n            curangle += degrees\n\n        # then text label\n        curangle = 0\n        for name, category in self.categories.items():\n            value = category[\"value\"]\n            ratio = value / float(total)\n            degrees = 360 * ratio\n            midangle = curangle + (degrees / 2.0)\n            midrad = math.radians(midangle)\n            size = 20 #category[\"options\"][\"fillsize\"] / 2.0\n            tx,ty = 0 + size * math.cos(midrad), 0 - size * math.sin(midrad)\n            canvas.draw_text((tx,ty), name, **category[\"options\"])\n            curangle += degrees\n        return canvas\n    \n    \n\n\n\n# 2 vars\n\nclass LineGraph:\n    \n    def __init__(self):\n        self.categories = dict()\n    \n    def add_category(self, name, xvalues, yvalues, 
**kwargs):\n        if len(xvalues) != len(yvalues):\n            raise Exception(\"x and y series must be same length\")\n        self.categories[name] = {\"x\":xvalues, \"y\":yvalues, \"options\":kwargs}\n\n    def draw(self, width, height, background=(0,0,0)):\n        canvas = Canvas(width, height, background)\n        xmin,ymin,xmax,ymax = bbox_categories(self.categories)\n        # set coordinate bbox\n        canvas.custom_space(xmin,ymax,xmax,ymin)\n        canvas.zoom_factor(-1.2)\n        # draw categories\n        for category,dict in self.categories.items():\n            valuepairs = zip(dict[\"x\"], dict[\"y\"])\n            flat = [xory for xy in valuepairs for xory in xy]\n            canvas.draw_line(flat, **dict[\"options\"])\n        #canvas.draw_text((xmax,ymin), unicode(xmax), textcolor=(222,222,222))\n        #canvas.draw_text((xmin,ymax), unicode(ymax), textcolor=(222,222,222))\n        #canvas.draw_text((xmin+5,ymin), unicode(xmin), textcolor=(222,222,222))\n        #canvas.draw_text((xmin,ymin+5), unicode(ymin), textcolor=(222,222,222))\n        # return the drawn canvas\n        return canvas\n\nclass ScatterPlot:\n    \n    def __init__(self):\n        self.categories = dict()\n    \n    def add_category(self, name, xvalues, yvalues, **kwargs):\n        if len(xvalues) != len(yvalues):\n            raise Exception(\"x and y series must be same length\")\n        self.categories[name] = {\"x\":xvalues, \"y\":yvalues, \"options\":kwargs}\n\n    def draw(self, width, height, background=(0,0,0)):\n        canvas = Canvas(width, height, background)\n        xmin,ymin,xmax,ymax = bbox_categories(self.categories)\n        # set coordinate bbox\n        canvas.custom_space(xmin,ymax,xmax,ymin)\n        canvas.zoom_factor(-1.2)\n        # draw categories\n        for category,dict in self.categories.items():\n            valuepairs = zip(dict[\"x\"], dict[\"y\"])\n            for xy in valuepairs:\n                canvas.draw_circle(xy, **dict[\"options\"])\n        # return the drawn canvas\n        return canvas\n\n\n\n# 3 vars\n\nclass BubblePlot:\n    \n    def __init__(self):\n        self.categories = dict()\n    \n    def add_category(self, name, xvalues, yvalues, zvalues, **kwargs):\n        if len(xvalues) != len(yvalues):\n            raise Exception(\"x and y series must be same length\")\n        self.categories[name] = {\"x\":xvalues, \"y\":yvalues, \"z\":zvalues, \"options\":kwargs}\n\n    def draw(self, width, height, background=(0,0,0)):\n        canvas = Canvas(width, height, background)\n        xmin,ymin,xmax,ymax = bbox_categories(self.categories)\n        # set coordinate bbox\n        canvas.custom_space(xmin,ymax,xmax,ymin)\n        canvas.zoom_factor(-1.2)\n        # draw categories\n        for category,dict in self.categories.items():\n            valuepairs = zip(dict[\"x\"], dict[\"y\"], dict[\"z\"])\n            for xyz in valuepairs:\n                x,y,z = xyz\n                # convert z value to some minmax symbolsize\n                # ...\n                # draw the bubble\n                canvas.draw_circle((x,y), fillsize=z, **dict[\"options\"])\n        # return the drawn canvas\n        return canvas\n\n\n\n\n\n\n","sub_path":"pyagg/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":8352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461558511","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport jieba\nimport pandas as pd\nfrom collections import Counter\nfrom hanziconv import HanziConv\n\npath1 = './Sample/AA/'  # folder containing the source txt files\npath2 = './Sample/BB/'  # output folder\n\nfiles = os.listdir(path1)  # list all file names under the folder\nstop_lst = open('stopwords.txt').readlines()\nstpw = set([word.strip() for word in stop_lst]) | set(\n    [\"\\n\", \"\\r\", \"\\r\\n\", \"\\u3000\"])\n\n\ndef clear(x):\n    pat = re.compile(r'[\\u4e00-\\u9fa5]+')\n    x = HanziConv.toSimplified(x)  # convert traditional to simplified Chinese\n    x = ''.join(pat.findall(x))  # keep only Chinese characters\n    segs = [x for x in jieba.cut(x) if x not in stpw]\n 
return ' '.join(segs)\n\n\ndef clean_files(files, path1, path2, name2):\n for file in files:\n if file.startswith('news'):\n fpath1 = os.path.join(path1, file)\n fpath2 = os.path.join(path2, name2)\n\n with open(fpath1, 'r') as f1:\n # print(f1.readline())\n # text = ''.join(f1.readlines())\n for line in f1.readlines():\n text_after = clear(line)\n if text_after:\n with open(fpath2, 'a') as f2:\n f2.write(text_after + '\\n')\n\n print(file, ' has already finished.')\n\n\ndef words_count(file1, file2):\n words_list = []\n with open(file1) as f:\n for line in f.readlines():\n for word in line.strip('\\n').split():\n words_list.append(word)\n\n dict_ = Counter(words_list)\n df = pd.DataFrame.from_dict(dict_, orient='index')\n df.sort_values(by=0, axis=0, inplace=True, ascending=False)\n df.to_csv(file2)\n\n print('Finished!')\n\n return df.shape\n\n\nif __name__ == '__main__':\n file = os.path.join(path1, 'news_corpus.txt')\n name = os.path.join(path1, 'news_corpus_count.txt')\n words_count(file, name)\n","sub_path":"data_preprocess/clear.py","file_name":"clear.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"184904250","text":"#8-4. Large Shirts: Modify the make_shirt() function so that shirts are large\n#by default with a message that reads I love Python. Make a large shirt and a\n#medium shirt with the default message, and a shirt of any size with a different\n#message.\n\ndef make_shirt(text, size = \"large\"):\n print(\"the size of the shirt is \" + str(size) + \" with a inscription of \" + text)\n\n\n\nmake_shirt(\"blah\", 20 )\nmake_shirt(text = \"oof\", size = 20)\nmake_shirt(\"I love python\")\nmake_shirt(\"ooh la la\", \"medium\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"practice_problems/8_functions/tshirt.8.6.py","file_name":"tshirt.8.6.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"198000639","text":"# -*- coding: utf-8 -*-\n\n## OS2OGR - Converts OS GML to OGR using OsmmLoader\n## Copyright (c) 2011 Faunalia\n\n## This program is free software: you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation, either version 3 of the License, or\n## (at your option) any later version.\n\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n\n## You should have received a copy of the GNU General Public License\n## along with this program. 
If not, see .\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom DlgAbout_ui import Ui_DlgAbout\nfrom os2ogr import name, description, version\nimport platform\n\ntry:\n import resources\nexcept ImportError:\n import resources_rc\n\nclass DlgAbout(QDialog, Ui_DlgAbout):\n\n def __init__(self, parent=None):\n QDialog.__init__(self, parent)\n self.setupUi(self)\n\n self.logo.setPixmap( QPixmap( \":/lutra/logo\" ) )\n self.title.setText( name() )\n self.description.setText( description() )\n\n text = self.txt.toHtml()\n text = text.replace( \"$PLUGIN_NAME$\", name() )\n\n subject = \"Help: %s\" % name()\n body = \"\"\"\\n\\n\n--------\nPlugin name: %s\nPlugin version: %s\nPython version: %s\nPlatform: %s - %s\n--------\n\"\"\" % ( name(), version(), platform.python_version(), platform.system(), platform.version() )\n\n mail = QUrl( \"mailto:abc@abc.com\" )\n mail.addQueryItem( \"subject\", subject )\n mail.addQueryItem( \"body\", body )\n\n text = text.replace( \"$MAIL_SUBJECT$\", unicode(mail.encodedQueryItemValue( \"subject\" )) )\n text = text.replace( \"$MAIL_BODY$\", unicode(mail.encodedQueryItemValue( \"body\" )) )\n\n self.txt.setHtml(text)\n\n\n\n","sub_path":"DlgAbout.py","file_name":"DlgAbout.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638419756","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 15 14:55:39 2019\r\n\r\n@author: MX\r\n\"\"\"\r\n\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 8 02:27:04 2019\r\n\r\n@author: kevin\r\n\"\"\"\r\nimport os \r\nimport numpy as np\r\nimport tensorflow as tf \r\nfrom PIL import Image \r\n \r\n#initial weights\r\ndef weight_variable(shape):\r\n initial = tf.truncated_normal(shape, stddev = 0.02)\r\n return tf.Variable(initial)\r\n#initial bias\r\ndef bias_variable(shape):\r\n initial = tf.constant(0.0, shape=shape)\r\n return tf.Variable(initial)\r\n\r\n#convolution layer\r\ndef conv2d(x,W):\r\n return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\r\n\r\n#max_pool layer\r\ndef max_pool_4x4(x):\r\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\r\n\r\ndef spp_layer(input_, levels=6, name = 'SPP_layer',pool_type = 'max_pool'):\r\n\r\n '''\r\n Multiple Level SPP layer.\r\n \r\n Works for levels=[1, 2, 3, 6].\r\n '''\r\n \r\n shape = input_.get_shape().as_list()\r\n \r\n with tf.variable_scope(name):\r\n\r\n for l in range(levels):\r\n \r\n l = l + 1\r\n ksize = [1, np.ceil(shape[1]/ l + 1).astype(np.int32), np.ceil(shape[2] / l + 1).astype(np.int32), 1]\r\n \r\n strides = [1, np.floor(shape[1] / l + 1).astype(np.int32), np.floor(shape[2] / l + 1).astype(np.int32), 1]\r\n \r\n if pool_type == 'max_pool':\r\n pool = tf.nn.max_pool(input_, ksize=ksize, strides=strides, padding='SAME')\r\n pool = tf.reshape(pool,(shape[0],-1),)\r\n \r\n else :\r\n pool = tf.nn.avg_pool(input_, ksize=ksize, strides=strides, padding='SAME')\r\n pool = tf.reshape(pool,(shape[0],-1))\r\n print(\"Pool Level {:}: shape {:}\".format(l, pool.get_shape().as_list()))\r\n if l == 1:\r\n\r\n x_flatten = tf.reshape(pool,(shape[0],-1))\r\n else:\r\n x_flatten = tf.concat((x_flatten,pool),axis=1)\r\n print(\"Pool Level {:}: shape {:}\".format(l, x_flatten.get_shape().as_list()))\r\n # pool_outputs.append(tf.reshape(pool, [tf.shape(pool)[1], -1]))\r\n \r\n\r\n return x_flatten\r\n\r\n\r\nx = tf.placeholder(tf.float32, [1,128,128,3])\r\ny_ = 
tf.placeholder(tf.float32,[1,82])\r\n\r\n#first convolution and max_pool layer\r\nW_conv1 = weight_variable([5,5,3,32])\r\nb_conv1 = bias_variable([32])\r\nh_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)\r\nh_pool1 = max_pool_4x4(h_conv1)\r\n\r\n#second convolution and max_pool layer\r\nW_conv2 = weight_variable([5,5,32,64])\r\nb_conv2 = bias_variable([64])\r\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\r\nh_pool2 = max_pool_4x4(h_conv2)\r\n\r\n#third convolution and max_pool layer\r\nW_conv3 = weight_variable([3,3,64,128])\r\nb_conv3 = bias_variable([128])\r\nh_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)\r\nh_pool3 = max_pool_4x4(h_conv3)\r\n\r\n#fourth convolution and max_pool layer\r\nW_conv4 = weight_variable([3,3,128,256])\r\nb_conv4 = bias_variable([256])\r\nh_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)\r\nh_pool4 = max_pool_4x4(h_conv4)\r\n\r\n#h_pool4 = spp_layer(h_conv4)\r\n\r\n# flatten into a fully connected layer and process it with an MLP\r\nreshape = tf.reshape(h_pool4,[1, -1])\r\ndim = reshape.get_shape()[1].value\r\nW_fc1 = weight_variable([dim, 1024])\r\nb_fc1 = bias_variable([1024])\r\nh_fc1 = tf.nn.relu(tf.matmul(reshape, W_fc1) + b_fc1)\r\n\r\n#dropout\r\nkeep_prob = tf.placeholder(tf.float32)\r\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\r\n\r\nW_fc2 = weight_variable([1024,82])\r\nb_fc2 = bias_variable([82])\r\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\r\n\r\n\r\n\r\nsaver = tf.train.Saver()\r\n\r\nwith tf.Session() as sess:\r\n    sess.run(tf.global_variables_initializer())\r\n    saver.restore(sess,r'C:\\\\Users\\\\MX\\\\Desktop\\\\model3\\\\.\\player.ckpt')\r\n\r\n    im=Image.open(r'C:\\\\Users\\\\MX\\\\Desktop\\\\ozil.jpg')\r\n    im=im.resize((128,128))\r\n    im=np.array(im).astype(np.float32)\r\n    im=np.reshape(im,[-1,128*128*3])\r\n    im=(im-(255/2.0))/255\r\n    x_img=np.reshape(im,[-1,128,128,3])\r\n    output=sess.run(y_conv,feed_dict={x:x_img})\r\n    print(output)\r\n\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112969970","text":"# coding: utf-8\n\n\"\"\"\nhelper\n\"\"\"\n\nimport math\nimport random\nimport cluster\n\ndef __sumL(l1, l2):\n\t\"\"\"\n\tsum of two lists\n\tresult is a list\n\t\"\"\"\n\tif l1 == []:\n\t\treturn l2\n\telif l2 == []:\n\t\treturn l1\n\tL = []\n\tn = len(l1)\n\tfor i in range(n):\n\t\tL.append(l1[i] + l2[i])\n\treturn L\n\ndef print_c(clusters):\n\t\"\"\"\n\tprint the content of all clusters\n\t\"\"\"\n\tfor cluster in clusters:\n\t\tprint(cluster)\n\ndef meanData(data):\n\t\"\"\"\n\tcompute the mean of all the data points\n\tGOAL: generate better clusters\n\t\"\"\"\n\tmean = []\n\tnb_pts = len(data)\n\tdim = len(data[0])\n\ttmp = []\n\n\tfor i in range(nb_pts):\n\t\ttmp = __sumL(tmp, data[i])\n\tmean = [e/nb_pts for e in tmp]\n\n\treturn mean\n\ndef generateClusters(k, dim, data):\n\t\"\"\"\n\tgenerate the list of clusters (each cluster is created at one point)\n\tk : number of clusters\n\tdim : dimension of the data points\n\t\"\"\"\n\tif k > len(data):\n\t\traise Exception(\"Number of clusters cannot be greater than number of data points !\")\n\n\tmean_data = meanData(data) #mean of all the data points\n\tclusters = []\n\tfor i in range(k):\n\t\tL = []\n\t\tfor j in range(dim):\n\t\t\t#generate good clusters\n\t\t\tL.append(random.randint(int(mean_data[j]-k*2),int(mean_data[j]+k*2)))\n\t\tclust = cluster.Cluster(L, [])\n\t\tclusters.append(clust)\n\n\treturn clusters\n\ndef gap(data, 
clusters):\n\t\"\"\"\n\tcompute the gap between each point and each cluster\n\tdata : matrix with all the points\n\tclusters : list with all the clusters\n\t-> append each point to the closest cluster\n\t\"\"\"\n\n\tnb_point = len(data)\n\tdim = len(data[0])\n\tnb_clusters = len(clusters)\n\n\tfor i in range(nb_point):\n\t\tmin_gap, pos_c = float('inf'), 0 # infinity as the initial minimum gap, and position of the best cluster\n\t\tfor p in range(nb_clusters):\n\t\t\tsum_pt = 0\n\t\t\tfor j in range(dim):\n\t\t\t\tdif = (data[i][j]-(clusters[p].l_coord[j]))\n\t\t\t\tdif *= dif\n\t\t\t\tsum_pt += dif\n\t\t\tgap = math.sqrt(sum_pt)\n\t\t\tif (gap < min_gap):\n\t\t\t\tmin_gap = gap #current minimum gap\n\t\t\t\tpos_c = p #current best cluster\n\t\tclusters[pos_c].dataP.append(i) #append the point to its cluster\n\n\ndef mean(clusters, data):\n\t\"\"\"\n\tcompute the mean of each cluster\n\tclusters : list with all clusters\n\tdata : matrix with all the data points\n\treturn matrix with the mean of each cluster\n\t\"\"\"\n\taverageList = [] #average for one cluster\n\taverageMatrix = [] #list of averageList\n\tfor cluster in clusters:\n\t\tdataP = cluster.dataP #list with the positions\n\t\tif dataP != []:\n\t\t\tsumList = [] #sum of the points of one cluster\n\t\t\tnb_pts = len(dataP)\n\t\t\tfor pos in dataP:\n\t\t\t\tsumList = __sumL(sumList, data[pos])\n\t\t\taverageList = [e/nb_pts for e in sumList]\n\t\t\taverageMatrix.append(averageList)\n\t\telse:\n\t\t\taverageMatrix.append(None)\n\treturn averageMatrix\n\ndef getFinalData(clusters, data):\n\t\"\"\"\n\tget the data that have been clustered\n\treturn : 3 dimensional matrix\n\t\"\"\"\n\tdataMatrix = []\n\tfor cluster in clusters:\n\t\tm = len(cluster.dataP)\n\t\tL = []\n\t\tfor i in range(m):\n\t\t\tL.append(data[cluster.dataP[i]])\n\t\tdataMatrix.append(L)\n\n\treturn dataMatrix\n","sub_path":"src/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239085839","text":"from sys import argv\nfrom collections import defaultdict\n\n\ndef train():\n\n    transition = defaultdict(float)\n    emission = defaultdict(float)\n    context = defaultdict(float)\n\n\n    # NOTE : calculate frequency\n    for line in open(fpath):\n        prev = '<s>'\n        context[prev] += 1\n\n        wordtags = line.split()\n\n        for wordtag in wordtags:\n            word, tag = wordtag.split('_')\n\n            # tag bigram: previous tag -> tag\n            transition['%s %s' % (prev, tag)] += 1\n            context[tag] += 1\n\n            # pos -> word\n            emission['%s %s' % (tag, word)] += 1\n\n            prev = tag\n\n        transition['%s %s' % (prev, '</s>')] += 1\n\n    return (context, transition, emission)\n\n\n\nfpath = '../test/05-train-input.txt' if len(argv) > 1 else '../data/wiki-en-train.norm_pos'\ncontext, transition, emission = train()\n\nfor key, value in sorted(transition.items()):\n    prev, word = key.split()\n    print('T %s %6f' % (key, value / context[prev]))\n\nfor key, value in sorted(emission.items()):\n    prev, tag = key.split()\n    print('E %s %6f' % (key, value / context[prev]))\n","sub_path":"src/05-train-hmm.py","file_name":"05-train-hmm.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"513921077","text":"from sympy import symbols, cos, sin, tan, acos, asin, atan, sqrt, pi, log, ln\nfrom sympy.parsing.sympy_parser import parse_expr\nfrom sympy import simplify\nfrom domain_evaluator import evaluate_domain\n\nx = symbols('x')\n\n#make sure to add brackets to inner functions to keep 
order of operations\n#brackets may not be required\n#Maybe call the domain checker because most composite questions ask for the domain\n#Might not need to call simplify. It looks like function.subs() simplifies\ndef composites_2(f, g):\n    composite = f.subs({x:g})\n    print(composite)\n    evaluate_domain(composite)\n\n\ndef composites_3(f, g, h):\n    composite = g.subs({x:h})\n    composite = f.subs({x:composite})\n    print(composite)\n    evaluate_domain(composite)\n    \n\ndef test_run():\n    number = input(\"Input number of functions: \")\n    number = int(number)\n    print(\"Input order is f, g for f o g = f(g(x)) or f, g, h for f o g o h or f(g(h(x)))\")\n\n    if (number == 2):\n        f = input(\"Enter function 1: \")\n        f = parse_expr(f)\n        g = input(\"Enter function 2: \")\n        g = parse_expr(g)\n        composites_2(f, g)\n    elif (number == 3):\n        f = input(\"Enter function 1: \")\n        f = parse_expr(f)\n        g = input(\"Enter function 2: \")\n        g = parse_expr(g)\n        h = input(\"Enter function 3: \")\n        h = parse_expr(h)\n        composites_3(f, g, h)\n\n    \n","sub_path":"find_composite.py","file_name":"find_composite.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288791179","text":"\nfrom django.conf.urls import patterns, url\nfrom django.contrib.auth.decorators import login_required, permission_required, user_passes_test\n\nfrom ikwen.cashout.views import Payments, manage_payment_address, request_cash_out\n\nfrom daraja.views import Home, RegisteredCompanyList, DeployCloud, ChangeProfile, Dashboard, CompanyList, \\\n    SuccessfulDeployment, ViewProfile, login_router, DaraList, DaraRequestList, Configuration, InviteDara\n\nurlpatterns = patterns(\n    '',\n    url(r'^$', Home.as_view(), name='home'),\n    url(r'^for-businesses$', Home.as_view(), name='for_businesses'),\n    url(r'^invitation/(?P[-\\w]+)$', InviteDara.as_view(), name='invite_dara'),\n    url(r'^companies$', RegisteredCompanyList.as_view(), name='registered_company_list'),\n    url(r'^deploy$', login_required(DeployCloud.as_view()), name='deploy_cloud'),\n    url(r'^successfulDeployment/(?P[-\\w]+)$', login_required(SuccessfulDeployment.as_view()), name='successful_deployment'),\n    url(r'^profile/(?P[-\\w]+)/$', ViewProfile.as_view(), name='view_profile'),\n\n    url(r'^configuration$', permission_required('daraja.ik_manage_daraja')(Configuration.as_view()), name='configuration'),\n    url(r'^daraRequestList$', permission_required('daraja.ik_manage_daraja')(DaraRequestList.as_view()), name='dara_request_list'),\n    url(r'^daraList$', permission_required('daraja.ik_manage_daraja')(DaraList.as_view()), name='dara_list'),\n\n    url(r'^login_router$', login_router, name='login_router'),\n    url(r'^dashboard/$', login_required(Dashboard.as_view()), name='dashboard'),\n    url(r'^profile/$', login_required(ChangeProfile.as_view()), name='change_profile'),\n    url(r'^companies/$', login_required(CompanyList.as_view()), name='company_list'),\n    url(r'^wallet/$', login_required(Payments.as_view()), name='wallet'),\n    url(r'^manage_payment_address/$', manage_payment_address, name='manage_payment_address'),\n    url(r'^request_cash_out/$', request_cash_out, name='request_cash_out'),\n)\n","sub_path":"daraja/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593523040","text":"import sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom flask import Flask\nfrom flask_sqlalchemy import 
SQLAlchemy\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nCORS(app)\ndb = SQLAlchemy(app)\napi = Api(app)\n\nfrom models import *\n\nclass TrackListByGenre(Resource):\n    def get(self, genre_id):\n        tracks = db.session.query(Track).filter(Track.genreid == genre_id)\n        dict_tracks = []\n        for track in tracks:\n            dict = {\n                'id': track.trackid,\n                'name': track.name,\n                'composer': track.composer\n            }\n            dict_tracks.append(dict)\n        return dict_tracks, 200\n\nclass CreateTrack(Resource):\n    parser = reqparse.RequestParser()\n    parser.add_argument('name',\n        type=str,\n        required=True,\n        help=\"this field can't be blank\"\n    )\n    parser.add_argument('composer',\n        type=str,\n        required=True,\n        help=\"this field can't be blank\"\n    )\n    parser.add_argument('genreid',\n        type=int,\n        required=True,\n        help=\"this field can't be blank\"\n    )\n\n    def post(self):\n        data = CreateTrack.parser.parse_args()\n        name = data['name']\n        composer = data['composer']\n        genreid = data['genreid']\n        new_track = Track(name, composer, genreid)\n        db.session.add(new_track)\n        db.session.commit()\n        created_track = Track.query.get(new_track.trackid)\n        return { \n            'id': created_track.trackid,\n            'name': created_track.name,\n            'composer': created_track.composer\n        }, 201\n\nclass GenreList(Resource):\n    def get(self):\n        genres = Genre.query.all()\n        dict_genres = []\n        for genre in genres:\n            dict = {\n                'id': genre.genreid,\n                'name': genre.name,\n            }\n            dict_genres.append(dict)\n        return dict_genres, 200\n\n\n\napi.add_resource(TrackListByGenre, '/tracks/<int:genre_id>')\napi.add_resource(CreateTrack, '/track')\napi.add_resource(GenreList, '/genres')\n\nif __name__ == '__main__':\n    app.run()\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589046810","text":"import numpy as np\nfrom eval import p_r_f\nfrom env import Env\nfrom constant import window_width, acoustics_dim, novelty_dim, homo_dim, dura_dim, action_dim, ZDIM, steps, batch_size, nb_epoch\nimport torch\nimport torch.optim as optim\n\nfrom model import BC_dagger\nfrom utils import FocalLoss\nfrom DataPool import DataPool\n#import os\n\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0, 1'\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n #dagger\n\ndef dagger(train_songs, bc_dagger, data_pool, optimizer_bc, focal_loss):\n    for idx, audio_name in enumerate(train_songs):\n        dagger_itr = 1\n        for itr in range(dagger_itr):\n            acoustics_list = []\n            novelty_list = []\n            homo_list = []\n            dura_list = []\n            action_list = []\n            env = Env(audio_name, window_width)\n            ob = env.reset()\n            for p in bc_dagger.parameters():\n                p.requires_grad = False\n            i = 0\n            done = False\n            while True:\n                states = [torch.Tensor(ob[0]), torch.Tensor(ob[1]), torch.Tensor(ob[2]), torch.Tensor(ob[3])]\n                for k in range(len(states)):\n                    states[k] = states[k].to(device)\n                act = bc_dagger(states)\n                act = torch.argmax(act, dim=-1)\n                act_teacher = env.get_teacher_act()\n                \n                acoustics_list.append(ob[0])\n                novelty_list.append(ob[1])\n                homo_list.append(ob[2])\n                dura_list.append(ob[3])\n                action_list.append(act_teacher)\n                if done:\n                    break\n                ob, reward, done, _ = env.step(act)\n                i += 1\n            print('filename: {} Episode done {}rd'.format(audio_name, idx), itr, i, reward)\n            #output_file.write('Number of 
Steps: %02d\\t Reward: %0.04f\\n'%(i, reward))\n\n            data_pool.collect_data((acoustics_list, novelty_list, homo_list, dura_list, action_list))\n\n    for p in bc_dagger.parameters():\n        p.requires_grad = True\n    train_set = data_pool.get_data(batch_size)\n    states, actions, done = train_set.__next__()\n    # if use_cuda:\n    #     states = states.cuda(gpu)\n    #     actions = actions.cuda(gpu)\n    while True:\n        for i in range(len(states)):\n            states[i] = states[i].to(device)\n        actions = actions.to(device)\n        bc_dagger.zero_grad()\n        out = bc_dagger(states)\n        loss = focal_loss(out, actions)\n        optimizer_bc.zero_grad()\n        loss.backward()\n        optimizer_bc.step()\n        if done:\n            break\n        states, actions, done = train_set.__next__()\n    \n\n    ##training\ndef train(bc_dagger, data_pool, optimizer_bc, train_songs, test_songs, focal_loss):\n    train_set = data_pool.get_data(batch_size)\n    for epoch in range(10000):\n        states, actions, done = train_set.__next__()\n        while not done:\n            for i in range(len(states)):\n                states[i] = states[i].cuda()\n            actions = actions.cuda()\n            \n            bc_dagger.zero_grad()\n            out = bc_dagger(states)\n            loss = focal_loss(out, actions)\n            optimizer_bc.zero_grad()\n            loss.backward()\n            optimizer_bc.step()\n            states, actions, done = train_set.__next__()\n        # if epoch % 10 == 0 and epoch != 0:\n        #     print('epoch:{}'.format(epoch))\n        if epoch%1000 == 0 and epoch != 0:\n            #state = {'net':bc_dagger.state_dict(), 'optimizer':optimizer_bc.state_dict(), 'epoch':epoch}\n            torch.save(bc_dagger.state_dict(), 'models/epoch{}.pth'.format(epoch))\n            test(train_songs, 'models/epoch{}.pth'.format(epoch))\n            test(test_songs, 'models/epoch{}.pth'.format(epoch))\n        else:\n            pass\n\ndef test(test_songs, filepath=None):\n    bc_dagger = BC_dagger(z_dim=ZDIM).to(device)\n    bc_dagger = torch.nn.DataParallel(bc_dagger, device_ids=[0, 1])\n    #cudnn.benchmark = True\n    env = Env(np.random.choice(test_songs, replace=False))\n    model_para = torch.load(filepath)\n    bc_dagger.load_state_dict(model_para)\n    bc_dagger.eval()\n    ob = env.reset()\n    done = False\n    reward = 0\n    while True:\n        states= [torch.Tensor(ob[0]), torch.Tensor(ob[1]), torch.Tensor(ob[2]), torch.Tensor(ob[3])]\n        for i in range(len(states)):\n            states[i] = states[i].to(device)\n        act = bc_dagger(states)\n        act = torch.argmax(act, dim=-1)\n        if done:\n            #print('epoch:{}, reward:{}, squence:{}'.format(filepath, reward, env.pre))\n            print('epoch:{}, f_score:{}'.format(filepath, reward))\n            #print(np.where(env.pre==1)[0], np.where(env.gt==1)[0])\n            break\n        ob, reward, done, _ = env.step(act)\n\n\ndef main():\n\n    import glob, os, random, json\n\n    dirs = glob.glob(\"data/*.mp3\")\n    file_names = []\n    for dir_ in dirs:\n        assert len(os.path.split(dir_)[1].split(\".\")) == 2\n        file_names.append(os.path.split(dir_)[1].split(\".\")[0])\n    \n    random.shuffle(file_names)\n    threshold = 10\n    #train_songs = ['01_-_Magical_Mystery_Tour']\n    train_songs = file_names[:threshold]\n    test_songs = file_names[threshold:]\n    with open('train_test_set.json', 'w') as f:\n        json.dump({'train_set':train_songs, 'test_set':test_songs}, f, indent=2)\n\n    '''\n    state-action pool\n    '''\n    \n    data_pool = DataPool(acoustics_dim, novelty_dim, homo_dim, dura_dim, action_dim)\n\n    '''\n    model define\n    '''\n\n    bc_dagger = BC_dagger(z_dim=ZDIM).to(device)\n    bc_dagger = torch.nn.DataParallel(bc_dagger, device_ids=[0, 1])\n    focal_loss = FocalLoss(class_num = 2, alpha=torch.tensor([[0.25],[1]]))\n    optimizer_bc = optim.Adam(bc_dagger.parameters(), lr=1e-4, betas=(0.5, 0.9))\n\n    '''\n    collect expert action-state\n    '''\n\n    for idx, audio_name in 
enumerate(train_songs):\n        acoustics_list = []\n        novelty_list = []\n        homo_list = []\n        dura_list = []\n        action_list = []\n        #print('Collecting {} data... {}rd'.format(audio_name, idx))\n        env = Env(audio_name, window_width)\n        ob = env.reset()\n\n        done = False\n        while True:\n            act = env.get_teacher_act()\n            acoustics_list.append(ob[0])\n            novelty_list.append(ob[1])\n            homo_list.append(ob[2])\n            dura_list.append(ob[3])\n            action_list.append(act)\n            if done:\n                break\n            ob, _, done, _ = env.step(act)\n\n        #print('Packing data into arrays...')\n        data_pool.collect_data((acoustics_list, novelty_list, homo_list, dura_list, action_list))\n\n\n    # if use_cuda:\n    #     bc_dagger = bc_dagger.cuda(gpu) \n    for p in bc_dagger.parameters():\n        p.requires_grad = True\n\n    train_set = data_pool.get_data(batch_size)\n    states, actions, done = train_set.__next__()\n    while True:\n        for i in range(len(states)):\n            states[i] = states[i].to(device)\n        actions = actions.to(device)\n\n        bc_dagger.zero_grad()\n        out = bc_dagger(states)\n        loss = focal_loss(out, actions)\n        optimizer_bc.zero_grad()\n        loss.backward()\n        optimizer_bc.step()\n        if done:\n            break\n        states, actions, done = train_set.__next__()\n\n    while True:\n        dagger(train_songs, bc_dagger, data_pool, optimizer_bc, focal_loss)\n        train(bc_dagger, data_pool, optimizer_bc, train_songs, test_songs, focal_loss)\n\nif __name__ == '__main__':\n    # audio_name = '01_-_A_Hard_Day\\'s_Night'\n    # bc_dagger = BC_dagger(z_dim=ZDIM)\n    # env = Env(audio_name, window_width)\n    # for i in range(100):\n    #     test(bc_dagger, env, 'epoch{}.pth'.format(i*1000))\n    main()\n    \n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"435038650","text":"class rectangle():\r\n    def __init__(self,width,length):\r\n        self.width=width\r\n        self.length=length\r\n    def area(self):\r\n        return self.width*self.length\r\n\r\nl=int(input(\"Enter length of rectangle 1: \"))\r\nw=int(input(\"Enter width of rectangle 1: \"))\r\nobj1=rectangle(l,w)\r\narea1=obj1.area()\r\nl=int(input(\"Enter length of rectangle 2: \"))\r\nw=int(input(\"Enter width of rectangle 2: \"))\r\nobj2=rectangle(l,w)\r\narea2=obj2.area()\r\n\r\nprint(\"Area of rectangle 1:\",area1)\r\nprint(\"Area of rectangle 2:\",area2)\r\n\r\nif area1 > area2:\r\n    print(\"Area of the 1st rectangle is greater\")\r\nelif area1==area2:\r\n    print(\"Area of both the rectangles is equal\")\r\nelse:\r\n    print(\"Area of the 2nd rectangle is greater\")\r\n\r\nprint()\r\n\r\n","sub_path":"rectangle/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215608198","text":"from project1.help_function2 import *\nstr=input(\"enter string\")\nprint(return_str_whithout_a_or_A(str))\n# for i in range (3):\n#     print(return_reverse(str),end=' ')\n# return_first_and_last_char (str)\n# num=int(input(\"enter num you want to check if is prime\"))\n# print(return_if_num_is_prime(num))\n# num1=float(input(\"enter num you want to round\"))\n# print(return_rounded_num(num1))\n# age=int(input(\"enter age\"))\n# city=input(\"enter city\")\n# price=int (input(\"enter full price\"))\n# print(price-return_discount(city,age)*price)\n# upper=int(input(\"enter upper border\"))\n# lower=int(input(\"enter lower border\"))\n# num=int(input(\"enter number of random num you want\"))\n# print(fill_random_between_a_and_b(upper,lower,num))\n# 
num=int(input(\"enter positive num you want reverse\"))\n# print(num+return_reverse_num (num))\nnum=int(input(\"enter num you want reverse\"))\nprint(return_length_of_num (num))","sub_path":"project1/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"350142060","text":"\n\nfrom xai.brain.wordbase.nouns._flogging import _FLOGGING\n\n#calss header\nclass _FLOGGINGS(_FLOGGING, ):\n\tdef __init__(self,): \n\t\t_FLOGGING.__init__(self)\n\t\tself.name = \"FLOGGINGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"flogging\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_floggings.py","file_name":"_floggings.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"545626118","text":"import requests\nimport codecs\nfrom bs4 import BeautifulSoup\nimport json\nimport time\nimport random\nimport string\n\nURL=\"http://www.shiwurenling.com/thread-12174-1-1.html\" #要爬取的地址\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36'\n}#user-agent,模仿浏览器,防止被目标网站反爬\ndef create_random_date():\n a1=(2016,1,1,0,0,0,0,0,0)\n a2=(2017,12,28,23,59,59,0,0,0)\n start=time.mktime(a1)\n end=time.mktime(a2)\n\n t=random.randint(start,end)\n date_touple=time.localtime(t)\n date=time.strftime(\"%Y-%m-%d\",date_touple)\n return time.mktime(time.strptime(date, '%Y-%m-%d'))\n \ndef handle_data(data):\n random_time = create_random_date()\n example_data = {\n \"__v\": 0,\n \"_id\": {\n \"$oid\": ''.join(random.sample(string.ascii_letters + string.digits, 24))\n },\n \"contact_name\": data[0],\n \"createdAt\": {\n \"$date\": random_time\n },\n \"detail_area\": data[2],\n \"goods_description\": data[7],\n \"goods_images\": [\n data[6]\n ],\n \"goods_name\": data[1],\n \"goods_type\": data[1],\n \"latitude\": 0,\n \"location\": {\n \"long\": 0,\n \"lat\": 0\n },\n \"longitude\": 0,\n \"notice_type\": \"found\",\n \"occur_area_city\": \"\",\n \"occur_area_county\": \"\",\n \"occur_area_province\": \"\",\n \"occur_time\": random_time,\n \"reward\": 0,\n \"status\": \"close\",\n \"updatedAt\": {\n \"$date\": random_time\n },\n \"user_id\": ''.join(random.sample(string.ascii_letters + string.digits, 24))\n }\n print(example_data)\n print(json.dumps(example_data))\n return json.dumps(example_data, ensure_ascii=False)\n\ndef download_page(url):#下载页面\n data = requests.get(url,headers=HEADERS).content #请求页面,获取要爬取的页面内容\n return data\ndef special_parse_html(html):\n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'class':'bobo'})\n trs = table.find_all('tr')\n data = {}\n for tr in trs:\n tds = tr.find_all('td')\n key_value = [];\n for td in tds:\n text = td.getText()\n image = td.select('img')\n image_src = '';\n if len(image) > 0:\n for img in td.find_all('img'):\n image_src = img['src']\n key_value.append(image_src)\n else:\n key_value.append(text)\n data[key_value[0]] = key_value[1]\n new_data = []\n for item in data:\n new_data.append(data[item])\n # handle_data(new_data)\n next_page = soup.find('div', attrs={'class':'sxart'}).find('li').find('a')\n if next_page:\n return handle_data(new_data), next_page['href']\n return handle_data(new_data), None\n\ndef main():\n url = URL\n with codecs.open('data.json', 'wb', encoding='utf-8') as fp:\n while url:\n html = download_page(url)\n data,url = 
special_parse_html(html)\n fp.write(u'{data},\\n'.format(data=''.join(data)))\nif __name__=='__main__':\n main()","sub_path":"lost_found_data.py","file_name":"lost_found_data.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608585382","text":"# cx_freeze build script\n# Written with cx-Freeze==4.3.4\n\n# Outputs by default to build\\exe.win32-3.4\\\n# Note: will NOT delete anything in that directory\n# Modify path_platforms as required\n\n# Usage:\n# build_trawl_analyzer.py build\n\n\nimport sys\nimport os\nimport shutil\nimport subprocess\nfrom pathlib import Path\n\n# Useful library. http://click.pocoo.org/5/api/#click.confirm\nimport click\n\nfrom cx_Freeze import setup, Executable\nfrom buildzipper import buildzipper\n\nPYTHON_DIR = sys.exec_prefix\n#path_platforms = os.path.join(PYTHON_DIR, 'Lib\\site-packages\\PyQt5\\plugins\\platforms\\qwindows.dll')\npath_sqlite_dll = os.path.join(PYTHON_DIR, 'Scripts\\sqlite3.dll')\n\nPYQT5_DIR = os.path.join(PYTHON_DIR, 'lib\\site-packages\\PyQt5')\nincludes = ['PyQt5.Qt', 'PyQt5.QtNetwork', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtWidgets', 'PyQt5.QtMultimedia', 'PyQt5.QtChart']\n\nos.environ[\"TCL_LIBRARY\"] = os.path.join(PYTHON_DIR, \"lib\", \"tcl8.6\")\nos.environ[\"TK_LIBRARY\"] = os.path.join(PYTHON_DIR, \"lib\", \"tk8.6\")\n\n# Compile the QML into the qrc.py file\nPYRCC_DIR = os.path.join(PYTHON_DIR, 'Scripts\\pyrcc5.exe')\nQRC_PATH = str(Path(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../qrc/trawl_analyzer.qrc')).resolve())\nQRCPY_PATH = str(Path(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../py/trawl/trawl_analyzer_qrc.py')).resolve())\nprint('\\npyrcc: ' + PYRCC_DIR + '\\nqrc: ' + QRC_PATH + '\\nqrcpy: ' + QRCPY_PATH + '\\n')\nsubprocess.check_output([PYRCC_DIR, QRC_PATH, '-o', QRCPY_PATH])\n\n\"\"\"\n\nNote for SCIPY. 
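# A self-contained sketch of the follow-the-next-link crawl that the
# lost_found_data.py record above performs; the rel="next" selector here is a
# generic stand-in for that script's div.sxart lookup.
import requests
from bs4 import BeautifulSoup

def crawl(start_url, max_pages=50):
    url, seen = start_url, 0
    while url and seen < max_pages:
        soup = BeautifulSoup(requests.get(url).content, 'html.parser')
        yield soup                           # caller extracts records from each page
        nxt = soup.find('a', rel='next')
        url = nxt['href'] if nxt else None   # stop when no next-page link remains
        seen += 1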
Had to modify the cx_freeze hooks.py file from (Lib\\site-packages\\cx_Freeze\\hooks.py):\n\nfinder.IncludePackage(\"scipy.lib\")\n\nto\n\nfinder.IncludePackage(\"scipy._lib\")\n\nalso added scipy to the list of packages to include, but dropped it from the includefiles\n\"\"\"\n\n\n\nincludefiles = [\n# path_platforms,\n path_sqlite_dll,\n ('../data/trawl_analyzer.db', 'data/trawl_analyzer.db'), # explicit source, destination path\n ('../resources/', 'resources'),\n ('../py/trawl_analyzer', 'py/trawl_analyzer'),\n ('../py/common', 'py/common'),\n ('../qml/trawl_analyzer', 'qml/trawl_analyzer'),\n ('../qml/common', 'qml/common'),\n (os.path.join(PYTHON_DIR, 'Scripts', 'tcl86t.dll'), 'tcl86t.dll'),\n (os.path.join(PYTHON_DIR, 'Scripts', 'tk86t.dll'), 'tk86t.dll')\n ]\nexcludes = []\npackages = ['os', 'apsw', 'asyncio', 'peewee', 'playhouse', 'cProfile', 'timeit', 'numpy',\n 'fractions', 'dateutil', 'winsound', 'serial', 'arrow', 'matplotlib', \n 'seaborn', 'pandas', 'tkinter', 'geographiclib']\npath = []\n\n# Dependencies are automatically detected, but it might need fine tuning.\nbuild_exe_options = {\n 'includes': includes, \n 'include_files': includefiles,\n 'excludes': excludes, \n 'packages': packages, \n 'path': path,\n 'build_exe': 'build/exe.win64-3.6/trawl_analyzer'\n}\n\n# GUI applications require a different base on Windows (the default is for a\n# console application).\nbase = None\nexe = None\nif sys.platform == 'win32':\n exe = Executable(\n script='../main_trawl_analyzer.py',\n initScript=None,\n # base='Console', # useful for debugging\n base='Win32GUI', # use this to hide console output (releases)\n targetName='trawl_analyzer.exe'\n )\n\n# Prompt to nuke existing directory\ndeployed_path = r'build\\exe.win64-3.6\\trawl_analyzer'\nif os.path.exists(deployed_path):\n\tshutil.rmtree(deployed_path, ignore_errors=True)\n\tprint('Deleted ' + deployed_path)\n\nsetup( \n name='Trawl Analyzer',\n version='0.1',\n author='FRAM Data',\n description='Trawl Analyzer',\n options={'build_exe': build_exe_options},\n executables=[exe]\n)\n\n# Zip up our creation\nbuildzipper.create_zip_archive(base_folder=deployed_path, filedesc='trawl_analyzer', folders_to_zip=['trawl_analyzer/trawl_analyzer.exe', 'trawl_analyzer/py', 'trawl_analyzer/qml'])\n","sub_path":"build/build_trawl_analyzer_changes_only.py","file_name":"build_trawl_analyzer_changes_only.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"384082582","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport testscenarios\nimport testtools\n\nimport oslo_messaging\nfrom oslo_messaging._drivers.zmq_driver import zmq_address\nfrom oslo_messaging._drivers.zmq_driver import zmq_async\nfrom oslo_messaging._drivers.zmq_driver import zmq_names\nfrom oslo_messaging.tests import utils as test_utils\n\n\nzmq = zmq_async.import_zmq()\n\nload_tests = testscenarios.load_tests_apply_scenarios\n\n\nclass TestZmqAddress(test_utils.BaseTestCase):\n\n scenarios = [\n ('router', {'listener_type': zmq_names.socket_type_str(zmq.ROUTER)}),\n ('dealer', {'listener_type': zmq_names.socket_type_str(zmq.DEALER)})\n ]\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def test_target_to_key_topic_only(self):\n target = oslo_messaging.Target(topic='topic')\n key = zmq_address.target_to_key(target, self.listener_type)\n self.assertEqual(self.listener_type + '/topic', key)\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def test_target_to_key_topic_server_round_robin(self):\n target = oslo_messaging.Target(topic='topic', server='server')\n key = zmq_address.target_to_key(target, self.listener_type)\n self.assertEqual(self.listener_type + '/topic/server', key)\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def test_target_to_key_topic_fanout(self):\n target = oslo_messaging.Target(topic='topic', fanout=True)\n key = zmq_address.target_to_key(target, self.listener_type)\n self.assertEqual(self.listener_type + '/topic', key)\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def test_target_to_key_topic_server_fanout(self):\n target = oslo_messaging.Target(topic='topic', server='server',\n fanout=True)\n key = zmq_address.target_to_key(target, self.listener_type)\n self.assertEqual(self.listener_type + '/topic', key)\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def test_target_to_key_topic_server_fanout_no_prefix(self):\n target = oslo_messaging.Target(topic='topic', server='server',\n fanout=True)\n key = zmq_address.target_to_key(target)\n self.assertEqual('topic', key)\n","sub_path":"filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/oslo_messaging/tests/drivers/zmq/test_zmq_address.py","file_name":"test_zmq_address.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"481050987","text":"# Create a file with the User class, including the __init__ and make_deposit methods\nclass User:\n def __init__(self, username, email_address):\n self.name = username\t\t\t\n self.email = email_address\t\t\n self.account_balance = 0.00\t\n\n def make_deposit(self,amount):\n self.account_balance += amount\n return self\n\n\n# Add a make_withdrawal method to the User class\n def make_withdraw(self, amount):\n self.account_balance -= amount\n return self\n\n# Add a display_user_balance method to the User class\n def display_user_balance(self):\n print('User:' + self.name + ', Balance: $' + str(self.account_balance))\n return self\n\n \n def transfer_money(self, other_user, amount):\n self.account_balance -= amount\n other_user.account_balance += amount\n return self\n\n# Create 3 instances of the User class\nCap_America= User (\"Steve Rogers\", \"srogers@avengers.com\")\nironman = User (\"Tony Starks\", \"tstarks@starksenterprises.com\")\nBlack_Widow = User (\"Natasha Romanova\", \"nromanova@avengers.com\")\n\n# Have the 
first user make 3 deposits and 1 withdrawal and then display their balance\nCap_America.make_deposit(500).make_deposit(200).make_deposit(200).make_withdraw(150).display_user_balance()\n\n# Have the second user make 2 deposits and 2 withdrawals and then display their balance\nironman.make_deposit(500).make_deposit(600).make_withdraw(200).make_withdraw(300).display_user_balance()\n\n# Have the third user make 1 deposit and 3 withdrawals and then display their balance\nBlack_Widow.make_deposit(900).make_withdraw(50).make_withdraw(100).make_withdraw(100).display_user_balance()\n\n# BONUS: Add a transfer_money method; have the first user transfer money to the third user and then print both users' balances\nCap_America.transfer_money(Black_Widow, 150).display_user_balance()\nBlack_Widow.display_user_balance()","sub_path":"channingMethod.py","file_name":"channingMethod.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"628420711","text":"from flask import Flask, request\nimport json\n\napp = Flask(__name__)\n\nfrom apiHandler import apiCallReturnJSON\nfrom tokenHandler import getBotToken\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef get_click():\n\n    print(\"Here\")\n    return \"Here\"\n\n\n@app.route(\"/\", methods=[\"POST\"])\ndef get_login():\n\n    print(\"Here instead\")\n    print(request.method)\n    hook = request.json\n    print(json.dumps(hook, indent=4, sort_keys=True))\n\n    message = getMsgInfo(hook[\"data\"][\"id\"])\n\n    print(\"Bot Said: {}\".format(message[\"text\"]))\n\n    return \"Hello\"\n\n    # return redirect(\"{}?continue_url={}\".format(success_url,base_grant_url), code=302)\n\n\n@app.after_request\ndef add_header(response):\n    response.cache_control.max_age = 300\n    return response\n\n\ndef getMsgInfo(message_id):\n    \n    api_url = \"messages/{}\".format(message_id)\n    payload = {}\n\n    return apiCallReturnJSON(getBotToken(), \"GET\", api_url, payload)\n\n\nif __name__ == \"__main__\":\n    # Hosted on localhost port 8000 - Remember to run \"ngrok http 8000\"\n    app.run(host=\"0.0.0.0\", port=8000, debug=False)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"302256926","text":"__author__ = 'Ciddhi'\n\nfrom DatabaseManager import *\nfrom sqlalchemy import create_engine\nimport GlobalVariables as gv\nfrom decimal import Decimal\n\nclass DBUtils:\n\n    databaseObject = None\n    alpha = None\n    gamma = None\n    beta = None\n    individualFactor = None\n    zeroRange = None\n    greedyLevel = None\n    individualMaxAsset = None\n    latestIndividualTable = None\n    trainingTradesheetTable = None\n    trainingAssetTable = None\n    rankingTable = None\n    dailyAssetTable = None\n    newTradesheetTable = None\n    assetTable = None\n    qMatrixTable = None\n    reallocationTable = None\n    performanceTable = None\n\n    def __init__(self, alpha_local=0, gamma_local=0, beta_local=0, individualFactor_local=1, zeroRange_local=0, greedyLevel_local=0,\n                 latestIndividualTable_local=\"\", trainingTradesheetTable_local=\"\", trainingAssetTable_local=\"\", qMatrixTable_local=\"\",\n                 reallocationTable_local=\"\", assetTable_local=\"\", dailyAssetTable_local=\"\", newTradesheetTable_local=\"\"):\n        global alpha\n        global gamma\n        global beta\n        global individualFactor\n        global zeroRange\n        global greedyLevel\n        global individualMaxAsset\n        global latestIndividualTable\n        global trainingAssetTable\n        global trainingTradesheetTable\n        global rankingTable\n
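# The channingMethod.py exercise above works because every mutator returns
# self; a minimal self-contained illustration of that fluent-chaining pattern:
class Account:
    def __init__(self, balance=0.0):
        self.balance = balance
    def deposit(self, amount):
        self.balance += amount
        return self   # returning self is what makes a.deposit(...).withdraw(...) legal
    def withdraw(self, amount):
        self.balance -= amount
        return self

assert Account().deposit(100).withdraw(40).balance == 60.0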
global dailyAssetTable\n global newTradesheetTable\n global assetTable\n global qMatrixTable\n global reallocationTable\n global performanceTable\n\n alpha = alpha_local\n gamma = gamma_local\n beta = beta_local\n individualFactor = individualFactor_local\n zeroRange = zeroRange_local\n greedyLevel = greedyLevel_local\n individualMaxAsset = gv.maxTotalAsset / individualFactor\n latestIndividualTable = latestIndividualTable_local\n trainingTradesheetTable = trainingTradesheetTable_local\n trainingAssetTable = trainingAssetTable_local\n rankingTable = gv.rankingTableBase\n dailyAssetTable = dailyAssetTable_local\n newTradesheetTable = newTradesheetTable_local\n assetTable = assetTable_local\n qMatrixTable = qMatrixTable_local\n reallocationTable = reallocationTable_local\n performanceTable = gv.performanceTableBase\n\n def dbConnect (self):\n db_username = gv.userName\n db_password = gv.password\n db_host = gv.dbHost\n db_name = gv.databaseName\n db_port = gv.dbPort\n db_connector = gv.dbConnector\n global databaseObject\n databaseObject = DatabaseManager(db_connector, db_username, db_password,db_host,db_port, db_name)\n databaseObject.Connect()\n\n def dbQuery (self, query):\n global databaseObject\n return databaseObject.Execute(query)\n\n def dbClose (self):\n global databaseObject\n databaseObject.Close()\n\n # Function to check if given day is a trading day\n def checkTradingDay(self, date):\n global databaseObject\n queryCheck = \"SELECT EXISTS (SELECT 1 FROM old_tradesheet_data_table WHERE entry_date='\" + str(date) + \"'), 1\"\n return databaseObject.Execute(queryCheck)\n\n # Function to insert new trade in tradesheet\n def insertNewTrade(self, tradeId, individualId, tradeType, entryDate, entryTime, entryPrice, entryQty, exitDate, exitTime, exitPrice):\n global databaseObject\n global newTradesheetTable\n queryInsertTrade = \"INSERT INTO \" + newTradesheetTable + \\\n \" (trade_id, individual_id, trade_type, entry_date, entry_time, entry_price, entry_qty, exit_date, exit_time, exit_price)\" \\\n \" VALUES\" \\\n \" (\" + str(tradeId) + \", \" + str(individualId) + \", \" + str(tradeType) + \", '\" + str(entryDate) + \"', '\" + str(entryTime) +\\\n \"', \" + str(entryPrice) + \", \" + str(entryQty) + \", '\" + str(exitDate) + \"', '\" + str(exitTime) + \"', \" + str(exitPrice) + \")\"\n #print(queryInsertTrade)\n databaseObject.Execute(queryInsertTrade)\n\n # Function to insert new trade in training_tradesheet\n def insertTrainingNewTrade(self, tradeId, individualId, tradeType, entryDate, entryTime, entryPrice, entryQty, exitDate, exitTime, exitPrice):\n global databaseObject\n global trainingTradesheetTable\n queryInsertTrade = \"INSERT INTO \" + trainingTradesheetTable + \\\n \" (trade_id, individual_id, trade_type, entry_date, entry_time, entry_price, entry_qty, exit_date, exit_time, exit_price)\" \\\n \" VALUES\" \\\n \" (\" + str(tradeId) + \", \" + str(individualId) + \", \" + str(tradeType) + \", '\" + str(entryDate) + \"', '\" + str(entryTime) +\\\n \"', \" + str(entryPrice) + \", \" + str(entryQty) + \", '\" + str(exitDate) + \"', '\" + str(exitTime) + \"', \" + str(exitPrice) + \")\"\n #print(queryInsertTrade)\n databaseObject.Execute(queryInsertTrade)\n\n # Function to get individuals which have active trades in a given interval of time on a given day\n def getIndividuals (self, startDate, startTime, endDate, endTime):\n global databaseObject\n global newTradesheetTable\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM \" + newTradesheetTable + \" WHERE 
entry_time<'\" + str(endTime) + \\\n \"' AND exit_time>'\" + str(startTime) + \"' AND entry_date='\" + str(startDate) + \"'\"\n return databaseObject.Execute(queryIndividuals)\n\n # Function to get individuals which have active trades in a given interval of time on a given day during training\n def getTrainingIndividuals (self, startDate, startTime, endDate, endTime):\n global databaseObject\n global trainingTradesheetTable\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM \" + trainingTradesheetTable + \" WHERE entry_time<'\" + str(endTime) + \\\n \"' AND exit_time>'\" + str(startTime) + \"' AND entry_date='\" + str(startDate) + \"'\"\n return databaseObject.Execute(queryIndividuals)\n\n # Function to get individuals from original tradesheet in a given interval of dates\n def getRefIndividuals(self, startDate, endDate):\n global databaseObject\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"'\"\n return databaseObject.Execute(queryIndividuals)\n\n # Function to get individuals from original tradesheet in a given interval of dates\n def getAllRefIndividuals(self):\n global databaseObject\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM old_tradesheet_data_table\"\n return databaseObject.Execute(queryIndividuals)\n\n # Function to get all individuals from original tradesheet\n def getAllIndividuals(self):\n global databaseObject\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM old_tradesheet_data_table\"\n return databaseObject.Execute(queryIndividuals)\n\n # Function to get trades that are active in a given time interval\n def getTrades (self, startDate, startTime, endDate, endTime):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT trade_id, individual_id, trade_type, entry_date, entry_time, entry_price, entry_qty, exit_date, exit_time \" \\\n \"FROM \" + newTradesheetTable + \" WHERE entry_time<='\" + str(endTime) + \"' AND exit_time>='\" + str(startTime) + \\\n \"' AND entry_date='\" + str(startDate) + \"'\"\n return databaseObject.Execute(queryTrades)\n\n # Function to get new trades from original tradesheet\n def getRefDayTrades (self, date):\n global databaseObject\n queryTrades = \"SELECT * FROM old_tradesheet_data_table WHERE entry_date='\" + str(date) + \"'\"\n #print(queryTrades)\n return databaseObject.Execute(queryTrades)\n\n # Function to add or update mtm for an individual in daily mtm table\n def insertOrUpdateMTM(self, individualId, date, mtm):\n global databaseObject\n queryCheck = \"SELECT EXISTS ( SELECT 1 FROM \" + gv.dailyMtmTableBase + \" WHERE individual_id=\"+ str(individualId) + \\\n \" AND mtm_date='\" + str(date) + \"' ), 1\"\n resultCheck = databaseObject.Execute(queryCheck)\n for check, dummy in resultCheck:\n if check==1:\n query = \"UPDATE \" + gv.dailyMtmTableBase + \" SET mtm=mtm+\" + str(mtm) + \" WHERE individual_id=\" + str(individualId) + \\\n \" AND mtm_date='\" + str(date) + \"'\"\n databaseObject.Execute(query)\n else:\n query = \"INSERT INTO \" + gv.dailyMtmTableBase + \\\n \" ( individual_id, mtm, mtm_date )\" \\\n \" VALUES\" \\\n \" ( \" + str(individualId) + \", \" + str(mtm) + \", '\" + str(date) + \"' )\"\n databaseObject.Execute(query)\n return\n\n # Function to return mtm for a given date\n def getDailyMTM(self, individualId, date):\n global databaseObject\n query = \"SELECT mtm, 1 FROM \" + gv.dailyMtmTableBase + \" WHERE mtm_date='\" + str(date) + 
\"' AND individual_id=\" + str(individualId)\n return databaseObject.Execute(query)\n\n # Function to get new trades from original tradesheet based on ranking\n def getRankedTradesOrdered (self, date, startTime, endTime, walkforward):\n global databaseObject\n global rankingTable\n queryTrades = \"SELECT t.* FROM old_tradesheet_data_table AS t JOIN \" + rankingTable + \" as r ON t.individual_id=r.individual_id\" \\\n \" WHERE t.entry_date='\" + str(date) + \"' AND t.entry_time<'\" + str(endTime) + \"' AND t.entry_time>='\" + str(startTime) + \\\n \"' AND r.ranking_walkforward_id=\" + str(walkforward) + \" ORDER BY t.entry_time, r.ranking\"\n #print(queryTrades)\n return databaseObject.Execute(queryTrades)\n\n # Function to get trades taken by an individual in an interval\n def getTradesIndividual(self, individualId, startDate, startTime, endDate, endTime):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT * FROM \" + newTradesheetTable + \" WHERE entry_date='\" + str(startDate) + \"' AND entry_time<='\" + str(endTime) + \\\n \"' AND exit_time>='\" + str(startTime) + \"' AND individual_id=\" + str(individualId)\n return databaseObject.Execute(queryTrades)\n\n # Function to get trades taken by an individual in an interval during training\n def getTrainingTradesIndividual(self, individualId, startDate, startTime, endDate, endTime):\n global databaseObject\n global trainingTradesheetTable\n queryTrades = \"SELECT * FROM \" + trainingTradesheetTable + \" WHERE entry_date='\" + str(startDate) + \"' AND entry_time<='\" + str(endTime) + \\\n \"' AND exit_time>='\" + str(startTime) + \"' AND individual_id=\" + str(individualId)\n return databaseObject.Execute(queryTrades)\n\n # Function to get trades that are to exit in a given interval\n def getTradesExit(self, date, startTime, endTime):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT individual_id, trade_type, entry_qty, entry_price, exit_price FROM \" + newTradesheetTable + \" WHERE exit_date='\" + str(date) + \\\n \"' AND exit_time>='\" + str(startTime) + \"' AND exit_time<'\" + str(endTime) + \"'\"\n return databaseObject.Execute(queryTrades)\n\n # Function to get trades that are to exit at day end\n def getTradesExitEnd(self, date, startTime, endTime):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT individual_id, trade_type, entry_qty, entry_price, exit_price FROM \" + newTradesheetTable + \" WHERE exit_date='\" + str(date) + \\\n \"' AND exit_time>='\" + str(startTime) + \"'\"\n return databaseObject.Execute(queryTrades)\n\n # Function to get trades that are to exit in a given interval during training\n def getTrainingTradesExit(self, date, startTime, endTime):\n global databaseObject\n global trainingTradesheetTable\n queryTrades = \"SELECT individual_id, trade_type, entry_qty, entry_price, exit_price FROM \" + trainingTradesheetTable + \" WHERE exit_date='\" + str(date) + \\\n \"' AND exit_time>='\" + str(startTime) + \"' AND exit_time<'\" + str(endTime) + \"'\"\n return databaseObject.Execute(queryTrades)\n\n # Function to get trades that are to exit at day end during training\n def getTrainingTradesExitEnd(self, date, startTime, endTime):\n global databaseObject\n global trainingTradesheetTable\n queryTrades = \"SELECT individual_id, trade_type, entry_qty, entry_price, exit_price FROM \" + trainingTradesheetTable + \" WHERE exit_date='\" + str(date) + \\\n \"' AND exit_time>='\" + str(startTime) + \"'\"\n return databaseObject.Execute(queryTrades)\n\n # 
Function to get price series in a time range\n # Not being used currently\n def getPriceSeries (self, startDate, startTime, endDate, endTime):\n global databaseObject\n queryPriceSeries = \"SELECT time, price FROM price_series_table WHERE date='\" + str(startDate) + \"' AND time>='\" + str(startTime) + \\\n \"' AND time<='\" + str(endTime) + \"'\"\n return databaseObject.Execute(queryPriceSeries)\n\n # Function to get price from price series for a given date and time\n def getPrice(self, startDate, startTime):\n global databaseObject\n queryPrice = \"SELECT time, price FROM price_series_table WHERE date='\" + str(startDate) + \"' AND time='\" + str(startTime) + \"'\"\n return databaseObject.Execute(queryPrice)\n\n # Function to get Q Matrix of an individual\n def getQMatrix (self, individualId):\n global databaseObject\n global qMatrixTable\n queryQM = \"SELECT row_num, column_num, q_value FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryQM)\n\n # Function to insert / update Q matrix of an individual\n def updateQMatrix(self, individualId, qm):\n global databaseObject\n global qMatrixTable\n queryCheck = \"SELECT EXISTS (SELECT 1 FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \"), 1\"\n resultCheck = databaseObject.Execute(queryCheck)\n for check, dummy in resultCheck:\n if check==1:\n for i in range(0,3,1):\n for j in range(0,3,1):\n queryUpdate = \"UPDATE \" + qMatrixTable + \" SET q_value=\" + str(round(qm[i,j], 10)) + \" WHERE individual_id=\" + str(individualId) + \\\n \" AND row_num=\" + str(i) + \" AND column_num=\" + str(j)\n databaseObject.Execute(queryUpdate)\n else:\n for i in range(0,3,1):\n for j in range(0,3,1):\n queryInsert = \"INSERT INTO \" + qMatrixTable + \\\n \" (individual_id, row_num, column_num, q_value)\" \\\n \" VALUES \" \\\n \" (\" + str(individualId) + \", \" + str(i) + \", \" + str(j) + \", \" + str(round(qm[i,j], 10)) + \")\"\n databaseObject.Execute(queryInsert)\n\n # Function to insert individual entry in assetTable\n def addIndividualAsset (self, individualId, usedAsset):\n global databaseObject\n global assetTable\n global individualMaxAsset\n queryAddAsset = \"INSERT INTO \" + assetTable + \\\n \"(individual_id, total_asset, used_asset, free_asset)\" \\\n \"VALUES\" \\\n \"(\" + str(individualId) + \", \" + str(round(individualMaxAsset,4)) + \", \" + str(round(usedAsset,4)) + \", \" + str(round((individualMaxAsset-usedAsset),4)) + \")\"\n return databaseObject.Execute(queryAddAsset)\n\n # Function to insert individual entry in trainingAssetTable\n def addTrainingIndividualAsset (self, individualId, usedAsset):\n global databaseObject\n global trainingAssetTable\n global individualMaxAsset\n queryAddAsset = \"INSERT INTO \" + trainingAssetTable + \\\n \"(individual_id, total_asset, used_asset, free_asset)\" \\\n \"VALUES\" \\\n \"(\" + str(individualId) + \", \" + str(round(individualMaxAsset,4)) + \", \" + str(round(usedAsset,4)) + \", \" + str(round((individualMaxAsset-usedAsset),4)) + \")\"\n return databaseObject.Execute(queryAddAsset)\n\n # Function to check if an individual's entry exists in assetTable\n def checkIndividualAssetExists (self, individualId):\n global databaseObject\n global assetTable\n queryCheck = \"SELECT EXISTS (SELECT 1 FROM \" + assetTable + \" WHERE individual_id=\" + str(individualId) + \"), 0\"\n return databaseObject.Execute(queryCheck)\n\n # Function to check if an individual's entry exists in trainingAssetTable\n def 
checkTrainingIndividualAssetExists (self, individualId):\n global databaseObject\n global trainingAssetTable\n queryCheck = \"SELECT EXISTS (SELECT 1 FROM \" + trainingAssetTable + \" WHERE individual_id=\" + str(individualId) + \"), 0\"\n return databaseObject.Execute(queryCheck)\n\n # Function to update individual's asset\n def updateIndividualAsset(self, individualId, toBeUsedAsset):\n global databaseObject\n global assetTable\n queryOldAsset = \"SELECT total_asset, used_asset, free_asset FROM \" + assetTable + \" WHERE individual_id=\" + str(individualId)\n resultOldAsset = databaseObject.Execute(queryOldAsset)\n for totalAsset, usedAsset, freeAsset in resultOldAsset:\n newUsedAsset = float(usedAsset) + toBeUsedAsset\n newFreeAsset = float(freeAsset) - toBeUsedAsset\n queryUpdate = \"UPDATE \" + assetTable + \" SET used_asset=\" + str(round(newUsedAsset,4)) + \", free_asset=\" + str(round(newFreeAsset,4)) + \\\n \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryUpdate)\n\n # Function to update individual's asset during training\n def updateTrainingIndividualAsset(self, individualId, toBeUsedAsset):\n global databaseObject\n global trainingAssetTable\n queryOldAsset = \"SELECT total_asset, used_asset, free_asset FROM \" + trainingAssetTable + \" WHERE individual_id=\" + str(individualId)\n resultOldAsset = databaseObject.Execute(queryOldAsset)\n for totalAsset, usedAsset, freeAsset in resultOldAsset:\n newUsedAsset = float(usedAsset) + toBeUsedAsset\n newFreeAsset = float(freeAsset) - toBeUsedAsset\n queryUpdate = \"UPDATE \" + trainingAssetTable + \" SET used_asset=\" + str(round(newUsedAsset,4)) + \", free_asset=\" + str(round(newFreeAsset,4)) + \\\n \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryUpdate)\n\n # Function to get the asset being used by an individual at a given time\n # Not used currently\n def getUsedAsset (self, individualId, startDate, startTime, endDate, endTime):\n global databaseObject\n global newTradesheetTable\n queryUsedAsset = \"SELECT entry_qty*entry_price, 1 FROM \" + newTradesheetTable + \" WHERE individual_id=\" + str(individualId) + \\\n \" AND entry_date='\" + str(startDate) + \"' AND entry_time<='\" + str(endTime) + \"' AND exit_time>'\" + str(endTime) + \"'\"\n return databaseObject.Execute(queryUsedAsset)\n\n # Function to add individual's entry in reallocation table\n def addNewState(self, individualId, date, time, state):\n global databaseObject\n global reallocationTable\n queryNewState = \"INSERT INTO \" + reallocationTable + \\\n \" (individual_id, last_reallocation_date, last_reallocation_time, last_state)\" \\\n \" VALUES\" \\\n \" (\" + str(individualId) + \", '\" + str(date) + \"', '\" + str(time) + \"', \" + str(state) + \")\"\n return databaseObject.Execute(queryNewState)\n\n # Function to get last state for an individual\n def getLastState (self, individualId):\n global databaseObject\n global reallocationTable\n queryLastState = \"SELECT last_state, individual_id FROM \" + reallocationTable + \" WHERE individual_id=\" + str(individualId) + \\\n \" AND last_reallocation_date=(SELECT MAX(last_reallocation_date) FROM \" + reallocationTable + \" WHERE \" \\\n \"individual_id=\" + str(individualId) + \") AND last_reallocation_time=(SELECT MAX(last_reallocation_time) \" \\\n \"FROM \" + reallocationTable + \" WHERE individual_id=\" + str(individualId) + \" AND last_reallocation_date=\" \\\n \"(SELECT MAX(last_reallocation_date) FROM \" + reallocationTable + \" WHERE 
individual_id=\" + str(individualId) + \"))\"\n return databaseObject.Execute(queryLastState)\n\n # Function to get next state for an individual\n def getNextState (self, individualId, currentState):\n global databaseObject\n global qMatrixTable\n global zeroRange\n queryMaxQValue = \"SELECT MAX(q_value), 1 FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \" AND row_num=\" + str(currentState)\n resultMaxQValue = databaseObject.Execute(queryMaxQValue)\n queryCurrentQValue = \"SELECT q_value, 1 FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \" AND row_num=\" + str(currentState) + \\\n \" AND column_num=1\"\n resultCurrentQValue = databaseObject.Execute(queryCurrentQValue)\n for maxQValue, dummy1 in resultMaxQValue:\n for currentQValue, dummy2 in resultCurrentQValue:\n # Checking with help of percentage difference between the maximum and current Q value\n if currentQValue!=0:\n diff = float(abs(maxQValue-currentQValue)/currentQValue*100)\n if diff>zeroRange:\n queryNextState = \"SELECT column_num, 1 FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \" AND row_num=\" + \\\n str(currentState) + \" AND q_value=(SELECT MAX(q_value) FROM \" + qMatrixTable + \" WHERE individual_id=\" + \\\n str(individualId) + \" AND row_num=\" + str(currentState) + \")\"\n return databaseObject.Execute(queryNextState)\n else:\n queryNextState = \"SELECT column_num, 1 FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \" AND row_num=\" + \\\n str(currentState) + \" AND q_value=(SELECT q_value FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \\\n \" AND row_num=\" + str(currentState) + \" AND column_num=1)\"\n return databaseObject.Execute(queryNextState)\n else:\n queryNextState = \"SELECT column_num, 1 FROM \" + qMatrixTable + \" WHERE individual_id=\" + str(individualId) + \" AND row_num=\" + \\\n str(currentState) + \" AND column_num=1\"\n return databaseObject.Execute(queryNextState)\n\n # Function to reduce free asset for an individual\n def reduceFreeAsset(self, individualId, unitQty):\n global databaseObject\n global assetTable\n resultCurrentFreeAsset = databaseObject.Execute(\"SELECT free_asset, total_asset FROM \" + assetTable +\n \" WHERE individual_id=\"+str(individualId))\n for freeAsset, totalAsset in resultCurrentFreeAsset:\n if (float(freeAsset)>=unitQty):\n newFreeAsset = float(freeAsset) - unitQty\n newTotalAsset = float(totalAsset) - unitQty\n queryUpdate = \"UPDATE \" + assetTable + \" SET free_asset=\" + str(round(newFreeAsset,4)) + \\\n \", total_asset=\" + str(round(newTotalAsset,4)) + \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryUpdate)\n else:\n newTotalAsset = float(totalAsset - freeAsset)\n queryUpdate = \"UPDATE \" + assetTable + \" SET free_asset=0, total_asset=\" + str(round(newTotalAsset,4)) + \\\n \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryUpdate)\n\n # Function to increase free asset for an individual\n def increaseFreeAsset(self, individualId, unitQty):\n global databaseObject\n global assetTable\n global individualMaxAsset\n resultCurrentTotalAsset = databaseObject.Execute(\"SELECT total_asset, free_asset FROM \" + assetTable +\n \" WHERE individual_id=\" + str(individualId))\n for totalAsset, freeAsset in resultCurrentTotalAsset:\n newTotalAsset = float(totalAsset) + unitQty\n newFreeAsset = float(freeAsset) + unitQty\n if newTotalAsset<=individualMaxAsset:\n queryUpdate = \"UPDATE \" + 
assetTable + \" SET free_asset=\" + str(round(newFreeAsset,4)) + \\\n \", total_asset=\" + str(round(newTotalAsset,4)) + \" WHERE individual_id=\" + str(individualId)\n #print(queryUpdate)\n return databaseObject.Execute(queryUpdate)\n else:\n newTotalAsset = individualMaxAsset\n newFreeAsset = float(freeAsset) + individualMaxAsset - float(totalAsset)\n queryUpdate = \"UPDATE \" + assetTable + \" SET free_asset=\" + str(round(newFreeAsset,4)) + \\\n \", total_asset=\" + str(round(newTotalAsset,4)) + \" WHERE individual_id=\" + str(individualId)\n #print(queryUpdate)\n return databaseObject.Execute(queryUpdate)\n\n # Function to get current free asset for an individual\n def getFreeAsset(self, individualId):\n global databaseObject\n global assetTable\n queryCheck = \"SELECT free_asset, 1 FROM \" + assetTable + \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryCheck)\n\n # Function to get current free asset for an individual during training\n def getTrainingFreeAsset(self, individualId):\n global databaseObject\n global trainingAssetTable\n queryCheck = \"SELECT free_asset, 1 FROM \" + trainingAssetTable + \" WHERE individual_id=\" + str(individualId)\n return databaseObject.Execute(queryCheck)\n\n # Function to reset assetTable at the beginning\n def resetAssetAllocation(self, date, time):\n global databaseObject\n global trainingAssetTable\n global dailyAssetTable\n global assetTable\n databaseObject.Execute(\"INSERT INTO \" + assetTable +\n \" (individual_id, total_asset, used_asset, free_asset)\"\n \" VALUES\"\n \" (\" + str(gv.dummyIndividualId) + \", \" + str(round(gv.maxTotalAsset,4)) + \", 0, \" + str(round(gv.maxTotalAsset,4)) + \")\")\n databaseObject.Execute(\"INSERT INTO \" + dailyAssetTable +\n \" (date, time, total_asset)\"\n \" VALUES\"\n \" ('\" + str(date) + \"', '\" + str(time) + \"', \" + str(round(gv.maxTotalAsset, 4)) + \")\")\n databaseObject.Execute(\"INSERT INTO \" + trainingAssetTable +\n \" (individual_id, total_asset, used_asset, free_asset)\"\n \" VALUES\"\n \" (\" + str(gv.dummyIndividualId) + \", \" + str(round(gv.trainingMaxTotalAsset,4)) + \", 0, \" + str(round(gv.trainingMaxTotalAsset,4)) + \")\")\n\n # Function to insert free asset at day end into dailyAssetTable\n def insertDailyAsset(self, date, time):\n global databaseObject\n global dailyAssetTable\n global assetTable\n resultAsset = databaseObject.Execute(\"SELECT free_asset, 1 from \" + assetTable + \" where individual_id=\" + str(gv.dummyIndividualId))\n for totalAsset, dummy in resultAsset:\n databaseObject.Execute(\"INSERT INTO \" + dailyAssetTable +\n \" (date, time, total_asset)\"\n \" VALUES\"\n \" ('\" + str(date) + \"', '\" + str(time) + \"', \" + str(totalAsset) + \")\")\n\n # Function to return Net Profit-Loss of Long trades within an interval\n def getLongNetPL(self, startDate, endDate):\n global databaseObject\n global newTradesheetTable\n queryPL = \"SELECT SUM((exit_price-entry_price)*entry_qty),1 FROM \" + newTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=1\"\n return databaseObject.Execute(queryPL)\n\n # Function to return Net Profit-Loss of Short trades within an interval\n def getShortNetPL(self, startDate, endDate):\n global databaseObject\n global newTradesheetTable\n queryPL = \"SELECT SUM((entry_price-exit_price)*entry_qty),1 FROM \" + newTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=0\"\n 
return databaseObject.Execute(queryPL)\n\n # Function to return number of Long trades in an interval\n def getLongTrades(self, startDate, endDate):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT COUNT(*),1 FROM \" + newTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \"' AND entry_date<='\" + str(endDate) + \\\n \"' AND trade_type=1\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return number of Short trades in an interval\n def getShortTrades(self, startDate, endDate):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT COUNT(*),1 FROM \" + newTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \"' AND entry_date<='\" + str(endDate) + \\\n \"' AND trade_type=0\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return Net Profit-Loss of Long trades in original table within an interval\n def getRefLongNetPL(self, startDate, endDate):\n global databaseObject\n queryPL = \"SELECT SUM((exit_price-entry_price)*entry_qty),1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=1\"\n return databaseObject.Execute(queryPL)\n\n # Function to return Net Profit-Loss of Short trades in original table within an interval\n def getRefShortNetPL(self, startDate, endDate):\n global databaseObject\n queryPL = \"SELECT SUM((entry_price-exit_price)*entry_qty),1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=0\"\n return databaseObject.Execute(queryPL)\n\n # Function to return number of Long trades in original table within an interval\n def getRefLongTrades(self, startDate, endDate):\n global databaseObject\n queryTrades = \"SELECT COUNT(*),1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \"' AND entry_date<='\" + str(endDate) + \\\n \"' AND trade_type=1\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return number of Short trades in original table within an interval\n def getRefShortTrades(self, startDate, endDate):\n global databaseObject\n queryTrades = \"SELECT COUNT(*),1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \"' AND entry_date<='\" + str(endDate) + \\\n \"' AND trade_type=0\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return Net Profit-Loss of Long trades in original table within an interval\n def getTrainingLongNetPL(self, startDate, endDate):\n global databaseObject\n global trainingTradesheetTable\n queryPL = \"SELECT SUM((exit_price-entry_price)*entry_qty),1 FROM \" + trainingTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=1\"\n return databaseObject.Execute(queryPL)\n\n # Function to return Net Profit-Loss of Short trades in original table within an interval\n def getTrainingShortNetPL(self, startDate, endDate):\n global databaseObject\n global trainingTradesheetTable\n queryPL = \"SELECT SUM((entry_price-exit_price)*entry_qty),1 FROM \" + trainingTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=0\"\n return databaseObject.Execute(queryPL)\n\n # Function to return number of Long trades in original table within an interval\n def getTrainingLongTrades(self, startDate, endDate):\n global databaseObject\n global trainingTradesheetTable\n queryTrades = \"SELECT COUNT(*),1 FROM \" 
+ trainingTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \"' AND entry_date<='\" + str(endDate) + \\\n \"' AND trade_type=1\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return number of Short trades in original table within an interval\n def getTrainingShortTrades(self, startDate, endDate):\n global databaseObject\n global trainingTradesheetTable\n queryTrades = \"SELECT COUNT(*),1 FROM \" + trainingTradesheetTable + \" WHERE entry_date>='\" + str(startDate) + \"' AND entry_date<='\" + str(endDate) + \\\n \"' AND trade_type=0\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return Net PL for long trades per individual from original tradesheet within an interval\n def getIndividualLongNetPL(self, startDate, endDate, individualId):\n global databaseObject\n queryPL = \"SELECT SUM((exit_price-entry_price)*entry_qty), 1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=1 AND individual_id=\" + str(individualId)\n return databaseObject.Execute(queryPL)\n\n # Function to return Net PL for short trades per individual from original tradesheet within an interval\n def getIndividualShortNetPL(self, startDate, endDate, individualId):\n global databaseObject\n queryPL = \"SELECT SUM((entry_price-exit_price)*entry_qty), 1 FROM old_tradesheet_data_table WHERE entry_date>='\" + str(startDate) + \\\n \"' AND entry_date<='\" + str(endDate) + \"' AND trade_type=0 AND individual_id=\" + str(individualId)\n return databaseObject.Execute(queryPL)\n\n # Function to reset all ranks to maximum for initialization\n # Not being used\n def initializeRanks(self):\n global databaseObject\n global rankingTable\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM old_tradesheet_data_table\"\n queryCount = \"SELECT COUNT(DISTINCT(individual_id)), 1 FROM old_tradesheet_data_table\"\n resultCount = databaseObject.Execute(queryCount)\n resultIndividuals = databaseObject.Execute(queryIndividuals)\n for count, dummy in resultCount:\n for individualId, dummy in resultIndividuals:\n queryInsert = \"INSERT INTO \" + rankingTable + \\\n \" (individual_id, ranking)\" \\\n \" VALUES\" \\\n \" (\" + str(individualId) + \", \" + str(count) + \")\"\n databaseObject.Execute(queryInsert)\n\n # Function to reset all performances to minimum for initialization\n # Not being used\n def initializePerformance(self):\n global databaseObject\n global performanceTable\n queryIndividuals = \"SELECT DISTINCT(individual_id), 1 FROM old_tradesheet_data_table\"\n resultIndividuals = databaseObject.Execute(queryIndividuals)\n for individualId, dummy in resultIndividuals:\n queryInsert = \"INSERT INTO \" + performanceTable + \\\n \" (individual_id, performance)\" \\\n \" VALUES\" \\\n \" (\" + str(individualId) + \", \" + str(gv.dummyPerformance) + \")\"\n databaseObject.Execute(queryInsert)\n\n # Not being used\n def resetRanks(self):\n global databaseObject\n global rankingTable\n queryCount = \"SELECT COUNT(DISTINCT(individual_id)), 1 FROM old_tradesheet_data_table\"\n resultCount = databaseObject.Execute(queryCount)\n for count, dummy in resultCount:\n queryUpdate = \"UPDATE \" + rankingTable + \" SET ranking=\" + str(count)\n databaseObject.Execute(queryUpdate)\n\n # Not being used\n def resetPerformance(self):\n global databaseObject\n global performanceTable\n queryUpdate = \"UPDATE \" + performanceTable + \" SET performance=\" + str(gv.dummyPerformance)\n return 
databaseObject.Execute(queryUpdate)\n\n def insertRankingWalkforward(self, startDate, endDate, walkforward):\n global databaseObject\n query = \"INSERT INTO \" + gv.rankingWalkforwardTableBase + \\\n \" (ranking_walkforward_id, ranking_start_date, ranking_end_date)\" \\\n \" VALUES\" \\\n \" (\" + str(walkforward) + \", '\" + str(startDate) + \"', '\" + str(endDate) + \"')\"\n return databaseObject.Execute(query)\n\n\n # Function to insert rank of an individual\n def insertRank(self, individualId, rank, walkforward):\n global databaseObject\n global rankingTable\n queryInsert = \"INSERT INTO \" + rankingTable + \\\n \" (individual_id, ranking, ranking_walkforward_id)\" \\\n \" VALUES\" \\\n \" (\" + str(individualId) + \", \" + str(rank) + \", \" + str(walkforward) + \")\"\n databaseObject.Execute(queryInsert)\n\n # Function to insert performance of an individual\n def insertPerformance(self, individualId, performance, walkforward):\n global databaseObject\n global performanceTable\n queryInsert = \"INSERT INTO \" + performanceTable + \\\n \" (individual_id, performance, ranking_walkforward_id)\" \\\n \" VALUES\" \\\n \" (\" + str(individualId) + \", \" + str(performance) + \", \" + str(walkforward) + \")\"\n return databaseObject.Execute(queryInsert)\n\n # Function to get ordered individuals from\n def getRankedIndividuals(self, walkforward):\n global databaseObject\n global performanceTable\n query = \"SELECT individual_id, 1 FROM \" + performanceTable + \" WHERE ranking_walkforward_id=\" + str(walkforward) + \" ORDER BY performance DESC\"\n return databaseObject.Execute(query)\n\n # Function to return asset at month end\n def getAssetMonthly(self, month, year):\n global databaseObject\n global dailyAssetTable\n queryAsset = \"SELECT total_asset, 1 FROM \" + dailyAssetTable + \" WHERE \" \\\n \"date=(SELECT MAX(date) FROM \" + dailyAssetTable + \" WHERE MONTH(date)=\" + str(month) + \" AND YEAR(date)=\" + str(year) + \")\"\n return databaseObject.Execute(queryAsset)\n\n # Function to return maximum and minimum asset in the month\n def getAssetMonthlyMaxMin(self, month, year):\n global databaseObject\n global dailyAssetTable\n queryAsset = \"SELECT MAX(total_asset), MIN(total_asset) FROM \" + dailyAssetTable + \" WHERE MONTH(date)=\" + str(month) + \" AND YEAR(date)=\" + str(year)\n return databaseObject.Execute(queryAsset)\n\n # Function to return trades per month\n def getTradesMonthly(self):\n global databaseObject\n global newTradesheetTable\n queryTrades = \"SELECT count(*), MONTH(entry_date), YEAR(entry_date) FROM \" + newTradesheetTable + \" GROUP BY YEAR(entry_date), MONTH(entry_date)\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return trades per month in base tradesheet\n def getRefTradesMonthly(self):\n global databaseObject\n queryTrades = \"SELECT count(*), MONTH(entry_date), YEAR(entry_date) FROM old_tradesheet_data_table GROUP BY YEAR(entry_date), MONTH(entry_date)\"\n return databaseObject.Execute(queryTrades)\n\n # Function to return Long NetPL and Long trades per month\n def getNetPLLongMonthly(self):\n global databaseObject\n global newTradesheetTable\n queryPL = \"SELECT SUM((exit_price-entry_price)*entry_qty), COUNT(*), MONTH(entry_date), YEAR(entry_date) FROM \" + newTradesheetTable + \\\n \" WHERE trade_type=1 GROUP BY YEAR(entry_date), MONTH(entry_date)\"\n return databaseObject.Execute(queryPL)\n\n # Function to return Short NetPL and Short trades per month\n def getNetPLShortMonthly(self):\n global databaseObject\n global newTradesheetTable\n 
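# For reference, the textbook tabular update that a Q-matrix like qMatrixTable
# is normally trained with; the alpha/gamma fields set in __init__ play exactly
# these roles. This is the standard rule, not a transcription of this project's
# updater, which lives outside this file.
import numpy as np

def q_update(qm, state, action, reward, next_state, alpha, gamma):
    target = reward + gamma * np.max(qm[next_state])           # bootstrapped return
    qm[state, action] += alpha * (target - qm[state, action])  # move estimate toward it
    return qm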
queryPL = \"SELECT SUM((entry_price-exit_price)*entry_qty), COUNT(*), MONTH(entry_date), YEAR(entry_date) FROM \" + newTradesheetTable + \\\n \" WHERE trade_type=0 GROUP BY YEAR(entry_date), MONTH(entry_date)\"\n return databaseObject.Execute(queryPL)\n\n # Function to return Long NetPL and Long trades per month in base tradesheet\n def getRefNetPLLongMonthly(self):\n global databaseObject\n queryPL = \"SELECT SUM((exit_price-entry_price)*entry_qty), COUNT(*), MONTH(entry_date), YEAR(entry_date) FROM old_tradesheet_data_table WHERE trade_type=1 GROUP BY YEAR(entry_date), MONTH(entry_date)\"\n return databaseObject.Execute(queryPL)\n\n # Function to return Short NetPL and Short trades per month in base tradesheet\n def getRefNetPLShortMonthly(self):\n global databaseObject\n queryPL = \"SELECT SUM((entry_price-exit_price)*entry_qty), COUNT(*), MONTH(entry_date), YEAR(entry_date) FROM old_tradesheet_data_table WHERE trade_type=0 GROUP BY YEAR(entry_date), MONTH(entry_date)\"\n return databaseObject.Execute(queryPL)\n\n # Function to delete all non-recent entries from qMatrixTable every walk-forward\n def updateQMatrixTableWalkForward(self):\n global databaseObject\n global latestIndividualTable\n global qMatrixTable\n queryUpdate = \"DELETE FROM \" + qMatrixTable + \" WHERE individual_id NOT IN (SELECT individual_id FROM \" + latestIndividualTable + \" )\"\n databaseObject.Execute(queryUpdate)\n\n # Function to reset latest_individual_table every walk-forward\n def resetLatestIndividualsWalkForward(self):\n global databaseObject\n global latestIndividualTable\n queryReset = \"DELETE FROM \" + latestIndividualTable\n databaseObject.Execute(queryReset)\n\n # Function to insert individual id in latest_individual_table every walk-forward\n def insertLatestIndividual(self, individualId):\n global databaseObject\n global latestIndividualTable\n queryCheck = \"SELECT EXISTS (SELECT 1 FROM \" + latestIndividualTable + \" WHERE individual_id=\" + str(individualId) + \"), 0\"\n resultCheck = databaseObject.Execute(queryCheck)\n for check, dummy in resultCheck:\n if check==0:\n queryInsert = \"INSERT INTO \" + latestIndividualTable + \\\n \" (individual_id)\" \\\n \" VALUES\" \\\n \" (\" + str(individualId) + \")\"\n databaseObject.Execute(queryInsert)\n\n # Function to reset assetTable every walk-forward\n def updateAssetWalkForward(self):\n global databaseObject\n global latestIndividualTable\n global assetTable\n queryUpdate = \"DELETE FROM \" + assetTable + \" WHERE individual_id NOT IN ( SELECT individual_id FROM \" + \\\n latestIndividualTable + \" ) AND individual_id<>\" + str(gv.dummyIndividualId)\n databaseObject.Execute(queryUpdate)\n\n # Function to reset trainingAssetTable every training period\n def resetAssetTraining(self):\n global databaseObject\n global trainingAssetTable\n databaseObject.Execute(\"DELETE FROM \" + trainingAssetTable)\n databaseObject.Execute(\"INSERT INTO \" + trainingAssetTable +\n \" (individual_id, total_asset, used_asset, free_asset)\"\n \" VALUES\"\n \" (\" + str(gv.dummyIndividualId) + \", \" + str(round(gv.trainingMaxTotalAsset,4)) + \", 0, \" + str(round(gv.trainingMaxTotalAsset,4)) + \")\")\n\n def checkQMatrix(self, individualId):\n global databaseObject\n global latestIndividualTable\n query = \"SELECT EXISTS( SELECT 1 FROM \" + latestIndividualTable + \" WHERE individual_id=\" + str(individualId) + \" ), 1\"\n return 
databaseObject.Execute(query)\n","sub_path":"feedback_walkforward_mtm_list_variables_parallel/DBUtils.py","file_name":"DBUtils.py","file_ext":"py","file_size_in_byte":45299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"208695106","text":"from django.shortcuts import get_object_or_404, render\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom filetransfers.api import prepare_upload, serve_file\nfrom django.views.generic import TemplateView\nfrom django.contrib.syndication.views import Feed\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\n\nimport webapp2\nimport datetime\nimport re\n\nfrom osfinalproject.models import Question, Answer, Vote, UploadModel\nfrom osfinalproject.forms import CreateQuestionForm, CreateAnswerForm, EditQuestionForm, EditAnswerForm, UploadForm\n\n#use google for user management\nproviders = {\n 'Google' : 'https://www.google.com/accounts/o8/id',\n}\n\n#show a list of questions \ndef index(request):\n user = users.get_current_user()\n if user: # signed in already\n logged_in = True\n test_user(user)\n else: \n user = \"\"\n logged_in = False\n #order 10 questions per page by publication date \n highest_voted_question_list = Question.objects.order_by('-pub_date')\n paginator = Paginator(highest_voted_question_list, 10) \n page = request.GET.get('page')\n try:\n questions = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n questions = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n questions = paginator.page(paginator.num_pages)\n\n context = { 'highest_voted_question_list': questions, 'user': user, 'logged_in': logged_in }\n return render(request, 'osfinalproject/index.html', context)\n\n#search for questions with a given tag \ndef search(request):\n user = users.get_current_user()\n search_term = request.POST['search']\n if user: # signed in already\n logged_in = True\n test_user(user)\n else: \n user = \"\"\n logged_in = False\n #search for tags in questions \n highest_voted_question_list = Question.objects.order_by('-pub_date')[:10]\n search_results = []\n #check each question in the sorted list for a matching regex\n #the regex searches for a tag with an optional space on one or either side\n regex = re.compile(r'^(.+\\s+)*%s(\\s+.+)*$'%search_term)\n for q in highest_voted_question_list:\n tag_list = q.tags\n if regex.match(q.tags):\n search_results.append(q.id)\n #filter the full list of questions with those matching the tag \n highest_voted_question_list = Question.objects.filter(pk__in=search_results)\n context = { 'highest_voted_question_list': highest_voted_question_list, 'user': user, 'logged_in': logged_in, 'search_results':search_results}\n return render(request, 'osfinalproject/index.html', context)\n\n#login the user \ndef login(request):\n user = users.get_current_user()\n response = HttpResponse()\n for name, uri in providers.items():\n provider = name\n if user: # signed in already\n response.write('Hello %s! [
<a href=\"%s\">sign out</a>]' % (\n            user.nickname(), users.create_logout_url(request.get_full_path())))\n        #check if the user already exists in db\n        test_user(user)\n    else: # let user choose authenticator\n        response.write('Hello world! Sign in at: ')\n        for name, uri in providers.items():\n            response.write('[<a href=\"%s\">%s</a>]' % (\n                users.create_login_url(federated_identity=uri), name))\n    response.write('
home')\n    return response\n\n
#check if user is signing in for the first time and add the user to the database\ndef test_user(user):\n    user = users.get_current_user()\n    if user: \n        try:\n            db_user = User.objects.get(username=user.nickname())\n        except ObjectDoesNotExist:\n            db_user = User.objects.create_user(user.nickname())\n            db_user.save()\n        return db_user\n\n
#show details of question including images, full text and answers \ndef question_detail(request, question_id):\n    user = users.get_current_user()\n    if user: # signed in already\n        logged_in = True\n        test_user(user)\n    else: \n        user = \"\"\n        logged_in = False\n    #get question and calculate votes \n    question = get_object_or_404(Question, pk=question_id)\n    sum = sum_votes(question, 0)\n    question.votes=sum\n    question.save()\n    highest_voted_answer_list = question.answer_set.order_by('-votes')\n    context = { 'highest_voted_answer_list': highest_voted_answer_list, 'question': question, 'user': user, 'logged_in': logged_in }\n    return render(request, 'osfinalproject/question_detail.html', context)\n\n
#shows results of the vote or update to a question or answer\ndef results(request, question_id):\n    user = users.get_current_user()\n    if user: # signed in already\n        logged_in = True\n        test_user(user)\n    else: \n        user = \"\"\n        logged_in = False\n    question = get_object_or_404(Question, pk=question_id)\n    highest_voted_answer_list = question.answer_set.order_by('-votes')\n    context = { 'highest_voted_answer_list': highest_voted_answer_list, 'question': question, 'user': user, 'logged_in': logged_in }\n    return render(request, 'osfinalproject/results.html', context)\n\n
#vote a question up or down \ndef vote_question(request, question_id):\n    q = get_object_or_404(Question, pk=question_id)\n    user = users.get_current_user()\n    if user: # signed in already\n        logged_in = True\n        db_user = test_user(user) \n        #check if vote was up or down \n        if 'UP' in request.POST:\n            vote = 1\n        else:\n            vote = -1\n        try:\n            v = Vote.objects.get(question_id=q, user=db_user, answer_id=0)\n        except ObjectDoesNotExist:\n            v = Vote(question_id=q, user=db_user, pub_date=datetime.datetime.now(), vote_text=user.nickname()+\", \"+q.question_text)\n            v.save()\n        #update or assign vote\n        v.up_or_down = vote\n        v.save()\n        sum = sum_votes(q, 0)\n        q.votes=sum\n        q.save()\n    else: \n        user = \"\"\n        logged_in = False\n\n    return HttpResponseRedirect(reverse('osfinalproject:results', args=(q.id,)))\n\n
#vote an answer up or down \ndef vote_answer(request, question_id):\n    q = get_object_or_404(Question, pk=question_id)\n    #check if the user already exists in db\n    user = users.get_current_user()\n    if user: # signed in already\n        logged_in = True\n        db_user=test_user(user) \n        #check if vote was up or down \n        if 'UP' in request.POST:\n            vote = 1\n        else:\n            vote = -1 \n        try:\n            selected_answer = q.answer_set.get(pk=request.POST['answer'])\n            try:\n                v = Vote.objects.get(question_id=q, answer_id=selected_answer, user=db_user)\n            except ObjectDoesNotExist:\n                v = Vote(question_id=q, answer_id=selected_answer, user=db_user, pub_date=datetime.datetime.now(), vote_text=user.nickname()+\", \"+q.question_text+\", \"+selected_answer.answer_text)\n                v.save()\n            #update or assign vote\n            v.up_or_down = vote\n            v.save() \n            #update voted on item \n            sum = sum_votes(q, selected_answer)\n            selected_answer.votes=sum\n            selected_answer.save()\n        except (KeyError, Answer.DoesNotExist):\n            # Redisplay the question voting form.\n            return render(request, 'osfinalproject/question_detail.html', {\n                'question': q,\n                'error_message': \"You didn't select an answer.\",\n            })\n    else:\n        user = \"\"\n        logged_in = False\n    return HttpResponseRedirect(reverse('osfinalproject:results', args=(q.id,)))\n\n
#calculate the votes for a question or answer \ndef sum_votes(question_id, answer_id):\n    sum = 0\n    positive_arr = Vote.objects.filter(question_id=question_id, answer_id=answer_id, up_or_down=1)\n    negative_arr = Vote.objects.filter(question_id=question_id, answer_id=answer_id, up_or_down=-1)\n    sum = len(positive_arr) - len(negative_arr)\n    return sum\n\n
#add a new question\ndef add_question(request):\n    user = users.get_current_user()\n    if user: # signed in already\n        logged_in = True\n        db_user = test_user(user)\n        #process and validate form\n        if request.method == 'POST':\n            form = CreateQuestionForm(request.POST)\n            if form.is_valid():\n                question = form.save(commit=False)\n                question.user = db_user\n                #update publication date and check for links in the text\n                question.pub_date = datetime.datetime.now()\n                new_text = make_links_and_photos(question.question_text)\n                question.question_text = new_text \n                question.save()\n                highest_voted_answer_list = question.answer_set.order_by('-votes')\n                context = { 'highest_voted_answer_list': highest_voted_answer_list, 'question': question, 'user': user, 'logged_in': logged_in }\n                return render(request, 'osfinalproject/question_detail.html', context)\n        # make a blank form otherwise \n        else:\n            form = CreateQuestionForm()\n    #only signed in users can make questions \n    else: \n        user = \"\"\n        logged_in = False\n    return render(request, 'osfinalproject/add_question.html', {'form': form, 'user':user , 'logged_in': logged_in})\n\n
#add a new answer \ndef add_answer(request, question_id):\n    question = get_object_or_404(Question, pk=question_id)\n    user = users.get_current_user()\n    if user: # signed in already\n        logged_in = True\n        db_user = test_user(user)\n        #process and validate form\n        if request.method == 'POST':\n            # create a form instance and populate it with data from the request:\n            form = CreateAnswerForm(request.POST)\n            # check whether it's valid:\n            if form.is_valid():\n                answer = form.save(commit=False)\n                answer.user = db_user\n                #update publication date and check for links in the text\n                answer.question_id = question\n                answer.pub_date = datetime.datetime.now()\n                new_text = make_links_and_photos(answer.answer_text)\n                answer.answer_text = new_text\n                answer.save()\n                question = answer.question_id\n                highest_voted_answer_list = question.answer_set.order_by('-votes')\n                context = { 'highest_voted_answer_list': highest_voted_answer_list, 'question': question, 'user': user, 'logged_in': logged_in }\n                return render(request, 'osfinalproject/question_detail.html', context)\n        # if a GET (or any other method) we'll create a blank form\n        else:\n            form = CreateAnswerForm()\n    #only signed in users can make answers \n    else: \n        user = \"\"\n        logged_in = False\n    return render(request, 'osfinalproject/add_answer.html', {'form': form, 'user':user , 'logged_in': logged_in, 'question': question})\n\n
#check for links in the text of a question or answer \n#makes anchor tags into image tags if an image is found \ndef make_links_and_photos(question_text):\n    text = question_text\n    regex_html = re.compile(r'.*<a href=\".*\">.*</a>.*')\n    if regex_html.match(text):\n        text = re.sub(r'<a href=\"(.*)\">(.*)</a>', r'<img src=\"\\1\">', text)\n    return text\n\n
#edit the question text, tags and image \ndef edit_question(request, question_id): \n    instance = get_object_or_404(Question, pk=question_id)\n    author = instance.user\n    user = users.get_current_user()\n    is_author = False\n    logged_in = False\n    form = 
EditQuestionForm()\n if user: # signed in already\n logged_in = True\n db_user=test_user(user)\n #check if the current user is the author\n #only authors can edit questions\n if author == db_user:\n is_author = True\n form = EditQuestionForm(request.POST or None, instance=instance)\n if form.is_valid():\n if is_author:\n question = form.save(commit=False)\n #update published date and check for links in text \n question.pub_date = datetime.datetime.now()\n new_text = make_links_and_photos(question.question_text)\n question.question_text = new_text \n question.save()\n #render the question detail view \n highest_voted_answer_list = question.answer_set.order_by('-votes')\n context = { 'highest_voted_answer_list': highest_voted_answer_list, 'question': question, 'user': user, 'logged_in': logged_in }\n return render(request, 'osfinalproject/question_detail.html', context)\n #inform the user than they cannot edit the question \n context = {'question': instance, 'user': user, 'logged_in': logged_in, 'form': form, 'is_author': is_author, 'author': datetime.datetime.now(), 'db_user' : db_user}\n return render(request, 'osfinalproject/edit_question.html', context)\n\n#edit the answer text and image \ndef edit_answer(request, answer_id): \n instance = get_object_or_404(Answer, pk=answer_id)\n author = instance.user\n user = users.get_current_user()\n is_author = False\n logged_in = False\n form = EditAnswerForm()\n if user: # signed in already\n logged_in = True\n db_user=test_user(user)\n #check if the current user is the author\n #only authors can edit answers\n if author == db_user:\n is_author = True\n form = EditAnswerForm(request.POST or None, instance=instance)\n if form.is_valid():\n if is_author:\n answer = form.save(commit=False)\n #update published date and check for links in text \n answer.pub_date = datetime.datetime.now()\n new_text = make_links_and_photos(answer.answer_text)\n answer.answer_text = new_text\n answer.save()\n question = answer.question_id\n #render the question detail view \n highest_voted_answer_list = question.answer_set.order_by('-votes')\n context = { 'highest_voted_answer_list': highest_voted_answer_list, 'question': question, 'user': user, 'logged_in': logged_in }\n return render(request, 'osfinalproject/question_detail.html', context)\n #inform the user than they cannot edit the answer \n context = {'answer': instance, 'user': user, 'logged_in': logged_in, 'form': form, 'is_author': is_author, 'author': datetime.datetime.now(), 'db_user' : db_user}\n return render(request, 'osfinalproject/edit_answer.html', context)\n\n#handle image uploads \ndef upload(request):\n view_url = reverse('osfinalproject:upload')\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n form.save()\n return HttpResponseRedirect(view_url)\n\n upload_url, upload_data = prepare_upload(request, view_url)\n form = UploadForm()\n return render(request, 'osfinalproject/upload.html',\n {'form': form, 'upload_url': upload_url, 'upload_data': upload_data,\n 'uploads': UploadModel.objects.all()})\n\n#handle downloads to make urls for images\ndef download(request, pk):\n upload = get_object_or_404(UploadModel, pk=pk)\n return serve_file(request, upload.file, save_as=False)\n\nclass QuestionFeed(Feed):\n title = \"Question RSS Feed\"\n link = \"/feed/\"\n description = \"A feed for you.\"\n\n def get_object(self, request, question_id):\n return get_object_or_404(Question, pk=question_id)\n\n def description(self, obj):\n return obj.answer_set.order_by('-votes')\n\n def 
items(self, obj):\n return Question.objects.filter(pk=obj.id)\n\n def title(self, obj):\n return obj.question_text\n\n # item_link is only needed if NewsItem has no get_absolute_url method.\n def item_link(self, question):\n return reverse('osfinalproject:question_detail', args=[question.pk])\n","sub_path":"osfinalproject/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582301214","text":"import time\nfrom slackclient import SlackClient\n\ntoken = \"xoxb-17223843696-V2Q7IKQ0OyTqWOP98OsFLzef\"\ntoken1 = 'xoxb-23475566993-BJgYCWVVCx4zzdWIHfRJp0Gp'\nsc = SlackClient(token)\n\ndef start_time():\n global start_time\n start_time = time.time()\n\ndef print_to_slack(chan,message):\n token = \"xoxb-17223843696-V2Q7IKQ0OyTqWOP98OsFLzef\"\n token1 = 'xoxb-23475566993-BJgYCWVVCx4zzdWIHfRJp0Gp'\n sc = SlackClient(token)\n message = str(message)\n sc.api_call(\"chat.postMessage\", as_user=\"true\",channel=chan, text=message)\n\ndef run_time(user,chan,lang='norsk'):\n print(start_time)\n up_time = time.time()-start_time\n day = up_time//86400\n hour_sec = up_time%86400\n hour = hour_sec//3600\n min_sec=hour_sec%3600\n min = min_sec//60\n sec = min_sec%60\n\n if lang == 'nor' or lang == 'norsk' or lang =='':\n up_time = day,\" dager \",hour,\" timer \", min, \" minutter \", sec, \" sekunder \"\n elif lang == 'eng' or lang == 'english':\n up_time = day,\" days \",hour,\" hours \", min, \" minutes \", sec, \" seconds \"\n else:\n response = 'Å fyfaen, det språket kan æ ikkje.'\n print_to_slack(chan,response)\n up_time = day,\" days \",hour,\" hours \", min, \" minutes \", sec, \" seconds \"\n print_to_slack(chan,up_time)\n\ndef pong(user,chan):\n print_to_slack(chan,'Pong')\n","sub_path":"sassbot/_functions.py","file_name":"_functions.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"107214296","text":"'''\nThis is an NN with one LSTM layer and then one\nfull connected layers.\n'''\n\nfrom utils import loadData\nimport pandas as pd\nimport numpy as np\nfrom os.path import join\nfrom time import ctime\nimport matplotlib.pyplot as plt\n\nimport recruit_config\nfrom featureExtraction import extractDateFeatures,\\\nextractPrevDaysAsFeatrures\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler,scale,MinMaxScaler\nfrom sklearn.externals import joblib\n\nimport lightgbm as gbm\nprint('\\014')\n\n#===config===\n#if we want to forecast for days x to x+38, the number of visitors\n#in days x-n_prev_days to x-1 are used as featues.\nn_prev_days=40\nmodelSelection=True\nifLoadFeatures=False\nmodelFitFlg=1#1:fit model,2:grid search,3:load model\n\n\ndataDir=join(recruit_config.DATADIR,'processed_data')\nfittedModelDir='/home/arash/MEGA/MEGAsync/Machine Learning/'+\\\n 'Kaggle/Recruit/Fitted models'\nsubmissionsDir='/home/arash/datasets/Kaggle/Recruit/submissions' \n \nfeature_names=[u'dow', u'holiday_flg', u'gldn_flg',u'year', u'month',u'day',\n u'air_genre_name',u'air_area_name',\n u'avg_visit',u'avg_visit_holiday', u'avg_visit_dow', \n u'avg_visit_month', u'latitude',u'longitude']\n#===config===\n\n#===feature extraction===\nif ifLoadFeatures: \n df_train_pred=pd.read_csv(join(dataDir,'model9_trainData.csv'))\n df_test=pd.read_csv(join(dataDir,'model9_testData.csv'),\n parse_dates=['visit_date'])\nelse:\n #===load data===\n 
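# loadData (imported from utils above) is assumed to return a dict of DataFrames\n    
# keyed by table name; only the air_* tables and the date_info calendar are used below\n    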
dataDict=loadData(['air_reserve','air_store_info','air_visit_data',\n 'date_info'])\n df_R,df_S,df_V,df_date=\\\n (dataDict['air_reserve'],dataDict['air_store_info'],\n dataDict['air_visit_data'],dataDict['date_info'])\n df_test=pd.read_csv(join(dataDir,'test.csv'),parse_dates=['visit_date']) \n #===load data===\n \n #===split train and eval sets===\n train_feat_rng=pd.date_range('2016-01-01','2017-03-14')\n train_pred_rng=pd.date_range('2017-03-15','2017-04-22')\n \n df_V_train_feat=df_V[df_V.visit_date.isin(train_feat_rng)]\n df_V_train_pred=df_V[df_V.visit_date.isin(train_pred_rng)]\n #===split train and eval sets===\n \n #===date-related features===\n #---golden week---\n rng=pd.date_range('2016-04-29',periods=7,freq='D').\\\n append(pd.date_range('2017-04-29',periods=7,freq='D'))\n df_date['gldn_flg']=0\n df_date.loc[df_date.calendar_date.isin(rng),'gldn_flg']=1\n #---golden week---\n \n #---encode day of week---\n df_date.day_of_week=df_date.calendar_date.dt.dayofweek \n df_date.rename(columns={'calendar_date':'visit_date',\n 'day_of_week':'dow'},inplace=True)\n #---encode day of week--- \n \n #---merge df_V and df_date--- \n df_V_train_feat=df_V_train_feat.merge(df_date,on='visit_date')\n df_V_train_pred=df_V_train_pred.merge(df_date,on='visit_date')\n df_V=df_V.merge(df_date,on='visit_date')\n df_test=df_test.merge(df_date,on='visit_date')\n #---merge df_V and df_date---\n \n #---other date-related features---\n df_V_train_feat=extractDateFeatures(df_V_train_feat)\n df_V_train_pred=extractDateFeatures(df_V_train_feat,df_V_train_pred)\n \n df_V=extractDateFeatures(df_V)\n df_test=extractDateFeatures(df_V,df_test)\n #---other date-related features---\n #===date-related features===\n \n #===use # of visitors in prev. days as featuers===\n df_V_train_pred=extractPrevDaysAsFeatrures(df_V_train_feat,\n df_V_train_pred,\n ifStandardize=False,\n n_prev_days=n_prev_days)\n\n df_test=extractPrevDaysAsFeatrures(df_V,df_test,\n ifStandardize=False,\n n_prev_days=n_prev_days)\n #===use # of visitors in prev. days as featuers===\n \n #===store-related features===\n #---encoding categorical features in df_S---\n df_S['air_genre_name']=LabelEncoder().fit_transform(df_S.air_genre_name)\n df_S['air_area_name']=LabelEncoder().fit_transform(df_S.air_area_name)\n #---encoding categorical features in df_S---\n \n #---scale lon and lat in df_S---\n df_S['latitude']=scale(df_S.latitude);\n df_S['longitude']=scale(df_S.longitude);\n #---scale lon and lat in df_S---\n\n #---join df_V and df_S--- \n df_train_pred=df_V_train_pred.merge(df_S,on=['air_store_id'])\n df_train_pred.drop(['air_store_id','visit_date'],axis=1,inplace=True) \n \n df_test=df_test.merge(df_S,on=['air_store_id']) \n #---join df_V and df_S---\n #===store-related features===\n \n #===save derived data===\n df_train_pred.to_csv(join(dataDir,'model9_trainData.csv'),index=False)\n df_test.to_csv(join(dataDir,'model9_testData.csv'),index=False)\n #===save derived data===\n#===feature extraction===\n\n#===prepare data for keras===\n'''\nNo. 
of visitors in the previous days are fed into the LSTM\nand then the output of this layer is concatenated with other features.\nSo the observations in the previous days should be in shape\n[n_samples,n_prev_days,1]\n'''\n\nfeat=['day-{}'.format(d) for d in np.arange(n_prev_days,0,-1)]\nX_train_lags=np.log1p(df_train_pred.loc[:,feat].values.\\\n reshape((-1,n_prev_days,1)))\nX_test_lags=np.log1p(df_test.loc[:,feat].values.reshape((-1,n_prev_days,1)))\n\ntrain_features=df_train_pred.loc[:,feature_names].values\ntest_features=df_test.loc[:,feature_names].values\n\n#---scale inputs---\nscaler=MinMaxScaler().fit(np.concatenate((train_features,test_features),\n axis=0))\ntrain_features=scaler.transform(train_features)\ntest_features=scaler.transform(test_features)\n#---scale inputs---\n\nY_train=np.log1p(df_train_pred.visitors.values)\n#===prepare data for keras===\n\n#===build NN===\ndate=str(pd.to_datetime(ctime()).date())\nfittedMdlPath='/home/arash/MEGA/MEGAsync/Machine Learning/'+\\\n 'Kaggle/Recruit/Fitted models/model9_nonCV_{}.pkl'.\\\n format(date)\n \nfrom keras.layers import Input,Dense,Dropout,LSTM,Flatten,concatenate\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import load_model\nfrom keras.optimizers import rmsprop\nimport keras.backend as K\n\nK.clear_session()\n\ninp_lags = Input(shape=(X_train_lags.shape[1],X_train_lags.shape[2]))\nrec_lay=LSTM(10)(inp_lags)\n\ninp_feats = Input(shape=(train_features.shape[1],))\n\nmerged_features=concatenate([rec_lay,inp_feats])\n\ndense1=Dense(10,activation='relu')(merged_features)\ndense2=Dense(10,activation='relu')(dense1)\ndr=Dropout(.1)(dense2)\noutputs = Dense(1)(dr)\n\nmodel = Model(inputs=[inp_lags,inp_feats],outputs=outputs)\n\n#---train the model---\nearly=EarlyStopping(monitor='val_loss', min_delta=0, patience=1)\ncheckpoint = ModelCheckpoint(fittedMdlPath, monitor='val_loss', \n save_best_only=True, mode='min', period=1)\n\nopt = rmsprop(lr=.001)\nmodel.compile(optimizer=opt,\n loss='mean_squared_error',\n metrics=['mean_squared_error'])\n\nhistory = model.fit([X_train_lags,train_features],Y_train,\n validation_split=.1,\n batch_size=30,epochs=10,\n verbose=1,callbacks=[checkpoint])\n\nplt.plot(np.sqrt(history.history['val_loss']))\n#---train the model---\n#===build NN===\n\n#===plot models predictions===\nplt.figure()\nmodel = load_model(fittedMdlPath)\ny_train_pred=model.predict([X_train_lags,train_features])\nplt.plot(Y_train,y_train_pred,'o',alpha=.3)\nplt.plot([Y_train.min(),Y_train.max()],[Y_train.min(),Y_train.max()])\n#===plot models predictions===\n\n\n#===make prediction for test set==\nmodel = load_model(fittedMdlPath)\ny_test=model.predict([X_test_lags,test_features])\n\ndf=pd.DataFrame({'id':df_test.air_store_id+'_'+\\\n df_test.visit_date.dt.strftime('%Y-%m-%d'),\n 'visitors':np.expm1(y_test.flatten())})\ndf.sort_values(by='id',inplace=True) \ndf.to_csv(join(submissionsDir,'model9_{}.csv'.format(date)),index=False)\n#===make prediction for test set=== \n\n\n\n\n ","sub_path":"model9.py","file_name":"model9.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"502409115","text":"'''\r\n4.\r\n탐욕 알고리즘은 최적해를 구하는 상황에서 사용하는 방법입니다.\r\n여러 경우 중 하나를 선택할 때 그것이 그상황에서 가장 좋다고 생각하는 것을\r\n선택해 나가는 방식으로 진행하여 답을 구합니다.\r\n하지만 탐욕알고리즘은 그 상황에서 가장 좋다고 생각하는 것을 선택해 나가는\r\n방식이기 때문에 가장 좋은 결과를 얻는 것이 보장되는것은 아닙니다.\r\n탐욕 알고리즘을 이용하여 동전을 지불하는 
함수(greedy)를 짜는데 지불해야 하는\r\n동전의 갯수가 최소가 되도록 함수를 구현하시오\r\n(input 으로 액수와 동전의 종류를 입력하게 구현)\r\n\r\n<입력>\r\nprint(greedy())\r\n\r\n<출력>\r\n액수입력 : 1050\r\n동전의 종류 : 100 50 10\r\n100원 동전 10개, 50원 동전 1개, 10원 동전 0개\r\n'''\r\n\r\n
coin = list(map(int, input('동전의 종류를 입력해 주세요.:').split()))\r\nchange = int(input('거스름 돈을 입력해주세요 :'))\r\n\r\ndef greedy(change, coin):\r\n    coin.sort(reverse=True)\r\n    i = 0\r\n    solution = 0\r\n    result= {}\r\n    first_change = change\r\n    while change != 0:\r\n        if change > coin[i]:\r\n            change -=coin[i]\r\n            solution +=1\r\n        elif change == coin[i]:\r\n            change-=coin[i]\r\n            solution +=1\r\n            result[coin[i]] = solution\r\n        else:\r\n            result[coin[i]] = solution\r\n            i+=1\r\n            solution = 0\r\n\r\n    print('액수 입력 : {}'.format(first_change))\r\n    print('동전의 종류 : ', end = '')\r\n    print(\",\".join(map(str, coin)))\r\n    for i in range(len(coin)):\r\n        if i !=len(coin)-1:\r\n            if coin[i] in result:\r\n                print('{}원 동전 {}개, '.format(coin[i], result[coin[i]]), end = '')\r\n            else:\r\n                print('{}원 동전 0개, '.format(coin[i]), end = '')\r\n        else:\r\n            if coin[i] in result:\r\n                print('{}원 동전 {}개'.format(coin[i], result[coin[i]]))\r\n            else:\r\n                print('{}원 동전 0개'.format(coin[i]))\r\n    return''\r\n\r\nprint(greedy(change, coin))\r\n
","sub_path":"quiz/algorithm_quiz4.py","file_name":"algorithm_quiz4.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"367746371","text":"# You have an array arr of length n where arr[i] = (2 * i) + 1 for all valid values of i (i.e. 0 <= i < n).\n\n# In one operation, you can select two indices x and y where 0 <= x, y < n and subtract 1 from arr[x] and add 1 to arr[y] (i.e. perform arr[x] -=1 and arr[y] += 1). The goal is to make all the elements of the array equal. It is guaranteed that all the elements of the array can be made equal using some operations.\n\n# Given an integer n, the length of the array. 
Return the minimum number of operations needed to make all the elements of arr equal.\n\ndef minOperations(self, n: int) -> int:\n target = 0\n arr = []\n for i in range(n):\n num = (2*i)+1\n arr.append(num)\n target += num\n target = target/n \n i = 0\n j = n-1\n op = 0\n while i < j:\n if target - arr[i] > 0:\n op += target-arr[i]\n i += 1\n j -= 1\n \n return int(op)\n\n\n","sub_path":"Min Operations/MinOperations.py","file_name":"MinOperations.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"336855022","text":"\"\"\"\n百钱百鸡问题(穷举法、暴力搜索法)\n百钱百鸡是我国古代数学家张丘建在《算经》一书中提出的数学问题:\n鸡翁一值钱五,鸡母一值钱三,鸡雏三值钱一。百钱买百鸡,问鸡翁、鸡母、鸡雏各几何?\n\"\"\"\ncock = 0\nhen = 0\nchick = 0\n\nfor x in range(0, 21):\n for y in range(0, 34):\n cock = x\n hen = y\n chick = 100-x-y\n if chick % 3 == 0 and cock*5+hen*3+(chick//3) == 100:\n print(\"cock:%d,hen:%d,chick:%d\" % (cock, hen, chick))\n","sub_path":"Day1-15/Code/Day5/Chick.py","file_name":"Chick.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"494956787","text":"#!/usr/bin/python\r\n#-------------------------------------------------------------------------------\r\n# Name: ExecCalamp.py\r\n# Purpose: Programa para inicializar a aplicacao off line Votorantim - Silos\r\n#\r\n# Author: renato.correa\r\n#\r\n# Created: 10/07/2017\r\n# Copyright: (c) renato.correa 2017\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n\r\n\r\nimport Exceptions.Execpt\r\nfrom DataBase import ProcessDataBase\r\nimport asyncio\r\nfrom Serial import ProcessSerial\r\nfrom Log import LogFile\r\nimport Process\r\nimport time\r\nfrom pyArango.connection import *\r\n\r\n\r\n\r\ndef testeSerial():\r\n print('Teste serial')\r\n ProcessSerial.ConfiguraSerial()\r\n\r\n\r\n\r\n ProcessSerial.Write('AT***********\\r\\n')\r\n #seq = ProcessSerial.ReadTotal()\r\n valor = ProcessSerial.ReadCount(20)\r\n print('VALOR = ',valor)\r\n\r\n resp = valor[0:13]\r\n stringDec = resp.decode()\r\n print(resp)\r\n print('STRING FINAL: '+stringDec)\r\n\r\n ProcessSerial.CloseSerial()\r\n\r\n print('**** FIM TESTE SERIAL ***')\r\n stringDec = ''\r\n\r\ndef testeDataBaseArango():\r\n try:\r\n #Inserindo nas tabelas log e tb_influx\r\n #Tabela log\r\n conn = Connection(username=\"root\", password=\"yasrlc\")\r\n db = conn.createDatabase(name=\"school\")\r\n studentsCollection = db.createCollection(name=\"Students\")\r\n print(db[\"Students\"])\r\n\r\n\r\n\r\n\r\n except Exception as e:\r\n print('ERROR!!!')\r\n err = e.args\r\n print('ERROR: ' + err[0])\r\n finally:\r\n print('--REINICIA CICLO--')\r\n\r\ndef testeDataBaseSqlite():\r\n try:\r\n #Inserindo nas tabelas log e tb_influx\r\n #Tabela log\r\n ret = ProcessDataBase.insertLiga('GELADEIRA')\r\n print(str(ret))\r\n retinflux = ProcessDataBase.insertInflux(1)\r\n\r\n ret = ProcessDataBase.insertDesliga('GELADEIRA')\r\n retinflux = ProcessDataBase.insertInflux(0)\r\n\r\n #Inserindo na tabela\r\n\r\n except Exception as e:\r\n print('ERROR!!!')\r\n err = e.args\r\n print('ERROR: ' + err[0])\r\n finally:\r\n print('--REINICIA CICLO--')\r\n\r\ndef testeLog():\r\n try:\r\n print('Iniciando teste de Log')\r\n LogFile.writeLog('Texto - Info',0)\r\n LogFile.writeLog('Texto - Debug', 1)\r\n LogFile.writeLog('Texto - Warning', 2)\r\n LogFile.writeLog('Texto - Error', 3)\r\n LogFile.writeLog('Texto - Critical', 4)\r\n\r\n except Exception 
as e:\r\n print('ERROR!!!')\r\n err = e.args\r\n print('ERROR: ' + err[0])\r\n finally:\r\n print('--REINICIA CICLO--')\r\n\r\ndef START():\r\n\r\n while True:\r\n try:\r\n\r\n print('START APLICATION')\r\n LogFile.writeLog('START APLICATION',0)\r\n Process.start()\r\n time.sleep(5)\r\n\r\n except Exceptions.Execpt.FalhaBanco as e:\r\n print(\"ERROR BANCO: \"), print(e.args)\r\n LogFile.writeLog('Falha funcao START', 3)\r\n\r\n except Exception as e:\r\n print('ERROR!!!')\r\n err = e.args\r\n print('ERROR: ' + err[0])\r\n LogFile.writeLog('Falha funcao START', 3)\r\n\r\n finally:\r\n print('--REINICIA CICLO--')\r\n\r\nif __name__ == '__main__':\r\n #testeSerial()\r\n #testeLog()\r\n #testeDataBaseSqlite()\r\n testeDataBaseArango()\r\n START()\r\n print('FIM')\r\n\r\n\r\n\r\n\r\n","sub_path":"Principal.py","file_name":"Principal.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138224488","text":"#!/bin/python3\n\nimport sys\n\nn, k = input().strip().split(' ')\nn, k = [int(n), int(k)]\nc = [int(c_temp) for c_temp in input().strip().split(' ')]\n\n\ndef cloudjump(n, k, c):\n # while loop, do it manually, might run out of time\n E = 100\n cloud_index = 0\n while True:\n cloud_index += k % n\n cloud_index = cloud_index % n\n E -= 1\n if c[cloud_index] == 1:\n E -= 2\n if cloud_index == 0:\n break\n return E\n # use modulo operations to 1: see how many jumps until back on cloud 0\n # 2: see how many times it jumped on each space.\n\n\nresult = cloudjump(n, k, c)\nprint(result)\n","sub_path":"hackerrank/implementation/JumpingOnTheCloudsRevisited.py","file_name":"JumpingOnTheCloudsRevisited.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"134439026","text":"import threading\nimport configparser\nimport sys\nimport os\nimport pyshark\nfrom threading import Thread\nfrom pprint import pprint\nimport datetime\nimport signal\nimport time\nfrom collections import Counter\n\nfrom pyshark.capture.capture import StopCapture\nfrom db_con import db_client, db_pkt_col_packets, db_sessions_col, db_archive_pkt_col, db_login, db_white_list\nimport asyncio\n\n# ['id', 'flags', 'flags_response', 'flags_opcode', 'flags_authoritative', 'flags_truncated', 'flags_recdesired', 'flags_recavail', 'flags_z', 'flags_authenticated', 'flags_checkdisable', 'flags_rcode', 'count_queries', 'count_answers', 'count_auth_rr', 'count_add_rr', '', 'qry_name', 'qry_name_len', 'count_labels', 'qry_type', 'qry_class', 'resp_name', 'resp_type', 'resp_class', 'resp_ttl', 'resp_len', 'cname', 'a', 'ns', 'response_to', 'time']\n# ['id', 'flags', 'flags_response', 'flags_opcode', 'flags_truncated', 'flags_recdesired', 'flags_z', 'flags_checkdisable', 'count_queries', 'count_answers', 'count_auth_rr', 'count_add_rr', '', 'qry_name', 'qry_name_len', 'count_labels', 'qry_type', 'qry_class']\n# ['version', 'hdr_len', 'dsfield', 'dsfield_dscp', 'dsfield_ecn', 'len', 'id', 'flags', 'flags_rb', 'flags_df', 'flags_mf', 'frag_offset', 'ttl', 'proto', 'checksum', 'checksum_status', 'src', 'addr', 'src_host', 'host', 'dst', 'dst_host']\n\n\nclass Sniffer(object):\n\n def update_white_list(self):\n self.res = [el['name'] for el in db_white_list.find()]\n\n def __init__(self, path_to_config):\n self.update_white_list()\n self.config = configparser.ConfigParser()\n self.config.read(path_to_config)\n 
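# optional logging setup (kept commented out); logger and log_level are not\n        
# defined in this module, so they would need to be set up before enabling\n        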
#logger.setLevel(log_level[self.config['DEFAULT']['LOG_LEVEL']])\n #log_stream_handler = logging.StreamHandler(sys.stdout)\n #log_file_handler = logging.FileHandler('logs.log')\n #formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n #log_stream_handler.setFormatter(formatter)\n #logger.addHandler(log_stream_handler)\n #logger.debug('Init Complete')\n return None\n\n def decompile_packet(self, pkt):\n if self.run_flag == False:\n raise StopCapture()\n try:\n if '.'.join(pkt.dns.qry_name.split('.')[-2:]) in self.res:\n return\n result = {\n 'src': pkt.ip.src,\n 'dst': pkt.ip.dst,\n 'len': pkt.ip.len,\n 'qr': pkt.dns.flags_response,\n 'count_queries': pkt.dns.count_queries,\n 'qry_name': pkt.dns.qry_name,\n 'qry_type': pkt.dns.qry_type,\n 'count_labels': pkt.dns.count_labels\n }\n\n if pkt.dns.flags_response == '1':\n attributes = pkt.dns.field_names\n r_types = [\n 'a', 'aaaa', 'cname', 'txt', 'ns', 'mx', 'soa', 'dnskey'\n ]\n result.update({\n 'resp_type':\n pkt.dns.resp_type,\n 'resp_ttl':\n pkt.dns.resp_ttl,\n 'resp_len':\n pkt.dns.resp_len,\n 'count_auth_rr':\n pkt.dns.count_auth_rr,\n 'count_add_rr':\n pkt.dns.count_add_rr,\n 'count_answers':\n pkt.dns.count_answers,\n 'count_rtypes':\n len([x for x in attributes if x in r_types])\n })\n elif pkt.dns.flags_response == '0':\n result.update({\n 'resp_type': 0,\n 'resp_ttl': 0,\n 'resp_len': 0,\n 'count_auth_rr': 0,\n 'count_add_rr': 0,\n 'count_answers': 0,\n 'count_rtypes': 0\n })\n self.send_to_db(result)\n except:\n pass\n\n def run_capture(self):\n self.run_flag = True\n\n while self.run_flag:\n try:\n '''interface=self.config['DEFAULT'],['INTERFACE']'''\n capture = pyshark.LiveCapture(display_filter='dns')\n capture.apply_on_packets(self.decompile_packet)\n except StopCapture:\n break\n\n @staticmethod\n def send_to_db(document=None):\n db_pkt_col_packets.insert_one(document)\n\n def stop_capture(self):\n self.run_flag = False\n pass\n","sub_path":"main_module/sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"387821485","text":"from map import Map, Obstacle\nfrom reference_path import ReferencePath\nfrom simulator import Simulator\nfrom mpc import MPC\nfrom model import simple_bycicle_model\nimport do_mpc\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom casadi import *\nfrom casadi.tools import *\nimport pdb\nimport sys\nimport time\nimport globals\nsys.path.append('../../')\n\n\n\"\"\" User settings: \"\"\"\nshow_animation = True\nstore_results = False\n\n# Load map file\nmap = Map(file_path='maps/sim_map.png', origin=[-1, -2],\n resolution=0.005)\n\n# Specify waypoints\nwp_x = [-0.75, -0.25, -0.25, 0.25, 0.25, 1.25, 1.25, 0.75, 0.75, 1.25,\n 1.25, -0.75, -0.75, -0.25]\nwp_y = [-1.5, -1.5, -0.5, -0.5, -1.5, -1.5, -1, -1, -0.5, -0.5, 0, 0,\n -1.5, -1.5]\n\n# Specify path resolution\npath_resolution = 0.05 # m / wp\n\n# Create smoothed reference path\nreference_path = ReferencePath(map, wp_x, wp_y, path_resolution,\n smoothing_distance=5, max_width=0.23,\n circular=True)\n\n# Add obstacles\nuse_obstacles = False\nif use_obstacles:\n obs1 = Obstacle(cx=0.0, cy=0.0, radius=0.05)\n obs2 = Obstacle(cx=-0.8, cy=-0.5, radius=0.08)\n obs3 = Obstacle(cx=-0.7, cy=-1.5, radius=0.05)\n obs4 = Obstacle(cx=-0.3, cy=-1.0, radius=0.08)\n obs5 = Obstacle(cx=0.27, cy=-1.0, radius=0.05)\n obs6 = Obstacle(cx=0.78, cy=-1.47, radius=0.05)\n obs7 = Obstacle(cx=0.73, cy=-0.9, 
radius=0.07)\n obs8 = Obstacle(cx=1.2, cy=0.0, radius=0.08)\n obs9 = Obstacle(cx=0.67, cy=-0.05, radius=0.06)\n map.add_obstacles([obs1, obs2, obs3, obs4, obs5, obs6, obs7,\n obs8, obs9])\n\n\"\"\"\nGet configured do-mpc modules:\n\"\"\"\nvehicle = simple_bycicle_model(\n length=0.12, width=0.06, Ts=0.05, reference_path=reference_path)\nmodel = vehicle.model\n\ncontroller = MPC(vehicle)\nmpc = controller.mpc\n\nsimulator = Simulator(vehicle).simulator\n\n# Compute speed profile\nay_max = 4.0 # m/s^2\na_min = -0.1 # m/s^2\na_max = 0.5 # m/s^2\nSpeedProfileConstraints = {'a_min': a_min, 'a_max': a_max,\n 'v_min': 0.0, 'v_max': 1.0, 'ay_max': ay_max}\nvehicle.reference_path.compute_speed_profile(SpeedProfileConstraints)\n\n\n\"\"\"\nSet initial state\n\"\"\"\n\nx0 = np.array([vehicle.reference_path.waypoints[0].x, vehicle.reference_path.waypoints[0].y,\n 0, 0])\nmpc.x0 = x0\nsimulator.x0 = x0\n\n# Use initial state to set the initial guess.\nmpc.set_initial_guess()\n\n\"\"\"\nSetup graphic:\n\"\"\"\n\nfig, ax, graphics = do_mpc.graphics.default_plot(mpc.data)\nplt.ion()\n\n\"\"\"\nRun MPC main loop:\n\"\"\"\n\nk = 0\nwhile globals.s < reference_path.length:\n vehicle.get_current_waypoint()\n print(\"======= wp_id ======== \", vehicle.wp_id)\n u0 = mpc.make_step(x0)\n x0 = simulator.make_step(u0)\n controller.distance_update(x0)\n\n if show_animation:\n\n # Plot path and drivable area\n reference_path.show(wp=vehicle.current_waypoint)\n vehicle.show(x0)\n plt.axis('off')\n plt.pause(0.001)\n plt.show()\n\n graphics.plot_results(t_ind=k)\n graphics.plot_predictions(t_ind=k)\n graphics.reset_axes()\n plt.show()\n plt.pause(0.01)\n\n k += 1\n\ninput('Press any key to exit.')\n\n# Store results:\nif store_results:\n do_mpc.data.save_results([mpc, simulator], 'lateral control')\n","sub_path":"Multi-Purpose-MPC-master/Test/Toy_bicycle_exmple/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"203412826","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport cotyledon\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom racoon import endpoint\nfrom racoon import messaging\n\nLOG = log.getLogger(__name__)\n\n\nclass CollectorService(cotyledon.Service):\n\n def __init__(self, worker_id):\n super(CollectorService, self).__init__(worker_id)\n self.event_listener = None\n\n def run(self):\n transport = messaging.get_transport()\n if transport:\n event_targets = messaging.get_targets(\n cfg.CONF.oslo_messaging_notifications.topics\n )\n self.event_listener = messaging.get_event_listener(\n transport, event_targets,[endpoint.SampleEndpoint()],\n allow_requeue=True\n )\n LOG.info('start event listener')\n self.event_listener.start()\n\n def terminate(self):\n\n \"\"\"\n kill listener\n \"\"\"\n\n if self.event_listener:\n self.event_listener.stop()\n self.event_listener.wait()\n super(CollectorService, self).terminate()\n\nif __name__ == \"__main__\":\n from racoon import service\n service.prepare_service()\n cs = CollectorService(1)\n cs.run()\n","sub_path":"racoon/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279072752","text":"# This file is part of Phoenix\n#\n# Copyright (c) 2016, 2017 Vasantha Ganesh K.\n#\n# For the full copyright and license information, please view the LICENSE file\n# that was distributed with is source 
code.\n\nimport seccomp\nimport prctl\n\ndef syscall_filter():\n\n    prctl.set_dumpable(0)\n    prctl.set_no_new_privs(1)\n\n    fltr = seccomp.SyscallFilter(defaction=seccomp.ALLOW)\n\n    fltr.add_rule(seccomp.KILL, 'clone')\n    fltr.add_rule(seccomp.KILL, 'fork')\n\n    fltr.load()\n\n    \n\n","sub_path":"src/engine/syscall_filter.py","file_name":"syscall_filter.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"215748783","text":"\"\"\"\nOpen AI Gym MountainCar-v1\nNick Kaparinos\n2021\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom stable_baselines3.common.callbacks import BaseCallback\nfrom tqdm import tqdm\n\n\nclass LogStepsCallback(BaseCallback):\n    def __init__(self, log_dir, verbose=0):\n        self.log_dir = log_dir\n        super(LogStepsCallback, self).__init__(verbose)\n\n    def _on_training_start(self) -> None:\n        self.results = pd.DataFrame(columns=['Reward', 'Done'])\n        print(\"Training starts!\")\n\n    def _on_step(self) -> bool:\n        if 'reward' in self.locals:\n            keys = ['reward', 'done']\n        else:\n            keys = ['rewards', 'dones']\n        self.results.loc[len(self.results)] = [self.locals[keys[0]][0], self.locals[keys[1]][0]]\n        return True\n\n    def _on_training_end(self) -> None:\n        self.results.to_csv(self.log_dir + 'training_data.csv', index=False)\n        print(\"Training ends!\")\n\n\n
class TqdmCallback(BaseCallback):\n    def __init__(self):\n        super().__init__()\n        self.progress_bar = None\n\n    def _on_training_start(self):\n        self.progress_bar = tqdm(total=self.locals['total_timesteps'])\n\n    def _on_step(self):\n        self.progress_bar.update(1)\n        return True\n\n    def _on_training_end(self):\n        self.progress_bar.close()\n        self.progress_bar = None\n\n\ndef save_dict_to_file(dict, path):\n    f = open(path + '/hyperparameter_dict.txt', 'w')\n    f.write(str(dict))\n    f.close()\n\n\n
def calc_episode_rewards(training_data):\n    # Calculate the rewards for each training episode\n    episode_rewards = []\n    temp_reward_sum = 0\n\n    for step in range(training_data.shape[0]):\n        reward, done = training_data.iloc[step, :]\n        temp_reward_sum += reward\n        if done:\n            episode_rewards.append(temp_reward_sum)\n            temp_reward_sum = 0\n\n    result = pd.DataFrame(columns=['Reward'])\n    result['Reward'] = episode_rewards\n    return result\n\n\n
def learning_curve(log_dir, window=10):\n    # Read data\n    training_data = pd.read_csv(log_dir + 'training_data.csv', index_col=None)\n\n    # Calculate episode rewards\n    episode_rewards = calc_episode_rewards(training_data)\n\n    # Calculate rolling window metrics\n    rolling_average = episode_rewards.rolling(window=window, min_periods=window).mean().dropna()\n    rolling_max = episode_rewards.rolling(window=window, min_periods=window).max().dropna()\n    rolling_min = episode_rewards.rolling(window=window, min_periods=window).min().dropna()\n\n    # Change column name\n    rolling_average.columns = ['Average Reward']\n    rolling_max.columns = ['Max Reward']\n    rolling_min.columns = ['Min Reward']\n    rolling_data = pd.concat([rolling_average, rolling_max, rolling_min], axis=1)\n\n    # Plot\n    sns.set()\n    ax = sns.lineplot(data=rolling_data)\n    ax.fill_between(rolling_average.index, rolling_min.iloc[:, 0], rolling_max.iloc[:, 0], alpha=0.2)\n    ax.set_title('Learning Curve')\n    ax.set_ylabel('Reward')\n    ax.set_xlabel('Episodes')\n\n    # Save figure\n    plt.savefig(log_dir + 'learning_curve.png')\n","sub_path":"Classic 
Control/CartPole/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262186598","text":"# 文字列\"stressed\"の文字を逆に(末尾から先頭に向かって)並べた文字列を得よ\nimport random\nimport re\nword = \"stressed\"\nrev_word = word[::-1]\nprint(rev_word)\n\nword = \"stressed\"\nrev_word = \"\".join(reversed(list(word)))\nprint(rev_word)\n\n# ----\n# 「パタトクカシーー」という文字列の1,3,5,7���字目を取り出して連結した文字列を得よ.\nword2 = \"パタトクカシーー\"\nprint(word2[::2])\n\nword2 = \"パタトクカシーー\"\nfor i in range(len(word2)):\n if not i % 2:\n print(word2[i], end='')\n\n# ---\n# 02. 「パトカー」+「タクシー」=「パタトクカシーー」\n# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\nword_p = \"パトカー\"\nword_t = \"タクシー\"\n\nresult = \"\"\nfor i in range(len(word_p)):\n result += word_p[i] + word_t[i]\nprint(result)\n\nword_p = \"パトカー\"\nword_t = \"タクシー\"\n\nresult = \"\"\nfor p, t in zip(word_p, word_t):\n result += p + t\nprint(result)\n\n# ---\n# 円周率\n# \"Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.\"\n# という文を単語に分解し,各単語の(アルファベットの)文字数を先頭から出現順に並べたリストを作成せよ.\nsentence = \"Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.\"\n\nwords = re.split(r'[,|.| ]', sentence)\n\n# lambda の使用例\n# original_list = list(range(10))\n# map(第一関数, リスト())\n# mapped_list = map(lambda x: x ** 2, original_list)\nwords = list(filter(lambda w: w != '', words))\nprint(words)\n\n# map(第一関数, リスト())\nwords_len = list(map(len, words))\nprint(words_len)\n\n# ---\n# 04. 元素記号\n# \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"という文を単語に分解し,\n# 1, 5, 6, 7, 8, 9, 15, 16, 19番目の単語は先頭の1文字,\n# それ以外の単語は先頭に2文字を取り出し,取り出した文字列から単語の位置(先頭から何番目の単語か)への連想配列(辞書型もしくはマップ型)を作成せよ\n\nsentence = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"\n\ninitials = [1, 5, 6, 6, 8, 9, 15, 16, 19]\n\nwords = sentence.split()\n# print(words)\n\nelement_symbols = {}\n\nfor i, word in enumerate(words):\n if i + 1 in initials:\n element_symbols[i + 1] = word[0]\n else:\n element_symbols[i + 1] = word[:2]\nprint(element_symbols)\n\n# ---------------------------------------\n# 05. n-gram\n# 与えられたシーケンス(文字列やリストなど)からn-gramを作る関数を作成せよ\n# この関数を用い,\"I am an NLPer\"という文から単語bi-gram,文字bi-gramを得よ.\n\n# ---------------------------------------\n# 1\nsentence = \"I am an NLPer\"\n\n\ndef word_n_gram(sentence, N):\n \"\"\"\n 単語のn-gramを返す\n \"\"\"\n words = sentence.split() # ['I', 'am', 'an', 'NLPer']\n result = []\n for i, c in enumerate(words):\n if i + N > len(words):\n return result\n result.append(words[i:i + N])\n\n\nprint(word_n_gram(sentence, 2))\n# [['I', 'am'], ['am', 'an'], ['an', 'NLPer']]\nprint(word_n_gram(sentence, 3))\n# [['I', 'am', 'an'], ['am', 'an', 'NLPer']]\n\n\ndef char_n_gram(sentence, N):\n \"\"\"\n 文字のn-gramを返す\n \"\"\"\n result = []\n for i in range(len(sentence)):\n if i + N > len(sentence):\n return result\n result.append(sentence[i:i + N])\n\n\n# ['I ', ' a', 'am', 'm ', ' a', 'an', 'n ', ' N', 'NL', 'LP', 'Pe', 'er']\nprint(char_n_gram(sentence, 2))\n# ['I a', ' am', 'am ', 'm a', ' an', 'an ', 'n N', ' NL', 'NLP', 'LPe', 'Per']\nprint(char_n_gram(sentence, 3))\n\n# ---------------------------------------\n# 07. 
テンプレートによる文生成\n# 引数x, y, zを受け取り「x時のyはz」という文字列を返す関数を実装せよ.\n# さらに,x = 12, y = \"気温\", z = 22.4として,実行結果を確認せよ\n\nx = 12\ny = \"気温\"\nz = 22.4\n\n\ndef template(x, y, z):\n return \"{0}時の{1}は{2}\".format(x, y, z)\n\n\nprint(template(x, y, z))\n\n# ---------------------------------------\n# 暗号文\n# 与えられた文字列の各文字を,以下の仕様で変換する関数cipherを実装せよ.\n# 英小文字ならば(219 - 文字コード)の文字に置換\n# その他の文字はそのまま出力\n# この関数を用い,英語のメッセージを暗号化・復号化せよ\n\n# ---------------------------------------\n\ns = 'aaa@xxx.com bbb@yyy.com ccc@zzz.com'\n\n# re.sub('正規表現パターン','置換先文字列(関数も使用可),'処理対象')\nprint(re.sub('[a-z]*@', 'ABC@', s))\n# ABC@xxx.com ABC@yyy.com ABC@zzz.com\n\n\ndef cipher(src):\n \"\"\"\n 英小文字なら(219 - 文字コード)の文字に置換\n その他の文字はそのまま出力\n \"\"\"\n # m はマッチオブジェクト, マッチオブジェクトから文字列を取り出すために m.group(0)を使用\n # ord()は文字を数値に chr()は数値を文字列に変換\n return re.sub(r'[a-z]', lambda m: chr(219 - ord(m.group(0))), src)\n\n\ntext = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"\n\nprint(cipher(text))\n# Hr Hv Lrvw Bvxzfhv Blilm Clfow Nlg Ocrwrav Foflirmv. Nvd Nzgrlmh Mrtsg Aohl Srtm Pvzxv Svxfirgb Cozfhv. Aigsfi Krmt Czm.\nprint(cipher(cipher(text)))\n# Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\nprint(cipher(cipher(cipher(text))))\n# Hr Hv Lrvw Bvxzfhv Blilm Clfow Nlg Ocrwrav Foflirmv. Nvd Nzgrlmh Mrtsg Aohl Srtm Pvzxv Svxfirgb Cozfhv. Aigsfi Krmt Czm.\n\n# ---------------------------------------\n# 09. Typoglycemia\n# スペースで区切られた単語列に対して,各単語の先頭と末尾の文字は残し,\n# それ以外の文字の順序をランダムに並び替えるプログラムを作成せよ.\n# ただし,長さが4以下の単語は並び替えないこととする.\n# 適当な英語の文(例えば\"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\")を与え,その実行結果を確認せよ.\n# ---------------------------------------\n\n\ndef Typoglycemia(text):\n def random_word(word):\n # 5文字以下はそのまま出力\n if len(word) < 5:\n return word\n\n # 5文字の場合先頭と末尾を固定\n if len(word) == 5:\n # リストを文字列に変換する\n # 先頭と末尾以外をランダムに並び替える\n arr = list(word[1:-1])\n random.shuffle(arr)\n # 先頭と末尾を加え返す\n return word[0] + \"\".join(arr) + word[-1]\n\n # 6文字以上は前後2文字を固定\n else:\n arr = list(word[3:-3])\n random.shuffle(arr)\n return word[:3] + \"\".join(arr) + word[-3:]\n\n # text.split()で単語ごとに分けたリストを作る\n # => ['I', \"couldn't\", 'believe', 'that', 'I', 'could', 'actually', 'understand', 'what', 'I', 'was', 'reading:', 'the', 'phenomenal', 'power', 'of', 'the', 'human', 'mind', '.']\n\n # map(第一関数,リスト()) で順番に単語を処理\n # ※この時点ではmap型オブジェクトのため中身は見えない \n\n # map型オブジェクトを list()でリスト型オブジェクトに変換\n # => ['I', \"clnuo'dt\", 'bivelee', 'that', 'I', 'cloud', 'autcally', 'unsdtaenrd', 'what', 'I', 'was', 'rigedna:', 'the', 'panhmeenol', 'pewor', 'of', 'the', 'hmuan', 'mind', '.']\n\n return \" \".join(list(map(random_word, text.split())))\n\n\ntext = \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\"\nprint(text)\nprint(Typoglycemia(text))\n","sub_path":"language-processing/00-09.py","file_name":"00-09.py","file_ext":"py","file_size_in_byte":8073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233703314","text":"import csv\n\n# how to write in cvs file\n\n# with open(\"data.cvs\", \"w\") as file:\n# # open writer to write in the file\n# writer = csv.writer(file)\n# writer.writerow([\"transaction_id\", \"product_name\", \"product_price\"])\n# writer.writerow([1, \"laptop\", 39])\n# writer.writerow([2, \"mac\", 59])\n\n# how to read\nwith open(\"data.cvs\") 
as file:\n # open writer to write in the file\n reader = csv.reader(file)\n # print(list(reader))\n for row in reader:\n print(row)\n","sub_path":"work_with_cvs.py","file_name":"work_with_cvs.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"474081827","text":"import turtle\n\n\ndef draw_triangle():\n windows = turtle.Screen()\n\n mango = turtle.Turtle()\n\n mango.shape('turtle')\n mango.speed(1)\n for i in range(0, 3):\n mango.right(60)\n mango.forward(200)\n mango.right(60)\n\n windows.exitonclick()\n\n\ndraw_triangle()\n","sub_path":"Fun/Drawing with code/draw-triangle_turtle.py","file_name":"draw-triangle_turtle.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"466260754","text":"################################################################################\n# 2810ict - Assignment 2\n#\n#\n# Query the Database\n#\n#\n# Created by Brianna Sonter | s2930629\n#\n################################################################################\n\n#imports\nimport re\nimport sqlite3\nimport openpyxl\nfrom openpyxl import *\nimport sys\n\n#function to connect to Database\ndef connectDB():\n try:\n db_fname = \"data/foodData.db\"\n connection = sqlite3.connect(db_fname)\n #let user know that connection to the database was successful\n print(\"Connected to Database successfully\")\n return connection\n\n except:\n #let user know that connection to database couldnt be created\n print(\"Error: Could not connect to Database\")\n\n#function to close the connection to database\ndef closeDB(_connection):\n try:\n connection = _connection\n connection.close()\n print(\"Connection closed\")\n\n except:\n #let user know database was unable to close\n print(\"Error: Was unable to close connection to Database\")\n\n#Open database\nconnection = connectDB()\ncursor = connection.cursor()\n\n#function to create new table in the database\ndef createPreviousViolationsTable():\n try:\n #create table for the previous violations\n create_violations_table = \"\"\" CREATE TABLE IF NOT EXISTS previousviolations (\n name TEXT,\n address TEXT,\n zip NUMERIC,\n city TEXT\n );\n \"\"\"\n cursor.execute(create_violations_table)\n\n print(\"New table creation in database was successful\")\n\n except:\n print(\"Error: Database table creation was a failure :(\")\n closeDB(connection)\n\n#function to query the Database\ndef findBusinessViolations():\n try:\n #let user know that violation codes are being found\n sys.stdout.write(\"Finding Businesses with previous violations... \")\n sys.stdout.flush()\n\n #find every different violation code, its description and\n #how many times it occurs\n query = \"\"\"\n SELECT facility_name, facility_address, facility_zip, facility_city\n FROM inspections i, violations v\n WHERE i.serial_number=v.serial_number\n GROUP BY facility_name\n ORDER BY facility_name;\n \"\"\"\n\n #execute query on the violations and inspections tables\n cursor.execute(query)\n\n #notify user that the query has been found\n print(\"Violations have been found\")\n\n except:\n #notify user that the query could not be completed\n print(\"Could not complete query.\")\n\n#insert query into new table\ndef insertDatatoTable():\n\n try:\n sys.stdout.write(\"Writing query data to database... 
\")\n        sys.stdout.flush()\n\n        businessdata = cursor.fetchall()\n\n        for row in businessdata:\n            _name = row[0]\n            _address = row[1]\n            _zip = row[2]\n            _city = row[3]\n\n            sql = \"\"\"\n            INSERT INTO previousviolations(\n                name,\n                address,\n                zip,\n                city\n            )\n            VALUES( ?, ?, ?, ?)\n            \"\"\"\n\n            values = (_name, _address, _zip, _city)\n\n            #match values with how to insert data and insert into database\n            cursor.execute(sql, values)\n\n        #commit data to database\n        connection.commit()\n        #let user know data has been committed\n        print(\"Data was inserted successfully for previous violations\")\n\n    except:\n        print(\"Could not insert data into table\")\n\n\n
#function to find how many violations each business has (that has at least 1)\ndef businessViolationCount():\n    try:\n        #let user know that the businesses are being found\n        sys.stdout.write(\"Finding Businesses with previous violations... \")\n        sys.stdout.flush()\n\n        #find every business with a violation code\n        query = \"\"\"\n        SELECT facility_name, COUNT(*)\n        FROM inspections i, violations v\n        WHERE i.serial_number=v.serial_number\n        GROUP BY facility_name\n        ORDER BY COUNT(*);\n        \"\"\"\n\n        #execute query on the violations database\n        cursor.execute(query)\n\n        data = cursor.fetchall()\n        for row in data:\n            print(row)\n        #notify user that violations have been found\n        print(\"Violations have been found\")\n\n    except:\n        #notify user that the query could not be completed\n        print(\"Could not complete query.\")\n\n\n#run functions as program\nif __name__ == '__main__':\n    createPreviousViolationsTable()\n\n    findBusinessViolations()\n\n    insertDatatoTable()\n\n    businessViolationCount()\n","sub_path":"sql_food.py","file_name":"sql_food.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"631626541","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport functools\nimport numpy as np\nimport tensorflow as tf\n\nTRAIN_DATA_URL = \"https://storage.googleapis.com/tf-datasets/titanic/train.csv\"\nTEST_DATA_URL = \"https://storage.googleapis.com/tf-datasets/titanic/eval.csv\"\n\ntrain_file_path = tf.keras.utils.get_file(\"train.csv\", TRAIN_DATA_URL)\ntest_file_path = tf.keras.utils.get_file(\"eval.csv\", TEST_DATA_URL)\n\nnp.set_printoptions(precision=3, suppress=True)\n\nimport pandas as pd\n# numeric columns of the titanic CSV (assumed; NUMERIC_FEATURES is not defined elsewhere in this snippet)\nNUMERIC_FEATURES = ['age', 'n_siblings_spouses', 'parch', 'fare']\ndesc = pd.read_csv(train_file_path)[NUMERIC_FEATURES].describe()\ndesc.T\n\nCATEGORIES = {\n    'sex': ['male', 'female'],\n    'class' : ['First', 'Second', 'Third'],\n    'deck' : ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],\n    'embark_town' : ['Cherbourg', 'Southampton', 'Queenstown'],\n    'alone' : ['y', 'n']\n}\n\ncategorical_columns = []\nfor feature, vocab in CATEGORIES.items():\n    cat_col = tf.feature_column.categorical_column_with_vocabulary_list(\n        key=feature, vocabulary_list=vocab)\n    categorical_columns.append(tf.feature_column.indicator_column(cat_col))","sub_path":"modle/data_csv.py","file_name":"data_csv.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"523417042","text":"import numpy as np;\nimport math;\n\n# 一番上の星の座標(これは気合いで計算)の行列\na = np.array([\n    [0., -21.6], # 1点目\n    [3.9092, -9.468], # 2点目\n    [-6.4704, -17.0168], # 3点目\n    [6.3356, -17.0168], # 4点目\n    [-4.044, -9.468] # 5点目\n]);\n\n# 一番上の星から時計回りに 2pi/5 * num ラジアンだけ回転させた星(の5点)の座標を計算\n# 回転の中心は(0, 0)\ndef rotate(num, X, Y):\n    X2 = math.cos((2 * (math.pi) / 5) * num) * X - math.sin((2 * (math.pi) / 5) 
* num) * Y\n Y2 = math.sin((2 * (math.pi) / 5) * num) * X + math.cos((2 * (math.pi) / 5) * num) * Y\n return np.hstack((X2.reshape(5, 1), Y2.reshape(5, 1)))\n\n# 移動用行列\nb = np.array([\n [60, 36],\n [60, 36],\n [60, 36],\n [60, 36],\n [60, 36],\n])\n\n# 計算した星5個×5点=25点を(+60, +36)だけ移動させる\nfor i in range(5):\n Z = rotate(i, a[:,0], a[:,1]) + b\n print('beginShape();')\n for x in range(Z.shape[0]):\n print(' vertex({0}, {1});'.format(round(Z[x][0], 1), round(Z[x][1], 1)))\n print('endShape();')\n print('')\n","sub_path":"python/introduction_to_programming/singapore_stars.py","file_name":"singapore_stars.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591498776","text":"# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for executor_service.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport asyncio\n\nfrom absl.testing import absltest\n\nimport grpc\nfrom grpc.framework.foundation import logging_pool\nimport portpicker\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.proto.v0 import executor_pb2\nfrom tensorflow_federated.proto.v0 import executor_pb2_grpc\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.impl import eager_executor\nfrom tensorflow_federated.python.core.impl import executor_service\nfrom tensorflow_federated.python.core.impl import executor_service_utils\n\n\nclass ExecutorServiceTest(absltest.TestCase):\n\n def setUp(self):\n super(ExecutorServiceTest, self).setUp()\n port = portpicker.pick_unused_port()\n server_pool = logging_pool.pool(max_workers=1)\n self._server = grpc.server(server_pool)\n self._server.add_insecure_port('[::]:{}'.format(port))\n self._service = executor_service.ExecutorService(\n eager_executor.EagerExecutor())\n executor_pb2_grpc.add_ExecutorServicer_to_server(self._service,\n self._server)\n self._server.start()\n self._channel = grpc.insecure_channel('localhost:{}'.format(port))\n self._stub = executor_pb2_grpc.ExecutorStub(self._channel)\n\n def tearDown(self):\n # TODO(b/134543154): Find some way of cleanly disposing of channels that is\n # consistent between Google-internal and OSS stacks.\n try:\n self._channel.close()\n except AttributeError:\n # The `.close()` method does not appear to be present in grpcio 1.8.6, so\n # we have to fall back on explicitly calling the destructor.\n del self._stub\n del self._channel\n self._server.stop(None)\n super(ExecutorServiceTest, self).tearDown()\n\n def _extract_value_from_service(self, value_id):\n # pylint: disable=protected-access\n with self._service._lock:\n future_val = self._service._values[value_id]\n # pylint: enable=protected-access\n\n value = asyncio.get_event_loop().run_until_complete(future_val)\n self.assertIsInstance(value, eager_executor.EagerValue)\n return value\n\n def 
 test_executor_service_create_tensor_value(self):\n value_proto = executor_service_utils.serialize_value(\n tf.constant(10.0).numpy(), tf.float32)\n request = executor_pb2.CreateValueRequest(value=value_proto)\n response = self._stub.CreateValue(request)\n self.assertIsInstance(response, executor_pb2.CreateValueResponse)\n value_id = str(response.value_ref.id)\n value = self._extract_value_from_service(value_id)\n self.assertEqual(value.internal_representation.numpy(), 10.0)\n\n def test_executor_service_create_computation_value(self):\n\n @computations.tf_computation\n def comp():\n return tf.constant(10)\n\n value_proto = executor_service_utils.serialize_value(comp)\n request = executor_pb2.CreateValueRequest(value=value_proto)\n response = self._stub.CreateValue(request)\n self.assertIsInstance(response, executor_pb2.CreateValueResponse)\n value_id = str(response.value_ref.id)\n value = self._extract_value_from_service(value_id)\n self.assertTrue(callable(value.internal_representation))\n self.assertEqual(value.internal_representation().numpy(), 10.0)\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_v2_behavior()\n absltest.main()\n","sub_path":"tensorflow_federated/python/core/impl/executor_service_test.py","file_name":"executor_service_test.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"510224702","text":"'''\nCreated on 30.04.2015\n\n@author: marscher\n'''\nimport unittest\nimport os\nimport tempfile\nfrom glob import glob\n\nfrom pyemma.coordinates.data.traj_info_cache import _TrajectoryInfoCache as TrajectoryInfoCache\nimport mdtraj\n\npath = os.path.join(os.path.split(__file__)[0], 'data')\n# os.path.join(path, 'bpti_mini.xtc')\nxtcfiles = glob(path + os.path.sep + \"*.xtc\")\npdbfile = os.path.join(path, 'bpti_ca.pdb')\n\n\nclass TestTrajectoryInfoCache(unittest.TestCase):\n\n def setUp(self):\n self.tmpfile = tempfile.mktemp()\n self.db = TrajectoryInfoCache(self.tmpfile)\n\n def testCacheResults(self):\n # cause cache failures\n results = {}\n for f in xtcfiles:\n results[f] = self.db[f]\n\n desired = {}\n for f in xtcfiles:\n with mdtraj.open(f) as fh:\n desired[f] = len(fh)\n\n self.assertEqual(results, desired)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pyemma/coordinates/tests/test_traj_info_cache.py","file_name":"test_traj_info_cache.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"548585717","text":"__author__ = 'user'\n\nfrom tkinter.ttk import Frame, Label\nfrom tkinter import Tk, BOTH, Listbox, StringVar, END, ttk, W, E\nimport sqlite3\n\n\nclass BookManager(Frame):\n # Constructor\n # Connects to the DB\n # Creates the labels and entries\n # Initial UI setup\n # Centers the window\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.conn = sqlite3.connect('bookInfo.db')\n self.cur = self.conn.cursor()\n self.parent = parent\n self.nameLabel = StringVar()\n self.authorLabel = StringVar()\n self.priceLabel = StringVar()\n self.infoLabel = StringVar()\n self.nameEntry = ttk.Entry(self)\n self.authorEntry = ttk.Entry(self)\n self.priceEntry = ttk.Entry(self)\n self.initUI()\n self.centerWindow()\n\n # Handles window creation and widget placement\n def initUI(self):\n self.parent.title(\"Book DataBase\")\n self.pack(fill=BOTH, expand=1)\n\n self.lb = Listbox(self, height=15, width=93)\n self.lb.place(x=20, y=100)\n\n # Creates labels and renders them on screen\n self.createLabel(65, 25, self.nameLabel, 'Book name : ')\n self.createLabel(65, 45,
 self.authorLabel, 'Author: ')\n self.createLabel(65, 65, self.priceLabel, 'Price: ')\n # Creates entries and renders them on screen\n self.createEntry(2, 1, self.nameEntry)\n self.createEntry(3, 1, self.authorEntry)\n self.createEntry(4, 1, self.priceEntry)\n\n # Creates the buttons and wires up their events\n add = ttk.Button(self, text=\"ADD\", command=lambda: self.addBook())\n add.grid(row=1, column=0, padx=20)\n delete = ttk.Button(self, text=\"DEL\", command=lambda: self.delBook())\n delete.grid(row=1, column=1, padx=20)\n search = ttk.Button(self, text=\"SEARCH\", command=lambda: self.search())\n search.grid(row=1, column=2, padx=20)\n show = ttk.Button(self, text=\"SHOW\", command=lambda: self.showBookInfo())\n show.grid(row=1, column=3, padx=20)\n\n # Display the book information\n self.showBookInfo()\n\n # Creates a label at the given position from the passed arguments\n def createLabel(self, x, y, o, s):\n o.set(s)\n self.label = Label(self, text=0, textvariable=o)\n self.label.place(x=x, y=y)\n\n # Places an entry at the given position from the passed arguments\n def createEntry(self, row, col, o):\n o.grid(row=row, column=col, columnspan=4, sticky=W + E)\n\n # Centers the window\n def centerWindow(self):\n w = 700\n h = 400\n\n # Get the screen size\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n # Responsible for executing queries\n def executeQuery(self, query):\n self.query = query\n self.cur.execute(query)\n self.conn.commit()\n\n # Searches for book information\n def search(self):\n # Clear the whole list\n self.lb.delete(0, END)\n\n # If a value was entered for the name\n if (self.nameEntry.get() != ''):\n # Build the query so that a partial input is enough to search\n self.query = 'select count (*) from book where name like \\'%{0}%\\''.format(self.nameEntry.get())\n self.executeQuery(self.query)\n # If no matching book information exists\n if (self.cur.fetchone()[0] == 0):\n self.createLabel(20, 350, self.infoLabel, 'No matching information exists')\n\n # If book information exists, fetch every book matching the pattern\n self.query = 'select * from book where name like \\'%{0}%\\''.format(self.nameEntry.get())\n self.executeQuery(self.query)\n # If a value was entered for the author\n elif (self.authorEntry.get() != ''):\n # Build the query so that a partial input is enough to search\n self.query = 'select count (*) from book where author like \\'%{0}%\\''.format(self.authorEntry.get())\n self.executeQuery(self.query)\n # If no matching book information exists\n if (self.cur.fetchone()[0] == 0):\n self.createLabel(20, 350, self.infoLabel, 'No matching information exists')\n\n # If book information exists, fetch every book matching the pattern\n self.query = 'select * from book where author like \\'%{0}%\\''.format(self.authorEntry.get())\n self.executeQuery(self.query)\n # If no value was entered\n else:\n self.createLabel(20, 350, self.infoLabel, 'Please enter the information correctly')\n return\n\n # Display the found book information\n for book in self.cur.fetchall():\n print(book)\n s = 'Book name : {0[0]:30}Author : {0[1]:30}Price : {0[2]:30}'.format(book)\n self.lb.insert(END, s)\n\n # Display all book information\n def showBookInfo(self):\n temp = ''\n\n self.lb.delete(0, END)\n # Query that fetches every record\n self.query = 'select * from book'\n self.executeQuery(self.query)\n # Removing a book can unintentionally leave duplicate rows with identical information,\n # so compare against a temporary value and only display rows that are not duplicates\n for book in self.cur.fetchall():\n rec = 'Book name : {0[0]:30}Author : {0[1]:30}Price : {0[2]:30}'.format(book)\n if (temp == rec):\n continue\n self.lb.insert(END, rec)\n temp = rec\n\n # Add book information\n def addBook(self):\n # Adding a book requires the name, author and price to all be given\n if (self.nameEntry.get() == '' or self.authorEntry.get() == '' or self.priceEntry.get() == ''):\n self.createLabel(20, 350, self.infoLabel, 'Please enter the information correctly')\n return\n\n # Query that inserts the book information\n self.query = 'INSERT INTO book VALUES (\\'{0}\\', \\'{1}\\', \\'{2}\\')'.format(self.nameEntry.get(),\n self.authorEntry.get(),\n self.priceEntry.get())\n self.cur.execute(self.query)\n self.createLabel(20, 350, self.infoLabel, 'Added successfully')\n self.showBookInfo()\n self.clearEntry()\n\n # Remove book information\n def delBook(self):\n # Removal requires both the book name and the author to be given\n if (self.nameEntry.get() == '' or self.authorEntry.get() == ''):\n self.createLabel(20, 350, self.infoLabel, 'Please enter the information correctly')\n return\n\n # Query that deletes the book information\n self.query = 'DELETE FROM book WHERE name = \\'{0}\\' and author = \\'{1}\\''.format(self.nameEntry.get(),\n self.authorEntry.get())\n self.cur.execute(self.query)\n self.showBookInfo()\n self.createLabel(20, 350, self.infoLabel, 'Removed successfully')\n self.clearEntry()\n\n # Clears the entries after a book is added or removed so new input can be typed\n def clearEntry(self):\n self.nameEntry.delete(0, END)\n self.authorEntry.delete(0, END)\n self.priceEntry.delete(0, END)\n\n\ndef main():\n query = '''CREATE TABLE IF NOT EXISTS book (\n name VARCHAR(30),\n author VARCHAR(10),\n price VARCHAR(10))'''\n root = Tk()\n manager = BookManager(root)\n manager.executeQuery(query)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"book/BookManager.py","file_name":"BookManager.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"639546632","text":"from flask import Flask, jsonify, request, abort\nfrom flask_json import FlaskJSON, json_response\nimport peewee\nfrom app import app\nfrom app.models.user import User\nfrom app.models.state import State\n\n@app.route('/states', methods=['GET'])\ndef list_states():\n\t\"\"\"\n\tGet all states\n\tThis will list all states in the database\n\t---\n\ttags:\n\t\t- state\n\tresponses:\n\t\t200:\n\t\t\tdescription: return list of all states\n\t\t\tschema:\n\t\t\t\ttype: array\n\t\t\t\titems:\n\t\t\t\t\t$ref: '#/definitions/State'\n\t\"\"\"\n\tstates = []\n\tfor state in State.select():\n\t\tstates.append(state.to_hash())\n\treturn jsonify(states), 200\n\n@app.route('/states', methods=['POST'])\ndef create_state():\n \"\"\"\n Create a new state\n Creates a new state and appends it to the database\n ---\n tags:\n - state\n parameters:\n -\n name: name\n in: form\n type: string\n required: True\n description: the name of the state\n\n responses:\n 200:\n description: the State representation\n schema:\n id: State\n properties:\n id:\n type: number\n description: Unique identifier\n required: true\n created_at:\n type: date-time\n description: Datetime of the item creation\n required: true\n updated_at:\n type: date-time\n description: Datetime of the last item update\n required: true\n name:\n type: string\n description: name of the state\n required: true\n 409:\n description: state name already exists\n \"\"\"\n try:\n state = State(\n name=str(request.form['name']),\n )\n state.save()\n return jsonify(state.to_hash())\n except:\n import sys\n print(\"Unexpected error:\", sys.exc_info())\n\n return jsonify({'code' : 10000, 'msg' : \"State name already exists\"}), 409\n\n@app.route('/states/<state_id>', methods=['GET'])\ndef get_state_by_id(state_id):\n\t\"\"\"\n\tGet state by id\n\tLists the given state using state_id in the database\n\t---\n\ttags:\n\t\t- state\n\tparameters:\n\t\t-\n\t\t\tname: state_id\n\t\t\tin: path\n\t\t\ttype: integer\n\t\t\trequired: True\n\t\t\tdescription: state id\n\tresponses:\n\t\t200:\n\t\t\tdescription: the State representation\n\t\t\tschema:\n\t\t\t\t$ref: '#/definitions/State'\n\t\t404:\n\t\t\tdescription: aborts the route, no state with the given id
\n\t\"\"\"\n\ttry:\n\t\tstate = State.get(State.id == state_id)\n\t\treturn jsonify(state.to_hash())\n\texcept:\n\t\tabort(404)\n\n\n@app.route('/states/<state_id>', methods=['PUT'])\ndef update_state_by_id(state_id):\n\t\"\"\"\n\tUpdate state\n\tUpdates an existing state and saves it to the database\n\t---\n\ttags:\n\t\t- state\n\tparameters:\n\t\t-\n\t\t\tname: state_id\n\t\t\tin: path\n\t\t\ttype: integer\n\t\t\trequired: True\n\t\t\tdescription: state id\n\t\t-\n\t\t\tname: name\n\t\t\tin: form\n\t\t\ttype: string\n\t\t\trequired: True\n\t\t\tdescription: the name of the state\n\tresponses:\n\t\t200:\n\t\t\tdescription: the State representation\n\t\t\tschema:\n\t\t\t\t$ref: '#/definitions/State'\n\t\t404:\n\t\t\tdescription: state was not updated, an error occurred\n\t\t409:\n\t\t\tdescription: name is already taken in the database\n\t\"\"\"\n\ttry:\n\t\tstate = State.get(State.id == state_id)\n\t\tfor key in request.values:\n\t\t\tif key == 'updated_at' or key == 'created_at':\n\t\t\t\t continue\n\t\t\telse:\n\t\t\t\t setattr(state, key, request.values.get(key))\n\t\tstate.save()\n\t\treturn jsonify(state.to_hash()), 200\n\texcept:\n\t\tabort(404)\n\n\n@app.route('/states/<state_id>', methods=['DELETE'])\ndef delete_state_by_id(state_id):\n\t\"\"\"\n\tDelete state\n\tRemoves the state specified by id from the database\n\t---\n\ttags:\n\t\t- state\n\tparameters:\n\t\t-\n\t\t\tname: state_id\n\t\t\tin: path\n\t\t\ttype: integer\n\t\t\trequired: True\n\t\t\tdescription: state id\n\tresponses:\n\t\t200:\n\t\t\tdescription: successfully deletes the state\n\t\t404:\n\t\t\tdescription: state was not deleted from the database\n\t\"\"\"\n\ttry:\n\t\tstate = State.get(State.id == state_id)\n\t\tstate.delete_instance()\n\t\treturn jsonify({'msg' : 'success'}), 200\n\texcept:\n\t\tabort(404)\n","sub_path":"api/app/views/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"637362984","text":"from __future__ import unicode_literals\n\nimport re\nfrom collections import OrderedDict, defaultdict\n\nfrom conllu.tree_helpers import create_tree\n\nDEFAULT_FIELDS = ('id', 'form', 'lemma', 'upostag', 'xpostag', 'feats', 'head', 'deprel', 'deps', 'misc')\n\ndeps_pattern = r\"\\d+:[a-z][a-z_-]*(:[a-z][a-z_-]*)?\"\nMULTI_DEPS_PATTERN = re.compile(r\"^{}(\\|{})*$\".format(deps_pattern, deps_pattern))\n\nclass ParseException(Exception):\n pass\n\ndef parse(text, fields=DEFAULT_FIELDS):\n return [\n [\n parse_line(line, fields)\n for line in sentence.split(\"\\n\")\n if line and not line.strip().startswith(\"#\")\n ]\n for sentence in text.split(\"\\n\\n\")\n if sentence\n ]\n\ndef parse_with_comments(text, fields=DEFAULT_FIELDS):\n sentences = []\n for sentence in text.split(\"\\n\\n\"):\n lemmas = []\n metadata = OrderedDict()\n if sentence:\n for line in sentence.split(\"\\n\"):\n if line:\n if line.strip().startswith(\"#\"):\n var_name, var_value = parse_comment_line(line)\n if var_name:\n metadata[var_name] = var_value\n else:\n lemmas.append(parse_line(line, fields))\n sentences.append(OrderedDict([\n ('metadata', metadata),\n ('lemmas', lemmas)\n ]))\n return sentences\n\ndef sent_to_tree(sentence):\n head_indexed = defaultdict(list)\n for token in sentence:\n # If HEAD is negative, treat it as child of the root node\n head = max(token[\"head\"] or 0, 0)\n head_indexed[head].append(token)\n\n return create_tree(head_indexed)\n\ndef parse_tree(text):\n result = parse(text)\n\n if \"head\" not in result[0][0]:\n raise 
ParseException(\"Can't parse tree, missing 'head' field.\")\n\n trees = []\n for sentence in result:\n trees += sent_to_tree(sentence)\n\n return trees\n\ndef parse_line(line, fields=DEFAULT_FIELDS):\n line = re.split(r\"\\t| {2,}\", line)\n\n if len(line) == 1 and \" \" in line[0]:\n raise ParseException(\"Invalid line format, line must contain either tabs or two spaces.\")\n\n data = OrderedDict()\n\n for i, field in enumerate(fields):\n # Allow parsing CoNNL-U files with fewer columns\n if i >= len(line):\n break\n\n if field == \"id\":\n value = parse_int_value(line[i])\n\n elif field == \"xpostag\":\n value = parse_nullable_value(line[i])\n\n elif field == \"feats\":\n value = parse_dict_value(line[i])\n\n elif field == \"head\":\n value = parse_int_value(line[i])\n\n elif field == \"deps\":\n value = parse_paired_list_value(line[i])\n\n elif field == \"misc\":\n value = parse_dict_value(line[i])\n\n else:\n value = line[i]\n\n data[field] = value\n\n return data\n\ndef parse_comment_line(line):\n line = line.strip()\n if line[0] != '#':\n raise ParseException(\"Invalid comment format, comment must start with '#'\")\n if '=' not in line:\n return None, None\n var_name, var_value = line[1:].split('=', 1)\n var_name = var_name.strip()\n var_value = var_value.strip()\n return var_name, var_value\n\ndef parse_int_value(value):\n if value == '_':\n return None\n try:\n return int(value)\n except ValueError:\n return None\n\ndef parse_paired_list_value(value):\n if re.match(MULTI_DEPS_PATTERN, value):\n return [\n (part.split(\":\", 1)[1], parse_int_value(part.split(\":\", 1)[0]))\n for part in value.split(\"|\")\n ]\n\n return parse_nullable_value(value)\n\ndef parse_dict_value(value):\n if \"=\" in value:\n return OrderedDict([\n (part.split(\"=\")[0], parse_nullable_value(part.split(\"=\")[1]))\n for part in value.split(\"|\") if len(part.split('=')) == 2\n ])\n\n return parse_nullable_value(value)\n\ndef parse_nullable_value(value):\n if not value or value == \"_\":\n return None\n\n return value\n\ndef serialize_field(field):\n if field is None:\n return '_'\n\n if isinstance(field, OrderedDict):\n serialized_fields = []\n for key_value in field.items():\n serialized_fields.append('='.join(key_value))\n\n return '|'.join(serialized_fields)\n\n return \"{}\".format(field)\n\ndef serialize_tree(root):\n def add_subtree(root_token, token_list):\n for child_token in root_token.children:\n token_list = add_subtree(child_token, token_list)\n\n token_list.append(root_token.data)\n return token_list\n\n tokens = []\n add_subtree(root, tokens)\n\n sorted_tokens = sorted(tokens, key=lambda t: t['id'])\n lines = []\n for token_data in sorted_tokens:\n line = '\\t'.join(serialize_field(val) for val in token_data.values())\n lines.append(line)\n\n text = '\\n'.join(lines)\n return text\n","sub_path":"conllu/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"625334339","text":"import datetime\n\nimport requests\nimport json\nfrom time import sleep\nfrom kafka import KafkaProducer\n\nfrom logService.logService import LogService\n\nif __name__ == \"__main__\":\n\n\tlogger = LogService(is_logging_on=True, log_output=LogService.console_output, file_path=\"./ethParserLog\")\n\n\t# kafka_ip = \"localhost\"\n\tkafka_ip = \"192.168.53.11\"\n\tkafka_port = \"9092\"\n\n\tlogger.log(\"starting with kafka config: \" + kafka_ip + \":\" + str(kafka_port))\n\n\tkafka_store_topic = 
'eth-blocks'\n\tkafka_cleaned_topic = 'stats'\n\n\tkafka_sender = KafkaProducer(bootstrap_servers=kafka_ip + \":\" + str(kafka_port))\n\n\tlast_stored_height = 0\n\n\twhile True:\n\t\teth_main_status = json.loads(\n\t\t\trequests.get('https://api.blockcypher.com/v1/eth/main').content.decode('utf-8'))\n\n\t\tlatest_height = int(eth_main_status[\"height\"])\n\n\t\twhile latest_height > last_stored_height:\n\t\t\tlatest_block = json.loads(requests.get('https://api.blockcypher.com/v1/eth/main/blocks/' +\n\t\t\t\t\t\t\t\t\t\t\t\t str(latest_height)).content.decode('utf-8'))\n\n\t\t\trequired_data = {}\n\t\t\trequired_data[\"height\"] = latest_block[\"height\"]\n\t\t\trequired_data[\"total\"] = latest_block[\"total\"]\n\t\t\trequired_data[\"fees\"] = latest_block[\"fees\"]\n\t\t\trequired_data[\"n_tx\"] = latest_block[\"n_tx\"]\n\t\t\trequired_data[\"time\"] = int(\n\t\t\t\tdatetime.datetime.strptime(latest_block[\"time\"], \"%Y-%m-%dT%H:%M:%SZ\").timestamp())\n\n\t\t\tkafka_sender.send(kafka_store_topic, json.dumps(required_data).encode('utf-8'))\n\t\t\tkafka_sender.flush()\n\n\t\t\tlogger.log(\"put block \" + str(latest_block[\"height\"]) + \" in kafka storage topic\")\n\n\t\t\titem = {\"name\": \"eth.total\", \"value\": latest_block[\"total\"]}\n\t\t\tkafka_sender.send(kafka_cleaned_topic, json.dumps(item).encode('utf-8'))\n\n\t\t\titem = {\"name\": \"eth.fees\", \"value\": latest_block[\"fees\"]}\n\t\t\tkafka_sender.send(kafka_cleaned_topic, json.dumps(item).encode('utf-8'))\n\n\t\t\titem = {\"name\": \"eth.n_tx\", \"value\": latest_block[\"n_tx\"]}\n\t\t\tkafka_sender.send(kafka_cleaned_topic, json.dumps(item).encode('utf-8'))\n\n\t\t\tkafka_sender.flush()\n\n\t\t\tlogger.log(\"put block \" + str(latest_block[\"height\"]) + \" in kafka stats topic\")\n\n\t\t\tlast_stored_height = latest_height\n\n\t\tsleep(5)\n","sub_path":"eth/simpleParser/ethParser.py","file_name":"ethParser.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153412663","text":"num = int(input())\nover = 0\nnxtnum = num\nres = 0\npower = 1\nwhile True:\n res += power * nxtnum\n power *= 10\n nxtnum *= num\n nxtnum += over\n over = nxtnum//10\n nxtnum %= 10\n if nxtnum == num and over == 0:\n break\nprint(res)\n","sub_path":"SwapFive.py","file_name":"SwapFive.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108010987","text":"#!/usr/bin/env libtbx.python\nimport cProfile\n\ntry:\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.rank\n size = comm.size\n has_mpi = True\nexcept ImportError:\n rank = 0\n size = 1\n has_mpi = False\nfrom numpy import load as np_load\n\nfrom dxtbx.model.detector import DetectorFactory\ndet_from_dict = DetectorFactory.from_dict\nfrom dxtbx.model.beam import BeamFactory\nbeam_from_dict = BeamFactory.from_dict\nfrom simtbx.diffBragg.refiners.global_refiner import GlobalRefiner\nfrom cxid9114.utils import open_flex\nfrom simtbx.diffBragg.utils import map_hkl_list\nimport sys\nfrom IPython import embed\n\n# import functions on rank 0 only\nif rank == 0:\n print(\"Rank0 imports\")\n import time\n from argparse import ArgumentParser\n parser = ArgumentParser(\"Load and refine bigz\")\n parser.add_argument(\"--readoutless\", action=\"store_true\")\n parser.add_argument(\"--badgeom\", action=\"store_true\")\n parser.add_argument(\"--optgeom\", action=\"store_true\")\n 
parser.add_argument(\"--checkbackground\", action=\"store_true\")\n parser.add_argument(\"--checkbackgroundsavename\",default=\"_fat_data_background_residual_file\", type=str, help=\"name of the residual background image\")\n parser.add_argument(\"--protocol\", choices=[\"per_shot\", \"global\"], default=\"per_shot\", type=str, help=\"refinement protocol\")\n parser.add_argument(\"--tradeps\", default=5e-10, type=float, help=\"traditional convergence epsilon. Convergence happens if |G| < |X|*tradeps where |G| is norm gradient and |X| is norm parameters\")\n parser.add_argument(\"--imagecorr\", action=\"store_true\")\n parser.add_argument(\"--plot\", action='store_true')\n parser.add_argument(\"--fixrotZ\", action='store_true')\n parser.add_argument(\"--Ncells_size\", default=30, type=float)\n parser.add_argument(\"--cella\", default=None, type=float)\n parser.add_argument(\"--gradientonly\", action='store_true') \n parser.add_argument(\"--cellc\", default=None, type=float)\n parser.add_argument(\"--Nmos\", default=1, type=int)\n parser.add_argument(\"--scipyfactr\", default=1e7, type=float, help=\"Factor for terminating scipy lbfgs see \\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html\")\n parser.add_argument(\"--mosspread\", default=0, type=float)\n parser.add_argument(\"--preopttag\", default=\"preopt\", type=str)\n parser.add_argument(\"--gainval\", default=28, type=float)\n parser.add_argument(\"--curseoftheblackpearl\", action=\"store_true\", help=\"This argument does nothing... \")\n parser.add_argument(\"--ignorelinelow\", action=\"store_true\", help=\"ignore line search in LBFGS\")\n parser.add_argument(\"--xrefinedonly\", action=\"store_true\" )\n parser.add_argument(\"--outdir\", type=str, default=None, help=\"where to write output files\")\n parser.add_argument(\"--imgdirname\", type=str, default=None)\n parser.add_argument(\"--rotscale\", default=1, type=float)\n parser.add_argument(\"--noiseless\", action=\"store_true\")\n parser.add_argument(\"--forcecurva\", action=\"store_true\")\n parser.add_argument(\"--optoutname\", type=str, default=\"results\")\n parser.add_argument(\"--stride\", type=int, default=10, help='plot stride')\n parser.add_argument(\"--minmulti\", type=int, default=2, help='minimum multiplicity for refinement')\n parser.add_argument(\"--boop\", action=\"store_true\")\n parser.add_argument(\"--bigdump\", action=\"store_true\")\n parser.add_argument(\"--residual\", action='store_true')\n parser.add_argument(\"--NoRescaleFcellRes\", action='store_true')\n parser.add_argument(\"--setuponly\", action='store_true')\n parser.add_argument('--filterbad', action='store_true')\n parser.add_argument(\"--alist\", type=str, default=None)\n parser.add_argument(\"--tryscipy\", action=\"store_true\", help=\"use scipy's LBFGS implementation instead of cctbx's\")\n parser.add_argument(\"--restartfile\", type=str, default=None)\n parser.add_argument(\"--Fobslabel\", type=str, default=None)\n parser.add_argument(\"--Freflabel\", type=str, default=None)\n parser.add_argument(\"--xinitfile\", type=str, default=None)\n parser.add_argument(\"--globalNcells\", action=\"store_true\")\n parser.add_argument(\"--globalUcell\", action=\"store_true\")\n parser.add_argument(\"--scaleR1\", action=\"store_true\")\n parser.add_argument(\"--recenter\", action=\"store_true\")\n parser.add_argument(\"--stpmax\", default=1e20, type=float)\n parser.add_argument(\"--usepreoptAmat\", action=\"store_true\")\n parser.add_argument(\"--usepreoptscale\", 
action=\"store_true\")\n parser.add_argument(\"--usepreoptncells\", action=\"store_true\")\n parser.add_argument(\"--usepreoptbg\", action=\"store_true\")\n parser.add_argument(\"--noprintresbins\", action=\"store_true\")\n\n parser.add_argument(\"--sad\", action=\"store_true\")\n parser.add_argument(\"--symbol\", default=\"P43212\", type=str)\n parser.add_argument(\"--p9\", action=\"store_true\")\n parser.add_argument(\"--ucellsigma\", default=0.005, type=float)\n parser.add_argument(\"--bgcoefsigma\", default=1, type=float)\n parser.add_argument(\"--ncellssigma\", default=0.0005, type=float)\n parser.add_argument(\"--rotXYZsigma\", nargs=3, default=[0.003, 0.003, 0.001], type=float)\n parser.add_argument(\"--bgsigma\", nargs=3, default=[0.005, 0.005, 0.01], type=float)\n parser.add_argument(\"--spotscalesigma\",default=0.01, type=float)\n parser.add_argument(\"--fcellsigma\",default=0.005, type=float)\n parser.add_argument(\"--bs7\", action=\"store_true\")\n parser.add_argument(\"--bs7real\", action=\"store_true\")\n parser.add_argument(\"--loadonly\", action=\"store_true\")\n parser.add_argument(\"--poissononly\", action=\"store_true\")\n parser.add_argument(\"--boopi\", type=int, default=0)\n parser.add_argument(\"--Nmax\", type=int, default=-1, help='NOT USING. Max number of images to process per rank')\n parser.add_argument(\"--nload\", type=int, default=None, help='Max number of images to load per rank')\n parser.add_argument(\"--loadstart\", type=int, default=None)\n parser.add_argument(\"--ngroups\", type=int, default=1)\n parser.add_argument(\"--groupId\", type=int, default=0)\n parser.add_argument('--perturblist', default=None, type=int)\n parser.add_argument(\"--verbose\", action='store_true')\n parser.add_argument(\"--forcemono\", action='store_true')\n parser.add_argument(\"--unknownscale\", default=None, type=float, help=\"Initial scale factor to apply to shots...\")\n parser.add_argument(\"--printallmissets\", action='store_true')\n parser.add_argument(\"--gainrefine\", action=\"store_true\")\n parser.add_argument(\"--fcellbump\", default=0.1, type=float)\n parser.add_argument(\"--initpickle\", default=None, type=str, help=\"path to a pandas pkl file containing optimized parameters\")\n parser.add_argument(\"--oversample\", default=0, type=int)\n parser.add_argument(\"--hack\", action=\"store_true\", help=\"use the local 6 tester files\")\n parser.add_argument(\"--curvatures\", action='store_true')\n parser.add_argument(\"--numposcurvatures\", default=7, type=int)\n parser.add_argument(\"--startwithtruth\", action='store_true')\n parser.add_argument(\"--testmode2\", action=\"store_true\", help=\"debug flag for doing a test run\")\n parser.add_argument(\"--glob\", type=str, required=True, help=\"glob for selecting files (output files of process_mpi\")\n parser.add_argument(\"--partition\", action=\"store_true\")\n parser.add_argument(\"--partitiontime\", default=5, type=float, help=\"seconds allowed for partitioning inputs\")\n parser.add_argument(\"--Fobs\", type=str, required=True)\n parser.add_argument(\"--Fref\", type=str, default=None)\n parser.add_argument(\"--keeperstags\", type=str, nargs=\"+\", default=[\"keepers\"], help=\"names of keepers selection flags\")\n parser.add_argument(\"--plotstats\", action=\"store_true\")\n parser.add_argument(\"--fcellrange\", nargs=2, default=None, type=float, \n help=\"2 args specifying lower and upper resolution bounds, then only miller indices within the bound are refined\")\n parser.add_argument(\"--fcell\", nargs=\"+\", 
default=None, type=int)\n parser.add_argument(\"--ncells\", nargs=\"+\", default=None, type=int)\n parser.add_argument(\"--scale\", nargs=\"+\", default=None, type=int)\n parser.add_argument(\"--umatrix\", nargs=\"+\", default=None, type=int)\n parser.add_argument(\"--bmatrix\", nargs=\"+\", default=None, type=int)\n parser.add_argument(\"--bg\", nargs=\"+\", default=None, type=int)\n parser.add_argument(\"--maxcalls\", nargs=\"+\", required=True, type=int)\n parser.add_argument(\"--plotfcell\", action=\"store_true\")\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--savepickleonly\", action=\"store_true\")\n parser.add_argument(\"--perturbfcell\", default=None, type=float)\n parser.add_argument(\"--bgextracted\", action=\"store_true\")\n parser.add_argument(\"--savemodels\", action=\"store_true\")\n\n args = parser.parse_args()\n print(\"ARGS:\")\n print(args)\n import sys\n print(\"COMMAND LINE LOOKED LIKE:\\n %s\" % \" \".join(sys.argv))\n import os\n if args.outdir is not None:\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n from h5py import File as h5py_File\n from cxid9114.integrate.integrate_utils import Integrator\n from numpy import array, sqrt, percentile\n from numpy import indices as np_indices\n from numpy import zeros as np_zeros\n from numpy import sum as np_sum\n from psutil import Process\n from glob import glob\n from os import getpid\n from six import PY3\n from numpy import load as numpy_load\n from numpy import exp as EXP\n from resource import getrusage\n from cxid9114.helpers import compare_with_ground_truth\n import resource\n RUSAGE_SELF = resource.RUSAGE_SELF\n from simtbx.diffBragg.refiners.crystal_systems import TetragonalManager\n from dxtbx.model import Crystal\n from scitbx.matrix import sqr\n from simtbx.diffBragg.sim_data import SimData\n from simtbx.diffBragg.nanoBragg_beam import nanoBragg_beam\n from simtbx.diffBragg.nanoBragg_crystal import nanoBragg_crystal\n from simtbx.diffBragg.refiners import RefineAllMultiPanel\n if args.badgeom:\n from cxid9114.geom.multi_panel import CSPAD2 as CSPAD\n elif args.optgeom:\n from cxid9114.geom.opt import CSPAD\n else:\n from cxid9114.geom.multi_panel import CSPAD\n from cctbx.array_family import flex\n flex_double = flex.double\n from cctbx import sgtbx, miller\n from cctbx.crystal import symmetry\n\n # let the root load the structure factors and energies to later broadcast\n from cxid9114.sf import struct_fact_special\n from cxid9114.parameters import ENERGY_CONV, ENERGY_LOW\n import numpy as np\n LOADTXT = np.loadtxt\n np_log = np.log\n ARANGE = np.arange\n LOGICAL_OR = np.logical_or\n BASENAME = os.path.basename\n # grab the structure factors at the edge energy (ENERGY_LOW=8944 eV)\n ALIST = None\n if args.alist is not None:\n ALIST = list(np.loadtxt(args.alist, str))\n \n\n from dials.algorithms.indexing.compare_orientation_matrices import difference_rotation_matrix_axis_angle as diff_rot\n\nelse:\n PY3 = None\n ALIST = None\n np_indices = None\n EXP = ARANGE = LOGICAL_OR= LOADTXT = None\n np_log = None\n diff_rot = None\n flex_double = None\n compare_with_ground_truth = None\n args = None\n RefineAllMultiPanel = None\n nanoBragg_beam = nanoBragg_crystal = None\n SimData = None\n # beam_from_dict = det_from_dict = None\n h5py_File = None\n Integrator = None\n CSPAD = None\n array = sqrt = percentile = np_zeros = np_sum = None\n Process = None\n glob = None\n getpid = None\n numpy_load = None\n RUSAGE_SELF = None\n getrusage = None\n TetragonalManager = 
None\n Crystal = None\n sqr = None\n BASENAME = None\n\n\nif has_mpi:\n if rank == 0:\n print(\"Broadcasting imports\")\n PY3 = comm.bcast(PY3)\n ALIST = comm.bcast(ALIST)\n EXP = comm.bcast(EXP)\n LOADTXT = comm.bcast(LOADTXT)\n LOGICAL_OR = comm.bcast(LOGICAL_OR)\n ARANGE = comm.bcast(ARANGE)\n RefineAllMultiPanel = comm.bcast(RefineAllMultiPanel)\n np_indices = comm.bcast(np_indices, root=0)\n np_log = comm.bcast(np_log, root=0)\n glob = comm.bcast(glob, root=0)\n BASENAME = comm.bcast(BASENAME)\n flex_double = comm.bcast(flex_double, root=0)\n diff_rot = comm.bcast(diff_rot, root=0)\n compare_with_ground_truth = comm.bcast(compare_with_ground_truth, root=0)\n args = comm.bcast(args, root=0)\n Crystal = comm.bcast(Crystal, root=0)\n sqr = comm.bcast(sqr, root=0)\n CSPAD = comm.bcast(CSPAD, root=0)\n nanoBragg_beam = comm.bcast(nanoBragg_beam, root=0)\n nanoBragg_crystal = comm.bcast(nanoBragg_crystal, root=0)\n SimData = comm.bcast(SimData, root=0)\n # beam_from_dict = comm.bcast(beam_from_dict, root=0)\n # det_from_dict = comm.bcast(det_from_dict, root=0)\n h5py_File = comm.bcast(h5py_File, root=0)\n Integrator = comm.bcast(Integrator, root=0)\n array = comm.bcast(array, root=0)\n sqrt = comm.bcast(sqrt, root=0)\n percentile = comm.bcast(percentile, root=0)\n np_zeros = comm.bcast(np_zeros, root=0)\n np_sum = comm.bcast(np_sum, root=0)\n Process = comm.bcast(Process, root=0)\n getpid = comm.bcast(getpid, root=0)\n numpy_load = comm.bcast(numpy_load, root=0)\n getrusage = comm.bcast(getrusage, root=0)\n RUSAGE_SELF = comm.bcast(RUSAGE_SELF, root=0)\n TetragonalManager = comm.bcast(TetragonalManager, root=0)\n\n\nclass GlobalData:\n\n def __init__(self):\n self.int_radius = 5 #\n self.gain = args.gainval # gain of panels, can be refined, can be panel dependent\n self.symbol = args.symbol\n self.anomalous_flag = True\n self.flux_min = 1e2 # minimum number of photons to simulate (assume flux is N-photons, e.g. 
1 second exposure)\n self.n_ucell_param = 2 # tetragonal cell\n self.Nload = args.nload #\n self.all_pix = 0\n self.global_ncells_param = args.globalNcells\n self.global_ucell_param = args.globalUcell\n self.time_load_start = 0\n self.fnames = [] # the filenames containing the datas\n self.all_spot_roi = {} # one list per shot, rois are x1,x2,y1,y2 per reflection\n self.global_image_id = {} # gives a unique ID for each image so multiple ranks can process roi from same image\n self.all_abc_inits = {} # one list per shot, abc_inits are a,b,c per reflection\n self.all_panel_ids = {} # one list per shot, panel_ids are single number per reflection\n self.all_ucell_mans = {} # one per shot, UcellManager instance (Tetragonal in this case)\n self.all_spectra = {} # one list of (wavelength, flux) tuples per shot\n self.all_crystal_models = {}\n self.all_shot_idx = {}\n self.all_crystal_GT = {}\n self.all_xrel = {}\n self.all_yrel = {}\n self.all_Hi_asu = {}\n self.all_crystal_scales = {}\n self.log_of_init_crystal_scales = {}\n self.all_Hi = {}\n self.all_nanoBragg_rois = {}\n self.SIM = None # simulator; one per rank!\n self.all_roi_imgs = {}\n self.all_fnames = {}\n self.background_estimate = None\n self.all_proc_idx = {}\n self.all_proc_fnames = {}\n self.m_init = {}\n self.spot_scale_init = {}\n self.nbbeam = self.nbcryst = None\n self.miller_data_map = None\n\n self.reduced_bbox_keeper_flags = {}\n self.all_bg_coef = {}\n\n def initialize_simulator(self, init_crystal, init_beam, init_spectrum, init_miller_array):\n # create the sim_data instance that the refiner will use to run diffBragg\n # create a nanoBragg crystal\n self.nbcryst = nanoBragg_crystal()\n self.nbcryst.dxtbx_crystal = init_crystal\n self.nbcryst.thick_mm = 0.1\n self.nbcryst.Ncells_abc = args.Ncells_size, args.Ncells_size, args.Ncells_size\n\n self.nbcryst.miller_array = init_miller_array\n self.nbcryst.n_mos_domains = args.Nmos\n self.nbcryst.mos_spread_deg = args.mosspread\n\n # create a nanoBragg beam\n self.nbbeam = nanoBragg_beam()\n self.nbbeam.size_mm = 0.000886226925452758 # NOTE its a circular beam whoops\n #self.nbbeam.size_mm = 0.001\n self.nbbeam.unit_s0 = init_beam.get_unit_s0()\n self.nbbeam.spectrum = init_spectrum\n\n # sim data instance\n self.SIM = SimData()\n self.SIM.detector = CSPAD\n self.SIM.crystal = self.nbcryst\n self.SIM.beam = self.nbbeam\n self.SIM.panel_id = 0 # default\n self.SIM.instantiate_diffBragg(default_F=0, oversample=args.oversample)\n if args.sad:\n if args.p9:\n self.SIM.D.spot_scale = 3050\n elif args.bs7 or args.bs7real:\n self.SIM.D.spot_scale = 250\n else:\n self.SIM.D.spot_scale = .7\n else:\n self.SIM.D.spot_scale = 12\n\n if args.unknownscale is not None:\n #self.SIM.D.spot_scale = 1e6\n self.SIM.D.spot_scale = args.unknownscale\n #self.SIM.D.spot_scale = 15555.1313 kaladin_2k after a small batch starting from some high number\n #self.SIM.D.spot_scale = 17884 # determined from refinement using the syl3 starting model\n else:\n self.SIM.D.spot_scale = 1150\n self.SIM.D.polarization = .999\n\n def _process_miller_data(self):\n idx, data = self.SIM.D.Fhkl_tuple\n self.miller_data_map = {idx: val for idx, val in zip(idx, data)}\n\n # @profile\n def load(self):\n\n # some parameters\n\n # NOTE: for reference, inside each h5 file there is\n # [u'Amatrices', u'Hi', u'bboxes', u'h5_path']\n\n # get the total number of shots using worker 0\n if rank == 0:\n self.time_load_start = time.time()\n print(\"I am root. 
I am calculating total number of shots\")\n h5s = [h5py_File(f, \"r\") for f in self.fnames]\n Nshots_per_file = [h[\"h5_path\"].shape[0] for h in h5s]\n Nshots_tot = sum(Nshots_per_file)\n print(\"I am root. Total number of shots is %d\" % Nshots_tot)\n\n print(\"I am root. I will divide shots amongst workers.\")\n shot_tuples = []\n roi_per = []\n for i_f, fname in enumerate(self.fnames):\n fidx_shotidx = [(i_f, i_shot) for i_shot in range(Nshots_per_file[i_f])]\n shot_tuples += fidx_shotidx\n\n # store the number of usable roi per shot in order to divide shots amongst ranks equally\n roi_per += [h5s[i_f][\"bboxes\"][\"shot%d\" % (i_shot)].shape[0] \n for i_shot in range(Nshots_per_file[i_f])]\n\n from numpy import array_split\n from numpy.random import permutation\n print (\"I am root. Number of uniques = %d\" % len(set(shot_tuples)))\n\n # divide the array into chunks of roughly equal sum (total number of ROI)\n if args.partition and args.restartfile is None and args.xinitfile is None:\n diff = np.inf\n roi_per = np.array(roi_per)\n tstart = time.time()\n best_order = range(len(roi_per))\n print(\"Partitioning for better load balancing across ranks.. \")\n while 1:\n order = permutation(len(roi_per))\n res = [sum(a) for a in np.array_split(roi_per[order], size)]\n new_diff = max(res) - min(res)\n t_elapsed = time.time() - tstart\n t_remain = args.partitiontime - t_elapsed\n if new_diff < diff:\n diff = new_diff\n best_order = order.copy()\n print(\"Best diff=%d, Parition time remaining: %.3f seconds\" % (diff, t_remain))\n if t_elapsed > args.partitiontime:\n break\n shot_tuples = [shot_tuples[i] for i in best_order]\n\n elif args.partition and args.restartfile is not None:\n print (\"Warning: skipping partitioning time to use shot mapping as laid out in restart file dir\")\n else:\n print (\"Proceeding without partitioning\")\n\n # optional to divide into a sub group\n shot_tuples = array_split(shot_tuples, args.ngroups)[args.groupId]\n shots_for_rank = array_split(shot_tuples, size)\n import os # FIXME, I thought I was imported already!\n if args.outdir is not None: # save for a fast restart (shot order is important!)\n np.save(os.path.join(args.outdir, \"shots_for_rank\"), shots_for_rank)\n if args.restartfile is not None:\n # the directory containing the restart file should have a shots for rank file\n dirname = os.path.dirname(args.restartfile)\n print (\"Loading shot mapping from dir %s\" % dirname)\n shots_for_rank = np.load(os.path.join(dirname, \"shots_for_rank.npy\"))\n # propagate the shots for rank file...\n if args.outdir is not None:\n np.save(os.path.join(args.outdir, \"shots_for_rank\"), shots_for_rank)\n if args.xinitfile is not None:\n # the directory containing the restart file should have a shots for rank file\n dirname = os.path.dirname(args.xinitfile)\n print (\"Loading shot mapping from dir %s\" % dirname)\n shots_for_rank = np.load(os.path.join(dirname, \"shots_for_rank.npy\"))\n # propagate the shots for rank file...\n if args.outdir is not None:\n np.save(os.path.join(args.outdir, \"shots_for_rank\"), shots_for_rank)\n \n # close the open h5s..\n for h in h5s:\n h.close()\n\n else:\n Nshots_tot = None\n shots_for_rank = None\n h5s = None\n\n # Nshots_tot = comm.bcast( Nshots_tot, root=0)\n if has_mpi:\n if rank==0:\n np.save(\"shots_for_rank\", shots_for_rank)\n shots_for_rank = comm.bcast(shots_for_rank, root=0)\n # h5s = comm.bcast( h5s, root=0) # pull in the open hdf5 files\n\n my_shots = shots_for_rank[rank]\n if self.Nload is not None:\n start = 0\n if 
args.loadstart is not None:\n start = args.loadstart\n my_shots = my_shots[start: start+self.Nload]\n print(\"Rank %d: I will load %d shots, first shot: %s, last shot: %s\"\n % (rank, len(my_shots), my_shots[0], my_shots[-1]))\n\n # open the unique filenames for this rank\n # TODO: check max allowed pointers to open hdf5 file\n import h5py\n my_unique_fids = set([fidx for fidx, _ in my_shots])\n self.my_open_files = {fidx: h5py_File(self.fnames[fidx], \"r\") for fidx in my_unique_fids}\n #for fidx in my_unique_fids:\n # fpath = self.fnames[fidx]\n # if args.imgdirname is not None:\n # fpath = fpath.split(\"/kaladin/\")[1]\n # fpath = os.path.join(args.imgdirname, fpath)\n # self.my_open_files[fidx] = h5py.File(fpath, \"r\")\n Ntot = 0\n\n #self.n_shots = len(my_shots)\n self.n_shots = 0\n img_num = 0\n for iii, (fname_idx, shot_idx) in enumerate(my_shots):\n \n h = self.my_open_files[fname_idx]\n\n # load the dxtbx image data directly:\n npz_path = h[\"h5_path\"][shot_idx]\n try:\n npz_path = npz_path.decode()\n except AttributeError:\n pass\n\n if ALIST is not None:\n if BASENAME(npz_path) not in ALIST:\n continue\n\n if args.imgdirname is not None:\n import os\n npz_path = npz_path.split(\"/kaladin/\")[1]\n npz_path = os.path.join(args.imgdirname, npz_path)\n\n #if args.noiseless:\n # noiseless_path = npz_path.replace(\".npz\", \".noiseless.npz\")\n # img_handle = numpy_load(noiseless_path)\n\n #elif args.readoutless:\n # import os\n # #readoutless_path = npz_path.split(\"tang/\")[1]\n # #readoutless_path = os.path.join(\"/global/project/projectdirs/lcls/dermen/d9114_sims/bear\",\n # # readoutless_path)\n # readoutless_path = npz_path.replace(\"tang\", \"bear\")\n # img_handle = numpy_load(readoutless_path)\n else:\n if args.readoutless:\n npz_path = npz_path.replace(\".npz\", \".readoutless.npz\")\n print(\"READOULESSNESSNKASKJDKJASKDJKAJS!!!!!!!!!!!!!!!!<><><><><><><\")\n if PY3:\n img_handle = numpy_load(npz_path, allow_pickle=True)\n else:\n img_handle = numpy_load(npz_path)\n\n img = img_handle[\"img\"]\n\n if len(img.shape) == 2: # if single panel\n img = array([img])\n\n # D = det_from_dict(img_handle[\"det\"][()])\n B = beam_from_dict(img_handle[\"beam\"][()])\n \n m_init = args.Ncells_size \n if args.usepreoptncells:\n m_init = h[\"ncells_%s\" % args.preopttag][shot_idx]\n\n spot_scale_init = 1\n if args.usepreoptscale:\n spot_scale_init = h[\"spot_scale_%s\" % args.preopttag][shot_idx]\n \n Amat = h[\"Amatrices\"][shot_idx]\n if args.usepreoptAmat:\n Amat = h[\"Amatrices_%s\" % args.preopttag][shot_idx]\n amat_elems = list(sqr(Amat).inverse().elems)\n # real space basis vectors:\n a_real = amat_elems[:3]\n b_real = amat_elems[3:6]\n c_real = amat_elems[6:]\n\n # dxtbx indexed crystal model\n C = Crystal(a_real, b_real, c_real, \"P43212\")\n\n # change basis here ? 
Or maybe just average a/b\n a, b, c, _, _, _ = C.get_unit_cell().parameters()\n a_init = .5 * (a + b)\n c_init = c\n\n # shoe boxes where we expect spots\n bbox_dset = h[\"bboxes\"][\"shot%d\" % shot_idx]\n n_bboxes_total = bbox_dset.shape[0]\n # is the shoe box within the resolution ring and does it have significant SNR (see filter_bboxes.py)\n # tilt plane to the background pixels in the shoe boxes\n tilt_abc_dset = h[\"tilt_abc\"][\"shot%d\" % shot_idx]\n if args.usepreoptbg and not args.bgextracted:\n tilt_abc_dset = h[\"tilt_abc_%s\" % args.preopttag][\"shot%d\" % shot_idx]\n\n bg_coef = -1\n if args.bgextracted:\n # if its the first image, load the backgorund estimate array\n if img_num==0:\n # this should be same length as number of panels in detector e.g. len(CSPAD)\n self.background_estimate = h[\"background_estimate\"][()] / self.gain\n \n bg_coef = h[\"background_coefficients\"][shot_idx]\n if args.usepreoptbg:\n bg_coef = h[\"background_coefficients_%s\" % args.preopttag][shot_idx]\n\n # miller indices (not yet reduced by symm equivs)\n Hi_dset = h[\"Hi\"][\"shot%d\" % shot_idx]\n try:\n panel_ids_dset = h[\"panel_ids\"][\"shot%d\" % shot_idx]\n has_panels = True\n except KeyError:\n has_panels = False\n\n # BEGIN bbox selection flag management\n # only keep a shoebox if its potentially a keeper\n # Here we provide a means for loading shoeboxes if and only if they\n # will ever be simulated as determined by the keeper flags\n # If a bound box is not flagged anywhere, it will be removed from memory\n bbox_id = ARANGE(n_bboxes_total)\n is_a_keeper = np_zeros(n_bboxes_total).astype(bool)\n kept_bbox_ids = {}\n for keeperstag in set(args.keeperstags):\n keeper_flags = h[\"bboxes\"][\"%s%d\" % (keeperstag, shot_idx)][()]\n is_a_keeper = LOGICAL_OR(is_a_keeper, keeper_flags)\n kept_bbox_ids[keeperstag] = bbox_id[keeper_flags]\n \n # The following provides a means for selecting the different subsets\n # of ROIs based on the original keeper flags\n all_kept_bbox_ids = bbox_id[is_a_keeper]\n self.reduced_bbox_keeper_flags[img_num] = {}\n for keeperstag in set(args.keeperstags):\n flags = array([i in kept_bbox_ids[keeperstag] for i in all_kept_bbox_ids])\n self.reduced_bbox_keeper_flags[img_num][keeperstag] = flags\n # END selection flag management\n\n # BEGIN apply the keeper filters:\n bboxes = [bbox_dset[i_bb] for i_bb in range(n_bboxes_total) if is_a_keeper[i_bb]]\n tilt_abc = [tilt_abc_dset[i_bb] for i_bb in range(n_bboxes_total) if is_a_keeper[i_bb]]\n Hi = [tuple(Hi_dset[i_bb]) for i_bb in range(n_bboxes_total) if is_a_keeper[i_bb]]\n proc_file_idx = [i_bb for i_bb in range(n_bboxes_total) if is_a_keeper[i_bb]]\n if has_panels:\n panel_ids = [panel_ids_dset[i_bb] for i_bb in range(n_bboxes_total) if is_a_keeper[i_bb]]\n else:\n panel_ids = [0] * len(tilt_abc)\n # END apply the keeper filters\n\n # BEGIN counting pixels\n tot_pix = [(j2 - j1)*(i2 - i1) for i1, i2, j1, j2 in bboxes]\n Ntot += sum(tot_pix)\n # END counting pixels\n\n # load some ground truth data from the simulation dumps (e.g. 
spectrum)\n #h5_fname = h[\"h5_path\"][shot_idx].replace(\".npz\", \"\")\n h5_fname = npz_path.replace(\".npz\", \"\")\n if args.noiseless:\n h5_fname = npz_path.replace(\".noiseless.npz\", \"\")\n elif args.readoutless:\n h5_fname = npz_path.replace(\".readoutless.npz\", \"\")\n\n data = h5py_File(h5_fname, \"r\")\n\n xtal_scale_truth = data[\"spot_scale\"][()]\n tru = sqr(data[\"crystalA\"][()]).inverse().elems\n a_tru = tru[:3]\n b_tru = tru[3:6]\n c_tru = tru[6:]\n C_tru = Crystal(a_tru, b_tru, c_tru, \"P43212\")\n try:\n angular_offset_init = compare_with_ground_truth(a_tru, b_tru, c_tru, [C], symbol=\"P43212\")[0]\n except Exception as err:\n print(\"Rank %d: Boo cant use the comparison w GT function: %s\" % (rank, err))\n\n fluxes = data[\"spectrum\"][()]\n es = data[\"exposure_s\"][()]\n\n #spec = fluxes * es\n #spec_f = h5py_File(\"/Users/dermen/crystal/modules/cxid9114/spec/realspec.h5\", \"r\")\n #vals = spec_f[\"hist_spec\"][()]\n #res = np.array([np.allclose(spec, v) for v in vals])\n #idx = np.where(res)[0]\n #if not idx.size==1:\n # print (\"Rank %d couldnt get it\" % rank)\n #else:\n # idx = idx[0]\n # run = spec_f[\"runs\"][idx]\n # _shot = spec_f[\"shot_idx\"][idx]\n # print \"Rank %d: filename %s, run %f, shot %d\" % (rank, npz_path, run, _shot)\n\n #comm.Barrier()\n\n fluxes *= es # multiply by the exposure time\n # TODO: wavelens should come from the imageset file itself\n if \"wavelengths\" in data.keys():\n wavelens = data[\"wavelengths\"][()]\n else:# elif args.bs7 or args.bs7real:\n from cxid9114.parameters import WAVELEN_HIGH\n wavelens = [WAVELEN_HIGH]\n\n spectrum = zip(wavelens, fluxes)\n # dont simulate when there are no photons!\n spectrum = [(wave, flux) for wave, flux in spectrum if flux > self.flux_min]\n \n if args.checkbackground:\n true_background = data[\"background\"][()]\n if img_num==0:\n self.true_residual = np_zeros(true_background.shape)\n self.true_residual_Nsamples = np_zeros(true_background.shape)\n\n if args.forcemono:\n spectrum = [(B.get_wavelength(), sum(fluxes))]\n\n # make a unit cell manager that the refiner will use to track the B-matrix\n aa, _, cc, _, _, _ = C_tru.get_unit_cell().parameters()\n ucell_man = TetragonalManager(a=a_init, c=c_init)\n\n if args.startwithtruth:\n ucell_man = TetragonalManager(a=aa, c=cc)\n\n if args.startwithtruth:\n C = C_tru\n # create the sim_data instance that the refiner will use to run diffBragg\n # create a nanoBragg crystal\n if img_num == 0: # only initialize the simulator after loading the first image\n if args.sad:\n if args.Fobslabel is not None:\n self.Fhkl_obs = GlobalData.open_mtz(args.Fobs, args.Fobslabel)\n else:\n self.Fhkl_obs = open_flex(args.Fobs).as_amplitude_array()\n self.Fhkl_ref = args.Fref\n if args.Fref is not None:\n if args.Freflabel is not None:\n self.Fhkl_ref = GlobalData.open_mtz(args.Fref, args.Freflabel)\n else:\n self.Fhkl_ref = open_flex(args.Fref).as_amplitude_array() # this reference miller array is used to track CC and R-factor\n\n if args.p9:\n wavelen = 0.9793\n #from cxid9114.sf.struct_fact_special import load_p9\n #Fhkl_guess = load_p9()\n raise NotImplementedError()\n elif args.bs7 or args.bs7real:\n from cxid9114.parameters import WAVELEN_HIGH\n #from cxid9114.sf import struct_fact_special\n #import os\n wavelen = WAVELEN_HIGH\n else:\n from cxid9114.parameters import WAVELEN_LOW\n wavelen = WAVELEN_LOW\n #from cxid9114.sf.struct_fact_special import load_4bs7_sf\n #Fhkl_guess = load_4bs7_sf()\n raise NotImplementedError()\n\n if not args.bs7real:\n spectrum = 
[(wavelen, fluxes[0])]\n # end if sad\n self.initialize_simulator(C, B, spectrum, self.Fhkl_obs)\n\n # map the miller array to ASU\n Hi_asu = map_hkl_list(Hi, self.anomalous_flag, self.symbol)\n #sg_type = sgtbx.space_group_info(symbol=self.symbol).type()\n #Hi_flex = flex.miller_index(tuple(map(tuple, Hi)))\n #miller.map_to_asu(sg_type, self.anomalous_flag, Hi_flex) # mods Hi_flex in place\n #Hi_asu = list(Hi_flex)\n\n # copy the image as photons (NOTE: Dont forget to ditch its references!)\n img_in_photons = (img/args.gainval).astype('float32')\n\n # Here, takeout from the image only whats necessary to perform refinement\n # first filter the spot rois so they dont occur exactly at the boundary of the image (inclusive range in nB)\n assert len(img_in_photons.shape) == 3 # sanity\n nslow, nfast = img_in_photons[0].shape\n bboxes = array(bboxes)\n # OLD WAY: \n #for i_bbox, (_, x2, _, y2) in enumerate(bboxes):\n # if x2 == nfast:\n # bboxes[i_bbox][1] = x2 - 1 # update roi_xmax\n # if y2 == nslow:\n # bboxes[i_bbox][3] = y2 - 1 # update roi_ymax\n \n # now cache the roi in nanoBragg format ((x1,x2), (y1,y1))\n # and also cache the pixels and the coordinates\n\n nanoBragg_rois = [] # special nanoBragg format\n xrel, yrel, roi_img = [], [], []\n for i_roi, (x1, x2, y1, y2) in enumerate(bboxes):\n nanoBragg_rois.append(((x1, x2-1), (y1, y2-1)))\n yr, xr = np_indices((y2 - y1, x2 - x1))\n xrel.append(xr)\n yrel.append(yr)\n pid = panel_ids[i_roi]\n sY = slice(y1,y2,1)\n sX = slice(x1,x2,1)\n roi_img.append(img_in_photons[pid, sY, sX])\n if args.checkbackground:\n pid = panel_ids[i_roi]\n tx, ty, tz = tilt_abc[i_roi]\n tilt_plane = tx*xr + ty*yr + tz\n self.true_residual[pid, sY,sX] = true_background[pid,sY, sX] - tilt_plane\n self.true_residual_Nsamples[pid, sY, sX] += 1\n\n # make sure to clear that damn memory\n img = None\n img_in_photons = None\n del img # not sure if needed here..\n del img_in_photons\n\n # peak at the memory usage of this rank\n #mem = getrusage(RUSAGE_SELF).ru_maxrss # peak mem usage in KB\n #mem = mem / 1e6 # convert to GB\n mem = self._usage()\n\n #print \"RANK %d: %.2g total pixels in %d/%d bboxes (file %d / %d); MemUsg=%2.2g GB\" \\\n # % (rank, Ntot, len(bboxes), n_bboxes_total, img_num +1, len(my_shots), mem)\n self.all_pix += Ntot\n\n # accumulate per-shot information\n self.global_image_id[img_num] = None # TODO\n self.all_spot_roi[img_num] = bboxes\n self.all_abc_inits[img_num] = tilt_abc\n self.all_panel_ids[img_num] = panel_ids\n self.all_ucell_mans[img_num] = ucell_man\n self.all_spectra[img_num] = spectrum\n self.all_crystal_models[img_num] = C\n self.spot_scale_init[img_num] = spot_scale_init\n self.m_init[img_num] = m_init\n\n self.all_crystal_scales[img_num] = xtal_scale_truth\n self.all_crystal_GT[img_num] = C_tru\n self.all_xrel[img_num] = xrel\n self.all_yrel[img_num] = yrel\n self.all_nanoBragg_rois[img_num] = nanoBragg_rois\n self.all_roi_imgs[img_num] = roi_img\n self.all_fnames[img_num] = npz_path\n self.all_proc_fnames[img_num] = h.filename\n self.all_Hi[img_num] = Hi\n self.all_Hi_asu[img_num] = Hi_asu\n self.all_proc_idx[img_num] = proc_file_idx\n self.all_shot_idx[img_num] = shot_idx # this is the index of the shot in the process*h5 file\n self.all_bg_coef[img_num] = bg_coef\n\n img_num += 1\n self.n_shots += 1\n\n for h in self.my_open_files.values():\n h.close()\n\n #print (\"Rank %d; all subimages loaded!\" % rank)\n\n def _usage(self):\n mem = getrusage(RUSAGE_SELF).ru_maxrss # peak mem usage in KB\n conv = 1e-6\n try:\n if \"darwin\" in 
sys.platform:\n conv = 1e-9\n except:\n pass\n mem = mem * conv # convert to GB\n return mem\n\n def init_global_ucell(self):\n if self.global_ucell_param:\n n_images = len(self.all_spot_roi)\n if args.cella is None and args.cellc is None:\n if rank == 0:\n print (\"Init global ucell without cella and cellc\")\n\n # TODO: implement for non tetragonal\n a_vals, _, c_vals, _, _, _ = zip(*[self.all_crystal_models[i].get_unit_cell().parameters()\n for i in range(n_images)])\n a_vals = list(a_vals)\n c_vals = list(c_vals)\n if has_mpi:\n a_vals = comm.reduce(a_vals, MPI.SUM, root=0)\n c_vals = comm.reduce(c_vals, MPI.SUM, root=0)\n\n a_mean = c_mean = None\n if rank == 0:\n a_mean = np.median(a_vals)\n c_mean = np.median(c_vals)\n if has_mpi:\n a_mean = comm.bcast(a_mean, root=0)\n c_mean = comm.bcast(c_mean, root=0)\n else:\n a_mean = args.cella\n c_mean = args.cellc\n\n print (\"Rank %d: Updating ucell mean for each ucell manager to %.4f %.4f\" % (rank, a_mean, c_mean))\n for i_shot in range(n_images):\n self.all_ucell_mans[i_shot].variables = a_mean, c_mean\n\n def tally_statistics(self):\n\n # tally up all miller indices in this refinement\n self._gather_Hi_information()\n self.num_hkl_global = len(self.idx_from_asu)\n\n n_images = len(self.all_spot_roi)\n self.n_images = n_images\n n_spot_per_image = [len(self.all_spot_roi[i_image]) for i_image in range(n_images)]\n n_spot_tot = sum(n_spot_per_image)\n total_pix = 0\n for i_image in range(n_images):\n nspot = n_spot_per_image[i_image]\n for x1,x2,y1,y2 in self.all_spot_roi[i_image]:\n total_pix += (x2-x1)*(y2-y1)\n\n #total_pix = self.all_pix\n # Per image we have 3 rotation angles to refine\n n_rot_param = 3\n\n # by default we assume each shot refines its own ncells param (mosaic domain size Ncells_abc in nanoBragg)\n n_global_ncells_param = 0\n n_per_image_ncells_param = 1\n if self.global_ncells_param:\n n_global_ncells_param = 1\n n_per_image_ncells_param = 0\n\n # by default each shot refines its own unit cell parameters (e.g. 
a,b,c,alpha, beta, gamma)\n n_global_ucell_param = 0\n n_per_image_ucell_param = self.n_ucell_param\n if self.global_ucell_param:\n n_global_ucell_param = self.n_ucell_param\n n_per_image_ucell_param = 0\n\n # 1 crystal scale factor refined per shot (overall scale)\n n_per_image_scale_param = 1\n\n # NOTE: n_param_per_image is no longer a constant when we refine background planes\n # NOTE: (unless we do a per-image polynomial fit background plane model)\n \n if not args.bgextracted:\n self.n_param_per_image = [n_rot_param + n_per_image_ncells_param + n_per_image_ucell_param +\n n_per_image_scale_param + 3*n_spot_per_image[i]\n for i in range(n_images)]\n else:\n self.n_param_per_image = [n_rot_param + n_per_image_ncells_param + n_per_image_ucell_param +\n n_per_image_scale_param + 1\n for _ in range(n_images)]\n\n total_per_image_unknowns = sum(self.n_param_per_image)\n\n # NOTE: local refers to per-image\n self.n_local_unknowns = total_per_image_unknowns\n\n mem = self._usage() # get memory usage\n # note: roi para\n\n # totals across ranks\n if has_mpi:\n n_images = comm.reduce(n_images, MPI.SUM, root=0)\n n_spot_tot = comm.reduce(n_spot_tot, MPI.SUM, root=0)\n total_pix = comm.reduce(total_pix, MPI.SUM, root=0)\n mem = comm.reduce(mem,MPI.SUM, root=0)\n\n # Gather so that each rank knows exactly how many local unknowns are on the other ranks\n if has_mpi:\n local_unknowns_per_rank = comm.gather(self.n_local_unknowns)\n else:\n local_unknowns_per_rank = [self.n_local_unknowns]\n\n if rank == 0:\n total_local_unknowns = sum(local_unknowns_per_rank) # across all ranks\n else:\n total_local_unknowns = None\n \n self.local_unknowns_across_all_ranks = total_local_unknowns\n if has_mpi:\n self.local_unknowns_across_all_ranks = comm.bcast(self.local_unknowns_across_all_ranks, root=0)\n\n # TODO: what is the 2 for (its gain and detector distance which are not currently refined...\n self.n_global_params = 2 + n_global_ucell_param + n_global_ncells_param + self.num_hkl_global # detdist and gain + ucell params\n\n self.n_total_unknowns = self.local_unknowns_across_all_ranks + self.n_global_params # gain and detdist (originZ)\n\n # also report total memory usage\n #mem_tot = mem\n #if has_mpi:\n # mem_tot = comm.reduce(mem_tot, MPI.SUM, root=0)\n\n if has_mpi:\n comm.Barrier()\n if rank == 0:\n print(\"\\n<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\")\n print(\"MPIWORLD TOTALZ: images=%d, spots=%d, pixels=%2.2g, Nlocal/Nglboal=%d/%d, usage=%2.2g GigaBytes\"\n % (n_images, n_spot_tot, total_pix, total_local_unknowns,self.n_global_params, mem))\n print(\"Total time elapsed= %.4f seconds\" % (time.time()-self.time_load_start))\n print(\"<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\\n\")\n\n # determine where in the global parameter array does this rank\n # parameters begin\n self.starts_per_rank = {}\n xpos = 0\n for _rank, n_unknown in enumerate(local_unknowns_per_rank):\n self.starts_per_rank[_rank] = xpos\n xpos += n_unknown\n else:\n self.starts_per_rank = None\n\n if has_mpi:\n self.starts_per_rank = comm.bcast(self.starts_per_rank, root=0)\n\n def _gather_Hi_information(self):\n nshots_on_this_rank = len(self.all_Hi)\n self.Hi_all_ranks, self.Hi_asu_all_ranks = [], []\n for i in range(nshots_on_this_rank):\n self.Hi_all_ranks += self.all_Hi[i]\n self.Hi_asu_all_ranks += self.all_Hi_asu[i]\n\n #print(\"Rank %d: Num miller vars on rank=%d\" % (comm.rank, len(set(Hasu))))\n if has_mpi:\n self.Hi_all_ranks = 
comm.reduce(self.Hi_all_ranks, root=0) # adding python lists concatenates them\n self.Hi_all_ranks = comm.bcast(self.Hi_all_ranks, root=0)\n self.Hi_asu_all_ranks = comm.reduce(self.Hi_asu_all_ranks, root=0)\n self.Hi_asu_all_ranks = comm.bcast(self.Hi_asu_all_ranks, root=0)\n\n if rank==0:\n from cctbx.array_family import flex as cctbx_flex\n uc = self.all_ucell_mans[0]\n params = uc.a, uc.b, uc.c, uc.al*180/np.pi, uc.be*180/np.pi, uc.ga*180/np.pi\n params = 79.1, 79.1, 38.4, 90,90,90\n symm = symmetry(unit_cell=params, space_group_symbol=self.symbol)\n hi_asu_flex = cctbx_flex.miller_index(self.Hi_asu_all_ranks)\n mset = miller.set(symm, hi_asu_flex, anomalous_flag=True)\n marr = miller.array(mset)# ,data=flex.double(len(hi_asu_felx),0))\n n_bin=10\n binner = marr.setup_binner(d_max=999, d_min=2.125, n_bins=n_bin)\n from collections import Counter\n print(\"Average multiplicities:\")\n print(\"<><><><><><><><><><><><>\")\n for i_bin in range(n_bin-1):\n dmax, dmin = binner.bin_d_range(i_bin+1)\n F_in_bin = marr.resolution_filter(d_max=dmax, d_min=dmin)\n #multi_data = in_bin.data().as_numpy_array()\n multi_in_bin = array(list(Counter(F_in_bin.indices()).values()))\n print (\"%2.5g-%2.5g : Multiplicity=%.4f\" % (dmax, dmin,multi_in_bin.mean()))\n for ii in range(1,100,8):\n print(\"\\t %d refls with multi %d\" % (sum(multi_in_bin==ii), ii))\n \n print(\"Overall completeness\\n<><><><><><><><>\")\n symm = symmetry(unit_cell=params, space_group_symbol=self.symbol)\n hi_flex_unique = cctbx_flex.miller_index(list(set(self.Hi_asu_all_ranks)))\n mset = miller.set(symm, hi_flex_unique, anomalous_flag=True)\n binner = mset.setup_binner(d_min=2.125, d_max=999, n_bins=10)\n mset.completeness(use_binning=True).show()\n marr_unique_h = miller.array(mset)# ,data=flex.double(len(hi_asu_felx),0))\n print(\"Rank %d: total miller vars=%d\" % (rank, len(set(self.Hi_asu_all_ranks))))\n if rank > 0:\n marr_unique_h = None\n\n if has_mpi:\n marr_unique_h = comm.bcast(marr_unique_h)\n\n # this will map the measured miller indices to their index in the LBFGS parameter array self.x\n self.idx_from_asu = {h: i for i, h in enumerate(set(self.Hi_asu_all_ranks))}\n # we will need the inverse map during refinement to update the miller array in diffBragg, so we cache it here\n self.asu_from_idx = {i: h for i, h in enumerate(set(self.Hi_asu_all_ranks))}\n\n fres = marr_unique_h.d_spacings()\n self.res_from_asu = {h:res for h,res in zip(fres.indices(), fres.data())}\n # will we only refine a range of miller indices ? 
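(optionally, via freeze_idx below)\n        # A minimal sketch of how the two maps above pair up (hypothetical hkl values):\n        #   idx_from_asu = {(1, 0, 0): 0, (1, 1, 0): 1}\n        #   asu_from_idx = {0: (1, 0, 0), 1: (1, 1, 0)}\n        # so, roughly, the Fcell parameter for a measured index h sits at x[fcell_xstart + idx_from_asu[h]],\n        # while asu_from_idx recovers h when pushing refined values back into the diffBragg miller array.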
\n self.freeze_idx = None\n if args.fcellrange is not None:\n self.freeze_idx = {}\n resmax, resmin = args.fcellrange\n for h in self.idx_from_asu:\n res = self.res_from_asu[h]\n if res >= resmin and res < resmax:\n self.freeze_idx[h] = False\n else:\n self.freeze_idx[h] = True\n\n def pre_refine_setup(self, i_trial=0, refine_fcell=None, refine_spot_scale=None, refine_Umatrix=None, \n refine_Bmatrix=None, refine_ncells=None, refine_bg=None, max_calls=None, x_init=None):\n\n self.RUC = GlobalRefiner(\n n_total_params=self.n_total_unknowns,\n n_local_params=self.n_local_unknowns,\n n_global_params=self.n_global_params,\n local_idx_start=self.starts_per_rank[rank],\n shot_ucell_managers=self.all_ucell_mans,\n shot_rois=self.all_spot_roi,\n shot_nanoBragg_rois=self.all_nanoBragg_rois,\n shot_roi_imgs=self.all_roi_imgs, shot_spectra=self.all_spectra,\n shot_crystal_GTs=self.all_crystal_GT, shot_crystal_models=self.all_crystal_models,\n shot_xrel=self.all_xrel, shot_yrel=self.all_yrel, shot_abc_inits=self.all_abc_inits,\n shot_asu=self.all_Hi_asu,\n global_param_idx_start=self.local_unknowns_across_all_ranks,\n shot_panel_ids=self.all_panel_ids,\n all_crystal_scales=self.all_crystal_scales,\n perturb_fcell=args.perturbfcell,\n global_ncells=args.globalNcells, global_ucell=args.globalUcell,\n #shot_originZ_init= {img_num:CSPAD[0].get_origin()[2] for img_num in range(self.n_shots)},\n shot_originZ_init= {img_num:0 for img_num in range(self.n_shots)},\n shot_bg_coef=self.all_bg_coef, background_estimate=self.background_estimate)\n \n self.i_trial = i_trial\n\n if refine_Bmatrix is not None:\n self.RUC.refine_Bmatrix = refine_Bmatrix \n if refine_Umatrix is not None:\n self.RUC.refine_Umatrix = refine_Umatrix \n if refine_fcell is not None:\n self.RUC.refine_Fcell = refine_fcell \n if refine_ncells is not None:\n self.RUC.refine_ncells = refine_ncells \n if refine_bg is not None:\n self.RUC.refine_background_planes = refine_bg\n if refine_spot_scale is not None:\n self.RUC.refine_crystal_scale = refine_spot_scale\n if max_calls is not None:\n self.RUC.max_calls = max_calls \n\n self.RUC.x_init = x_init\n self.RUC.only_pass_refined_x_to_lbfgs = args.xrefinedonly\n self.RUC.bg_extracted = args.bgextracted\n self.RUC.save_model = args.savemodels \n \n self.RUC.recenter = args.recenter\n # parameter rescaling...\n self.RUC.rescale_params = True\n self.RUC.rescale_fcell_by_resolution = not args.NoRescaleFcellRes\n \n self.RUC.spot_scale_init = self.spot_scale_init \n self.RUC.m_init = self.m_init \n\n self.RUC.ignore_line_search_failed_step_at_lower_bound = args.ignorelinelow\n #FIXME \n self.RUC.ucell_inits = [self.all_ucell_mans[i_shot].variables for i_shot in range(self.n_shots)]\n #FIXME\n self.RUC.rotX_sigma = args.rotXYZsigma[0]\n self.RUC.rotY_sigma = args.rotXYZsigma[1]\n self.RUC.rotZ_sigma = args.rotXYZsigma[2]\n self.RUC.ucell_sigmas = [args.ucellsigma, args.ucellsigma]\n self.RUC.bg_coef_sigma = args.bgcoefsigma\n self.RUC.originZ_sigma = 1 # 0.01\n self.RUC.m_sigma = args.ncellssigma\n self.RUC.spot_scale_sigma = args.spotscalesigma # stage1/2.01\n asig, bsig, csig = args.bgsigma\n self.RUC.a_sigma = asig # 0.005\n self.RUC.b_sigma = bsig #0.005\n self.RUC.c_sigma = csig #0.01\n self.RUC.fcell_sigma_scale = args.fcellsigma #0.005\n self.RUC.fcell_resolution_bin_Id = None\n self.RUC.compute_image_model_correlation = args.imagecorr\n # end of parameter rescaling\n\n # plot things\n self.RUC.sigma_r = 3./args.gainval\n #if args.readoutless:\n # self.RUC.sigma_r = 2 \n \n 
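# sigma_r above presumably models ~3 ADU of detector readout noise, rescaled to photon units by args.gainval\n        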
self.RUC.gradient_only=args.gradientonly\n self.RUC.fix_params_with_negative_curvature = args.forcecurva\n #self.RUC.stpmax = args.stpmax\n self.RUC.debug = args.debug\n self.RUC.binner_dmax = 999\n self.RUC.binner_dmin = 2.1\n self.RUC.binner_nbin = 10\n self.RUC.trial_id = self.i_trial\n self.RUC.print_all_missets = args.printallmissets\n self.RUC.print_all_corr = False\n self.RUC.Fref = self.Fhkl_ref\n self.RUC.merge_stat_frequency=3\n self.RUC.min_multiplicity=args.minmulti\n self.RUC.print_resolution_bins= not args.noprintresbins\n self.RUC.refine_rotZ = not args.fixrotZ\n self.RUC.plot_images = args.plot\n self.RUC.plot_fcell = args.plotfcell\n self.RUC.plot_residuals = args.residual\n self.RUC.plot_statistics = args.plotstats\n self.RUC.setup_plots()\n\n self.RUC.log_fcells = True\n self.RUC.big_dump = args.bigdump\n\n self.RUC.idx_from_asu = self.idx_from_asu\n self.RUC.asu_from_idx = self.asu_from_idx\n self.RUC.freeze_idx = self.freeze_idx\n self.RUC.scale_r1 = True\n self.RUC.request_diag_once = False\n self.RUC.S = self.SIM\n self.RUC.restart_file = args.restartfile\n self.RUC.has_pre_cached_roi_data = True\n self.RUC.trad_conv = True\n self.RUC.fcell_bump = args.fcellbump\n self.RUC.refine_detdist = False\n self.RUC.S.D.update_oversample_during_refinement = False\n self.RUC.refine_gain_fac = False\n self.RUC.use_curvatures = args.forcecurva\n self.RUC.use_curvatures_threshold = args.numposcurvatures\n if not args.curvatures:\n self.RUC.S.D.compute_curvatures=False\n self.RUC.calc_curvatures = args.curvatures\n self.RUC.poisson_only = args.poissononly\n self.RUC.plot_stride = args.stride\n self.RUC.trad_conv_eps = args.tradeps #5e-10 # NOTE this is for single panel model\n self.RUC.verbose = False\n self.RUC.use_rot_priors = False\n self.RUC.use_ucell_priors = False\n self.RUC.filter_bad_shots = args.filterbad\n #TODO optional properties.. 
make this obvious\n        self.RUC.FNAMES = self.all_fnames\n        self.RUC.PROC_FNAMES = self.all_proc_fnames\n        self.RUC.PROC_IDX = self.all_shot_idx\n        self.RUC.BBOX_IDX = self.all_proc_idx\n\n        self.RUC.Hi = self.all_Hi\n        self.RUC.output_dir = args.outdir\n        if args.verbose:\n            if rank == 0:  # only show refinement stats for rank 0\n                self.RUC.verbose = True\n        self.RUC.run(setup_only=True)\n\n    def refine(self, selection_flags=None):\n        self.RUC.num_positive_curvatures = 0\n        self.RUC.use_curvatures = args.forcecurva\n        self.RUC.hit_break_to_use_curvatures = False\n        self.RUC.selection_flags = selection_flags\n\n        if args.tryscipy:\n            self.RUC.calc_curvatures = False\n            #self.RUC._setup()\n            self.RUC.calc_func = True\n            self.RUC.compute_functional_and_gradients()\n\n            from scitbx.array_family import flex\n            def func(x, RUC):\n                RUC.calc_func = True\n                RUC.x = flex.double(x)\n                f, g = RUC.compute_functional_and_gradients()\n                return f\n\n            def fprime(x, RUC):\n                RUC.calc_func = False\n                RUC.x = flex.double(x)\n                f, g = RUC.compute_functional_and_gradients()\n                return g.as_numpy_array()\n\n            from scipy.optimize import fmin_l_bfgs_b\n            out = fmin_l_bfgs_b(func=func, x0=array(self.RUC.x),\n                                fprime=fprime, args=[self.RUC], factr=args.scipyfactr)\n\n        else:\n            self.RUC.run(setup=False)\n            if self.RUC.hit_break_to_use_curvatures:\n                self.RUC.fix_params_with_negative_curvature = False\n                self.RUC.num_positive_curvatures = 0\n                self.RUC.use_curvatures = True\n                self.RUC.run(setup=False)\n\n\n    def save_lbfgs_x_array_as_dataframe(self, outname):\n        # Here we can save the refined parameters\n        my_shots = self.all_shot_idx.keys()\n        x = self.RUC.Xall\n        data_to_send = []\n        image_corr = self.RUC.image_corr\n        if image_corr is None:\n            image_corr = [-1]*len(my_shots)\n        for i_shot in my_shots:\n            rotX = self.RUC._get_rotX(i_shot)\n            rotY = self.RUC._get_rotY(i_shot)\n            rotZ = self.RUC._get_rotZ(i_shot)\n            if not args.savepickleonly:\n                ang, ax = self.RUC.get_correction_misset(as_axis_angle_deg=True, i_shot=i_shot)\n                Bmat = self.RUC.get_refined_Bmatrix(i_shot)\n            else:\n                ang,ax = self.RUC.get_correction_misset(as_axis_angle_deg=True, anglesXYZ = (rotX, rotY, rotZ))\n                pars = self.RUC._get_ucell_vars(i_shot)\n                self.all_ucell_mans[i_shot].variables = pars\n                Bmat = self.all_ucell_mans[i_shot].B_recipspace \n\n            bg_coef = -1\n            if args.bgextracted:\n                bg_coef = self.RUC._get_bg_coef(i_shot)\n\n            C = self.RUC.CRYSTAL_MODELS[i_shot]\n            C.set_B(Bmat)\n            try:\n                C.rotate_around_origin(ax, ang)\n            except RuntimeError:\n                pass\n#############################################\n            if args.savepickleonly:\n                a_init, _, c_init, _, _, _ = self.all_crystal_models[i_shot].get_unit_cell().parameters()\n                a_tru, b_tru, c_tru = self.all_crystal_GT[i_shot].get_real_space_vectors()\n                try:\n                    final_misori = compare_with_ground_truth(a_tru, b_tru, c_tru,[C],symbol=\"P43212\")[0]\n                except Exception as err:\n                    final_misori = -1\n###############################\n\n            Amat_refined = C.get_A()\n            ucell_a,_,ucell_c,_,_,_ = C.get_unit_cell().parameters()\n\n            fcell_xstart = self.RUC.fcell_xstart\n            ucell_xstart = self.RUC.ucell_xstart[i_shot]\n            scale_xpos = self.RUC.spot_scale_xpos[i_shot]\n            ncells_xstart = self.RUC.ncells_xstart[i_shot]\n            nspots = len(self.RUC.NANOBRAGG_ROIS[i_shot])\n\n            bgplane_xpos = -1 \n            bgplane = 0\n            if not args.bgextracted:\n                bgplane_a_xpos = [self.RUC.bg_a_xstart[i_shot][i_spot] for i_spot in range(nspots)]\n                bgplane_b_xpos = [self.RUC.bg_b_xstart[i_shot][i_spot] for i_spot in range(nspots)]\n                bgplane_c_xpos = [self.RUC.bg_c_xstart[i_shot][i_spot] for i_spot in 
range(nspots)]\n bgplane_xpos = list(zip(bgplane_a_xpos, bgplane_b_xpos, bgplane_c_xpos))\n bgplane = [self.RUC._get_bg_vals(i_shot, i_spot) for i_spot in range(nspots)]\n\n crystal_scale = self.RUC._get_spot_scale(i_shot)\n proc_h5_fname = self.all_proc_fnames[i_shot]\n proc_h5_idx = self.all_shot_idx[i_shot]\n proc_bbox_idx = self.all_proc_idx[i_shot]\n\n ncells_val = tuple(self.RUC._get_m_val(i_shot))\n if not args.savepickleonly: \n init_misori = self.RUC.get_init_misorientation(i_shot)\n final_misori = self.RUC.get_current_misorientation(i_shot)\n img_corr= self.RUC._get_image_correlation(i_shot)\n init_img_corr = self.RUC._get_init_image_correlation(i_shot)\n else:\n init_misori = self.init_misori[i_shot]\n #final_misori = self.init_misori[i_shot] # computed above\n img_corr = -1\n init_img_corr = -1\n \n data_to_send.append((proc_h5_fname, proc_h5_idx, proc_bbox_idx,crystal_scale, Amat_refined, ncells_val, bgplane, \\\n img_corr, init_img_corr, fcell_xstart, ucell_xstart, rotX, rotY, rotZ, scale_xpos, \\\n ncells_xstart, bgplane_xpos, init_misori, final_misori, ucell_a, ucell_c, bg_coef))\n \n if has_mpi:\n data_to_send = comm.reduce(data_to_send, MPI.SUM, root=0)\n if rank == 0:\n import pandas\n import h5py\n fnames, shot_idx, bbox_idx, xtal_scales, Amats, ncells_vals, bgplanes, image_corr, init_img_corr, \\\n fcell_xstart, ucell_xstart, rotX, rotY, rotZ, scale_xpos, ncells_xstart, bgplane_xpos, \\\n init_misori, final_misori, ucell_a, ucell_c, bg_coef = zip(*data_to_send)\n\n df = pandas.DataFrame({\"proc_fnames\": fnames, \"proc_shot_idx\": shot_idx, \"bbox_idx\": bbox_idx,\n \"spot_scales\": xtal_scales, \"Amats\": Amats, \"ncells\": ncells_vals,\n \"bgplanes\": bgplanes, \"image_corr\": image_corr,\n \"init_image_corr\": init_img_corr,\n \"fcell_xstart\": fcell_xstart,\n \"ucell_xstart\": ucell_xstart,\n \"init_misorient\": init_misori, \"final_misorient\": final_misori,\n \"bg_coef\": bg_coef,\n \"rotX\": rotX,\n \"rotY\": rotY,\n \"rotZ\": rotZ,\n \"a\": ucell_a, \"c\": ucell_c, \n \"scale_xpos\": scale_xpos,\n \"ncells_xpos\": ncells_xstart,\n \"bgplanes_xpos\": bgplane_xpos})\n u_fnames = df.proc_fnames.unique()\n\n u_h5s = {f:h5py.File(f,'r')[\"h5_path\"][()] for f in u_fnames}\n img_fnames = []\n for f,idx in df[['proc_fnames','proc_shot_idx']].values:\n img_fnames.append( u_h5s[f][idx] )\n df[\"imgpaths\"] = img_fnames\n\n df.to_pickle(outname)\n\n def init_misset_results(self):\n results = []\n self.init_misori = {}\n nshots = len(self.all_crystal_GT)\n for i_shot in range(nshots): # (angx, angy, angz, a,c) in enumerate(zip(rotx, roty, rotz, avals, cvals)):\n\n a_init, _, c_init, _, _, _ = self.all_crystal_models[i_shot].get_unit_cell().parameters()\n a_tru, b_tru, c_tru = self.all_crystal_GT[i_shot].get_real_space_vectors()\n try:\n angular_offset_init = compare_with_ground_truth(a_tru, b_tru, c_tru,\n [self.all_crystal_models[i_shot]],\n symbol=\"P43212\")[0]\n except Exception as err:\n print(\"Rank %d img %d err %s\" % (rank, i_shot, err))\n angular_offset_init = -1\n results.append(angular_offset_init)\n self.init_misori[i_shot] = angular_offset_init\n return results\n\n #TODO: test this method ;)\n @staticmethod\n def open_mtz(mtzfname, mtzlabel=None):\n if mtzlabel is None:\n mtzlabel = \"fobs(+)fobs(-)\"\n print (\"Opening mtz file %s\" % mtzfname)\n from iotbx.reflection_file_reader import any_reflection_file\n miller_arrays = any_reflection_file(mtzfname).as_miller_arrays()\n\n possible_labels = []\n foundlabel = False\n for ma in miller_arrays:\n label = 
ma.info().label_string()\n possible_labels.append(label)\n if label == mtzlabel:\n foundlabel = True\n break\n\n assert foundlabel, \"MTZ Label not found... \\npossible choices: %s\" % (\" \".join(possible_labels))\n ma = ma.as_amplitude_array()\n return ma\n\n def write_residual_background_image(self):\n assert args.checkbackground\n if has_mpi:\n self.true_residual = comm.reduce(self.true_residual)\n self.true_residual_Nsamples = comm.reduce(self.true_residual_Nsamples)\n if rank==0:\n RES = self.true_residual / self.true_residual_Nsamples\n from numpy import nan_to_num\n RES = nan_to_num(RES)\n from numpy import abs as numpy_abs\n ABS_RES = numpy_abs(RES)\n from dxtbx.model.beam import BeamFactory\n from cxid9114.parameters import WAVELEN_HIGH\n beam = BeamFactory.simple(WAVELEN_HIGH)\n np.savez(args.checkbackgroundsavename, img=RES, det=CSPAD.to_dict(), beam=beam.to_dict())\n np.savez(args.checkbackgroundsavename+ \"_abs\", img=ABS_RES, det=CSPAD.to_dict(), beam=beam.to_dict())\n print(\"Saved background residual image to file %s\" % args.checkbackgroundsavename)\n if has_mpi:\n comm.Barrier()\n exit()\n####pr = cProfile.Profile()\n####pr.enable()\n\n##############\n# LOAD STAGE #\n##############\n\nB = GlobalData()\nfnames = glob(args.glob)\nB.fnames = fnames\nB.load()\nprint(\"Finished with the load!\")\n\nang_res = B.init_misset_results()\nif has_mpi:\n ang_res = comm.reduce(ang_res, MPI.SUM, root=0)\nif rank == 0:\n miss = [a for a in ang_res if a > 0]\n\n print(\"INIT MISSETS\\n%s\"%\", \".join(map(str,ang_res)))\n print(\"INITIAL MEDIAN misset = %f\" % np.median(miss))\n print(\"INITIAL MAX misset = %f\" % np.max(miss))\n print(\"INITIAL MIN misset = %f\" % np.min(miss))\nif has_mpi:\n comm.Barrier()\nif args.checkbackground:\n B.write_residual_background_image()\nB.tally_statistics()\nB.init_global_ucell()\nif has_mpi:\n comm.Barrier()\n\ntrials = {\"fcell\": args.fcell,\n \"scale\": args.scale,\n \"umatrix\": args.umatrix,\n \"bmatrix\": args.bmatrix,\n \"ncells\": args.ncells,\n \"bg\": args.bg,\n \"max_calls\": args.maxcalls}\n\nNtrials = len(trials[\"max_calls\"])\n\n################\n# SETUP/REFINE #\n################\nx_init = None\nif args.xinitfile is not None:\n x_init = flex_double(np_load(args.xinitfile)[\"x\"])\nif rank==0:\n import time\n\nif args.protocol==\"per_shot\":\n\n for i_trial in range(Ntrials): \n if rank==0:\n tstart = time.time()\n setup_args = {\"max_calls\": args.maxcalls[i_trial],\n \"refine_fcell\": bool(args.fcell[i_trial]),\n \"refine_Umatrix\": bool(args.umatrix[i_trial]),\n \"refine_Bmatrix\": bool(args.bmatrix[i_trial]),\n \"refine_ncells\": bool(args.ncells[i_trial]),\n \"refine_bg\": bool(args.bg[i_trial]),\n \"refine_spot_scale\": bool(args.scale[i_trial]),\n \"i_trial\": i_trial, \n \"x_init\": x_init}\n B.pre_refine_setup(**setup_args) \n #TODO MPI select for global_refiner.py \n keeperstag = args.keeperstags[i_trial] \n for i_shot in range(B.n_shots):\n flags = {i_shot: B.reduced_bbox_keeper_flags[i_shot][keeperstag]}\n B.refine(selection_flags = flags)\n if rank == 0:\n print (\"<><><><><><><><><><><><><><><><><><><><><><><>\")\n print (\"<><><> END OF TRIAL %02d ; shot %d/%d <><><><>\" % (i_trial+1, i_shot+1, B.n_shots))\n print (\"<><><><><><><><><><><><><><><><><><><><><><><>\")\n if has_mpi:\n comm.Barrier()\n x_init = B.RUC.Xall\n\n if rank==0:\n tdone = time.time()-tstart\n print(\"TRIAL %d TIMEINGZ = %f secz\" % (i_trial+1, tdone))\n outname = \"%s_trial%d.pkl\" % (args.optoutname, i_trial+1)\n 
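# dump this trial's refined parameters to a pandas pickle for offline analysis\n            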
B.save_lbfgs_x_array_as_dataframe(outname)\n\nelif args.protocol == \"global\":\n    for i_trial in range(Ntrials): \n        if rank==0:\n            tstart = time.time()\n        setup_args = {\"max_calls\": args.maxcalls[i_trial],\n                      \"refine_fcell\": bool(args.fcell[i_trial]),\n                      \"refine_Umatrix\": bool(args.umatrix[i_trial]),\n                      \"refine_Bmatrix\": bool(args.bmatrix[i_trial]),\n                      \"refine_ncells\": bool(args.ncells[i_trial]),\n                      \"refine_bg\": bool(args.bg[i_trial]),\n                      \"refine_spot_scale\": bool(args.scale[i_trial]),\n                      \"i_trial\": i_trial, \n                      \"x_init\": x_init}\n        B.pre_refine_setup(**setup_args) \n        if not args.savepickleonly:\n            B.refine() \n            if rank == 0:\n                print (\"<><><><><><><><><><><><><><><><><><><><><><><>\")\n                print (\"<><><> END OF TRIAL %02d ; <><><><>\" % (i_trial+1))\n                print (\"<><><><><><><><><><><><><><><><><><><><><><><>\")\n            if has_mpi:\n                comm.Barrier()\n            x_init = B.RUC.Xall\n\n        if rank==0:\n            tdone = time.time()-tstart\n            print(\"TRIAL %d TIMEINGZ = %f secz\" % (i_trial+1, tdone))\n        outname = \"%s_trial%d.pkl\" % (args.optoutname, i_trial+1)\n        B.save_lbfgs_x_array_as_dataframe(outname)\n\n\n#proc_fnames_shots = [(B.all_proc_fnames[i], B.all_shot_idx[i]) for i in my_shots]\n\n#parameters =[\n#    (f, i, np.exp(x[B.RUC.spot_scale_xpos[i]]), x[B.RUC.rotX_xpos[i]], x[B.RUC.rotY_xpos[i]], x[B.RUC.rotZ_xpos[i]])\n#    for f, i in proc_fnames_shots]\n\n#pr.disable()\n#\n#pr.dump_stats('cpu_%d.prof' %comm.rank)\n## - for text dump\n#with open( 'cpu_%d.txt' %comm.rank, 'w') as output_file:\n#    sys.stdout = output_file\n#    pr.print_stats(sort='time')\n#    sys.stdout = sys.__stdout__\n\n#comm.Barrier()\n#B.print_results()\n\n","sub_path":"io/fat_data.py","file_name":"fat_data.py","file_ext":"py","file_size_in_byte":70200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"257865137","text":"systemMenu = {\"ข้าวมันไก่ต้ม\":40, \"ข้าวมันไก่ทอด\":45, \"ข้าวมันผสม\": 50}\nmenuList = []\ndef showBill():\n    totalPrice = 0\n    print(\"---- My Food ----\")\n    for number in range(len(menuList)):\n        print(menuList[number][0], menuList[number][1])\n        totalPrice += int(menuList[number][1])\n    print(\"Total :\", totalPrice)\n\n\nwhile True:\n    menuName = input(\"Please Enter Menu :\")\n    if(menuName.lower() == \"exit\"):\n        break\n    else:\n        menuList.append([menuName, systemMenu[menuName]])\n\nshowBill()","sub_path":"Lec73_Putita_M.py","file_name":"Lec73_Putita_M.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"591222655","text":"from sklearn.model_selection import train_test_split\nimport sys\nimport pandas as pd\nimport os\n\ndef main(csv_path, split_ratio=0.3, sep = \",\", root_path=\"./\"):\n    df = pd.read_csv(csv_path, names=None, header=None, sep=sep)\n    train, test = train_test_split(df, test_size=split_ratio, random_state=4213, shuffle=True, stratify=df[1])\n    train.to_csv(os.path.join(root_path, \"split_train.csv\"), sep = \",\", index = False, header = False)\n    test.to_csv(os.path.join(root_path, \"split_val.csv\"), sep = \",\", index = False, header = False)\n\n\nif __name__ == '__main__':\n    try:\n        csv_path = sys.argv[1].rstrip()\n        split_ratio = float(sys.argv[2].rstrip())\n        root_path = sys.argv[3].rstrip()\n    except Exception:\n        print(\"Incorrect usage: split_data.py <csv_path> <split_ratio> <root_path>\")\n        sys.exit(1)\n    sep = \",\"\n    main(csv_path, split_ratio, sep, 
root_path)\n","sub_path":"PyTorch/image_classification/flickr_style/helper_scripts/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"225583136","text":"import logging\n\nimport pajbot.models\nfrom pajbot.modules import BaseModule\n\nlog = logging.getLogger('pajbot')\n\n\nclass DeckModule(BaseModule):\n\n    ID = __name__.split('.')[-1]\n    NAME = 'Decks (Hearthstone)'\n    DESCRIPTION = 'Handles displaying/updating decks through commands and the website.'\n    CATEGORY = 'Feature'\n\n    def load_commands(self, **options):\n        self.commands['setdeck'] = pajbot.models.command.Command.raw_command(self.set_deck,\n                level=420,\n                delay_all=0,\n                delay_user=0,\n                description='Set the Hearthstone deck that is currently in use.',\n                examples=[\n                    pajbot.models.command.CommandExample(None, 'Add a new deck',\n                        chat='user:!set deck http://i.imgur.com/rInqJv0.png\\n'\n                        'bot>user:This is a new deck. Its ID is 32',\n                        description='This is shown when the deck is new.').parse(),\n                    pajbot.models.command.CommandExample(None, 'Set a pre-existing deck',\n                        chat='user:!set deck http://i.imgur.com/rInqJv0.png\\n'\n                        'bot>user:Updated an existing deck. Its ID is 32',\n                        description='This is shown when the deck has been used before.').parse(),\n                ])\n        self.commands['set'] = pajbot.models.command.Command.multiaction_command(\n            level=100,\n            delay_all=0,\n            delay_user=0,\n            default=None,\n            command='set',\n            commands={\n                'deck': self.commands['setdeck']\n            })\n\n        self.commands['update'] = pajbot.models.command.Command.multiaction_command(\n            level=100,\n            delay_all=0,\n            delay_user=0,\n            default=None,\n            command='update',\n            commands={\n                'deck': pajbot.models.command.Command.raw_command(self.update_deck,\n                    level=420,\n                    description='Edit an existing deck.',\n                    examples=[\n                        pajbot.models.command.CommandExample(None, 'Set the name and class of the current deck',\n                            chat='user:!update deck --name Midrange Secret --class paladin\\n'\n                            'bot>user:Updated deck with ID 32. Updated name and class').parse(),\n                        pajbot.models.command.CommandExample(None, 'Update the link of the deck',\n                            chat='user:!update deck --link http://i.imgur.com/QEVwrVV.png\\n'\n                            'bot>user:Updated deck with ID 32. Updated link',\n                            description='Changes the link of the current deck. This can be handy if you want to re-upload a screenshot.').parse(),\n                        pajbot.models.command.CommandExample(None, 'Set the name and class of an old deck',\n                            chat='user:!update deck --id 12 --name Aggro --class hunter\\n'\n                            'bot>user:Updated deck with ID 12. Updated name and class',\n                            description='Updates the name and class of an old deck. 
Useful if you want to update old decks.').parse(),\n                    ]),\n            })\n\n        self.commands['remove'] = pajbot.models.command.Command.multiaction_command(\n            level=100,\n            delay_all=0,\n            delay_user=0,\n            default=None,\n            command='remove',\n            commands={\n                'deck': pajbot.models.command.Command.raw_command(self.remove_deck,\n                    level=420,\n                    description='Removes a deck by its ID.',\n                    examples=[\n                        pajbot.models.command.CommandExample(None, 'Remove a deck by ID',\n                            chat='user:!remove deck 123\\n'\n                            'bot>user:Successfully removed the deck.',\n                            description='The ID in this case is 123').parse(),\n                        pajbot.models.command.CommandExample(None, 'Remove a deck by URL',\n                            chat='user:!remove deck http://i.imgur.com/rInqJv0.png\\n'\n                            'bot>user:Successfully removed the deck.',\n                            description='The link in this case is http://i.imgur.com/rInqJv0.png').parse(),\n                    ]),\n            })\n\n    def set_deck(self, **options):\n        \"\"\"Dispatch method for setting the current deck.\n        The command takes a link as its argument.\n        If the link is an already-added deck, the deck should be set as the current deck\n        and its last use date should be set to now.\n        Usage: !setdeck imgur.com/abcdefgh\"\"\"\n\n        message = options['message']\n        bot = options['bot']\n        source = options['source']\n\n        if message:\n            deck, new_deck = bot.decks.set_current_deck(message)\n            if new_deck is True:\n                bot.whisper(source.username, 'This deck is a new deck. Its ID is {deck.id}'.format(deck=deck))\n            else:\n                bot.whisper(source.username, 'Updated an existing deck. Its ID is {deck.id}'.format(deck=deck))\n\n            bot.say('Successfully updated the current deck.')\n            return True\n\n        return False\n\n    def update_deck(self, **options):\n        \"\"\"Dispatch method for updating a deck.\n        By default this will update things for the current deck, but you can update\n        any deck assuming you know its ID.\n        Usage: !updatedeck --name Midrange Secret --class paladin\n        \"\"\"\n\n        message = options['message']\n        bot = options['bot']\n        source = options['source']\n\n        if message:\n            options, response = bot.decks.parse_update_arguments(message)\n            if options is False:\n                bot.whisper(source.username, 'Invalid update command')\n                return False\n\n            if 'id' in options:\n                deck = bot.decks.find(id=options['id'])\n                # We remove id from options here so we can tell the user what\n                # they have updated.\n                del options['id']\n            else:\n                deck = bot.decks.current_deck\n\n            if deck is None:\n                bot.whisper(source.username, 'No valid deck to update.')\n                return False\n\n            if len(options) == 0:\n                bot.whisper(source.username, 'You gave me nothing to update the deck with NotLikeThis')\n                return False\n\n            bot.decks.update_deck(deck, **options)\n            bot.whisper(source.username, 'Updated deck with ID {deck.id}. 
Updated {list}'.format(deck=deck, list=', '.join([key for key in options])))\n\n            return True\n        else:\n            bot.whisper(source.username, 'Usage example: !updatedeck --name Midrange Secret --class paladin')\n            return False\n\n    def remove_deck(self, **options):\n        \"\"\"Dispatch method for removing a deck.\n        Usage: !removedeck imgur.com/abcdef\n        OR\n        !removedeck 123\n        \"\"\"\n\n        message = options['message']\n        bot = options['bot']\n        source = options['source']\n\n        if message:\n            id = None\n            try:\n                id = int(message)\n            except Exception:\n                pass\n\n            deck = bot.decks.find(id=id, link=message)\n\n            if deck is None:\n                bot.whisper(source.username, 'No decks found with matching parameters.')\n                return False\n\n            try:\n                bot.decks.remove_deck(deck)\n                bot.whisper(source.username, 'Successfully removed the deck.')\n            except:\n                log.exception('An error occurred while removing this deck')\n                bot.whisper(source.username, 'An error occurred while removing this deck')\n                return False\n            return True\n        else:\n            bot.whisper(source.username, 'Usage example: !removedeck http://imgur.com/abc')\n            return False\n","sub_path":"pajbot/modules/deck.py","file_name":"deck.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"401279860","text":"import numpy as np\nimport math\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\n\n# Local calls to own modules\nfrom gauss import GAUSS_PY\n\nt=[]\nx=[]\na=[]\nxd=[]\nx_hat=[]\nxdh=[]\nxdd=[]\nxdddd=[]\nxd_hat=[]\nxs=[]\nxh=[]\nxdd_hat=[]\nxddd_hat=[]\nxdddd_hat=[]\nx_hat_ERR=[]\nsp11=[]\nsp11P=[]\nxd_hat_ERR=[]\nsp22=[]\nsp22P=[]\nxdd_hat_ERR=[]\nsp33=[]\nsp33P=[]\nxddd_hat_ERR=[]\nsp44=[]\nsp44P=[]\nArrayRES = []\n\nPHIS=1.\nXLOSE = 99\nSIGMA_NOISE=50.0\nP0 = 999999999999999.0\nadd_noise = 1\n\n# ***************************************************************************************\n#\n# The higher-order polynomial Kalman filter is less accurate than the lower-order filter \n# in estimating the lower-order derivatives of the signal when only a few measurements \n# are available.\n# Of course, the advantage of a higher-order polynomial Kalman filter is that we are able\n# to estimate higher-order derivatives of the signal.\n#\n# ***************************************************************************************\nW = .1\nTS = 1.0\n\nI = np.identity(3)\nP = np.matrix([[P0, 0.0, 0.0],[0.0, P0, 0.0],[0.0, 0.0, P0]])\nF = np.matrix([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0],[0.0, 0.0, 0.0]])\nXH = np.matrix([[0.0],[0.0],[0.0]])\nQ = np.matrix([[TS**5/20.0, TS**4/8.0, TS**3/6.0],\n\t\t\t   [TS**4/8.0, TS**3/3.0, TS**2/2.0],\n\t\t\t   [TS**3/3.0, TS**2/2.0, TS]])\nHMAT = np.matrix([[1.0, 0.0, 0.0]])\nPHI = I + TS*F + TS**2*F*F/2\nRMAT = np.matrix([[SIGMA_NOISE**2]])\n\nfor T in [x*TS for x in range(0,201)]:\n\tif(T>XLOSE):\n\t\tRMAT[0,0]=999999999999999.\n\t# Compute the Riccati matrices and solve the discrete Riccati equations\n\tM=PHI*P*PHI.transpose()+PHIS*Q\n\tK = M*HMAT.transpose()*(inv(HMAT*M*HMAT.transpose() + RMAT))\n\tP=(I-K*HMAT)*M\t\n\tif add_noise == 1:\n\t\tXNOISE = GAUSS_PY(SIGMA_NOISE)\n\telse:\n\t\tXNOISE = 0.0\n\t# Instead of using a single value for each variable, a matrix is defined\n\tX=np.matrix([[100*T - (20*math.cos(W*T))/W + 20/W],[100 + 20*math.sin(W*T)],[20*W*math.cos(W*T)]])\n\tXS=X[0,0]+XNOISE\n\t# This is the residual.\n\tRES = XS - HMAT*PHI*XH\n\tXH = PHI*XH + K*(XS - 
HMAT*PHI*XH)\n\tSP11=math.sqrt(P[0,0])\n\tSP22=math.sqrt(P[1,1])\n\t# Theoretical value of the residuals estimated in the book\n\tSP44=math.sqrt(HMAT*M*HMAT.transpose()+RMAT)\n\tXhat = XH[0,0]\n\tXDhat= XH[1,0]\n\tXDDhat = XH[2,0]\n\tXHERR=X-XH\n\tSP11P=-SP11\n\tSP22P=-SP22\n\tSP44P=-SP44\n\tt.append(T)\n\tArrayRES.append(RES[0,0])\n\txs.append(XS)\n\tx.append(X[0,0])\n\tx_hat.append(XH[0,0])\n\txd.append(X[1,0])\n\txd_hat.append(XH[1,0])\n\txdd.append(X[2,0])\n\txdd_hat.append(XH[2,0])\n\n\tx_hat_ERR.append(XHERR[0,0])\n\txd_hat_ERR.append(XHERR[1,0])\n\tsp11.append(SP11)\n\tsp11P.append(SP11P)\n\tsp22.append(SP22)\n\tsp22P.append(SP22P)\n\nplt.figure(1)\nplt.grid(True)\nplt.plot(t,x,label='X actual', linewidth=0.6)\nplt.plot(t,x_hat,label='X estimate', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('X Estimate and True Signal')\nplt.ylim(0,20000)\nplt.xlim(0,200)\nplt.legend()\n\nplt.figure(2)\nplt.grid(True)\nplt.plot(t,xd,label='XD actual', linewidth=0.6)\nplt.plot(t,xd_hat,label='XD estimate', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('XD Estimate and True Signal')\nplt.ylim(0,200)\nplt.xlim(0,200)\n\nplt.figure(3)\nplt.grid(True)\nplt.plot(t,x_hat_ERR,label='ERROR', linewidth=0.6)\nplt.plot(t,sp11,label='sp11', linewidth=0.6)\nplt.plot(t,sp11P,label='sp11p', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('Error in Estimate of X')\nplt.xlim(0,200)\nplt.ylim(-200,200)\n\nplt.figure(4)\nplt.grid(True)\nplt.plot(t,x_hat_ERR,label='ERROR', linewidth=0.6)\nplt.xlabel('Time (Sec)')\nplt.ylabel('Error in Estimate of X')\nplt.xlim(0,200)\nplt.ylim(0,8000)\nplt.show()\n","sub_path":"Chapter14/listing14_7.py","file_name":"listing14_7.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"13687704","text":"#\n# Return a number representing the minimum time cost of computing the sum of the n numbers.\n# @param n int, there are n numbers\n# @param c int, parameter c\n# @param a 1-D int array, a[i] is the size of the i-th number\n# @return long\n#\nimport heapq\nclass Solution:\n    def solve(self , n , c , a ):\n        # write code here\n        heapq.heapify(a) # turn the list into a min-heap\n        sum_ = 0\n        while len(a) > 1:\n            a_1 = heapq.heappop(a)\n            a_2 = heapq.heappop(a)\n            b = a_1 + a_2\n            sum_ += b\n            heapq.heappush(a, b)\n        return c*sum_\n\nSol = Solution()\nprint(Sol.solve(5,76,[81,30,76,24,84]))\n\n\n","sub_path":"基础练习/牛客_牛牛算数.py","file_name":"牛客_牛牛算数.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"353136605","text":"\"\"\"add profile_image\n\nRevision ID: 1f5a6a1a28c\nRevises: 20e76fbc8f6\nCreate Date: 2014-08-22 11:15:57.345984\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1f5a6a1a28c'\ndown_revision = '20e76fbc8f6'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    op.add_column('se_user', sa.Column('profile_image', sa.String))\n    op.execute(\"update se_user set profile_image = ''\")\n    op.alter_column('se_user', 'profile_image', nullable=False)\n\n\ndef downgrade():\n    op.drop_column('se_user', 'profile_image')\n","sub_path":"sopy/migrations/1f5a6a1a28c_add_profile_image.py","file_name":"1f5a6a1a28c_add_profile_image.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"94918529","text":"\"\"\"\n@author:Liushihao\n@time:2020/3/5:0:29\n@email:Liushihao_1224@163.com\n@describe:\n\"\"\"\na, b, c = map(float, input(\"Please enter three floats as the edge lengths of a cuboid: \").split())\nvolume = a*b*c\narea 
= (a*b+b*c+c*a)*2\nprint(\"Cuboid volume:\", volume)\nprint(\"Cuboid surface area:\", area)\n","sub_path":"chapter2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"80176723","text":"'''Script asks for a .txt file with words, names, cities,\r\netc that need to be counted and returns a dictionary\r\nwith information'''\r\n\r\n\r\ndef file_reader(file_name):\r\n    #Opens file with word/names to be counted; creates tuple.\r\n    with open(file_name, 'r') as f_obj:\r\n        name = f_obj.read().strip()\r\n    x = name.split('\\n')\r\n    name_tup = tuple(x)\r\n\r\n    return name_tup\r\n\r\n\r\ndef word_dict(name_tup):\r\n    #Makes dictionary of words/name as key and count as value.\r\n    name_dict = {}\r\n    for name in name_tup: \r\n        name_dict[name] = name_tup.count(name)\r\n    \r\n    print(name_dict)\r\n\r\ndef main():\r\n    #Runs program\r\n    filename = input('Enter the name of file: ')\r\n    name_tup = file_reader(filename)\r\n    word_dict(name_tup)\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"readfilewordtally.py","file_name":"readfilewordtally.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"214293202","text":"import re\nimport os\nfrom time import sleep\n\ndef rect(line, grid):\n    m = re.search(r\"rect (\\d+)x(\\d+)\", line.strip())\n    for i in range(int(m.group(2))):\n        for j in range(int(m.group(1))):\n            grid[i][j] = \"#\"\n    return grid\n\ndef rotate(line, grid):\n    m = re.match(r\"rotate (column|row) (x|y)=(\\d+) by (\\d+)\", line.strip())\n    if m.group(2) == \"y\":\n        for _ in range(int(m.group(4))):\n            row = int(m.group(3))\n            last_char = grid[row][-1]\n            for i in range(len(grid[row])-1, 0, -1):\n                grid[row][i] = grid[row][i-1]\n            grid[row][0] = last_char\n    elif m.group(2) == \"x\":\n        for _ in range(int(m.group(4))):\n            col = int(m.group(3))\n            last_char = grid[-1][col]\n            for i in range(len(grid)-1, 0, -1):\n                grid[i][col] = grid[i-1][col]\n            grid[0][col] = last_char\n    else:\n        raise(ValueError(\"Missing x/y\"))\n    return grid\n\ndef display(grid):\n    os.system(\"clear\")\n    for l in grid:\n        print(\"\".join(i for i in l))\n\ndef run(filename = \"input.txt\"):\n    grid = [['.' 
for x in range(50)] for i in range(6)]\n with open(filename) as f:\n for line in f.readlines():\n if \"rect\" in line:\n grid = rect(line, grid)\n else:\n grid = rotate(line, grid)\n # display(grid)\n # sleep(0.05)\n n = 0\n for l in grid:\n for i in l:\n if i == '#':\n n += 1\n return (n, grid)\n\nif __name__ == \"__main__\":\n\n print(run()[0])\n","sub_path":"day8_Two_factor/python/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"513253661","text":"# Copyright 2017-present Adtran, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport random\nimport arrow\n\nimport structlog\nfrom port import AdtnPort\nfrom twisted.internet import reactor, defer\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nfrom adtran_olt_handler import AdtranOltHandler\nfrom net.adtran_rest import RestInvalidResponseCode\nfrom codec.olt_config import OltConfig\nfrom onu import Onu\nfrom voltha.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm\nfrom voltha.protos.common_pb2 import AdminState\nfrom voltha.protos.device_pb2 import Port\n\ntry:\n from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm\nexcept ImportError:\n from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm\n\n\nclass PonPort(AdtnPort):\n \"\"\"\n GPON Port\n \"\"\"\n MAX_ONUS_SUPPORTED = 256\n DEFAULT_ENABLED = False\n MAX_DEPLOYMENT_RANGE = 25000 # Meters (OLT-PB maximum)\n\n _MCAST_ONU_ID = 253\n _MCAST_ALLOC_BASE = 0x500\n\n _SUPPORTED_ACTIVATION_METHODS = ['autodiscovery'] # , 'autoactivate']\n _SUPPORTED_AUTHENTICATION_METHODS = ['serial-number']\n\n def __init__(self, parent, **kwargs):\n\n super(PonPort, self).__init__(parent, **kwargs)\n\n assert 'pon-id' in kwargs, 'PON ID not found'\n\n self._parent = parent\n self._pon_id = kwargs['pon-id']\n self.log = structlog.get_logger(device_id=parent.device_id, pon_id=self._pon_id)\n self._port_no = kwargs['port_no']\n self._name = 'xpon 0/{}'.format(self._pon_id+1)\n self._label = 'pon-{}'.format(self._pon_id)\n\n self._in_sync = False\n self._expedite_sync = False\n self._expedite_count = 0\n\n self._discovery_tick = 20.0\n self._no_onu_discover_tick = self._discovery_tick / 2\n self._discovered_onus = [] # List of serial numbers\n\n self._onus = {} # serial_number-base64 -> ONU (allowed list)\n self._onu_by_id = {} # onu-id -> ONU\n self._next_onu_id = Onu.MIN_ONU_ID + 128\n self._mcast_gem_ports = {} # VLAN -> GemPort\n\n self._discovery_deferred = None # Specifically for ONU discovery\n self._active_los_alarms = set() # ONU-ID\n\n # xPON configuration\n\n self._xpon_name = None\n self._downstream_fec_enable = False\n self._upstream_fec_enable = False\n self._deployment_range = 25000\n self._authentication_method = 'serial-number'\n self._mcast_aes = False\n self._line_rate = 'down_10_up_10'\n self._activation_method = 'autodiscovery'\n\n # Statistics\n self.tx_bip_errors = 
0\n\n    def __str__(self):\n        return \"PonPort-{}: Admin: {}, Oper: {}, OLT: {}\".format(self._label,\n                                                                 self._admin_state,\n                                                                 self._oper_status,\n                                                                 self.olt)\n\n    def get_port(self):\n        \"\"\"\n        Get the VOLTHA PORT object for this port\n        :return: VOLTHA Port object\n        \"\"\"\n        if self._port is None:\n            self._port = Port(port_no=self._port_no,\n                              label=self._label,\n                              type=Port.PON_OLT,\n                              admin_state=self._admin_state,\n                              oper_status=self._oper_status)\n\n        return self._port\n\n    @property\n    def xpon_name(self):\n        return self._xpon_name\n\n    @xpon_name.setter\n    def xpon_name(self, value):\n        assert '/' not in value, \"xPON names cannot have embedded forward slashes '/'\"\n        self._xpon_name = value\n\n    @property\n    def pon_id(self):\n        return self._pon_id\n\n    @property\n    def onus(self):\n        \"\"\"\n        Get a set of all ONUs.  While the set is immutable, do not use this method\n        to get a collection that you will iterate through while yielding the CPU\n        (such as in an inline callback).  ONUs may be deleted at any time, and during\n        the 'delete' call they will set some references to other objects to None.\n        Instead, get a list of ONU IDs and iterate over those, calling the 'onu'\n        method below (which will return None if the ONU has been deleted).\n\n        :return: (frozenset) collection of ONU objects on this PON\n        \"\"\"\n        return frozenset(self._onus.values())\n\n    @property\n    def onu_ids(self):\n        return frozenset(self._onu_by_id.keys())\n\n    def onu(self, onu_id):\n        return self._onu_by_id.get(onu_id)\n\n    @property\n    def in_service_onus(self):\n        return len({onu.onu_id for onu in self.onus\n                    if onu.onu_id not in self._active_los_alarms})\n\n    @property\n    def closest_onu_distance(self):\n        distance = -1\n        for onu in self.onus:\n            if onu.fiber_length < distance or distance == -1:\n                distance = onu.fiber_length\n        return distance\n\n    @property\n    def downstream_fec_enable(self):\n        return self._downstream_fec_enable\n\n    @downstream_fec_enable.setter\n    def downstream_fec_enable(self, value):\n        assert isinstance(value, bool), 'downstream FEC enabled is a boolean'\n\n        if self._downstream_fec_enable != value:\n            self._downstream_fec_enable = value\n            if self.state == AdtnPort.State.RUNNING:\n                self.deferred = self._set_pon_config(\"downstream-fec-enable\", value)\n\n    @property\n    def upstream_fec_enable(self):\n        return self._upstream_fec_enable\n\n    @upstream_fec_enable.setter\n    def upstream_fec_enable(self, value):\n        assert isinstance(value, bool), 'upstream FEC enabled is a boolean'\n        if self._upstream_fec_enable != value:\n            self._upstream_fec_enable = value\n            if self.state == AdtnPort.State.RUNNING:\n                self.deferred = self._set_pon_config(\"upstream-fec-enable\", value)\n\n    @property\n    def any_upstream_fec_enabled(self):\n        for onu in self.onus:\n            if onu.upstream_fec_enable and onu.enabled:\n                return True\n        return False\n\n    @property\n    def mcast_aes(self):\n        return self._mcast_aes\n\n    @mcast_aes.setter\n    def mcast_aes(self, value):\n        assert isinstance(value, bool), 'MCAST AES is a boolean'\n        if self._mcast_aes != value:\n            self._mcast_aes = value\n            if self.state == AdtnPort.State.RUNNING:\n                pass    # TODO\n\n    @property\n    def line_rate(self):\n        return self._line_rate\n\n    @line_rate.setter\n    def line_rate(self, value):\n        assert isinstance(value, (str, unicode)), 'Line Rate is a string'\n        # TODO cast to enum\n        if self._line_rate != value:\n            self._line_rate = value\n            if self.state == AdtnPort.State.RUNNING:\n                pass    # TODO\n\n    @property\n    def deployment_range(self):\n        \"\"\"Maximum deployment range (in meters)\"\"\"\n        return self._deployment_range\n\n    
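# (illustrative) each setter below mirrors the FEC setters above: validate the value, store it, and\n    # when the port is already RUNNING push the change to hardware, e.g. pon.deployment_range = 20000\n    # ends up as a PATCH of {\"deployment-range\": 20000} against the PON config URI via _set_pon_config.\n    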
@deployment_range.setter\n def deployment_range(self, value):\n \"\"\"Maximum deployment range (in meters)\"\"\"\n if not 0 <= value <= PonPort.MAX_DEPLOYMENT_RANGE:\n raise ValueError('Deployment range should be 0..{} meters'.\n format(PonPort.MAX_DEPLOYMENT_RANGE))\n if self._deployment_range != value:\n self._deployment_range = value\n if self.state == AdtnPort.State.RUNNING:\n self.deferred = self._set_pon_config(\"deployment-range\", value)\n\n @property\n def discovery_tick(self):\n return self._discovery_tick * 10\n \n @discovery_tick.setter\n def discovery_tick(self, value):\n if value < 0:\n raise ValueError(\"Polling interval must be >= 0\")\n\n if self.discovery_tick != value:\n self._discovery_tick = value / 10\n\n try:\n if self._discovery_deferred is not None and \\\n not self._discovery_deferred.called:\n self._discovery_deferred.cancel()\n except:\n pass\n self._discovery_deferred = None\n\n if self._discovery_tick > 0:\n self._discovery_deferred = reactor.callLater(self._discovery_tick,\n self._discover_onus)\n\n @property\n def activation_method(self):\n return self._activation_method\n\n @activation_method.setter\n def activation_method(self, value):\n value = value.lower()\n if value not in PonPort._SUPPORTED_ACTIVATION_METHODS:\n raise ValueError('Invalid ONU activation method')\n self._activation_method = value\n\n @property\n def authentication_method(self):\n return self._authentication_method\n\n @authentication_method.setter\n def authentication_method(self, value):\n value = value.lower()\n if value not in PonPort._SUPPORTED_AUTHENTICATION_METHODS:\n raise ValueError('Invalid ONU authentication method')\n self._authentication_method = value\n\n def cancel_deferred(self):\n super(PonPort, self).cancel_deferred()\n\n d, self._discovery_deferred = self._discovery_deferred, None\n\n try:\n if d is not None and not d.called:\n d.cancel()\n except Exception as e:\n pass\n\n def _update_adapter_agent(self):\n \"\"\"\n Update the port status and state in the core\n \"\"\"\n self.log.debug('update-adapter-agent', admin_state=self._admin_state,\n oper_status=self._oper_status)\n\n # because the core does not provide methods for updating admin\n # and oper status per port, we need to copy any existing port\n # info so that we don't wipe out the peers\n if self._port is not None:\n agent_ports = self.adapter_agent.get_ports(self.olt.device_id, Port.PON_OLT)\n\n agent_port = next((ap for ap in agent_ports if ap.port_no == self._port_no), None)\n\n # copy current Port info\n if agent_port is not None:\n self._port = agent_port\n\n # set new states\n self._port.admin_state = self._admin_state\n self._port.oper_status = self._oper_status\n\n # adapter_agent add_port also does an update of existing port\n self.adapter_agent.add_port(self.olt.device_id, self.get_port())\n\n @inlineCallbacks\n def finish_startup(self):\n \"\"\"\n Do all startup offline since REST may fail\n \"\"\"\n if self.state != AdtnPort.State.INITIAL:\n returnValue('Done')\n\n self.log.debug('final-startup')\n results = None\n\n try:\n self.deferred = self._get_pon_config()\n results = yield self.deferred\n\n except Exception as e:\n self.log.exception('initial-GET', e=e)\n self.deferred = reactor.callLater(5, self.finish_startup)\n returnValue(self.deferred)\n\n # Load config from hardware\n\n enabled = results.get('enabled', False)\n downstream_fec_enable = results.get('downstream-fec-enable', False)\n upstream_fec_enable = results.get('upstream-fec-enable', False)\n deployment_range = 
results.get('deployment-range', 25000)\n self._in_sync = True\n\n if enabled != self._enabled:\n try:\n self.deferred = self._set_pon_config(\"enabled\", True)\n yield self.deferred\n\n except Exception as e:\n self.log.exception('final-startup-enable', e=e)\n self.deferred = reactor.callLater(3, self.finish_startup)\n returnValue(self.deferred)\n\n if downstream_fec_enable != self._downstream_fec_enable:\n try:\n self.deferred = self._set_pon_config(\"downstream-fec-enable\",\n self._downstream_fec_enable)\n yield self.deferred\n\n except Exception as e:\n self.log.warning('final-startup-downstream-FEC', e=e)\n self._in_sync = False\n # Non-fatal. May have failed due to no SFQ in slot\n\n if upstream_fec_enable != self._upstream_fec_enable:\n try:\n self.deferred = self._set_pon_config(\"upstream-fec-enable\",\n self._upstream_fec_enable)\n yield self.deferred\n\n except Exception as e:\n self.log.warning('final-startup-upstream-FEC', e=e)\n self._in_sync = False\n # Non-fatal. May have failed due to no SFQ in slot\n\n if deployment_range != self._deployment_range:\n try:\n self.deferred = self._set_pon_config(\"deployment-range\",\n self._deployment_range)\n yield self.deferred\n\n except Exception as e:\n self.log.warning('final-startup-deployment-range', e=e)\n self._in_sync = False\n # Non-fatal. May have failed due to no SFQ in slot\n\n if len(self._onus) > 0:\n dl = []\n for onu_id in self.onu_ids:\n onu = self.onu(onu_id)\n if onu is not None:\n dl.append(onu.restart())\n yield defer.gatherResults(dl, consumeErrors=True)\n\n # Begin to ONU discovery and hardware sync\n\n self._discovery_deferred = reactor.callLater(5, self._discover_onus)\n\n # If here, initial settings were successfully written to hardware\n\n super(PonPort, self).finish_startup()\n returnValue('Enabled')\n\n def finish_stop(self):\n # Remove all existing ONUs. They will need to be re-discovered\n dl = []\n onu_ids = frozenset(self._onu_by_id.keys())\n for onu_id in onu_ids:\n try:\n dl.append(self.delete_onu(onu_id))\n\n except Exception as e:\n self.log.exception('onu-cleanup', onu_id=onu_id, e=e)\n\n dl.append(self._set_pon_config(\"enabled\", False))\n\n return defer.gatherResults(dl, consumeErrors=True)\n\n @inlineCallbacks\n def reset(self):\n \"\"\"\n Set the PON Port to a known good state on initial port startup. 
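In practice this writes the disabled admin state and deletes any previously provisioned ONUs. 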
Actual\n PON 'Start' is done elsewhere\n \"\"\"\n initial_port_state = AdminState.DISABLED\n self.log.info('reset', initial_state=initial_port_state)\n\n try:\n self.deferred = self._get_pon_config()\n results = yield self.deferred\n enabled = results.get('enabled', False)\n\n except Exception as e:\n self.log.exception('get-config', e=e)\n enabled = False\n\n enable = initial_port_state == AdminState.ENABLED\n\n if enable != enabled:\n try:\n self.deferred = yield self._set_pon_config(\"enabled\", enable)\n except Exception as e:\n self.log.exception('reset-enabled', e=e, enabled=enabled)\n\n # TODO: Move to 'set_pon_config' method and also make sure GRPC/Port is ok\n self._admin_state = AdminState.ENABLED if enable else AdminState.DISABLED\n\n try:\n # Walk the provisioned ONU list and disable any exiting ONUs\n results = yield self._get_onu_config()\n\n if isinstance(results, list) and len(results) > 0:\n onu_configs = OltConfig.Pon.Onu.decode(results)\n dl = []\n for onu_id in onu_configs.iterkeys():\n dl.append(self.delete_onu(onu_id))\n\n try:\n if len(dl) > 0:\n yield defer.gatherResults(dl, consumeErrors=True)\n\n except Exception as e:\n self.log.exception('rest-ONU-delete', e=e)\n pass # Non-fatal\n\n except Exception as e:\n self.log.exception('onu-delete', e=e)\n\n returnValue('Reset complete')\n\n def gem_ids(self, logical_port, untagged_gem, multicast_gems=False):\n \"\"\"\n Get all GEM Port IDs used on a given PON\n\n :param logical_port: (int) Logical port umber of ONU. None if for all ONUs\n on PON, if Multicast, VID for Multicast, or None for all\n Multicast GEMPorts\n :param untagged_gem: (boolean) Select from special purpose untagged GEM Port\n :param multicast_gems: (boolean) Select from available Multicast GEM Ports\n :return: (dict) data_gem -> key -> onu-id, value -> tuple(sorted list of GEM Port IDs, onu_vid)\n mcast_gem-> key -> mcast-vid, value -> GEM Port IDs\n \"\"\"\n gem_ids = {}\n\n if multicast_gems:\n # Multicast GEMs belong to the PON, but we may need to register them on\n # all ONUs. 
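For now the table is keyed by multicast VLAN. 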
Rework when BBF MCAST Gems are supported\n for vlan, gem_port in self._mcast_gem_ports.iteritems(): # TODO: redo logic\n if logical_port is None or (logical_port == vlan and logical_port in self.olt.multicast_vlans):\n gem_ids[vlan] = ([gem_port.gem_id], None)\n else:\n for onu_id, onu in self._onu_by_id.iteritems():\n if logical_port is None or logical_port == onu.logical_port:\n gem_ids[onu_id] = (onu.gem_ids(untagged_gem),\n onu.onu_vid if not untagged_gem\n else self.olt.untagged_vlan)\n return gem_ids\n\n def _get_pon_config(self):\n uri = AdtranOltHandler.GPON_PON_CONFIG_URI.format(self._pon_id)\n name = 'pon-get-config-{}'.format(self._pon_id)\n return self._parent.rest_client.request('GET', uri, name=name)\n\n def _get_onu_config(self, onu_id=None):\n if onu_id is None:\n uri = AdtranOltHandler.GPON_ONU_CONFIG_LIST_URI.format(self._pon_id)\n else:\n uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, onu_id)\n\n name = 'pon-get-onu_config-{}-{}'.format(self._pon_id, onu_id)\n return self._parent.rest_client.request('GET', uri, name=name)\n\n def _set_pon_config(self, leaf, value):\n data = json.dumps({leaf: value})\n uri = AdtranOltHandler.GPON_PON_CONFIG_URI.format(self._pon_id)\n name = 'pon-set-config-{}-{}-{}'.format(self._pon_id, leaf, str(value))\n return self._parent.rest_client.request('PATCH', uri, data=data, name=name)\n\n def _discover_onus(self):\n self.log.debug('discovery', state=self._admin_state, in_sync=self._in_sync)\n if self._admin_state == AdminState.ENABLED:\n if self._in_sync:\n data = json.dumps({'pon-id': self._pon_id})\n uri = AdtranOltHandler.GPON_PON_DISCOVER_ONU\n name = 'pon-discover-onu-{}'.format(self._pon_id)\n\n self._discovery_deferred = self._parent.rest_client.request('POST', uri, data, name=name)\n self._discovery_deferred.addBoth(self._onu_discovery_init_complete)\n else:\n self.discovery_deferred = reactor.callLater(0,\n self._onu_discovery_init_complete,\n None)\n\n def _onu_discovery_init_complete(self, _result):\n \"\"\"\n This method is called after the REST POST to request ONU discovery is\n completed. 
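Discovered serial numbers surface later through the periodic status poll. 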
The results (body) of the post is always empty / 204 NO CONTENT\n \"\"\"\n delay = self._no_onu_discover_tick if len(self._onus) == 0 else self._discovery_tick\n delay += random.uniform(-delay / 10, delay / 10)\n self._discovery_deferred = reactor.callLater(delay, self._discover_onus)\n\n def sync_hardware(self):\n if self.state == AdtnPort.State.RUNNING or self.state == AdtnPort.State.STOPPED:\n def read_config(results):\n self.log.debug('read-config', results=results)\n config = OltConfig.Pon.decode([results])\n assert self.pon_id in config, 'sync-pon-not-found-{}'.format(self.pon_id)\n config = config[self.pon_id]\n self._in_sync = True\n\n dl = []\n\n if self.enabled != config.enabled:\n self._in_sync = False\n self._expedite_sync = True\n dl.append(self._set_pon_config(\"enabled\", self.enabled))\n\n elif self.state == AdtnPort.State.RUNNING:\n if self.deployment_range != config.deployment_range:\n self._in_sync = False\n self._expedite_sync = True\n dl.append(self._set_pon_config(\"deployment-range\",\n self.deployment_range))\n\n if self.downstream_fec_enable != config.downstream_fec_enable:\n self._in_sync = False\n self._expedite_sync = True\n dl.append(self._set_pon_config(\"downstream-fec-enable\",\n self.downstream_fec_enable))\n\n if self.upstream_fec_enable != config.upstream_fec_enable:\n self._in_sync = False\n self._expedite_sync = True\n dl.append(self._set_pon_config(\"upstream-fec-enable\",\n self.upstream_fec_enable))\n defer.gatherResults(dl, consumeErrors=True)\n return config.onus\n\n def sync_onus(hw_onus):\n if self.state == AdtnPort.State.RUNNING:\n self.log.debug('sync-pon-onu-results', config=hw_onus)\n\n # ONU's have their own sync task, extra (should be deleted) are\n # handled here. Missing are handled by normal discovery mechanisms.\n\n hw_onu_ids = frozenset(hw_onus.keys())\n my_onu_ids = frozenset(self._onu_by_id.keys())\n\n extra_onus = hw_onu_ids - my_onu_ids\n dl = [self.delete_onu(onu_id) for onu_id in extra_onus]\n\n return defer.gatherResults(dl, consumeErrors=True)\n\n def failure(reason, what):\n self.log.error('hardware-sync-{}-failed'.format(what), reason=reason)\n self._in_sync = False\n self._expedite_sync = False\n\n def reschedule(_):\n # Speed up sequential resync a limited number of times if out of sync.\n\n delay = self.sync_tick\n\n if self._expedite_sync:\n self._expedite_count += 1\n if self._expedite_count < 5:\n delay = 1\n else:\n self._expedite_count = 0\n\n delay += random.uniform(-delay / 10, delay / 10)\n self.sync_deferred = reactor.callLater(delay, self.sync_hardware)\n\n self.sync_deferred = self._get_pon_config()\n self.sync_deferred.addCallbacks(read_config, failure, errbackArgs=['get-config'])\n self.sync_deferred.addCallbacks(sync_onus, failure, errbackArgs=['pon-sync'])\n self.sync_deferred.addBoth(reschedule)\n\n def process_status_poll(self, status):\n \"\"\"\n Process PON status poll request\n \n :param status: (OltState.Pon object) results from RESTCONF GET\n \"\"\"\n self.log.debug('process-status-poll', status=status)\n\n if self._admin_state != AdminState.ENABLED:\n return\n\n # Process LOS list\n self._process_los_alarms(frozenset(status.ont_los))\n\n # Get new/missing from the discovered ONU leaf. 
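New ones are activated via add_onu below. 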
Stale ONUs from previous\n        # configs are now cleaned up during h/w re-sync/reflow.\n\n        new, rediscovered_onus = self._process_status_onu_discovered_list(status.discovered_onu)\n\n        # Process newly discovered ONU list and rediscovered ONUs\n\n        for serial_number in new | rediscovered_onus:\n            reactor.callLater(0, self.add_onu, serial_number, status)\n\n        # PON Statistics\n        timestamp = arrow.utcnow().float_timestamp\n        self._process_statistics(status, timestamp)\n\n        # Process ONU info. Note that newly added ONUs will not be processed\n        # until the next pass\n        self._update_onu_status(status.onus, timestamp)\n\n        # Process GEM Port information\n        self._update_gem_status(status.gems, timestamp)\n\n    def _handle_discovered_onu(self, child_device, ind_info):\n        pon_id = ind_info['_pon_id']\n        olt_id = ind_info['_olt_id']\n\n        if ind_info['_sub_group_type'] == 'onu_discovery':\n            self.log.info('Activation-is-in-progress', olt_id=olt_id,\n                          pon_ni=pon_id, onu_data=ind_info,\n                          onu_id=child_device.proxy_address.onu_id)\n\n        elif ind_info['_sub_group_type'] == 'sub_term_indication':\n            self.log.info('ONU-activation-is-completed', olt_id=olt_id,\n                          pon_ni=pon_id, onu_data=ind_info)\n\n            msg = {'proxy_address': child_device.proxy_address,\n                   'event': 'activation-completed', 'event_data': ind_info}\n\n            # Send the event message to the ONU adapter\n            self.adapter_agent.publish_inter_adapter_message(child_device.id,\n                                                             msg)\n            if ind_info['activation_successful'] is True:\n                for key, v_ont_ani in dict():  # self.v_ont_anis.items():\n                    if v_ont_ani.v_ont_ani.data.onu_id == \\\n                            child_device.proxy_address.onu_id:\n                        for tcont_key, tcont in v_ont_ani.tconts.items():\n                            owner_info = dict()\n                            # TODO: right now alloc_id is used as the scheduler ID. Need to\n                            # find a way to generate a unique number.\n                            id = tcont.alloc_id\n                            owner_info['type'] = 'agg_port'\n                            owner_info['intf_id'] = \\\n                                child_device.proxy_address.channel_id\n                            owner_info['onu_id'] = \\\n                                child_device.proxy_address.onu_id\n                            owner_info['alloc_id'] = tcont.alloc_id\n                            # self.bal.create_scheduler(id, 'upstream', owner_info, 8)\n        else:\n            self.log.info('Invalid-ONU-event', olt_id=olt_id,\n                          pon_ni=ind_info['_pon_id'], onu_data=ind_info)\n\n    def _process_statistics(self, status, timestamp):\n        self.timestamp = timestamp\n        self.rx_packets = status.rx_packets\n        self.rx_bytes = status.rx_bytes\n        self.tx_packets = status.tx_packets\n        self.tx_bytes = status.tx_bytes\n        self.tx_bip_errors = status.tx_bip_errors\n\n    def _update_onu_status(self, onus, timestamp):\n        \"\"\"\n        Process ONU status for this PON\n        :param onus: (dict) onu_id: ONU State\n        \"\"\"\n        for onu_id, onu_status in onus.iteritems():\n            if onu_id in self._onu_by_id:\n                onu = self._onu_by_id[onu_id]\n                onu.timestamp = timestamp\n                onu.rssi = onu_status.rssi\n                onu.equalization_delay = onu_status.equalization_delay\n                onu.fiber_length = onu_status.fiber_length\n                onu.password = onu_status.reported_password\n\n    def _update_gem_status(self, gems, timestamp):\n        for gem_id, gem_status in gems.iteritems():\n            onu = self._onu_by_id.get(gem_status.onu_id)\n            if onu is not None:\n                gem_port = onu.gem_port(gem_status.gem_id)\n                if gem_port is not None:\n                    gem_port.timestamp = timestamp\n                    gem_port.rx_packets = gem_status.rx_packets\n                    gem_port.rx_bytes = gem_status.rx_bytes\n                    gem_port.tx_packets = gem_status.tx_packets\n                    gem_port.tx_bytes = gem_status.tx_bytes\n\n    def _process_los_alarms(self, ont_los):\n        \"\"\"\n        Walk current LOS and set/clear LOS as appropriate\n        :param ont_los: (frozenset) ONU IDs of ONUs 
in LOS alarm state\n \"\"\"\n cleared_alarms = self._active_los_alarms - ont_los\n new_alarms = ont_los - self._active_los_alarms\n\n if len(cleared_alarms) > 0 or len(new_alarms) > 0:\n self.log.info('onu-los', cleared=cleared_alarms, new=new_alarms)\n\n for onu_id in cleared_alarms:\n self._active_los_alarms.remove(onu_id)\n OnuLosAlarm(self.olt.alarms, onu_id).clear_alarm()\n\n for onu_id in new_alarms:\n self._active_los_alarms.add(onu_id)\n OnuLosAlarm(self.olt.alarms, onu_id).raise_alarm()\n self.delete_onu(onu_id)\n\n def _process_status_onu_discovered_list(self, discovered_onus):\n \"\"\"\n Look for new ONUs\n \n :param discovered_onus: (frozenset) Set of ONUs currently discovered\n \"\"\"\n self.log.debug('discovered-ONUs', list=discovered_onus)\n\n # Only request discovery if activation is auto-discovery or auto-activate\n continue_discovery = ['autodiscovery'] # , 'autoactivate']\n\n if self._activation_method not in continue_discovery:\n return set(), set()\n\n my_onus = frozenset(self._onus.keys())\n\n new_onus = discovered_onus - my_onus\n rediscovered_onus = my_onus & discovered_onus\n\n return new_onus, rediscovered_onus\n\n def _get_onu_info(self, serial_number):\n \"\"\"\n Parse through available xPON information for ONU configuration settings\n :param serial_number: (string) Decoded (not base64) serial number string\n :return: (dict) onu config data or None on lookup failure\n \"\"\"\n try:\n if self.activation_method == \"autodiscovery\":\n if self.authentication_method == 'serial-number':\n gpon_info = self.olt.get_xpon_info(self.pon_id)\n\n try:\n # TODO: Change iteration to itervalues below\n vont_info = next(info for _, info in gpon_info['vont-anis'].items()\n if info.get('expected-serial-number') == serial_number)\n\n # ont_info = next(info for _, info in gpon_info['ont-anis'].items()\n # if info.get('name') == vont_info['name'])\n\n vont_ani = vont_info['data']\n onu_id = vont_info['onu-id']\n enabled = vont_info['enabled']\n channel_speed = vont_info['upstream-channel-speed']\n xpon_name = vont_info['name']\n upstream_fec_enabled = True # TODO: ont_info.get('upstream-fec', False)\n\n tconts = {key: val for key, val in gpon_info['tconts'].iteritems()\n if val.vont_ani == vont_info['name']}\n\n gem_ports = {key: val for key, val in gpon_info['gem-ports'].iteritems()\n if val.tcont_ref in tconts.keys()}\n\n venet = next((val for val in gpon_info['v-enets'].itervalues()\n if val['vont-ani'] == vont_info['name']), None)\n # TODO: need to handle case where ont_ani, gems, venets, tconts are assigned\n # after activation is started. 
only vont-ani needs to be set to get here\n\n except StopIteration:\n # Can happen if vont-ani or ont-ani has not yet been configured\n self.log.debug('no-vont-or-ont')\n return None\n\n except Exception as e:\n self.log.exception('autodiscovery', e=e)\n raise\n else:\n self.log.debug('not-serial-number-authentication')\n return None\n else:\n self.log.debug('not-auto-discovery')\n return None\n\n onu_info = {\n 'device-id': self.olt.device_id,\n 'serial-number': serial_number,\n 'xpon-name': xpon_name,\n 'pon': self,\n 'onu-id': onu_id,\n 'enabled': enabled,\n 'upstream-channel-speed': channel_speed,\n 'upstream-fec': upstream_fec_enabled,\n 'password': Onu.DEFAULT_PASSWORD,\n 't-conts': tconts,\n 'gem-ports': gem_ports,\n 'onu-vid': self.olt.get_onu_vid(onu_id),\n 'channel-id': self.olt.get_channel_id(self._pon_id, onu_id),\n 'vont-ani': vont_ani,\n 'venet': venet\n }\n # Hold off ONU activation until at least one GEM Port is defined.\n self.log.debug('onu-info', gem_ports=gem_ports)\n\n # return onu_info\n return onu_info if len(gem_ports) > 0 and venet is not None else None\n\n except Exception as e:\n self.log.exception('get-onu-info', e=e)\n return None\n\n @inlineCallbacks\n def add_onu(self, serial_number_64, status):\n serial_number = Onu.serial_number_to_string(serial_number_64)\n self.log.info('add-onu', serial_number=serial_number,\n serial_number_64=serial_number_64, status=status)\n onu_info = self._get_onu_info(serial_number)\n\n if onu_info is None:\n self.log.info('onu-lookup-failure', serial_number=serial_number,\n serial_number_64=serial_number_64)\n OnuDiscoveryAlarm(self.olt.alarms, self.pon_id, serial_number).raise_alarm()\n returnValue('new-onu')\n\n if serial_number_64 not in status.onus or onu_info['onu-id'] in self._active_los_alarms:\n onu = None\n onu_id = onu_info['onu-id']\n\n if serial_number_64 in self._onus and onu_id in self._onu_by_id:\n # Handles fast entry into this task before FPGA can set/clear results\n returnValue('sticky-onu')\n\n elif (serial_number_64 in self._onus and onu_id not in self._onu_by_id) or \\\n (serial_number_64 not in self._onus and onu_id in self._onu_by_id):\n # May be here due to unmanaged power-cycle on OLT or fiber bounced for a\n # previously activated ONU. 
Drop it and add back on next discovery cycle\n self.delete_onu(onu_id)\n\n elif len(self._onus) >= self.MAX_ONUS_SUPPORTED:\n self.log.warning('max-onus-provisioned', count=len(self._onus))\n returnValue('max-onus-reached')\n\n else:\n # TODO: Make use of upstream_channel_speed variable\n onu = Onu(onu_info)\n self._onus[serial_number_64] = onu\n self._onu_by_id[onu.onu_id] = onu\n\n if onu is not None:\n try:\n tconts = onu_info['t-conts']\n gem_ports = onu_info['gem-ports']\n\n # Add Multicast to PON on a per-ONU basis until xPON multicast support is ready\n # In xPON/BBF, mcast gems tie back to the channel-pair\n # MCAST VLAN IDs stored as a negative value\n\n for id_or_vid, gem_port in gem_ports.iteritems(): # TODO: Deprecate this when BBF ready\n try:\n if gem_port.multicast:\n self.log.debug('id-or-vid', id_or_vid=id_or_vid)\n vid = self.olt.multicast_vlans[0] if len(self.olt.multicast_vlans) else None\n if vid is not None:\n self.add_mcast_gem_port(gem_port, vid)\n except Exception as e:\n self.log.exception('id-or-vid', e=e)\n\n yield onu.create(tconts, gem_ports)\n\n except Exception as e:\n self.log.exception('add-onu', serial_number=serial_number_64, e=e)\n del self._onus[serial_number_64]\n del self._onu_by_id[onu.onu_id]\n\n def get_next_onu_id(self):\n used_ids = [onu.onu_id for onu in self.onus]\n\n while True:\n onu_id = self._next_onu_id\n self._next_onu_id += 1\n\n if self._next_onu_id > Onu.MAX_ONU_ID:\n self._next_onu_id = Onu.MIN_ONU_ID + 128\n\n if onu_id not in used_ids:\n return onu_id\n\n @inlineCallbacks\n def _remove_from_hardware(self, onu_id):\n uri = AdtranOltHandler.GPON_ONU_CONFIG_URI.format(self._pon_id, onu_id)\n name = 'pon-delete-onu-{}-{}'.format(self._pon_id, onu_id)\n\n try:\n yield self._parent.rest_client.request('DELETE', uri, name=name)\n\n except RestInvalidResponseCode as e:\n if e.code != 404:\n self.log.exception('onu-delete', e=e)\n\n except Exception as e:\n self.log.exception('onu-hw-delete', onu_id=onu_id, e=e)\n\n @inlineCallbacks\n def delete_onu(self, onu_id):\n onu = self._onu_by_id.get(onu_id)\n\n # Remove from any local dictionary\n if onu_id in self._onu_by_id:\n del self._onu_by_id[onu_id]\n\n for sn_64 in [onu.serial_number_64 for onu in self.onus if onu.onu_id == onu_id]:\n del self._onus[sn_64]\n\n if onu is not None:\n proxy = onu.proxy_address\n try:\n onu.delete()\n\n except Exception as e:\n self.log.exception('onu-delete', serial_number=onu.serial_number, e=e)\n\n else:\n try:\n yield self._remove_from_hardware(onu_id)\n\n except Exception as e:\n self.log.exception('onu-remove', serial_number=onu.serial_number, e=e)\n\n def add_mcast_gem_port(self, mcast_gem, vlan):\n \"\"\"\n Add any new Multicast GEM Ports to the PON\n :param mcast_gem: (GemPort)\n \"\"\"\n if vlan in self._mcast_gem_ports:\n return\n\n assert len(self._mcast_gem_ports) == 0, 'Only 1 MCAST GEMPort until BBF Support'\n assert 1 <= vlan <= 4095, 'Invalid Multicast VLAN ID'\n assert len(self.olt.multicast_vlans) == 1, 'Only support 1 MCAST VLAN until BBF Support'\n\n self._mcast_gem_ports[vlan] = mcast_gem\n\n @inlineCallbacks\n def channel_partition(self, name, partition=0, xpon_system=0, operation=None):\n \"\"\"\n Delete/enable/disable a specified channel partition on this PON.\n\n When creating a new Channel Partition, create it disabled, then define any associated\n Channel Pairs. 
Then enable the Channel Partition.\n\n        :param name: (string) Name of the channel partition\n        :param partition: (int: 0..15) An index of the operator-specified channel subset\n                          in an NG-PON2 system. For XGS-PON, this is typically 0\n        :param xpon_system: (int: 0..1048575) Identifies a specific xPON system\n        :param operation: (string) 'delete', 'enable', or 'disable'\n        \"\"\"\n        if operation.lower() not in ['delete', 'enable', 'disable']:\n            raise ValueError('Unsupported operation: {}'.format(operation))\n\n        try:\n            xml = 'interfaces xmlns=\"urn:ietf:params:xml:ns:yang:ietf-interfaces\"'\n\n            if operation.lower() == 'delete':\n                xml += ''\n            else:\n                xml += ''\n                xml += '' +\\\n                       'adtn-xp:xpon-channel-partition'\n                xml += ''\n                xml += ' {}'.format(partition)\n                xml += ' {}'.format(xpon_system)\n                xml += ''\n                xml += '{}'.format('true' if operation.lower() == 'enable' else 'false')\n\n            xml += '{}'.format(name)\n            xml += ''\n\n            results = yield self.olt.netconf_client.edit_config(xml)\n            returnValue(results)\n\n        except Exception as e:\n            self.log.exception('channel_partition')\n            raise\n\n    @inlineCallbacks\n    def channel_pair(self, name, partition, operation=None, **kwargs):\n        \"\"\"\n        Create/delete a channel pair on a specific channel_partition for a PON\n\n        :param name: (string) Name of the channel pair\n        :param partition: (string) Name of the channel partition\n        :param operation: (string) 'delete', 'enable', or 'disable'\n        :param kwargs: (dict) Additional leaf settings if desired\n        \"\"\"\n        if operation.lower() not in ['delete', 'enable', 'disable']:\n            raise ValueError('Unsupported operation: {}'.format(operation))\n\n        try:\n            xml = 'interfaces xmlns=\"urn:ietf:params:xml:ns:yang:ietf-interfaces\"'\n\n            if operation.lower() == 'delete':\n                xml += ''\n            else:\n                xml += ''\n                xml += '' +\\\n                       'adtn-xp:xpon-channel-pair'\n                xml += ''\n                xml += ' {}'.format(partition)\n                xml += ' channel-termination {}'.\\\n                    format(self.pon_id)\n                xml += ' {}'.\\\n                    format(kwargs.get('upstream-admin-label', 1))\n                xml += ' {}'.\\\n                    format(kwargs.get('downstream-admin-label', 1))\n                xml += ' {}'.\\\n                    format(kwargs.get('upstream-channel-id', 15))\n                xml += ' {}'.\\\n                    format(kwargs.get('downstream-channel-id', 15))\n                xml += ' {}'. \\\n                    format('true' if kwargs.get('downstream-channel-fec-enable', True) else 'false')\n                xml += ' {}'. 
\\\n format('true' if kwargs.get('upstream-channel-fec-enable', True) else 'false')\n xml += ''\n # TODO: Add support for upstream/downstream FEC-enable coming from here and not hard-coded\n\n xml += '{}'.format(name)\n xml += ''\n\n results = yield self.olt.netconf_client.edit_config(xml)\n returnValue(results)\n\n except Exception as e:\n self.log.exception('channel_pair')\n raise\n","sub_path":"voltha/adapters/adtran_olt/pon_port.py","file_name":"pon_port.py","file_ext":"py","file_size_in_byte":44254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558390045","text":"from rsa import decrypt_rsa\n\nencoded = open(\"encoded.txt\")\n\nencrypted_ints = []\n\nfor line in encoded:\n if line[0] == 'p':\n if line[1] == 'u':\n continue\n elif line[0] == 'n':\n n = line[3:]\n elif line[0] == 'd':\n d = line[3:]\n private_key=(int(n.strip()),int(d.strip()))\n else:\n encrypted_ints.append(int(line.strip()))\n\nencoded.close() \n\ndecrypted_ints = []\nfor i in encrypted_ints:\n decrypted_ints.append(decrypt_rsa(private_key, i))\n\nplain_chars = []\nfor c in decrypted_ints:\n plain_chars.append(chr(c))\n \nplain_msg = ''.join(plain_chars)\n\ndecoded = open(\"decoded.txt\", \"a\")\ndecoded.write(plain_msg)\ndecoded.close()","sub_path":"encrypt_message/dec.py","file_name":"dec.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233494126","text":"#!/usr/bin/python3\n\nimport pandas as pd\n\n\n\nsize = 1000000\n\ndf = pd.read_csv('all_data.csv', chunksize=size)\n\n\nheader=True\n\nfor chunk in df:\n\tchunk_filter= chunk['cadenaComercial']\n\t#print(chunk_filter.unique())\n\tchunk_filter.to_csv('out.csv', header=header, mode='a',index=False)\n\theader= False\n","sub_path":"solopregunta1.py","file_name":"solopregunta1.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"510133708","text":"#Import pandas\nimport pandas as pd\nfrom pandas import Series, DataFrame\n\n#Read data from csv file\ndata2 = pd.read_csv(\"close_price_matched_date.csv\")\n\n#Function to create 20 days moving average\ndef rolling_mean(df):\n for column in df:\n index = df.columns.get_loc(column)\n if index == 0:\n continue\n else:\n df.insert(index+1,column+\"_20_days_mean\",df[column].rolling(20).mean())\n \n#Apply rolling mean column to data frame\nrolling_mean(data2)\n\n","sub_path":"bollinger.py","file_name":"bollinger.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108480436","text":"class Solution(object):\n def arrayNesting(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ret = 0\n\n visit = set()\n for i in nums:\n if i in visit:\n continue\n\n tmp = 0\n idx = i\n while idx not in visit:\n visit.add(idx)\n tmp += 1\n idx = nums[idx]\n\n ret = max(ret, tmp)\n\n return ret\n\n\ns = Solution()\nprint(s.arrayNesting([5, 4, 0, 3, 1, 6, 2]))\n","sub_path":"leetcode/algorithm/array-nesting.py","file_name":"array-nesting.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"349456496","text":"import pygame as pg\nfrom Teil_25_Vektor import Vec, pol2cart\nimport math\n\n\ndef fib(n):\n if n < 2:\n return n\n return fib(n-1) + fib(n-2)\n\n\n\ndef golden_spiral(n):\n ausgl = 
Vec(22, 23)\n G=(1+5**.5)/2\n w=int(G**(4*(n//4)))\n k=math.pi/180\n return [Vec(G**(j/90)*math.cos(j*k)-w/2,G**(j/90)*math.sin(j*k)-w/2)*3+ausgl+offset for j in range(n*90)]\n\n\npg.init()\nSCALE = 20\nauflösung = Vec(1000, 1000)\nscreen = pg.display.set_mode(auflösung)\noffset = auflösung / 3 \n\nweitermachen = True\nclock = pg.time.Clock()\nboxes = [Vec(0,0), Vec(-1,0), Vec(-1,1), Vec(1,0), Vec(-1,-5), Vec(-9,-5), Vec(-9,3), Vec(4,-5)]\npoints = golden_spiral(8)\n\n\nwhile weitermachen:\n clock.tick(40)\n for ereignis in pg.event.get():\n if ereignis.type == pg.QUIT:\n weitermachen = False\n for n,b in enumerate(boxes):\n size = fib(n+1)*SCALE\n pg.draw.rect(screen, pg.Color('grey'), (b*SCALE+offset, (size, size)), 3)\n pg.draw.lines(screen,pg.Color('red'), False, points, 7)\n \n pg.display.update()\n\npg.quit()\n","sub_path":"Teil_xx_Finbonacci2.py","file_name":"Teil_xx_Finbonacci2.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"516455420","text":"from math import sqrt\n\n\ndef get_answer():\n \"\"\"\n It is mainly a brute force method, though one optimization was recognizing that the first underscore\n in the number 1_2_3_4_5_6_7_8_9_0 must be 0 (its square root must be a multiple of 10, which means\n its square must be a multiple of 100). This reduces the search density by a factor of 10.\n :return: The unique number whose square is of the form 1_2_3_4_5_6_7_8_9_0.\n \"\"\"\n for i in range(int(sqrt(1_92_93_94_95_96_97_98_99_90 // 100)),\n int(sqrt(1_02_03_04_05_06_07_08_09_00 // 100)),\n -1):\n if check((i * i)):\n return i * 10\n\n\ndef check(number):\n for digit in range(9, 0, -1):\n if number % 10 != digit:\n return False\n number //= 100\n return True\n","sub_path":"project_euler/solutions/problem206.py","file_name":"problem206.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"19609073","text":"\"\"\"\nRegex postprocess rst output\n\"\"\"\nimport sys\nimport re\n\nhat = \"\"\"\nPython API\n==========\n_\n++++++++++\n_\n??????????\n_\n~~~~~~~~~~\n\n\"\"\"\n\n\ndef join_lines(text: str, indent: int):\n indentation = ''.join([' ' for i in range(indent)])\n return re.sub(\n r'((?:\\n|^){dent}\\*\\*[^\\r\\n]+)\\r?\\n{dent}([^\\s\\r\\n][^\\r\\n]*\\*\\*\\r?\\n)'.format(dent=indentation),\n lambda m: '{} {}'.format(m.group(1), m.group(2)),\n text\n )\n\n\ndef to_header(text: str, indent: int):\n \"\"\"\n ``======`` denotes top header\n ``------`` denotes smallest header\n \"\"\"\n indentation = ''.join([' ' for i in range(indent)])\n return re.sub(\n r'(?:\\n|^){dent}\\*\\*([^\\*\\n\\(\\)]+[^\\n\\(\\)]*)\\(([^\\n]*?)\\)\\*\\*\\r?\\n'.format(dent=indentation),\n lambda m: '\\n\\n======\\n\\n{} (*{}*)\\n------\\n'.format(m.group(1), m.group(2).replace('*', '\\\\*')),\n text\n )\n\n\ndef attr_rep(text):\n return re.sub(r'\\n(.*?) 
: (.*?)\\r?\\n', # [^\\S\\r\\n]+\n lambda m: '\\n{} (*{}*)\\n------\\n'.format(m.group(1), m.group(2).replace('*', '\\\\*')),\n text)\n\ndef rep(text):\n # fix parameters tables on GitHub:\n text = text.replace('\\n * ', '\\n * ')\n # make broken bold lines whole again:\n text = join_lines(join_lines(text, 0), 3)\n # bold lines to smallest header, plus ==== above:\n text = to_header(to_header(text, 0), 3)\n # remove 2nd quotes:\n text = text.replace('\\n ', '\\n')\n # before and after 2nd ====:\n m = re.search(r'(.*?\\n===[=]+\\r?\\n.*?\\n===[=]+\\r?\\n)(.*)', text, re.DOTALL)\n text = attr_rep(m.group(1)) + m.group(2)\n #\n text = text.replace('class shortcutter.base', 'class shortcutter')\n return hat + text\n\ndef main():\n sys.stdout.write(rep(sys.stdin.read()))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docs/rst_fix.py","file_name":"rst_fix.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"28321812","text":"\nimport matplotlib.patches as patch\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom scipy import linalg\nfrom numpy import poly1d\nfrom sklearn import svm\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport glob\nimport sys\nimport os\n### matplotlib inline\n### precision 4\nplt.style.use('ggplot')\nnp.set_printoptions(suppress=True)\n# let see how to create a multi dimentional Array with Numpy\na = np.zeros((2, 3, 4))\n#l = [[[ 0., 0., 0., 0.],\n # [ 0., 0., 0., 0.],\n # [ 0., 0., 0., 0.]],\n # [[ 0., 0., 0., 0.],\n # [ 0., 0., 0., 0.],\n # [ 0., 0., 0., 0.]]]\nprint(a)\nprint(a.shape)\n\n# Declaring Vectors\n\nx = [1, 2, 3]\ny = [4, 5, 6]\n\nprint(type(x))\n\n# This does'nt give the vector addition.\nprint(x + y)\n\n# Vector addition using Numpy\n\nz = np.add(x, y)\nprint(z)\nprint(type(z))\n\n# Vector Cross Product\nmul = np.cross(x, y)\nprint(mul)\n# initializing matrices \nx = np.array([[1, 2], [4, 5]]) \ny = np.array([[7, 8], [9, 10]])\n# using add() to add matrices \nprint (\"The element wise addition of matrix is : \") \nprint (np.add(x,y)) \n# using subtract() to subtract matrices \nprint (\"The element wise subtraction of matrix is : \") \nprint (np.subtract(x,y)) \n# using divide() to divide matrices \nprint (\"The element wise division of matrix is : \") \nprint (np.divide(x,y)) \n# using multiply() to multiply matrices element wise \nprint (\"The element wise multiplication of matrix is : \") \nprint (np.multiply(x,y))\nx = [1, 2, 3]\ny = [4, 5, 6]\nnp.cross(x, y)\nx = np.array([1, 2, 3, 4])\ny = np.array([5, 6, 7, 8])\nprint(\"x:\", x)\nprint(\"y:\", y)\nnp.dot(x, y)\nnp.dot(y, x)\nprint(\"x:\", x)\nx.shape = (4, 1)\nprint(\"xT:\", x)\nprint(\"y:\", y)\ny.shape = (4, 1)\nprint(\"yT:\", y)\nx = np.array([1, 2, 3, 4])\ny = np.array([5, 6, 7, 8])\nprint(\"x:\", x)\nprint(\"y:\", y)\nprint(\"xT:\", x.T)\nprint(\"yT:\", y.T)\nx = np.array([[1, 2, 3, 4]])\ny = np.array([[5, 6, 7, 8]])\nprint(\"x:\", x)\nprint(\"y:\", y)\nprint(\"xT:\", x.T)\nprint(\"yT:\", y.T)\n\nprint(\"x:\", x)\nprint(\"y:\", y.T)\nnp.dot(x, y.T)\nprint(\"x:\", x.T)\nprint(\"y:\", y)\nnp.dot(y, x.T)\nnp.dot(y, x.T)[0][0]\nx = np.array([[1, 2, 3, 4]])\nprint(\"x:\", x)\nprint(\"xT:\", np.reshape(x, (4, 1)))\nprint(\"xT:\", x.T)\nprint(\"xT:\", x.transpose())\nx = np.array([[1, 2, 3, 4]])\ny = np.array([[5, 6, 7, 8]])\nx.T * y\nnp.outer(x, y)\nx = np.array([1, 2, 3, 4])\ny = np.array([5, 6, 7, 8])\nnp.outer(x, y)\na = np.array([[ 5, 1 ,3], [ 1, 1 ,1], 
[ 1, 2 ,1]])\nb = np.array([1, 2, 3])\nprint (a.dot(b))\nA = np.array([[4, 5, 6],\n              [7, 8, 9]])\nx = np.array([1, 2, 3])\nA.dot(x)\na = [[1, 0], [0, 1]]\nb = [[4, 1], [2, 2]]\nnp.matmul(a, b)\nmatrix1 = np.matrix(a)\nmatrix2 = np.matrix(b)\nmatrix1 + matrix2\nmatrix1 - matrix2\nnp.dot(matrix1, matrix2)\n\nmatrix1 * matrix2\nnp.identity(3)\nidenty = np.array([[21, 5, 7],[9, 8, 16]])\nprint(\"identy:\", identy)\nidenty.shape\nnp.identity(identy.shape[1], dtype=\"int\")\nnp.identity(identy.shape[0], dtype=\"int\")\ninverse = np.linalg.inv(matrix1)\nprint(inverse)\nimport numpy as np\nA = np.array([[0, 1, 2, 3],\n              [4, 5, 6, 7],\n              [8, 9, 10, 11],\n              [12, 13, 14, 15]])\nnp.diag(A)\nnp.diag(A, k=1)\nnp.diag(A, k=-1)\na = np.array([[1, 2], [3, 4]])\na\na.transpose()\nN = 100\nb = np.random.randint(-2000, 2001, size=(N, N))  # randint's upper bound is exclusive; the deprecated random_integers was inclusive\nb_symm = (b + b.T)/2\nnp.trace(np.eye(3))\nprint(np.trace(matrix1))\ndet = np.linalg.det(matrix1)\nprint(det)\nv = np.array([1,2,3,4])\nnp.median(v)\n# How to find linearly independent rows from a matrix\nmatrix = np.array(\n    [\n        [0, 1 ,0 ,0],\n        [0, 0, 1, 0],\n        [0, 1, 1, 0],\n        [1, 0, 0, 1]\n    ])\n\nlambdas, V = np.linalg.eig(matrix.T)\n# The linearly dependent row vectors \nprint (matrix[lambdas == 0,:])\nimport numpy as np\nprint(\"np.arange(9):\", np.arange(9))\nprint(\"np.arange(9, 18):\", np.arange(9, 18))\nA = np.arange(9, 18).reshape((3, 3))\nB = np.arange(9).reshape((3, 3))\nprint(\"A:\", A)\nprint(\"B:\", B)\nA + B\nA - B\nx = np.array([[1,2],[3,4]]) \ny = np.linalg.inv(x) \nprint (x )\nprint (y )\nprint (np.dot(x,y))\n## based on https://stackoverflow.com/questions/38426349/how-to-create-random-orthonormal-matrix-in-python-numpy\ndef rvs(dim=3):\n    random_state = np.random\n    H = np.eye(dim)\n    D = np.ones((dim,))\n    for n in range(1, dim):\n        x = random_state.normal(size=(dim-n+1,))\n        D[n-1] = np.sign(x[0])\n        x[0] -= D[n-1]*np.sqrt((x*x).sum())\n        # Householder transformation\n        Hx = (np.eye(dim-n+1) - 2.*np.outer(x, x)/(x*x).sum())\n        mat = np.eye(dim)\n        mat[n-1:, n-1:] = Hx\n        H = np.dot(H, mat)\n    # Fix the last sign such that the determinant is 1\n    D[-1] = (-1)**(1-(dim % 2))*D.prod()\n    # Equivalent to np.dot(np.diag(D), H) but faster, apparently\n    H = (D*H.T).T\n    return H\nfrom scipy.linalg import null_space\nA = np.array([[1, 1], [1, 1]])\nns = null_space(A)\nns * np.sign(ns[0,0]) # Remove the sign ambiguity of the vector\na = np.array([[1, 2], [3, 4]])\nnp.linalg.det(a)\n# credits: https://www.tensorflow.org/api_docs/python/tf/Variable\nA = tf.Variable(np.zeros((5, 5), dtype=np.float32), trainable=False)\nnew_part = tf.ones((2,3))\nupdate_A = A[2:4,2:5].assign(new_part)\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run()\nprint(update_A.eval())\n##based on this address: https://stackoverflow.com/questions/46511017/plot-hyperplane-linear-svm-python\nnp.random.seed(0)\nX = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]\nY = [0] * 20 + [1] * 20\n\nfig, ax = plt.subplots()\nclf2 = svm.LinearSVC(C=1).fit(X, Y)\n\n# get the separating hyperplane\nw = clf2.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(-5, 5)\nyy = a * xx - (clf2.intercept_[0]) / w[1]\n\n# create a mesh to plot in\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx2, yy2 = np.meshgrid(np.arange(x_min, x_max, .2),\n                       np.arange(y_min, y_max, .2))\nZ = clf2.predict(np.c_[xx2.ravel(), yy2.ravel()])\n\nZ = Z.reshape(xx2.shape)\nax.contourf(xx2, yy2, Z, cmap=plt.cm.coolwarm, alpha=0.3)\nax.scatter(X[:, 0], X[:, 
1], c=Y, cmap=plt.cm.coolwarm, s=25)\nax.plot(xx,yy)\n\nax.axis([x_min, x_max,y_min, y_max])\nplt.show()\nnp.mgrid[0:5,0:5]\na=np.array([1,2,3])\nb=np.array([(1+5j,2j,3j), (4j,5j,6j)])\nc=np.array([[(1.5,2,3), (4,5,6)], [(3,2,1), (4,5,6)]])\nnp.transpose(b)\nb.flatten()\nnp.hsplit(c,2)\np=poly1d([3,4,5])\np","sub_path":"sources/linear-algebra-for-data-scientists.py","file_name":"linear-algebra-for-data-scientists.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"498469690","text":"import os\nimport torch\nimport torchvision\nimport argparse\nimport time\nimport numpy as np\n\nfrom torch.utils.tensorboard import SummaryWriter\n\napex = False\ntry:\n from apex import amp\n apex = True\nexcept ImportError:\n print(\n \"Install the apex package from https://www.github.com/nvidia/apex to use fp16 for training\"\n )\n\nfrom model import load_model, save_model\nfrom modules import NT_Xent\nfrom modules.sync_batchnorm import convert_model\nfrom modules import TransformsSimCLR\nfrom utils import post_config_hook\nfrom msidata.dataset_msi import PreProcessedMSIDataset as dataset_msi\nfrom msidata.dataset_tcga_tiles import TiledTCGADataset as dataset_tcga\n\n#### pass configuration\nfrom experiment import ex\n\ndef train_simclr(args, train_loader, model, criterion, optimizer, writer):\n loss_epoch = 0\n t0=time.time()\n\n t_port=0\n t_model=0\n t_criterion=0\n t_optimize=0\n t_data=0\n total_time = 0 \n\n for step, ((x_i, x_j), _, _, _, _) in enumerate(train_loader):\n t1=time.time()\n\n optimizer.zero_grad()\n x_i = x_i.to(args.device)\n x_j = x_j.to(args.device)\n\n t2=time.time()\n\n # positive pair, with encoding\n h_i, z_i = model(x_i)\n h_j, z_j = model(x_j)\n\n t3=time.time()\n\n loss = criterion(z_i, z_j)\n\n t4=time.time()\n\n if apex and args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n optimizer.step()\n\n t5=time.time()\n \n t_port+=t2-t1\n t_model+=t3-t2\n t_criterion+=t4-t3\n t_optimize+=t5-t4\n t_data+=t1-t0\n total_time += t5-t0\n\n\n if step % 50 == 0:\n print(f\"{time.ctime()} | Step [{step}/{len(train_loader)}]\\t Loss: {loss.item()}\")\n print(f\"Total: {total_time} \\t port: {np.sum(t_port)/total_time} \\t model: {np.sum(t_model)/total_time} \\t criterion: {np.sum(t_criterion)/total_time} \\t optimize: {np.sum(t_optimize)/total_time} \\t data: {np.sum(t_data)/total_time}\")\n\n writer.add_scalar(\"Loss/train_epoch\", loss.item(), args.global_step)\n loss_epoch += loss.item()\n args.global_step += 1\n\n\n\n t0=time.time()\n \n return loss_epoch\n\ndef train_byol(args, train_loader, model, criterion, optimizer, writer, gradient_accumulation_target_batch_size=4096):\n if 'byol_gradient_accumulation_target_batch_size' in vars(args).keys():\n gradient_accumulation_target_batch_size = args.byol_gradient_accumulation_target_batch_size\n\n loss_epoch = 0\n loss_accumulation = 0\n t_port=0\n t_model=0\n t_data=0\n total_time=0\n t0=time.time()\n \n accumulate_gradient_steps = gradient_accumulation_target_batch_size / train_loader.batch_size\n\n print(f\"Training BYOL! 
Batch size of {train_loader.batch_size}, being accumulated to a virtual batch size of {gradient_accumulation_target_batch_size}\")\n\n\n for step, ((x_i, x_j), _, _, _, _) in enumerate(train_loader):\n # augmentations are done within the model\n # loss is computed within the model\n t1 = time.time()\n\n x_i = x_i.to(args.device)\n x_j = x_j.to(args.device)\n\n t2=time.time()\n\n loss = model(image_one=x_i, image_two=x_j)\n loss.backward()\n\n loss_accumulation += loss.cpu().item()\n loss_epoch += loss.cpu().item()\n\n\n if (step+1) % accumulate_gradient_steps == 0:\n optimizer.step()\n model.update_moving_average()\n optimizer.zero_grad()\n\n t3=time.time()\n\n t_data += t1-t0\n t_port += t2-t1\n t_model += t3-t2\n total_time += t3-t0\n\n mean_accumulated_loss = (loss_accumulation / accumulate_gradient_steps)\n loss_accumulation = 0\n if ((step+1) % (accumulate_gradient_steps*5)) == 0:\n print(f\"{time.ctime()} | Step [{step}/{len(train_loader)}]\\t Loss: {mean_accumulated_loss}\")\n print(f\"Total: {total_time} \\t port: {t_port/total_time} \\t model: {t_model/total_time} \\t data: {t_data/total_time}\")\n\n writer.add_scalar(\"Loss/train_epoch\", mean_accumulated_loss, args.global_step)\n\n args.global_step += 1\n t0=time.time()\n\n return loss_epoch\n\ndef train(args, train_loader, model, criterion, optimizer, writer):\n\n if args.unsupervised_method == 'simclr':\n train_method = train_simclr\n elif args.unsupervised_method == 'byol':\n train_method = train_byol\n else:\n raise NotImplementedError\n\n loss_epoch = train_method(args, train_loader, model, criterion, optimizer, writer)\n \n return loss_epoch\n\n\n@ex.automain\ndef main(_run, _log):\n args = argparse.Namespace(**_run.config)\n args = post_config_hook(args, _run)\n\n if torch.cuda.is_available():\n print(\"--- USING GPU ---\")\n args.device=torch.device(\"cuda:0\")\n else:\n print(\"--- USING CPU ----\")\n args.device=torch.device('cpu')\n args.n_gpu = torch.cuda.device_count()\n\n root = \"./datasets\"\n\n train_sampler = None\n\n transform = TransformsSimCLR(size=224) # if args.unsupervised_method == 'simclr' else None\n # When transform = None, the dataloader will retrieve only a single image that is not transformed, as this will be done inside BYOL\n\n if args.dataset == \"STL10\":\n train_dataset = torchvision.datasets.STL10(\n root, split=\"unlabeled\", download=True, transform=TransformsSimCLR(size=96)\n )\n elif args.dataset == \"CIFAR10\":\n train_dataset = torchvision.datasets.CIFAR10(\n root, download=True, transform=TransformsSimCLR(size=32)\n )\n elif args.dataset == 'msi-kather':\n train_dataset = dataset_msi(root_dir=args.path_to_msi_data, transform=transform, data_fraction=args.data_pretrain_fraction)\n\n elif args.dataset == 'msi-tcga' or args.dataset == 'basis':\n args.data_pretrain_fraction=1 \n assert ('.csv' in args.path_to_msi_data), \"Please provide the tcga .csv file in path_to_msi_data\"\n assert (args.root_dir_for_tcga_tiles), \"Please provide the root dir for the tcga tiles\"\n train_dataset = dataset_tcga(\n args=args, \n csv_file=args.path_to_msi_data, \n root_dir=args.root_dir_for_tcga_tiles, \n transform=transform,\n split_num=args.kfold,\n label=None,\n split='train',\n dataset=args.dataset) \n else:\n raise NotImplementedError\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=args.batch_size,\n shuffle=(train_sampler is None),\n drop_last=True,\n num_workers=args.workers,\n sampler=train_sampler,\n )\n\n if args.unsupervised_method=='byol':\n # Backbone is a 
reference to the network being used and updated. We should save the state of this network\n model, optimizer, scheduler, backbone, _ = load_model(args, reload_model=args.reload_model, model_type=args.unsupervised_method)\n \n # Criterion is defined within BYOL\n criterion = None\n else:\n model, optimizer, scheduler = load_model(args, reload_model=args.reload_model, model_type=args.unsupervised_method)\n criterion = NT_Xent(args.batch_size, args.temperature, args.device)\n\n \n if 'use_multi_gpu' not in vars(args).keys():\n args.use_multi_gpu=False\n if args.n_gpu > 1 and args.use_multi_gpu:\n model = torch.nn.DataParallel(model)\n model = convert_model(model)\n print(f\"Using {args.n_gpu} GPUs\")\n #TODO Check the batch size.. are we only training with 32 total so 8 per GPU? That's veeeery few.\n\n model = model.to(args.device)\n\n print(model)\n\n tb_dir = os.path.join(args.out_dir, _run.experiment_info[\"name\"])\n os.makedirs(tb_dir)\n writer = SummaryWriter(log_dir=tb_dir)\n \n\n args.global_step = 0\n args.current_epoch = 0\n for epoch in range(args.start_epoch, args.epochs):\n lr = optimizer.param_groups[0]['lr']\n loss_epoch = train(args, train_loader, model, criterion, optimizer, writer)\n\n if scheduler:\n scheduler.step()\n\n if epoch % args.save_each_epochs == 0:\n if args.unsupervised_method == \"simclr\":\n # Save entire model\n save_model(args, model, optimizer)\n elif args.unsupervised_method == \"byol\":\n # Save only the resnet backbone\n save_model(args, backbone, optimizer)\n\n\n writer.add_scalar(\"Loss/train\", loss_epoch / len(train_loader), epoch)\n writer.add_scalar(\"Misc/learning_rate\", lr, epoch)\n print(\n f\"Epoch [{epoch}/{args.epochs}]\\t Loss: {loss_epoch / len(train_loader)}\\t lr: {round(lr, 5)}\"\n )\n args.current_epoch += 1\n\n ## end training\n if args.unsupervised_method == \"simclr\":\n # Save entire model\n save_model(args, model, optimizer)\n elif args.unsupervised_method == \"byol\":\n # Save only the resnet backbone\n save_model(args, backbone, optimizer)\n","sub_path":"feature_learning/main_unsupervised.py","file_name":"main_unsupervised.py","file_ext":"py","file_size_in_byte":9145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"240639956","text":"# gravity_subroutines.py\n# Created: February 21st, 2020\n\n\"\"\"\nImplementations of fortran subroutines found in: Blakely, R.J., 1996. Potential Theory in Gravity and Magnetic Applications. Potential Theory in Gravity and Magnetic Applications, by Richard J. Blakely, pp. 461. ISBN 0521575478. Cambridge, UK: Cambridge University Press, September 1996.\n\nRefer to the above citation for details. 
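Distances appear to be expected in kilometres and densities in kg/m^3, with results returned in mGal; this is consistent with the km2m and si2mg conversion factors applied in the routines below.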
\n\n\"\"\"\n\nimport sys \nimport numpy as np \nfrom scipy.integrate import tplquad\n\ndef gbox(x0, y0, z0, x1, y1, z1, x2, y2, z2, rho):\n gamma = 6.670e-11 \n twopi = 2 * np.pi \n km2m = 1e3\n si2mg = 1e5 \n x = [x0 - x1, x0 - x2]\n y = [y0 - y1, y0 - y2]\n z = [z0 - z1, z0 - z2]\n res = 0\n for i in range(2):\n for j in range(2):\n for k in range(2):\n rijk = np.sqrt(x[i]**2 + y[j]**2 + z[k]**2)\n ijk = (-1)**(i+1) * (-1)**(j+1) * (-1)**(k+1)\n arg1 = np.arctan2(x[i] * y[j], z[k] * rijk)\n if arg1 < 0: \n arg1 += twopi\n arg2 = rijk + y[j]\n arg3 = rijk + x[i]\n arg2 = np.log(arg2)\n arg3 = np.log(arg3)\n res += ijk * (z[k] * arg1 - x[i] * arg2 - y[j] * arg3)\n g = rho * gamma * res * si2mg * km2m\n return g\n\ndef gbox_by_integration(x0, y0, z0, x1, y1, z1, x2, y2, z2, rho):\n gamma = 6.670e-11 \n km2m = 1e3\n si2mg = 1e5 \n f = lambda z, y, x: (z0-z) / ((x0 - x)**2 + (y0-y)**2 + (z0 - z)**2)**(3/2)\n arg1 = tplquad(f, x1, x2, lambda x: y1, lambda x: y2, lambda x, y: z1, lambda x, y: z2)[0]\n g = -1 * rho * gamma * arg1 * si2mg * km2m\n return g\n\ndef gpoly(x0, z0, xcorn, zcorn, rho):\n gamma = 6.670e-11 \n km2m = 1e3\n si2mg = 1e5\n xcorn.append(xcorn[0])\n zcorn.append(zcorn[0])\n res = 0\n for i in range(len(xcorn) - 1):\n x1 = xcorn[i] - x0 \n z1 = zcorn[i] - z0\n x2 = xcorn[i+1] - x0 \n z2 = zcorn[i+1] - z0\n r1sq = x1**2 + z1**2 \n r2sq = x2**2 + z2**2\n denom = z2 - z1 \n if denom == 0:\n denom = 1e-6 \n alpha = (x2 - x1) / denom \n beta = (x1 * z2 - x2 * z1) / denom \n factor = beta / (1 + alpha**2)\n term1 = 0.5 * (np.log(r2sq) - np.log(r1sq))\n term2 = np.arctan2(z2, x2) - np.arctan2(z1, x1)\n res += factor * (term1 - alpha * term2)\n g = 2 * rho * gamma * res * si2mg * km2m\n return g\n\n","sub_path":"gravity_subroutines.py","file_name":"gravity_subroutines.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215858365","text":"from player_class import Player\nimport random\nimport sys\n\ndef game_intro():\n '''intro the game and get the player's name'''\n \n # insert the background and story to the game here, as well as any other\n # info related to controls or something\n \n return input(\"Welcome player! What is your name? 
\\n\")\n\ndef roll_base_stats():\n '''get the base stats for the player; these are the stats that will be used to\n calculate all other stats'''\n\n print()\n file = open(\"art/dice.txt\", \"r\")\n print(file.read())\n print()\n print(\"Here, we're going to roll for base stats!\")\n print(\"Enter 1 when you're done rolling!\")\n print(\"Be careful, you only get 10 tries and then you're stuck with what you get!!\")\n done = \"\"\n print()\n count = 0\n # loop until they type 1\n while(not (done == \"1\") and count < 10):\n #pick from 4-10 randomly (odds of higher numbers probably should be a little lower)\n STR = random.randint(4, 10) \n DEX = random.randint(4, 10)\n INT = random.randint(4, 10)\n LUK = random.randint(4, 10)\n done = input(\"\\rSTR: \" + str(STR) + \" DEX: \" + str(DEX) + \" INT: \" + str(INT) + \" LUK: \"\\\n + str(LUK) + \"\\n\")\n count += 1\n # I want to know how to print on top of something else, haven't figured it out yet\n\n return {'STR' : STR, 'DEX' : DEX, 'INT' : INT, 'LUK' : LUK }\n\ndef calculate_stats(base_stats):\n '''calculate all the stats that are based on our base stats; so far this is\n only DEF and maxHP, could add magic dmg, mana, speed, dodge, accuracy'''\n\n return {'DEF': int(base_stats['STR']/4 + base_stats['LUK']/5), 'maxHP': int(10 + base_stats['STR']/2)}\n\n","sub_path":"intro_methods.py","file_name":"intro_methods.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"105665127","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom os.path import join as pjoin\n\nbaseline_name = 'o3-4-issue.csv'\nbaseline = pd.read_csv(f'./data/{baseline_name}', index_col=0)\n\nresult = None\nfor f in os.listdir('./data'):\n if f.endswith('.csv') and f != baseline_name:\n path = pjoin('./data', f)\n ipcs = pd.read_csv(path, index_col=0)\n print(f)\n if result is None:\n result = pd.DataFrame(ipcs.values / baseline.values,\n columns=[f], index=baseline.index)\n else:\n result.loc[:, f] = pd.Series((ipcs.values / baseline.values)[:, 0],\n index=baseline.index, )\n\nresult.loc['mean'] = result.values.mean(axis=0)\nresult.to_csv('./results/compare.csv', float_format='%.3f')\nprint(result)\n","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"555278836","text":"import os\n\nfrom pants.backend.jvm.repository import Repository\nfrom pants.base.build_file_aliases import BuildFileAliases\nfrom rtluckie.monolith.pants.python.monolith.read_contents import read_contents_factory\nfrom rtluckie.monolith.pants.python.monolith.remote_python_thrift_fileset import (\n RemotePythonThriftFileset)\nfrom rtluckie.monolith.pants.python.monolith.version import Version\n\n\npublic_repo = Repository(name='public',\n url='http://repo1.maven.org/maven2',\n push_db_basedir=os.path.join('build-support', 'commons', 'ivy', 'pushdb'))\n\n\ndef build_file_aliases():\n return BuildFileAliases.create(\n objects={\n 'monolith_version': Version('src/python/rtluckie/monolith/VERSION').version,\n 'VERSION': Version(os.path.join(os.getcwd(), 'VERSION')).version,\n 'public': public_repo, # key 'public' must match name='public' above)\n },\n context_aware_object_factories={\n 'read_contents': read_contents_factory,\n 'remote_python_thrift_fileset': RemotePythonThriftFileset.factory,\n 
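# context-aware factories are invoked with the BUILD file's parse context when referenced,\n            # unlike the plain object aliases registered above\n        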
})\n","sub_path":"pants-plugins/src/python/rtluckie/monolith/pants/python/monolith/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459140838","text":"# 78. 子集\n\n# 给定一组不含重复元素的整数数组 nums,返回该数组所有可能的子集(幂集)。\n\n# 说明:解集不能包含重复的子集。\n# 示例:\n\n# 输入: nums = [1,2,3]\n# 输出:\n# [\n# [3],\n# [1],\n# [2],\n# [1,2,3],\n# [1,3],\n# [2,3],\n# [1,2],\n# []\n# ]\n\n\nclass Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n result = [[]]\n for i in xrange(len(nums)):\n size = len(result)\n for j in xrange(size):\n result.append(list(result[j]))\n result[-1].append(nums[i])\n return result\n","sub_path":"Python/078.py","file_name":"078.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"161866742","text":"\"\"\"Visualize all perturbations and levels for a given image.\"\"\"\n\nimport numpy as np\n\nfrom PIL import Image\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nfrom constants import LEVELS, PERTURBATIONS\n\n\nFIG_SIZE = (12, 8)\n\n\ndef visualize(img, perturbations, levels, show):\n \"\"\"Visualize all specified perturbations and levels for a given image.\n\n Args:\n img (Image): PIL image for which to plot perturbations\n perturbations (dict): maps name (str) -> mapping function (function)\n levels (list): list of each level (int) to plot\n show (bool): whether to show the final figure\n\n Returns:\n (None)\n\n \"\"\"\n # Workaround to allow code to run without display attached\n import matplotlib\n if not show:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n\n out_dir = Path('output')\n out_dir.mkdir(exist_ok=True, parents=True)\n\n # Don't visualize identity\n if 'identity' in perturbations:\n perturbations = perturbations.copy()\n del perturbations['identity']\n fig, ax = plt.subplots(nrows=len(levels),\n ncols=len(perturbations),\n gridspec_kw={'wspace': 0, 'hspace': 0},\n figsize=FIG_SIZE)\n names = sorted(list(perturbations.keys()))\n for iname, name in enumerate(names):\n print('Generating visuals for \"%s\"...' 
% name)\n mapping_fn = perturbations[name]\n for ilevel, level in enumerate(levels):\n perturbed_img = mapping_fn(level, img)\n perturbed_img_name = '%s_%d.png' % (name, level)\n perturbed_img.save(out_dir / perturbed_img_name)\n ax[ilevel, iname].imshow(perturbed_img)\n # Remove tick marks\n ax[ilevel, iname].set_yticklabels([])\n ax[ilevel, iname].set_xticklabels([])\n ax[ilevel, iname].tick_params(axis='both',\n which='both',\n length=0)\n # Plot vertical and horizontal titles\n if not iname:\n ax[ilevel, iname].set_ylabel('Level %d' % level,\n rotation=90,\n size='large')\n if not ilevel:\n ax[ilevel, iname].set_title(name)\n if show:\n plt.show()\n plt.savefig(out_dir / 'visualization.png')\n\n\ndef parse_script_args():\n \"\"\"Parse command line arguments.\n\n Returns:\n args (Namespace): Parsed command line arguments\n\n \"\"\"\n parser = ArgumentParser()\n\n parser.add_argument('--img_path', type=str,\n default='test_images/xray.jpg',\n help='Path to image to visualize')\n\n parser.add_argument('--no_show', action='store_true',\n help='Whether to not show the final figure.')\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_script_args()\n np.random.seed(0)\n src_img = Image.open(args.img_path)\n visualize(src_img, PERTURBATIONS, LEVELS, not args.no_show)\n","sub_path":"transforms/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"581121806","text":"from __future__ import division\n\nimport csv\nimport re\nfrom collections.abc import Mapping, Sequence\n\nimport natsort\nfrom docutils import nodes\nfrom docutils.parsers.rst.states import Body\n\nfrom docutils.parsers.rst import Directive, directives\nfrom docutils.parsers.rst.directives import unchanged_required, unchanged\nfrom six import StringIO\n\nfrom added_value.common_options import NAME_OPTION, CLASS_OPTION\nfrom added_value.grammatical_conjunctions import list_conjunction\nfrom added_value.importer import import_object\nfrom added_value.invoke import parse_call, resolve_attr\nfrom added_value.multisort import asc, dec, as_is\nfrom added_value.non_string_iterable import NonStringIterable\n\nASCENDING = \"asc\"\nDECENDING = \"dec\"\nAS_IS = \"as-is\"\n\nBULLET_LIST_TYPE = \"bullet\"\nENUMERATED_LIST_TYPE = \"enumerated\"\nDEFINITION_LIST_TYPE = \"definition\"\n\nLIST_TYPES_OPTION = \"list-types\"\nKEY_FORMATS_OPTION = \"key-formats\"\nSORT_ORDERS_OPTION = \"sort-orders\"\nINTERNAL_FORMATS_OPTION = \"internal-formats\"\nLEAF_FORMAT_OPTION = \"leaf-format\"\nORDINAL_BASES_OPTION = \"ordinal-bases\"\n\n_natural = natsort.natsort_keygen()\n\nENUMERATOR_PATTERN = Body.patterns[\"enumerator\"]\n\nTEXT_SORT_ORDERS = {ASCENDING: asc(_natural), DECENDING: dec(_natural), AS_IS: as_is()}\n\nSORT_ORDERS = {ASCENDING: asc(), DECENDING: dec(), AS_IS: as_is()}\n\nDEFAULT_KEY_FORMATS = {\n BULLET_LIST_TYPE: \"*\",\n ENUMERATED_LIST_TYPE: \"1.\",\n DEFINITION_LIST_TYPE: \"{k}\",\n}\n\nDEFAULT_INTERNAL_FORMAT = object()\n\nDEFAULT_INTERNAL_FORMATS = {\n BULLET_LIST_TYPE: DEFAULT_INTERNAL_FORMAT,\n ENUMERATED_LIST_TYPE: DEFAULT_INTERNAL_FORMAT,\n DEFINITION_LIST_TYPE: DEFAULT_INTERNAL_FORMAT,\n}\n\nDEFAULT_SORT_ORDERS = {\n BULLET_LIST_TYPE: AS_IS,\n ENUMERATED_LIST_TYPE: AS_IS,\n DEFINITION_LIST_TYPE: AS_IS,\n}\n\nDEFAULT_ORDINAL_BASE = 1\n\n\nclass ItemsListDirective(Directive):\n \"\"\"Format a data structure as nested lists.\n \"\"\"\n\n required_arguments = 1\n 
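# the single required argument is the dotted path of the Python object to render (parsed in run() below)\n    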
has_content = False\n option_spec = {\n LIST_TYPES_OPTION: unchanged_required,\n SORT_ORDERS_OPTION: unchanged,\n KEY_FORMATS_OPTION: unchanged,\n INTERNAL_FORMATS_OPTION: unchanged,\n LEAF_FORMAT_OPTION: unchanged,\n ORDINAL_BASES_OPTION: unchanged,\n CLASS_OPTION: directives.class_option,\n NAME_OPTION: unchanged,\n }\n\n @property\n def key_formats(self):\n if KEY_FORMATS_OPTION not in self.options:\n return None\n formats = self.options[KEY_FORMATS_OPTION]\n formats_stream = StringIO(formats)\n reader = csv.reader(\n formats_stream, delimiter=\",\", quotechar='\"', skipinitialspace=True, doublequote=True\n )\n formats_row = next(reader)\n stripped_formats = [cell.strip() for cell in formats_row]\n return stripped_formats\n\n @property\n def internal_formats(self):\n if INTERNAL_FORMATS_OPTION not in self.options:\n return None\n formats = self.options[INTERNAL_FORMATS_OPTION]\n formats_stream = StringIO(formats)\n reader = csv.reader(\n formats_stream, delimiter=\",\", quotechar='\"', skipinitialspace=True, doublequote=True\n )\n formats_row = next(reader)\n stripped_formats = [cell.strip() for cell in formats_row]\n return stripped_formats\n\n @property\n def leaf_format(self):\n return self.options.get(LEAF_FORMAT_OPTION, \"{v}\")\n\n @property\n def list_types(self):\n text = self.options.get(LIST_TYPES_OPTION, \"bullet\")\n try:\n types = list(map(lambda s: s.strip().lower(), filter(None, text.split(\",\"))))\n except ValueError:\n raise self.error(\"Could not interpret option {} {!r}\".format(LIST_TYPES_OPTION, text))\n if not types:\n return None\n\n for type in types:\n if type not in self.LIST_TYPES:\n raise self.error(\n \"Could not interpret option {} {!r}. Items must each be one of {}\".format(\n LIST_TYPES_OPTION,\n text,\n list_conjunction([repr(k) for k in self.LIST_TYPES.keys()], \"or\"),\n )\n )\n return types\n\n @property\n def sort_orders(self):\n if SORT_ORDERS_OPTION not in self.options:\n return None\n text = self.options[SORT_ORDERS_OPTION]\n try:\n orders = list(map(lambda s: s.strip(), filter(None, text.split(\",\"))))\n except ValueError:\n raise self.error(\"Could not interpret option {} {!r}\".format(SORT_ORDERS_OPTION, text))\n\n for order in orders:\n if order not in TEXT_SORT_ORDERS:\n raise self.error(\n \"Could not interpret option {} {!r}. 
Items must each be one of {}\".format(\n SORT_ORDERS_OPTION,\n text,\n list_conjunction([repr(k) for k in TEXT_SORT_ORDERS.keys()], \"or\"),\n )\n )\n return orders\n\n @property\n def ordinal_bases(self):\n if ORDINAL_BASES_OPTION not in self.options:\n return None\n text = self.options[ORDINAL_BASES_OPTION]\n try:\n ordinals = list(map(lambda s: int(s.strip()), filter(None, text.split(\",\"))))\n except ValueError:\n raise self.error(\n \"Could not interpret option {} {!r}\".format(ORDINAL_BASES_OPTION, text)\n )\n return ordinals\n\n @staticmethod\n def is_leaf_list(inner_list_types):\n return len(inner_list_types) == 0\n\n def render_child_nodes(\n self,\n obj,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n ):\n if self.is_leaf_list(inner_list_types):\n child_ordinals, child_keys, child_values, child_nodes = self.render_leaves(\n obj, sort_order, ordinal_base\n )\n else:\n child_ordinals, child_keys, child_values, child_nodes = self.render_nested_list(\n obj,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n )\n return list(child_ordinals), list(child_keys), list(child_values), list(child_nodes)\n\n def render_nested_list(\n self,\n obj,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n ):\n sort_key = SORT_ORDERS[sort_order]\n\n if isinstance(obj, Mapping):\n # TODO: Consider doing something different here!\n sorted_keys = sorted(obj.keys(), key=sort_key.func, reverse=sort_key.reverse)\n sorted_values = [obj[key] for key in sorted_keys]\n else:\n\n sorted_items = sorted(\n enumerate(obj, start=0), # Original position\n key=lambda item: sort_key.func(item[1]),\n reverse=sort_key.reverse,\n )\n sorted_keys, sorted_values = zip(*sorted_items)\n\n item_nodes = [\n (\n ordinal,\n key,\n value,\n self.render_list(\n value,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n ),\n )\n for ordinal, (key, value) in enumerate(\n zip(sorted_keys, sorted_values), start=ordinal_base\n )\n ]\n return zip(*item_nodes)\n\n def render_leaves(self, obj, sort_order, ordinal_base):\n if isinstance(obj, Mapping):\n key_value_pairs = obj.items()\n elif isinstance(obj, Sequence):\n key_value_pairs = enumerate(obj, start=0)\n else:\n key_value_pairs = enumerate(list(obj), start=0)\n\n sort_key = TEXT_SORT_ORDERS[sort_order]\n\n # TODO: Allow specification of the sort key\n sorted_items = sorted(\n key_value_pairs, key=lambda item: sort_key.func(item[1]), reverse=sort_key.reverse\n )\n\n item_nodes = [\n (\n ordinal,\n key,\n value,\n nodes.paragraph(text=self.leaf_format.format(o=ordinal, k=key, v=value)),\n )\n for ordinal, (key, value) in enumerate(sorted_items, start=ordinal_base)\n ]\n return zip(*item_nodes)\n\n def add_global_attributes(self, list_node):\n list_node[\"classes\"] += self.options.get(\"class\", [])\n if \"align\" in self.options:\n list_node[\"align\"] = self.options.get(\"align\")\n self.add_name(list_node)\n\n def render_list(\n self, obj, list_types, key_formats, internal_formats, sort_orders, ordinal_bases\n ):\n if not isinstance(obj, NonStringIterable):\n raise self.error(\"Cannot make a list from object {!r}\".format(obj))\n\n list_type, *inner_list_types = list_types\n key_format, *inner_key_formats = key_formats\n internal_format, 
*inner_internal_formats = internal_formats\n sort_order, *inner_sort_orders = sort_orders\n ordinal_base, *inner_ordinal_bases = ordinal_bases\n\n if internal_format is DEFAULT_INTERNAL_FORMAT:\n internal_format = \"{k}\" if isinstance(obj, Mapping) else \"\"\n\n list_renderer = self.LIST_TYPES[list_type]\n\n list_node = list_renderer(\n self,\n obj,\n key_format,\n internal_format,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n )\n\n self.add_global_attributes(list_node)\n return list_node\n\n def render_bullet_list(\n self,\n obj,\n key_format,\n internal_format,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n ):\n if key_format not in {\"*\", \"+\", \"-\", \"•\", \"‣\", \"⁃\"}:\n raise self.error(\n \"Invalid {} {} for {}\".format(KEY_FORMATS_OPTION, key_format, BULLET_LIST_TYPE)\n )\n\n list_node = nodes.bullet_list()\n list_node[\"bullet\"] = key_format\n\n child_ordinals, child_keys, child_values, child_nodes = self.render_child_nodes(\n obj,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n )\n\n for ordinal, key, value, child_node in zip(\n child_ordinals, child_keys, child_values, child_nodes\n ):\n list_item_node = nodes.list_item()\n\n header_text = internal_format.format(o=ordinal, k=key, v=value)\n if header_text and not header_text.isspace():\n list_item_node += nodes.paragraph(text=header_text)\n\n list_node += list_item_node\n list_item_node += child_node\n return list_node\n\n def render_enumerated_list(\n self,\n obj,\n key_format,\n internal_format,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n ):\n m = re.match(ENUMERATOR_PATTERN, key_format)\n if m is None:\n raise self.error(\n \"Invalid {} {} for {}\".format(KEY_FORMATS_OPTION, key_format, \"bullet\")\n )\n\n # We instantiate this docutils state-machine state just so we leverage methods defined on it\n # which allows us to maintain consistency with the core docutils behaviour\n body = Body(None)\n\n fmt, sequence, text, start = body.parse_enumerator(m)\n\n list_node = nodes.enumerated_list()\n\n if sequence == \"#\":\n list_node[\"enumtype\"] = \"arabic\"\n else:\n list_node[\"enumtype\"] = sequence\n\n list_node[\"prefix\"] = body.enum.formatinfo[fmt].prefix\n list_node[\"suffix\"] = body.enum.formatinfo[fmt].suffix\n\n if start != 1:\n list_node[\"start\"] = start\n\n child_ordinals, child_keys, child_values, child_nodes = self.render_child_nodes(\n obj,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_formats,\n inner_sort_orders,\n inner_ordinal_bases,\n )\n\n list_node[\"bullet\"] = key_format\n for ordinal, key, value, child_node in zip(\n child_ordinals, child_keys, child_values, child_nodes\n ):\n list_item_node = nodes.list_item()\n\n header_text = internal_format.format(o=ordinal, k=key, v=value)\n if header_text and not header_text.isspace():\n list_item_node += nodes.paragraph(text=header_text)\n\n list_node += list_item_node\n list_item_node += child_node\n return list_node\n\n def render_definition_list(\n self,\n obj,\n key_format,\n internal_format,\n sort_order,\n ordinal_base,\n inner_list_types,\n inner_key_formats,\n inner_internal_format,\n inner_sort_orders,\n inner_ordinal_bases,\n 
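# each inner_* sequence carries the per-level settings for the remaining nesting\n            # depths, one leading element having been consumed per level in render_list\n    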
):\n        if not isinstance(obj, Mapping):\n            raise self.error(\n                \"List with {} {!r} cannot represent object of type {!r} because it is not a {!r}.\".format(\n                    LIST_TYPES_OPTION, DEFINITION_LIST_TYPE, type(obj).__name__, Mapping.__name__\n                )\n            )\n\n        child_ordinals, child_keys, child_values, child_nodes = self.render_child_nodes(\n            obj,\n            sort_order,\n            ordinal_base,\n            inner_list_types,\n            inner_key_formats,\n            inner_internal_formats,\n            inner_sort_orders,\n            inner_ordinal_bases,\n        )\n\n        term_nodes = [\n            nodes.term(\"\", \"\", nodes.Text(key_format.format(o=ordinal, k=key, v=value)))\n            for ordinal, key, value in zip(child_ordinals, child_keys, child_values)\n        ]\n\n        list_node = nodes.definition_list()\n        for term_node, child_node in zip(term_nodes, child_nodes):\n            definition_node = nodes.definition()\n            definition_node += child_node\n\n            item_node = nodes.definition_list_item(\"\", term_node, definition_node)\n            list_node += item_node\n        return list_node\n\n    LIST_TYPES = {\n        BULLET_LIST_TYPE: render_bullet_list,\n        ENUMERATED_LIST_TYPE: render_enumerated_list,\n        DEFINITION_LIST_TYPE: render_definition_list,\n    }\n\n    def run(self):\n        name = self.arguments[0]\n        attribute_name, args = parse_call(name)\n        attr, prefixed_name = import_object(attribute_name, context=self)\n        obj = resolve_attr(attr, args)\n\n        list_types = self.list_types\n\n        key_formats = self.key_formats or [\n            DEFAULT_KEY_FORMATS[list_type] for list_type in self.list_types\n        ]\n        internal_formats = self.internal_formats or [\n            DEFAULT_INTERNAL_FORMATS[list_type] for list_type in self.list_types\n        ]\n        sort_orders = self.sort_orders or [\n            DEFAULT_SORT_ORDERS[list_type] for list_type in self.list_types\n        ]\n        ordinal_bases = self.ordinal_bases or [\n            DEFAULT_ORDINAL_BASE for list_type in self.list_types\n        ]\n        if not (len(self.list_types) == len(key_formats) == len(sort_orders) == len(ordinal_bases)):\n            raise self.error(\n                \"{}, {}, {}, and {} do not all have the same number of items ({} directive)\".format(\n                    LIST_TYPES_OPTION,\n                    KEY_FORMATS_OPTION,\n                    SORT_ORDERS_OPTION,\n                    ORDINAL_BASES_OPTION,\n                    self.name,\n                )\n            )\n\n        list_node = self.render_list(\n            obj, list_types, key_formats, internal_formats, sort_orders, ordinal_bases\n        )\n\n        return [list_node]\n","sub_path":"source/added_value/items_list_directive.py","file_name":"items_list_directive.py","file_ext":"py","file_size_in_byte":16589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"640291676","text":"class Solution:\n    def longestSubstringWithoutDuplication(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        # sliding window approach\n        i = 0\n        maxlen = 0\n        last_seen = {}\n        for j in range(len(s)):\n            if s[j] in last_seen and last_seen[s[j]] > i:\n                i = last_seen[s[j]]\n            maxlen = max(maxlen, j - i + 1)\n            last_seen[s[j]] = j + 1\n        return maxlen\n","sub_path":"剑指offer/Week05/最长不含重复字符的子字符串/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"650386685","text":"# -*- coding: utf-8 -*-\n\nimport config\nfrom metagraph.processor import *\n\nclass Loader:\n\n    \"\"\"\n    Class for loading scenarios and verifying that they loaded\n    \"\"\"\n\n    example_vertex_name_1 = \"Example 1\"\n    example_vertex_name_2 = \"Example 2\"\n\n    @staticmethod\n    def load():\n        \"\"\"\n        Load the test data into the model\n        \"\"\"\n        metagraphModelConfig = MetagraphModelConfig('test_db', 'test_collection')\n        proc = MetagraphModelProcessor(metagraphModelConfig)\n        proc.drop_db()\n        
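# drop_db() wipes the test database first, so each run loads the example graph below into an empty collection\n        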
Loader.load_examples(proc)\n\n    @staticmethod\n    def load_examples(proc: MetagraphModelProcessor):\n        privet = proc.create_vertex('Greeting')\n        privet_call = proc.create_vertex('Good afternoon, [name]')\n        privet.add_vertices(privet_call)\n\n        predl = proc.create_vertex('Offer')\n        predl_call_1 = proc.create_vertex('Greetings from CompanyName.')\n        predl_call_2 = proc.create_vertex('My name is Arthas. I am calling to notify you about changes to the rate plans')\n        predl_call_3 = proc.create_vertex('Right now, for all mobile subscribers '\n                                          'who pay over 600 rubles, we provide home internet '\n                                          'for free, provided the building is connected to the CompanyName network')\n        predl_call_4 = proc.create_vertex(' I have your residential address listed as «Street, house». Is the address correct?')\n        predl.add_vertices(predl_call_1, predl_call_2, predl_call_3, predl_call_4)\n        predl_edge_1 = proc.create_edge(\"1\", predl_call_1, predl_call_2)\n        predl_edge_2 = proc.create_edge(\"1\", predl_call_2, predl_call_3)\n        predl_edge_3 = proc.create_edge(\"1\", predl_call_3, predl_call_4)\n        predl.add_edges(predl_edge_1, predl_edge_2, predl_edge_3)\n\n        predl_accept = proc.create_vertex('we have a killer rate plan for you')\n        predl_decl = proc.create_vertex('Please give your current address – the street and house number.')\n        poka_call = proc.create_vertex('Goodbye, [name]')\n\n        poka = proc.create_vertex('Farewell')\n        poka.add_vertices(poka_call)\n\n        otvet_priv = proc.create_edge('Good afternoon.', privet_call, predl_call_1)\n        otvet_da = proc.create_edge('Yes', predl_call_4, predl_accept)\n        otvet_net = proc.create_edge('No', predl_call_4, predl_decl)\n        poka_edge_1 = proc.create_edge('Bye', predl_accept, poka_call)\n        poka_edge = proc.create_edge('Bye', predl_decl, poka_call)\n\n        situation_1 = proc.create_vertex('Test script')\n        situation_1.add_vertices(privet, predl, predl_accept, predl_decl, poka)\n        situation_1.add_edges(otvet_da, otvet_net, otvet_priv, poka_edge, poka_edge_1)\n        situation_1.save()\n\n    @staticmethod\n    def check_load():\n        \"\"\"\n        Verify that the data loaded\n        \"\"\"\n        level = 0\n        separator = \"*\" * 100\n        metagraphModelConfig = MetagraphModelConfig('test_db', 'test_collection')\n        proc = MetagraphModelProcessor(metagraphModelConfig)\n        proc.first_vertex_by_name('Test script').print_recursive(level)\n\n\nif __name__ == '__main__':\n    Loader.load()\n    Loader.check_load()","sub_path":"tests/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"409755565","text":"import smtplib\nimport time\nimport imaplib\nimport email\nimport base64\nimport os\nimport time\nimport pusherdemo as pysher\nfrom random import randint\n# import PyPDF2 \nimport re\nimport json\nimport pdftotext\n\n\n\nORG_EMAIL = \"@gmail.com\"\nFROM_EMAIL = \"recauxdev@gmail.com\"\nFROM_PWD = \"yrryhgnabnclmokj\"\nSMTP_SERVER = \"imap.gmail.com\"\nSMTP_PORT = 993\n\n# -------------------------------------------------\n#\n# Utility to read email from Gmail Using Python\n#\n# ------------------------------------------------\n\n#print(email_message.keys())\n\nmail = imaplib.IMAP4_SSL('imap.gmail.com')\n\nmail.login(FROM_EMAIL, FROM_PWD)\nls = mail.list()\ni=0\nlatest_email_id = b'15' #id_list[8]\nwhile True:\n    i=i+1\n    print(i,\" th itr\")\n    mail.select(\"inbox\") \n\n    skfile = open(\"./config/skills.json\",\"r\")\n\n    data = json.load(skfile)\n\n    skarr = []\n    for item in data['skills']:\n        # print(item[\"label\"])\n        
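# a minimal equivalent of this loop as a comprehension, assuming skills.json is shaped {\"skills\": [{\"label\": ...}, ...]}:\n        #     skarr = [item[\"label\"] for item in data[\"skills\"]]\n        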
skarr.append(item[\"label\"])\n\n\n print(skarr)\n\n result, data = mail.search(None, \"ALL\")\n \n ids = data[0] \n id_list = ids.split() \n if ids.split()[-1] == latest_email_id:\n print(\"no new emails\",ids)\n time.sleep(5)\n else:\n print(\"have new emails\")\n latest_email_id =ids.split()[-1]\n \n\n print(type(latest_email_id))\n \n result, data = mail.fetch(latest_email_id, \"(RFC822)\") # fetch the email body (RFC822) for the given ID\n\n raw_email = data[0][1]\n\n print(type(raw_email))\n\n email_message = email.message_from_bytes(raw_email)\n \n print(\"to -\",email_message['To'])\n \n print(\"from -\",email.utils.parseaddr(email_message['From'])) # for parsing \"Yuji Tomita\" \n \n #print(email_message)\n #print (email_message.items() )# print all headers\n buffer= \"\"\n print(\"type - \",email_message.get_content_maintype())\n\n maintype = email_message.get_content_maintype()\n # att_path = os.path.join(detach_dir, filename)\n print (\"[\"+email_message[\"From\"]+\"] :\" + email_message[\"Subject\"])\n detach_dir = \"./assets/cv/\"\n # we use walk to create a generator so we can iterate on the parts and forget about the recursive headach\n for part in email_message.walk():\n # multipart are just containers, so we skip them\n if part.get_content_maintype() == 'multipart':\n continue\n\n # is this part an attachment ?\n if part.get('Content-Disposition') is None:\n continue\n\n filename_old = part.get_filename()\n fileext = filename_old.split(\".\")[-1]\n\n sender_name = email.utils.parseaddr(email_message['From'])[0]\n sender_email = email.utils.parseaddr(email_message['From'])[1]\n print(\"emai id - \",latest_email_id.decode(\"utf-8\") )\n filename = sender_email +\"-\"+ latest_email_id.decode(\"utf-8\")+\".\"+fileext\n \n att_path = os.path.join(detach_dir, filename)\n print(\"sendr name name - \",sender_email)\n #Check if its already there\n \n #finally write the stuff\n print(\"writing.....\")\n fp = open(att_path, 'wb+')\n fp.write(part.get_payload(decode=True))\n fp.close()\n\n \n pdfFileObj = open(att_path, 'rb') \n \n # creating a pdf reader object \n # pdfReader = PyPDF2.PdfFileReader(pdfFileObj) \n\n pdfReader = pdftotext.PDF(pdfFileObj)\n \n # printing number of pages in pdf file \n print(len(pdfReader)) \n \n # creating a page object \n # pageObj = pdfReader[0] \n # pdftext = pageObj.extractText() \n pdftext = \"\\n\\n\".join(pdfReader)\n print(\"pdf text\")\n # print(pdftext)\n # extracting text from page \n# [\"react\",\"angular\",\"html\",\"javascript\",\"php\",\"xml\",\"css\",\"json\",\"sql\"]\n skillset = skarr \n print(skarr) \n x=list()\n for item in skillset:\n regx = re.findall(item, pdftext,re.I|re.M|re.U)\n if(regx !=[]):\n x.append(item)\n\n jobspec = email_message[\"Subject\"]\n print(jobspec)\n\n print(x) \n pysher.new_emqail_trigger(filename,sender_email,sender_name,x,jobspec)\n\n time.sleep(5)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# print(type(latest_email_id))\n \n# result, data = mail.fetch(latest_email_id, \"(RFC822)\") # fetch the email body (RFC822) for the given ID\n\n# raw_email = data[0][1]\n\n# print(type(raw_email))\n\n# email_message = email.message_from_bytes(raw_email)\n \n# print(\"to -\",email_message['To'])\n \n# print(\"from -\",email.utils.parseaddr(email_message['From'])) # for parsing \"Yuji Tomita\" \n \n# #print(email_message)\n# #print (email_message.items() )# print all headers\n# buffer= \"\"\n# print(\"type - \",email_message.get_content_maintype())\n\n# maintype = 
email_message.get_content_maintype()\n\n# print (\"[\"+email_message[\"From\"]+\"] :\" + email_message[\"Subject\"])\n# detach_dir = '.'\n# # we use walk to create a generator so we can iterate on the parts and forget about the recursive headach\n# for part in email_message.walk():\n# # multipart are just containers, so we skip them\n# if part.get_content_maintype() == 'multipart':\n# continue\n\n# # is this part an attachment ?\n# if part.get('Content-Disposition') is None:\n# continue\n\n# filename_old = part.get_filename()\n# fileext = filename_old.split(\".\")[-1]\n# print(\"file name - \",fileext)\n# filename = email_message[\"From\"] + \"_hw1ans[[[[[[werweweewe.\"+fileext\n \n# att_path = os.path.join(detach_dir, filename)\n\n# #Check if its already there\n# if os.path.isfile(att_path) :\n# # finally write the stuff\n# print(\"writing.....\")\n# fp = open(att_path, 'wb')\n# fp.write(part.get_payload(decode=True))\n# fp.close()\n\n\n\n# if maintype == 'multipart':\n\n# print(\"********************multiaprt***********************\")\n# # temp = email_message.get_payload()[0]\n# # temp2 = temp.get_payload()[0]\n\n# temp = email_message.get_payload()[0]\n\n# encodingtype = temp['Content-Transfer-Encoding']\n\n# if encodingtype == \"base64\" :\n# print(\"base 64 \")\n# b64dec = base64.b64decode(temp.get_payload())\n# print(\"base64 decode - \",b64dec)\n# elif encodingtype == None:\n# print(\"none\")\n# temp2 = temp.get_payload()[0]#get text\n# print(\" no encoding - \",(temp2))\n# else:\n# print(\" err - encoding \")\n# print(\" 7bit - \",email_message.get_payload()[1].get_payload()[0])\n\n# #temp2 = temp.get_payload()\n# # for ll in temp:\n# # print(\" pp- \",type(ll))\n# print(\"enc - 1 \",temp['Content-Transfer-Encoding'])\n \n# # print(\"temp type\",type(temp))\n# #print(\"temp2 type\",type(temp2))\n# # b64dec = base64.b64decode(temp.get_payload())\n# # print(\"base64 decode - \",b64dec)\n# #print(\"temp str -\",temp2)\n# #print(\"temp = \",(temp2.get_payload()))\n# # print(\"temp2 = type @ index 0 \",type(temp2))\n# # print(\"temp = payload \",(temp.get_payload()[0]))\n\n\n# elif maintype == 'text':\n\n# print(\"*******************text************************\")\n# print(\"temp = \",type(email_message.get_payload()))\n# print(email_message.get_payload())\n\n# if maintype == 'multipart':\n# for part in email_message.get_payload():\n# if part.get_content_maintype() == 'text':\n# buffer = buffer + part.get_payload()\n# elif maintype == 'text':\n# buffer = buffer + email_message.get_payload()\n\n\n\n# print(\"*******************************************\")\n# print(type(email_message.get_payload()[0]))\n# temp = email_message.get_payload()[0]\n# print(\"temp = \",type(temp.get_payload()))\n# print(\"temp = \",(temp.get_payload()))\n# outu =\"\"\n\n# print(\"base64 - ************************** \", base64.b64decode(temp.get_payload()))\n \n# note that if you want to get text content (body) and the email contains\n# multiple payloads (plaintext/ html), you must parse each message separately.\n# use something like the following: (taken from a stackoverflow post)\n# def get_first_text_block(email_message_instance):\n# maintype = email_message_instance.get_content_maintype()\n# if maintype == 'multipart':\n# for part in email_message_instance.get_payload():\n# if part.get_content_maintype() == 'text':\n# return part.get_payload()\n# elif maintype == 'text':\n# return email_message_instance.get_payload()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# def read_email_from_gmail():\n# try:\n# mail = 
imaplib.IMAP4_SSL(SMTP_SERVER)\n# mail.login(FROM_EMAIL,FROM_PWD)\n# mail.select('inbox')\n\n# type, data = mail.search(None, 'ALL')\n# mail_ids = data[0]\n\n# id_list = mail_ids.split() \n# first_email_id = int(id_list[0])\n# latest_email_id = int(id_list[-1])\n\n\n# for i in range(latest_email_id,first_email_id, -1):\n# typ, data = mail.fetch(i, '(RFC822)' )\n\n# for response_part in data:\n# if isinstance(response_part, tuple):\n# msg = email.message_from_string(response_part[1])\n# email_subject = msg['subject']\n# email_from = msg['from']\n# print ('From : ' + email_from + '\\n')\n# print ('Subject : ' + email_subject + '\\n')\n\n# except (Exception, e):\n# print (str(e))\n\n# read_email_from_gmail()","sub_path":"python-email-new.py","file_name":"python-email-new.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"631382222","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.model_zoo as model_zoo\n\nfrom torch.autograd import Variable\nfrom copy import deepcopy\nfrom collections import Counter\nfrom torchvision.models.inception import InceptionA, InceptionB, \\\n InceptionC, InceptionD, InceptionE, BasicConv2d, InceptionAux\n\nfrom optimizers.adamnormgrad import AdamNormGrad\nfrom datasets.loader import get_loader\nfrom datasets.utils import GenericLoader, label_offset_merger, simple_merger\n\nfrom .utils import float_type, check_or_create_dir, num_samples_in_loader\nfrom .metrics import softmax_accuracy\nfrom .layers import View, Identity, flatten_layers, EarlyStopping, \\\n BWtoRGB, build_conv_encoder\n\n\ndef build_optimizer(model, args):\n optim_map = {\n \"rmsprop\" : optim.RMSprop,\n \"adam\" : optim.Adam,\n \"adamnorm\": AdamNormGrad,\n \"adadelta\": optim.Adadelta,\n \"sgd\" : optim.SGD,\n \"lbfgs\" : optim.LBFGS\n }\n return optim_map[args.optimizer](model.parameters(), lr=args.lr)\n\n\ndef train(epoch, model, optimizer, data_loader, args):\n model.train()\n for batch_idx, (data, target) in enumerate(data_loader.train_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n\n data, target = Variable(data), Variable(target)\n if len(list(target.size())) > 1: #XXX: hax\n target = torch.squeeze(target)\n\n optimizer.zero_grad()\n\n # project to the output dimension\n output, _ = model(data)\n loss = model.loss_function(output, target)\n correct = softmax_accuracy(output, target)\n\n # compute loss\n loss.backward()\n optimizer.step()\n\n # log every nth interval\n if batch_idx % args.log_interval == 0:\n # the total number of samples is different\n # if we have filtered using the class_sampler\n if hasattr(data_loader.train_loader, \"sampler\") \\\n and hasattr(data_loader.train_loader.sampler, \"num_samples\"):\n num_samples = data_loader.train_loader.sampler.num_samples\n else:\n num_samples = len(data_loader.train_loader.dataset)\n\n print('[FID]Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tAccuracy: {:.4f}'.format(\n epoch, batch_idx * len(data) , num_samples,\n 100. 
* batch_idx * len(data) / num_samples,\n loss.data.item(), correct\n )\n )\n\n\ndef test(epoch, model, data_loader, args):\n model.eval()\n loss, correct, num_samples = [], [], 0\n\n for data, target in data_loader.test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n\n with torch.no_grad():\n data, target = Variable(data), Variable(target)\n if len(list(target.size())) > 1: #XXX: hax\n target = torch.squeeze(target)\n\n output, _ = model(data)\n loss_t = model.loss_function(output, target)\n correct_t = softmax_accuracy(output, target)\n\n loss.append(loss_t.detach().cpu().item())\n correct.append(correct_t)\n num_samples += data.size(0)\n\n loss = np.mean(loss)\n acc = np.mean(correct)\n print('\\n[FID {} samples]Test Epoch: {}\\tAverage loss: {:.4f}\\tAverage Accuracy: {:.4f}\\n'.format(\n num_samples, epoch, loss, acc)\n )\n return loss, acc\n\n\ndef train_fid_model(args, fid_type='conv', batch_size=32):\n ''' builds and trains a classifier '''\n loader = get_loader(args)\n if isinstance(loader, list): # has a sequential loader\n loader = simple_merger(loader, batch_size, args.cuda)\n\n # debug prints\n print(\"[FID] train = {} | test = {} | output_classes = {}\".format(\n num_samples_in_loader(loader.train_loader),\n num_samples_in_loader(loader.test_loader),\n loader.output_size\n ))\n\n model = FID(loader.img_shp, loader.output_size, batch_size=batch_size,\n fid_type=fid_type, kwargs=vars(args))\n if not model.model_exists:\n optimizer = build_optimizer(model, args)\n early_stop = EarlyStopping(model, max_steps=50)\n\n for epoch in range(1, args.epochs + 1):\n train(epoch, model, optimizer, loader, args)\n loss, _ = test(epoch, model, loader, args)\n if early_stop(loss):\n early_stop.restore()\n break\n\n # save the model\n model.save()\n\n # test one final time to check accuracy .\n # this is useful to validate loaded models\n # Doesn't make sense for pretrained inceptionv3\n if fid_type == 'conv':\n test(epoch=-1, model=model, data_loader=loader, args=args)\n\n del loader # force cleanup\n return model\n\nclass InceptionV3UptoPool3(nn.Module):\n def __init__(self, num_classes=1000, transform_input=True):\n super(InceptionV3UptoPool3, self).__init__()\n self.transform_input = transform_input\n self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)\n self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)\n self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)\n self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)\n self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)\n self.Mixed_5b = InceptionA(192, pool_features=32)\n self.Mixed_5c = InceptionA(256, pool_features=64)\n self.Mixed_5d = InceptionA(288, pool_features=64)\n self.Mixed_6a = InceptionB(288)\n self.Mixed_6b = InceptionC(768, channels_7x7=128)\n self.Mixed_6c = InceptionC(768, channels_7x7=160)\n self.Mixed_6d = InceptionC(768, channels_7x7=160)\n self.Mixed_6e = InceptionC(768, channels_7x7=192)\n self.AuxLogits = InceptionAux(768, num_classes)\n self.Mixed_7a = InceptionD(768)\n self.Mixed_7b = InceptionE(1280)\n self.Mixed_7c = InceptionE(2048)\n self.fc = nn.Linear(2048, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n stddev = m.stddev if hasattr(m, 'stddev') else 0.1\n X = stats.truncnorm(-2, 2, scale=stddev)\n values = torch.Tensor(X.rvs(m.weight.data.numel()))\n values = values.view(m.weight.data.size())\n m.weight.data.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n 
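# identity affine init for BatchNorm layers (weight=1, bias=0), mirroring torchvision's Inception-v3 setup\n                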
m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n if self.transform_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n # x # 299 x 299 x 3\n x = self.Conv2d_1a_3x3(x) # 149 x 149 x 32\n x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32\n x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64\n x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80\n x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192\n x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192\n x = self.Mixed_5b(x) # 35 x 35 x 256\n x = self.Mixed_5c(x) # 35 x 35 x 288\n x = self.Mixed_5d(x) # 35 x 35 x 288\n x = self.Mixed_6a(x) # 17 x 17 x 768\n x = self.Mixed_6b(x) # 17 x 17 x 768\n x = self.Mixed_6c(x) # 17 x 17 x 768\n x = self.Mixed_6d(x) # 17 x 17 x 768\n x = self.Mixed_6e(x) # 17 x 17 x 768\n x = self.Mixed_7a(x) # 8 x 8 x 1280\n x = self.Mixed_7b(x) # 8 x 8 x 2048\n x = self.Mixed_7c(x) # 8 x 8 x 2048\n x_pool2d = F.avg_pool2d(x, kernel_size=8) # 1 x 1 x 2048\n x = F.dropout(x_pool2d, training=self.training, inplace=False) # 1 x 1 x 2048\n x = x.view(x.size(0), -1) # 2048\n x = self.fc(x) # 1000 (num_classes)\n return x, x_pool2d\n\n\nclass ConvFID(nn.Module):\n def __init__(self, input_shape, output_size):\n super(ConvFID, self).__init__()\n self.input_shape = input_shape\n self.output_size = output_size\n model = build_conv_encoder(self.input_shape, self.output_size, normalization_str=\"batchnorm\")\n self.first_section = model[0:-4] # extract a feature layer\n self.second_section = model[-4:]\n\n def forward(self, x):\n batch_size = x.size(0)\n features = self.first_section(x)\n return self.second_section(features).squeeze(), features\n\n\nclass FID(nn.Module):\n def __init__(self, input_shape, output_size, batch_size, fid_type='inceptionv3', **kwargs):\n super(FID, self).__init__()\n assert fid_type in ('inceptionv3', 'conv')\n\n self.input_shape = input_shape\n self.output_size = output_size\n self.batch_size = batch_size\n self.fid_type = fid_type\n self.is_color = input_shape[0] > 1\n self.chans = 3 if self.is_color else 1\n\n # grab the meta config\n self.config = kwargs['kwargs']\n\n # build the encoder and decoder\n self.model = self._build_inception()\n self.model_exists = self.load()\n\n def _build_inception(self):\n if self.fid_type == 'inceptionv3':\n print(\"compiling inception_v3 FID model\")\n model = nn.Sequential(\n BWtoRGB(),\n nn.Upsample(size=[299, 299], mode='bilinear'),\n InceptionV3UptoPool3()\n )\n else:\n print(\"compiling standard convnet FID model\")\n model = ConvFID(self.input_shape, self.output_size)\n\n # push to multi-gpu\n if self.config['ngpu'] > 1:\n model = nn.DataParallel(model)\n\n # push to cuda\n if self.config['cuda']:\n model.cuda()\n\n return model\n\n def load(self):\n # load the FID model if it exists\n if self.fid_type == 'inceptionv3':\n # load the state dict from the zoo\n model_url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'\n self.model[-1].load_state_dict(model_zoo.load_url(model_url))\n print(\"successfully loaded inceptionv3\")\n return True\n else:\n if os.path.isdir(self.config['fid_model_dir']):\n model_filename = os.path.join(self.config['fid_model_dir'], self.get_name() + \".th\")\n if os.path.isfile(model_filename):\n print(\"loading existing FID model\")\n self.load_state_dict(torch.load(model_filename))\n return True\n\n return 
False\n\n def save(self, overwrite=False):\n # save the FID model if it doesnt exist\n check_or_create_dir(self.config['fid_model_dir'])\n model_filename = os.path.join(self.config['fid_model_dir'], self.get_name() + \".th\")\n if not os.path.isfile(model_filename) or overwrite:\n print(\"saving existing FID model\")\n torch.save(self.state_dict(), model_filename)\n\n def get_name(self):\n full_hash_str = \"_type{}_input{}_output{}_batch{}_lr{}_ngpu{}\".format(\n str(self.fid_type),\n str(self.input_shape),\n str(self.output_size),\n str(self.batch_size),\n str(self.config['lr']),\n str(self.config['ngpu'])\n )\n\n # cleanup symbols that would cause filename issues\n full_hash_str = full_hash_str.strip().lower()\n full_hash_str = full_hash_str.replace(',', '_')\n for s in \"[] {}:()'\":\n full_hash_str = full_hash_str.replace(s, '')\n\n return 'fid_{}{}'.format(FID._clean_task_str(str(self.config['task'])), full_hash_str)\n\n @staticmethod\n def _clean_task_str(task_str):\n ''' helper to reduce string length.\n eg: mnist+svhn+mnist --> mnist2svhn1 '''\n result_str = ''\n if '+' in task_str:\n splits = Counter(task_str.split('+'))\n for k, v in splits.items():\n result_str += '{}{}'.format(k, v)\n\n return result_str\n\n return task_str\n\n\n def loss_function(self, pred, target):\n return F.cross_entropy(pred, target)\n\n def forward(self, x):\n output, feat = self.model(x)\n return output, feat\n","sub_path":"code/helpers/fid.py","file_name":"fid.py","file_ext":"py","file_size_in_byte":13421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133032540","text":"import numpy as np\nimport cv2\nimport math\nimport sys\nfrom ImageStitcher import *\n\n############################################################\n#\n# Image Stitching\n#\n############################################################\n\n# 1. 
load panorama images\ncarPanoR = cv2.imread('images/pano3.jpg', 1)\ncarPanoM = cv2.imread('images/pano2.jpg', 1)\ncarPanoL = cv2.imread('images/pano1.jpg', 1)\n\ncityPanoR = cv2.imread('images/pano6.jpg', 1)\ncityPanoM = cv2.imread('images/pano5.jpg', 1)\ncityPanoL = cv2.imread('images/pano4.jpg', 1)\n\n# order of input images is important(from right to left)\nimageStitcherCar = ImageStitcher([carPanoR, carPanoM, carPanoL]) # list of images\n(matchlistCar, resultCar) = imageStitcherCar.stitch_to_panorama()\n\nimageStitcherCity = ImageStitcher([cityPanoR, cityPanoM, cityPanoL]) # list of images\n(matchlistCity, resultCity) = imageStitcherCity.stitch_to_panorama()\n\nif not (matchlistCar and matchlistCity):\n    print(\"We do not have enough matching keypoints to create a panorama\")\nelse:\n    # YOUR CODE HERE\n    # output all matching images\n    # output result\n    # Note: if necessary resize the image\n    for idx, out in enumerate(matchlistCar):\n        cv2.imshow(\"Match car\" + str(idx), out)\n    cv2.imshow(\"Panorama Ergebnis car\", resultCar)\n\n    for idx, out in enumerate(matchlistCity):\n        cv2.imshow(\"Match city\" + str(idx), out)\n    cv2.imshow(\"Panorama Ergebnis city\", resultCity)\n    while True:\n        key = cv2.waitKey(0) & 0xFF\n        if key == ord('q'):\n            cv2.destroyAllWindows()\n            break\n    print('end')\n","sub_path":"isy/isy-02/isy-02/02_stitching.py","file_name":"02_stitching.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"73766494","text":"import os\nimport sys\nimport time\nimport tqdm\n\nfrom IPython.display import clear_output\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom utils import get_logger, dice_coeff, dice_loss\n\n\n\ndef eval_net(net, dataset, device):\n    net.eval()\n    tot = 0.\n    with torch.no_grad():\n        for i, b in tqdm.tqdm(enumerate(dataset), total=len(dataset)):\n            imgs, true_masks = b\n            masks_pred = net(imgs.to(device)).squeeze(1) # (b, 1, h, w) -> (b, h, w)\n            masks_pred = (F.sigmoid(masks_pred) > 0.5).float()\n            tot += dice_coeff(masks_pred.cpu(), true_masks).item()\n    return tot / len(dataset)\n\ndef trainYar():\n    pass\n\n\ndef train(net, optimizer, criterion, scheduler, epochs, train_dataloader, val_dataloader, saveto, device, logger, show_plots=True):\n    since = time.time() \n    \n    num_batches = len(train_dataloader)\n    best_model_info = {'epoch': -1, 'val_dice': 0., 'train_dice': 0., 'train_loss': 0.}\n    \n    bce_history=[]\n    dice_history=[]\n    loss_history=[]\n    valDice_history=[]\n\n    for epoch in range(epochs):\n        logger.info('Starting epoch {}/{}.'.format(epoch + 1, epochs))\n        net.train()\n        if scheduler is not None:\n            scheduler.step(epoch)\n\n        epoch_loss = 0.\n        bce_epochHistory, dice_epochHistory = [], []\n        tqdm_iter = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader))\n        for i, batch in tqdm_iter:\n            imgs, true_masks = batch\n            masks_pred = net(imgs.to(device))\n            masks_probs = F.sigmoid(masks_pred)\n\n            bce_val, dice_val = criterion(masks_probs.cpu().view(-1), true_masks.view(-1))\n            loss = bce_val + dice_val\n            \n            bce_epochHistory.append(bce_val.item())\n            dice_epochHistory.append(dice_val.item())\n            epoch_loss += loss.item()\n            \n            tqdm_iter.set_description('mean loss: {:.4f}'.format(epoch_loss / (i + 1)))\n\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n            \n            if show_plots:\n                if (i+1)%40==0:\n                    clear_output(True)\n                    fig, 
ax = plt.subplots(nrows=3, ncols=2, figsize=(12, 8))\n ax[0][0].plot(bce_epochHistory, label='epoch bce loss')\n ax[0][0].set_xlabel('batch')\n ax[0][0].set_title('loss')\n ax[0][1].plot(dice_epochHistory, label='epoch dice loss')\n ax[0][1].set_xlabel('batch')\n ax[0][1].set_title('loss')\n ax[1][0].plot(bce_history, label='all bce loss')\n ax[1][0].set_xlabel('epoch')\n ax[1][0].set_title('loss')\n ax[1][1].plot(dice_history, label='all dice loss')\n ax[1][1].set_xlabel('epoch')\n ax[1][1].set_title('loss')\n ax[2][0].plot(loss_history, label='main loss (sum dice+bce)')\n ax[2][0].set_xlabel('epoch')\n ax[2][0].set_title('loss')\n ax[2][1].plot(valDice_history, label='val dice')\n ax[2][1].set_xlabel('epoch')\n ax[2][1].set_title('val dice')\n plt.legend()\n plt.show()\n \n\n logger.info('Epoch finished! Loss: {:.5f} ({:.5f} | {:.5f})'.format(epoch_loss / num_batches,\n np.mean(bce_epochHistory), np.mean(dice_epochHistory)))\n bce_history.append(np.mean(bce_epochHistory))\n dice_history.append(np.mean(dice_epochHistory))\n loss_history.append(epoch_loss / num_batches)\n\n val_dice = eval_net(net, val_dataloader, device=device)\n valDice_history.append(val_dice)\n \n if val_dice > best_model_info['val_dice']:\n best_model_info['val_dice'] = val_dice\n best_model_info['train_loss'] = epoch_loss / num_batches\n best_model_info['epoch'] = epoch\n torch.save(net.state_dict(), os.path.join(saveto, 'detectionbest.pth'))\n logger.info('Validation Dice Coeff: {:.5f} (best)'.format(val_dice))\n else:\n logger.info('Validation Dice Coeff: {:.5f} (best {:.5f})'.format(val_dice, best_model_info['val_dice']))\n\n torch.save(net.state_dict(), os.path.join(saveto, 'detectionlast.pth'))\n \n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best epoch:{:4f} val_dice:{:4f} train_loss:{:4f}'.format(best_model_info['epoch'], best_model_info['val_dice'], best_model_info['train_loss']))\n ","sub_path":"segmentation/routine.py","file_name":"routine.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"38380204","text":"class Excel:\n\n def __init__(self, H: int, W: str):\n self.data = collections.defaultdict(int)\n self.mp = collections.defaultdict(list)\n\n\n def set(self, r: int, c: str, v: int) -> None:\n if (r,c) in self.mp:\n del self.mp[r,c]\n self.data[r,c] = v\n \n\n def get(self, r: int, c: str) -> int:\n if (r,c) in self.mp:\n return self.sum_(r,c)\n return self.data[r,c]\n \n\n def sum(self, r: int, c: str, strs: List[str]) -> int:\n self.mp[r,c] = strs\n return self.sum_(r,c)\n\n def sum_(self, r, c):\n sums = 0\n for ch in self.mp[r,c]:\n ch = ch.split(':')\n if len(ch) == 1:\n row, col = int(ch[0][1:]), ch[0][0]\n sums += self.get(row, col)\n else:\n row1, col1 = int(ch[0][1:]), ch[0][0]\n row2, col2 = int(ch[1][1:]), ch[1][0]\n for row in range(row1,row2+1):\n col = col1\n while col <= col2:\n sums += self.get(row, col)\n col = chr(ord(col)+1)\n return sums\n\n\n\n \n\n\n# Your Excel object will be instantiated and called as such:\n# obj = Excel(H, W)\n# obj.set(r,c,v)\n# param_2 = obj.get(r,c)\n# param_3 = obj.sum(r,c,strs)","sub_path":"LeetCode/631. 
Design Excel Sum Formula/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"18460561","text":"import boto3\nimport collections\nimport datetime\nimport csv\nfrom time import gmtime, strftime\n# import smtplib\n# from email.MIMEMultipart import MIMEMultipart\n# from email.MIMEBase import MIMEBase\n# from email.MIMEText import MIMEText\n# from email import Encoders\nimport os\nimport sys\nimport getopt\nprofile = ''\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"p:\")\nexcept getopt.GetoptError as e:\n print (str(e))\n print(\"Usage: %s -p profile_name\" % sys.argv[0])\n sys.exit(2)\n# print 'Argument List:', str(sys.argv)\nfor opt, arg in opts:\n if opt == '-p':\n profile = str(arg)\n# print 'profile is \"', profile\ntry:\n session = boto3.session.Session(profile_name=profile)\nexcept:\n print(\"Profile %s does not exist!\" % profile)\n print(\"Usage: %s -p profile_name. for example: %s -p default\" %\n (sys.argv[0], sys.argv[0]))\n sys.exit(2)\n# EC2 connection beginning\nec = session.client('ec2')\nec2 = session.resource('ec2')\n# S3 connection beginning\n# s3 = boto3.resource('s3')\n# get to the curren date\ndate_fmt = strftime(\"%Y_%m_%d\", gmtime())\n# Give your file path\nfilepath = './PUC_AWS_Resources_' + profile + '_' + date_fmt + '.csv'\n# Give your filename\nfilename = 'PUC_AWS_Resources_' + profile + '_' + date_fmt + '.csv'\ncsv_file = open(filepath, 'w+')\n# Get your owner ID\nreservations = ec.describe_instances().get('Reservations', [])\naccount_ids = reservations[0]['OwnerId']\n\n# boto3 library ec2 API describe region page\n# http://boto3.readthedocs.org/en/latest/reference/services/ec2.html#EC2.Client.describe_regions\nregions = ec.describe_regions().get('Regions', [])\nInstancename = \"\"\ninstance_contact = \"\"\ninstance_tier = \"\"\ninstance_project = \"\"\ninstance_creator = \"\"\ninstance_security_group = \"\"\ninstance_environment = \"\"\ninstance_public_ip_address = \"\"\ninstance_private_ip_address = \"\"\ninstance_keyname = \"\"\n\n\nfor region in regions:\n reg = region['RegionName']\n regname = 'REGION :' + reg\n # EC2 connection beginning\n ec2con = session.client('ec2', region_name=reg)\n # boto3 library ec2 API describe instance page\n # http://boto3.readthedocs.org/en/latest/reference/services/ec2.html#EC2.Client.describe_instances\n reservations = ec2con.describe_instances().get(\n 'Reservations', []\n )\n instances = sum(\n [\n [i for i in r['Instances']]\n for r in reservations\n ], [])\n instanceslist = len(instances)\n if instanceslist > 0:\n csv_file.write(\"%s,%s,%s,%s,%s,%s\\n\" % ('', '', '', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('EC2 INSTANCE', regname))\n csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % ('InstanceID', 'Instance_State',\n 'InstanceName', 'Instance_Type', 'LaunchTime', 'Instance_Placement', 'VPC_Id', 'Contact', 'Environment', 'Project', 'Creator', 'Tier', 'Private IP Address', 'Public IP Address', 'Security Group', 'Key Name'))\n csv_file.flush()\n\n for instance in instances:\n state = instance['State']['Name']\n try:\n Instancename = instance['KeyName']\n except:\n Instancename = \"\"\n instancetype = \"\"\n if state == 'running':\n instanceid = instance['InstanceId']\n instancetype = instance['InstanceType']\n launchtime = instance['LaunchTime']\n Placement = instance['Placement']['AvailabilityZone']\n try:\n instance_keyname = instance['KeyName']\n except:\n 
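# 'KeyName' is absent for instances launched without a key pair, so fall back to an empty string\n                    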
instance_keyname = \"\"\n VpcId = instance['VpcId']\n instance_public_ip_address = \"\"\n if instance['PublicDnsName'] != \"\":\n instance_public_ip_address = instance['PublicIpAddress']\n instance_private_ip_address = instance['PrivateIpAddress']\n try:\n sg = instance['SecurityGroups'][0]\n except:\n instance_security_group = \"\"\n else:\n instance_security_group = \"\"\n for sg in instance['SecurityGroups']:\n instance_security_group = instance_security_group + \\\n sg['GroupName'] + \" \"\n # instance_security_group = instance[\n # 'SecurityGroups'][0]['GroupName']\n try:\n tags = instance['Tags']\n except:\n Instancename = \"\"\n instance_contact = \"\"\n instance_tier = \"\"\n instance_project = \"\"\n instance_environment = \"\"\n instance_creator = \"\"\n else:\n for tags in instance['Tags']:\n key = tags['Key']\n if key == 'Name':\n Instancename = tags['Value']\n if key == 'Contact':\n instance_contact = tags['Value']\n if key == 'Tier':\n instance_tier = tags['Value']\n if key == 'Project':\n instance_project = tags['Value']\n if key == 'Environment':\n instance_environment = tags['Value']\n if key == 'Creator':\n instance_creator = tags['Value']\n csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % (instanceid, state, Instancename, instancetype, launchtime, Placement, VpcId, instance_contact,\n instance_environment, instance_project, instance_creator, instance_tier, instance_private_ip_address, instance_public_ip_address, instance_security_group, instance_keyname))\n csv_file.flush()\n instancetype = \"\"\n instance_keyname = \"\"\n for instance in instances:\n state = instance['State']['Name']\n if state == 'stopped':\n # for tags in instance['Tags']:\n # Instancename = tags['Value']\n # key = tags['Key']\n # if key == 'Name':\n # instanceid = instance['InstanceId']\n # instancetype = instance['InstanceType']\n # launchtime = instance['LaunchTime']\n # Placement = instance['Placement']['AvailabilityZone']\n # VpcId = instance['VpcId']\n # csv_file.write(\"%s,%s,%s,%s,%s,%s,%s\\n\" % (\n # instanceid, state, Instancename, instancetype, launchtime, Placement, VpcId))\n # csv_file.flush()\n instanceid = instance['InstanceId']\n instancetype = instance['InstanceType']\n launchtime = instance['LaunchTime']\n Placement = instance['Placement']['AvailabilityZone']\n instance_keyname = instance['KeyName']\n VpcId = instance['VpcId']\n if instance['PublicDnsName'] != \"\":\n instance_public_ip_address = instacne['PublicIpAddress']\n instance_private_ip_address = instance['PrivateIpAddress']\n # try:\n # sg = instance['SecurityGroups'][0]\n # except:\n # instance_security_group = \"\"\n # if len(instance['SecurityGroups']) > 0:\n try:\n sg = instance['SecurityGroups'][0]\n except:\n instance_security_group = \"\"\n else:\n instance_security_group = \"\"\n for sg in instance['SecurityGroups']:\n instance_security_group = instance_security_group + \\\n sg['GroupName'] + \" \"\n try:\n tags = instance['Tags']\n except:\n Instancename = \"\"\n instance_contact = \"\"\n instance_tier = \"\"\n instance_project = \"\"\n instance_environment = \"\"\n instance_creator = \"\"\n else:\n for tags in instance['Tags']:\n key = tags['Key']\n if key == 'Name':\n Instancename = tags['Value']\n if key == 'Contact':\n instance_contact = tags['Value']\n if key == 'Tier':\n instance_tier = tags['Value']\n if key == 'Project':\n instance_project = tags['Value']\n if key == 'Environment':\n instance_environment = tags['Value']\n if key == 'Creator':\n instance_creator = tags['Value']\n 
csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % (instanceid, state, Instancename, instancetype, launchtime, Placement, VpcId, instance_contact,\n instance_environment, instance_project, instance_creator, instance_tier, instance_private_ip_address, instance_public_ip_address, instance_security_group, instance_keyname))\n csv_file.flush()\n# boto3 library ec2 API describe volumes page\n# http://boto3.readthedocs.org/en/latest/reference/services/ec2.html#EC2.Client.describe_volumes\nec2volumes = ec2con.describe_volumes().get('Volumes', [])\nBackup = \"\"\nSize = \"\"\n\nused_volumes = sum(\n [\n [i for i in r['Attachments']]\n for r in ec2volumes\n ], [])\nvolumeslist = len(used_volumes)\nif volumeslist > 0:\n csv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('In-Use EBS Volume', regname))\n csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" %\n ('VolumeId', 'InstanceId', 'AttachTime', 'State', 'Size', 'InstanceName', 'IOPS', 'DeleteOnTermination', 'VolumeType', 'SnapShotId', 'Backup'))\n csv_file.flush()\n\n for volume in used_volumes:\n VolumeId = volume['VolumeId']\n InstanceId = volume['InstanceId']\n State = volume['State']\n AttachTime = volume['AttachTime']\n Backup = \"\"\n v = ec2.Volume(VolumeId)\n if v.tags is not None:\n for tag in v.tags:\n if tag[\"Key\"] == 'Backup':\n Backup = tag[\"Value\"]\n Size = v.size\n IOPS = v.iops\n VolumeType = v.volume_type\n SnapShotId = v.snapshot_id\n attachments = v.attachments\n for attachment in attachments:\n if attachment[\"DeleteOnTermination\"] != \"\":\n DeleteOnTermination = attachment[\"DeleteOnTermination\"]\n # Size = volume['Size']\n Instance = ec2.Instance(InstanceId)\n for tags in Instance.tags:\n if tags[\"Key\"] == 'Name':\n InstanceName = tags[\"Value\"]\n csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" %\n (VolumeId, InstanceId, AttachTime, State, Size, InstanceName, IOPS, DeleteOnTermination, VolumeType, SnapShotId, Backup))\n csv_file.flush()\n\n # list all unused volumes\ncsv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\ncsv_file.write(\"%s,%s\\n\" % ('Avaiable EBS Volume', regname))\ncsv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" %\n ('VolumeId', 'InstanceId', 'AttachTime', 'State', 'Size', 'InstanceName', 'IOPS', 'DeleteOnTermination', 'VolumeType', 'SnapShotId', 'Backup'))\ncsv_file.flush()\ntry:\n volume = ec2volumes[0]\nexcept:\n VolumeId = \"\"\n InstanceId = \"\"\n AttachTime = \"\"\n State = \"\"\n Size = \"\"\n InstanceName = \"\"\n IOPS = \"\"\n VolumeType = \"\"\n SnapShotId = \"\"\n DeleteOnTermination = \"\"\n Backup = \"\"\nelse:\n for volume in ec2volumes:\n if volume[\"State\"] == \"available\":\n VolumeId = volume[\"VolumeId\"]\n InstanceId = \"\"\n AttachTime = \"\"\n State = volume[\"State\"]\n Size = volume[\"Size\"]\n InstanceName = \"\"\n IOPS = volume[\"Iops\"]\n VolumeType = volume[\"VolumeType\"]\n SnapShotId = volume[\"SnapshotId\"]\n DeleteOnTermination = \"\"\n v = ec2.Volume(VolumeId)\n Backup = \"\"\n if v.tags is not None:\n for tag in v.tags:\n if tag[\"Key\"] == 'Backup':\n Backup = tag[\"Value\"]\ncsv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" %\n (VolumeId, InstanceId, AttachTime, State, Size, InstanceName, IOPS, DeleteOnTermination, VolumeType, SnapShotId, Backup))\ncsv_file.flush()\n\n# boto3 library ec2 API describe snapshots page\n# http://boto3.readthedocs.org/en/latest/reference/services/ec2.html#EC2.Client.describe_snapshots\nec2snapshot = ec2con.describe_snapshots(\n OwnerIds=[account_ids, 
],).get('Snapshots', [])\nec2snapshotlist = len(ec2snapshot)\nif ec2snapshotlist > 0:\n csv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('EC2 SNAPSHOT', regname))\n csv_file.write(\"%s,%s,%s,%s\\n\" % (\n 'SnapshotId', 'VolumeId', 'StartTime', 'VolumeSize'))\n csv_file.flush()\nfor snapshots in ec2snapshot:\n SnapshotId = snapshots['SnapshotId']\n VolumeId = snapshots['VolumeId']\n StartTime = snapshots['StartTime']\n VolumeSize = snapshots['VolumeSize']\n csv_file.write(\"%s,%s,%s,%s\\n\" %\n (SnapshotId, VolumeId, StartTime, VolumeSize))\n csv_file.flush()\n\n# boto3 library ec2 API describe addresses page\n# http://boto3.readthedocs.org/en/latest/reference/services/ec2.html#EC2.Client.describe_addresses\naddresses = ec2con.describe_addresses().get('Addresses', [])\naddresseslist = len(addresses)\nif addresseslist > 0:\n csv_file.write(\"%s,%s,%s,%s,%s\\n\" % ('', '', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('EIPS INSTANCE', regname))\n csv_file.write(\"%s,%s,%s,%s\\n\" %\n ('PublicIp', 'InstanceName', 'AllocationId', 'Domain'))\n csv_file.flush()\nfor address in addresses:\n PublicIp = address['PublicIp']\n AllocationId = address['AllocationId']\n Domain = address['Domain']\n try:\n InstanceId = address['InstanceId']\n except KeyError:\n InstanceName = \"\"\n else:\n Instance = ec2.Instance(InstanceId)\n InstanceName = \"\"\n for tags in Instance.tags:\n if tags[\"Key\"] == 'Name':\n InstanceName = tags[\"Value\"]\n csv_file.write(\"%s,%s,%s,%s\\n\" %\n (PublicIp, InstanceName, AllocationId, Domain))\n csv_file.flush()\n\n# RDS Connection beginning\nrdscon = session.client('rds', region_name=reg)\n# boto3 library RDS API describe db instances page\n# http://boto3.readthedocs.org/en/latest/reference/services/rds.html#RDS.Client.describe_db_instances\nrdb = rdscon.describe_db_instances().get('DBInstances', [])\nrdblist = len(rdb)\nif rdblist > 0:\n csv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('RDS INSTANCE', regname))\n csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % (\n 'DBInstanceIdentifier', 'DBInstanceStatus', 'Engine', 'EngineVersion', 'MultiAZ', 'DBInstanceClass', 'AllocatedStorage', 'StorageType', 'Iops', 'DBInstanceEndpoint', 'BackupRetentionPeriod'))\n csv_file.flush()\nfor dbinstance in rdb:\n DBInstanceIdentifier = dbinstance['DBInstanceIdentifier']\n DBInstanceClass = dbinstance['DBInstanceClass']\n DBName = dbinstance['Engine']\n EngineVersion = dbinstance['EngineVersion']\n StorageType = dbinstance['StorageType']\n DBInstanceStatus = dbinstance['DBInstanceStatus']\n MultiAZ = dbinstance['MultiAZ']\n AllocatedStorage = dbinstance['AllocatedStorage']\n DBInstanceEndpoint = dbinstance['Endpoint'][\n 'Address'] + \":\" + str(dbinstance['Endpoint']['Port'])\n BackupRetentionPeriod = dbinstance['BackupRetentionPeriod']\n Iops = \"\"\n if StorageType != \"gp2\":\n Iops = str(dbinstance['Iops'])\n csv_file.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % (\n DBInstanceIdentifier, DBInstanceStatus, DBName, EngineVersion, str(MultiAZ), DBInstanceClass, AllocatedStorage, StorageType, Iops, DBInstanceEndpoint, str(BackupRetentionPeriod)))\n csv_file.flush()\n\n# ElastiCache Connection beginning\neccon = session.client('elasticache', region_name=reg)\n# boto3 library ElatiCache API describe db instances page\n# http://boto3.readthedocs.io/en/latest/reference/services/elasticache.html#ElastiCache.Client.describe_cache_clusters\ncache = eccon.describe_cache_clusters().get('CacheClusters', 
[])\ncachelist = len(cache)\nif cachelist > 0:\n csv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('CACHE', regname))\n csv_file.write(\"%s,%s,%s,%s,%s\\n\" % (\n 'CacheClusterId', 'CacheClusterStatus', 'Engine', 'EngineVersion', 'CacheNodeType'))\n csv_file.flush()\nfor cacheinstance in cache:\n CacheClusterId = cacheinstance['CacheClusterId']\n CacheNodeType = cacheinstance['CacheNodeType']\n CacheClusterStatus = cacheinstance['CacheClusterStatus']\n CacheEngine = cacheinstance['Engine']\n EngineVersion = cacheinstance['EngineVersion']\n csv_file.write(\"%s,%s,%s,%s,%s\\n\" % (\n CacheClusterId, CacheClusterStatus, CacheEngine, EngineVersion, CacheNodeType))\n csv_file.flush()\n\n# Elb Connection beginning\nelbcon = session.client('elb', region_name=reg)\n# boto3 library ELB API describe db instances page\n# http://boto3.readthedocs.org/en/latest/reference/services/elb.html#ElasticLoadBalancing.Client.describe_load_balancers\nloadbalancer = elbcon.describe_load_balancers().get('LoadBalancerDescriptions', [])\nloadbalancerlist = len(loadbalancer)\nif loadbalancerlist > 0:\n csv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('ELB INSTANCE', regname))\n csv_file.write(\"%s,%s,%s\\n\" % ('LoadBalancerName', 'DNSName',\n 'CanonicalHostedZoneNameID'))\n csv_file.flush()\nfor load in loadbalancer:\n LoadBalancerName = load['LoadBalancerName']\n DNSName = load['DNSName']\n CanonicalHostedZoneNameID = load['CanonicalHostedZoneNameID']\n csv_file.write(\"%s,%s,%s\\n\" % (\n LoadBalancerName, DNSName, CanonicalHostedZoneNameID))\n csv_file.flush()\n\n# Elb v2 Connection beginning\nelbcon = session.client('elbv2', region_name=reg)\n# boto3 library ELB v2 API describe db instances page\n# http://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers\nloadbalancer = elbcon.describe_load_balancers().get('LoadBalancers', [])\nloadbalancerlist = len(loadbalancer)\nif loadbalancerlist > 0:\n csv_file.write(\"%s,%s,%s,%s\\n\" % ('', '', '', ''))\n csv_file.write(\"%s,%s\\n\" % ('ELBv2 INSTANCE', regname))\n csv_file.write(\"%s,%s,%s\\n\" % ('LoadBalancerName', 'DNSName',\n 'CanonicalHostedZoneId'))\n csv_file.flush()\nfor load in loadbalancer:\n LoadBalancerName = load['LoadBalancerName']\n DNSName = load['DNSName']\n CanonicalHostedZoneId = load['CanonicalHostedZoneId']\n csv_file.write(\"%s,%s,%s\\n\" % (\n LoadBalancerName, DNSName, CanonicalHostedZoneId))\n csv_file.flush()\n\n","sub_path":"awswork/ec2info/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":18988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"632656477","text":"import os\n\nfrom itertools import izip_longest\n\ndef grouper(n, iterable, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)\n\nn = 10\ni=0\n\ndir_path=os.listdir('/home/purvi/author_classification/chunking_text_file/40authors')\nfold=('/home/purvi/author_classification/chunking_text_file/40authors/chacha/chacha')\n\nfor eachFile in dir_path:\n f_path=fold+'/'+eachFile \t\t\n with open(f_path) as f:\n #if f_path.endswith(\".java\"):\n for k,g in enumerate(grouper(n, f, fillvalue=''), 1):\n i+=2\n with open('/home/purvi/author_classification/chunking_text_file/40authors_chunk/chacha/{0}.txt'.format(i * n), 'w') as fout:\n 
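# g is an n-line tuple from grouper(); fillvalue='' pads the last chunk so writelines always receives n items\n                    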
fout.writelines(g)\n    \n\n","sub_path":"chunking_text_file/chunk_size.py","file_name":"chunk_size.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"527861941","text":"from Inst import *\nfrom Course import *\nfrom XLSReader import *\nimport codecs\n\n\ndef main():\n    color1 = input(\"Color of the institutions (Eng):\")\n    shape1 = input(\"Shape of the institutions (Box, Square, Star, etc):\")\n    color2 = input(\"Color of the courses (Eng):\")\n    shape2 = input(\"Shape of the courses (Box, Square, Star, etc):\")\n\n    reader = XLSReader(\"data/data.xlsx\", \"Tabela 4\")\n\n    instList = reader.getInstsFromFile()\n    filledList = reader.getCourses(instList)\n\n    instFilterList = filterInstList(filledList)\n    instFilledFilterList = filterCoursesList(instFilterList)\n\n    print(writeOutPut(instFilledFilterList, color1, color2, shape1, shape2))\n\n\n\"\"\"\nThis method filters the list of institutions using the input file (instituicoes.txt).\n\n:param list - list of all institutions.\n\n:return - returns a list of filtered institutions.\n\"\"\"\n\n\ndef filterInstList(list):\n    newList = []\n    txtList = readTxtFile(1)\n\n    if txtList:\n        for inst in list:\n            for txt in txtList:\n                instName = inst.getName()\n                txt = txt.rstrip()\n                if (instName == txt):\n                    newList.append(inst)\n        return newList\n    else:\n        return list\n\n\"\"\"\nThis method filters the courses of each institution using the input file (cursos.txt).\n\n:param list - list of the filtered institutions.\n\n:return - returns a list of institutions with the courses filtered.\n\"\"\"\n\n\ndef filterCoursesList(list):\n    newList = []\n    txtList = readTxtFile(2)\n\n    if txtList:\n        for inst in list:\n            for course in inst.getListaCursos():\n                for txt in txtList:\n                    txt = txt.rstrip()\n                    cName = course.getName()\n                    if (cName == txt):\n                        inst.addCourseFilter(course)\n            newList.append(inst)\n        return newList\n    else:\n        for inst in list:\n            for course in inst.getListaCursos():\n                inst.addCourseFilter(course)\n            newList.append(inst)\n        return newList\n\n\n\"\"\"\nMethod that reads the input files.\n\n:param id - identifies the file it must read.\n\n:return - returns the content of the files as a list.\n\"\"\"\n\n\ndef readTxtFile(id):\n    listFile = []\n    f = \"\"\n\n    if (id == 1):\n        f = open(\"data/instituicoes.txt\", \"r\", encoding=\"UTF-8\")\n    if (id == 2):\n        f = open(\"data/cursos.txt\", \"r\", encoding=\"UTF-8\")\n\n    for text in f.readlines():\n        listFile.append(text)\n\n    return listFile\n\n\n\"\"\"\nMethod that generates the code to create the graph.\n\n:param listInst - list of institutions to generate.\n\n:param color1 - color of the institutions on the graph.\n\n:param color2 - color of the courses on the graph.\n\n:param shape1 - shape of the institutions on the graph.\n\n:param shape2 - shape of the courses on the graph.\n\n:return - string in dot to generate the graph.\n\"\"\"\n\n\ndef writeOutPut(listInst, color1, color2, shape1, shape2):\n    header = \"digraph G {\"\n    body = \"\"\n\n    for inst in listInst:\n        listaCurso = inst.getShowFilterList()\n        for curso in listaCurso:\n            totalStu = curso.getTotalStu()\n            result = totalStu / 100\n            body += '\\n\"' + curso.getName() + '\" [height=' + str(result) + ',weight=' + str(result) + '];'\n            body += '\\n\"' + curso.getName() + '\" [shape=' + shape2 + ', color=' + color2 + '];'\n            body += '\\n\"' + inst.getName() + '\" [shape=' + shape1 + ', color=' + color1 + '];'\n            body += '\\n\"' + inst.getName() + '\"->\"' + curso.getName() + '\";'\n\n    footer = \"\\n}\"\n\n    
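# a sketch of the generated DOT text, assuming one institution \"UBI\" (shape1=star, color1=blue) with one course \"CS\" of 200 students (shape2=box, color2=red):\n    #     digraph G {\n    #     \"CS\" [height=2.0,weight=2.0];\n    #     \"CS\" [shape=box, color=red];\n    #     \"UBI\" [shape=star, color=blue];\n    #     \"UBI\"->\"CS\";\n    #     }\n    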
return header + body + footer\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Python/13995_PauloPalma_13948_MarisaJaneiro_TP_Python_LP_2016-2017/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"516952713","text":"# regression\n\nfrom sklearn.datasets import load_diabetes\nimport tensorflow as tf\n\n\ndataset = load_diabetes()\nx_data = dataset.data\ny_data = dataset.target\n\ny_data = y_data.reshape(-1, 1)\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.8, shuffle=True, random_state=66)\n\n#(442, 10)\n#(442, 1)\n\nprint(x_data.shape)\nprint(y_data.shape)\n\nx = tf.placeholder(tf.float32, shape=[None, 10])\ny = tf.placeholder(tf.float32, shape=[None, 1])\n\nw = tf.Variable(tf.random.normal([10,1]), name='weight')\nb = tf.Variable(tf.random.normal([1]), name='bias')\n\n\n# hypothesis = x * w + b # element-wise operation\nhypothesis = tf.matmul(x, w) + b # matrix operation \n\nloss = tf.reduce_mean(tf.square(hypothesis - y))\ntrain = tf.train.AdamOptimizer(learning_rate= 0.8).minimize(loss)\n#train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(cost) # optimizer + train\nfrom sklearn.metrics import r2_score\n\n\nwith tf.compat.v1.Session() as sess:\n    sess.run(tf.compat.v1.global_variables_initializer())\n    for epoch in range(2000):\n        _, cur_loss, cur_hypothesis, cur_w, cur_b = sess.run([train, loss, hypothesis, w, b], feed_dict= {x:x_train, y:y_train})\n        if epoch%20 == 0:\n            print(f'Epoch : {epoch} >>> loss : {cur_loss}\\nhypo : {cur_hypothesis}')\n    y_predict1 = sess.run(hypothesis, feed_dict={x:x_test})\n    R2 = r2_score(y_test, y_predict1)\n    print('R2: ', R2) \n","sub_path":"tf114/tf11_2_diabets.py","file_name":"tf11_2_diabets.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"551431221","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nCategory = [\n    ('機械学習', 'Machine Learning'),\n    ('ディープラーニング', 'Deep Learning'),\n    ('データ分析', 'Data Analysis'),\n    ('Python 基礎', 'Python Basic'),\n]\n\nclass HistoryModel(models.Model):\n    postdate = models.DateField(auto_now_add=True)\n    title = models.CharField(max_length=100)\n    summary = models.TextField()\n    content = models.TextField()\n    author = models.ForeignKey(User, on_delete=models.CASCADE)\n    images = models.ImageField(upload_to='images/', blank=True, null=True)\n    category = models.CharField(max_length=20, choices=Category)\n","sub_path":"HistoryPost/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"648096899","text":"num1 = int(input())\nnum2 = int(input())\nnum3 = int(input())\n\nif num1 >= num2 and num1 >= num3:\n    maxp = num1\nelif num2 >= num1 and num2 >= num3:\n    maxp = num2\nelse:\n    maxp = num3\n\nif num1 <= num2 and num1 <= num3:\n    minp = num1\nelif num2 <= num1 and num2 <= num3:\n    minp = num2\nelse:\n    minp = num3\n\nprint (\"max: \" + str(maxp))\nprint (\"min: \" + str(minp))","sub_path":"src/Week1/day1.HW.4.2.py","file_name":"day1.HW.4.2.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"108916392","text":"#!/usr/bin/env python\n\nimport unittest\nimport os\nimport sys\nimport re\nfrom tempfile import gettempdir\n\nbase = os.path.abspath(os.path.normpath(os.path.split(sys.argv[0])[0]))\ntestfile = re.compile('^test_.*\\\\.py$', re.IGNORECASE) \n\ndef get_tests(directory):\n    sys.path.append(directory)\n    tests = []\n    modules = []\n    for name in os.listdir(directory):\n        if name.startswith('.'): continue\n        fullname = os.path.join(directory, name)\n        if os.path.isdir(fullname):\n            tests.extend(get_tests(fullname))\n        elif testfile.match(name):\n            modname = os.path.splitext(name)[0]\n            
modules.append(__import__(modname))\n tests.extend([ unittest.defaultTestLoader.loadTestsFromModule(module)\n for module in modules ])\n return tests\n\ndef exists(file_name):\n file = os.path.join(base, file_name)\n return os.path.exists(file)\n\ndef sh(command):\n process = os.popen(command)\n output = process.read()\n process.close()\n return output\n\ndef add_dependencies_to_path():\n main = os.path.join('src', 'main', 'python')\n test = os.path.join('src', 'test', 'python')\n for path in [ main, test ]:\n path = os.path.join(base, path.replace('/', os.sep))\n if path not in sys.path:\n sys.path.insert(0, path)\n\n if not exists('dependencies.txt'):\n os.environ['MAVEN_OPTS'] = '-DoutputAbsoluteArtifactFilename=true'\n mvn_output = sh('mvn dependency:list').splitlines()\n jars = [re.sub('.*:(C:)?', '\\\\1', file) for file in mvn_output if re.search('jar', file)]\n dependencies_txt = open(os.path.join(base, 'dependencies.txt'), 'w')\n for jar in jars:\n dependencies_txt.write(jar + '\\n')\n\n classes = os.path.join('target', 'classes')\n test_classes = os.path.join('target', 'test-classes')\n if not exists(classes) or not exists(test_classes):\n sh('mvn test-compile')\n\n dependencies = [classes, test_classes] + open('dependencies.txt', 'rb').read().splitlines()\n os.environ['CLASSPATH'] = os.pathsep.join(dependencies)\n os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)\n\ndef get_python_path():\n for path in sys.path:\n if os.path.exists(os.path.join(path, 'robot')):\n return path\n \n\nif __name__ == '__main__':\n rc = 0\n if os.name == 'java':\n tests = get_tests('.')\n suite = unittest.TestSuite(tests)\n runner = unittest.TextTestRunner(descriptions=0, verbosity=1)\n result = runner.run(suite)\n rc = len(result.failures) + len(result.errors)\n if rc > 250: rc = 250\n else:\n python_path = get_python_path()\n add_dependencies_to_path()\n if len(sys.argv[1:]) > 0:\n runner = os.path.join(python_path, 'robot', 'runner.py')\n args_as_string = ' '.join(sys.argv[1:])\n rc = os.system('jython -Dpython.path=%s %s --loglevel TRACE --outputdir %s %s' % (python_path, runner, gettempdir(), args_as_string))\n else:\n rc = os.system('jython -Dpython.path=%s %s' % (python_path, __file__))\n\n sys.exit(rc)\n","sub_path":"jvmconnector/tags/jvmconnector-0.8/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"465448023","text":"\n\nfrom xai.brain.wordbase.adjectives._wee import _WEE\n\n#class header\nclass _WEEST(_WEE, ):\n\tdef __init__(self,): \n\t\t_WEE.__init__(self)\n\t\tself.name = \"WEEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"wee\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_weest.py","file_name":"_weest.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"640142513","text":"# coding: utf-8\n\n__author__ = 'Junki Ishida'\n\nfrom .cache import RedisViewBase\nfrom ._compat import iteritems\n\nimport flask, pickle\nfrom datetime import datetime, timezone\n\n\ndef remote_addr():\n if 'X-Forwarded-For' in flask.request.headers:\n return flask.request.headers.get('X-Forwarded-For')\n return flask.request.remote_addr\n\n\nclass RedisCounterView(RedisViewBase):\n counter_zkey = 'flask-flab:counter_hkey_set'\n counter_hkey_prefix = 'flask-flab:counter_hash:'\n counter_hkey_suffix = ''\n counter_timezone = None\n 
counter_field_serializer = pickle\n counter_field_getters = [\n lambda r: {'type': 'a', },\n lambda r: {'type': 'u', 'url': flask.request.url, },\n lambda r: {\n 'type': 'urs',\n 'url': flask.request.url,\n 'remote_addr': remote_addr(),\n 'status_code': r.status_code\n },\n ]\n\n @classmethod\n def format_counter_hkey(cls, keycode):\n return '{0}{1}{2}'.format(cls.counter_hkey_prefix or '', keycode, cls.counter_hkey_suffix or '')\n\n @classmethod\n def counter_keycode(cls, dt):\n if cls.counter_timezone:\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=timezone.utc)\n dt = dt.astimezone(cls.counter_timezone)\n dt = dt.replace()\n return int(dt.strftime('%Y%m%d%H'))\n\n @classmethod\n def iter_counter_items(cls, dt):\n redis_client = cls.redis()\n keycode = cls.counter_keycode(dt)\n hkey = cls.format_counter_hkey(keycode)\n result = redis_client.hgetall(hkey)\n\n\n\n def after_dispatch(self, response, *args, **kwargs):\n response = super(RedisCounterView, self).after_dispatch(response, *args, **kwargs)\n keycode = self.counter_keycode(datetime.utcnow())\n hkey = self.format_counter_hkey(keycode)\n redis_client = self.redis()\n p = redis_client.pipeline()\n for getter in self.counter_field_getters:\n field = getter(response)\n if self.counter_field_serializer:\n field = self.counter_field_serializer.dumps(field)\n p.hincrby(hkey, field, 1)\n p.zadd(self.counter_zkey, keycode, hkey)\n p.execute()\n return response","sub_path":"flask_flab/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"231169094","text":"import os\n\nimport numpy as np\n\nfrom examples.sensitivity_analysis.chains_access import get_three_depth_manual_class_chain\nfrom examples.sensitivity_analysis.dataset_access import get_scoring_data\nfrom fedot.core.chains.chain import Chain\nfrom fedot.sensitivity.node_sa_approaches import NodeDeletionAnalyze, NodeReplaceOperationAnalyze\nfrom fedot.sensitivity.nodes_sensitivity import NodesAnalysis\n\n\ndef create_correct_path(path: str, dirname_flag: bool = False):\n \"\"\"\n Create path with time which was created during the testing process.\n \"\"\"\n\n for dirname in next(os.walk(os.path.curdir))[1]:\n if dirname.endswith(path):\n if dirname_flag:\n return dirname\n else:\n file = os.path.join(dirname, path + '.json')\n return file\n return None\n\n\ndef run_import_export_example(chain_path):\n # Prepare data to train the model\n train_data, test_data = get_scoring_data()\n\n # Get chain and fit it\n chain = get_three_depth_manual_class_chain()\n chain.fit_from_scratch(train_data)\n\n predicted_output = chain.predict(test_data)\n prediction_before_export = np.array(predicted_output.predict)\n print(f'Before export {prediction_before_export[:4]}')\n\n NodesAnalysis(chain, train_data, test_data,\n approaches=[NodeDeletionAnalyze,\n NodeReplaceOperationAnalyze]).analyze()\n\n # Export it\n chain.save(path=chain_path)\n\n # Import chain\n json_path_load = create_correct_path(chain_path)\n new_chain = Chain()\n new_chain.load(json_path_load)\n\n predicted_output_after_export = new_chain.predict(test_data)\n prediction_after_export = np.array(predicted_output_after_export.predict)\n\n print(f'After import {prediction_after_export[:4]}')\n\n\nif __name__ == '__main__':\n 
run_import_export_example(chain_path='import_export_sa')\n","sub_path":"examples/sensitivity_analysis/chain_export_with_sa.py","file_name":"chain_export_with_sa.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362012754","text":"import os\n\n\ndef get_dir_files_paths(dir_path, accepted_ext): # xlsx\n files_paths = []\n for item in os.listdir(dir_path):\n if not os.path.isdir(os.path.join(dir_path, item)) and '.' + str(item.split('.')[-1]) in accepted_ext:\n files_paths.append(os.path.join(dir_path, item))\n return files_paths\n\n","sub_path":"get_dir_files_paths.py","file_name":"get_dir_files_paths.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"170128464","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport time\nimport os\nimport complex_cnn as ac\n\n\nif __name__ == '__main__':\n batch_size = 256\n trainloader, testloader, classes = ac.load_data(batch_size)\n path = './'\n max_epoch = 30\n net = ac.Net()\n if torch.cuda.is_available():\n net.cuda()\n epoch_acc_rate = np.zeros((max_epoch, 1))\n class_acc_rate = np.zeros((10, max_epoch))\n\n for epoch in range(max_epoch):\n print('-'*30, 'test epoch ' + str(epoch+1), '-'*30)\n net.load_state_dict(torch.load(path + 'cnn2_epoch_' + str(epoch+1) + '.nn'))\n correct = 0\n total = 0\n class_correct = list(0. for i in range(10))\n class_total = list(0. for i in range(10))\n for data in testloader:\n images, labels = data\n if torch.cuda.is_available():\n images, labels = Variable(images.cuda()), Variable(labels.cuda())\n else:\n images, labels = Variable(images), Variable(labels)\n\n outputs = net(Variable(images))\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == labels).squeeze()\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n for i in range(10):\n if class_total[i] == 0:\n print('Accuracy of %5s : %2f %%' % (\n classes[i], 0))\n else:\n print('Accuracy of %5s : %2f %%' % (\n classes[i], 100.0 * float(class_correct[i]) / float(class_total[i])))\n class_acc_rate[i][epoch] = 100.0 * float(class_correct[i]) / float(class_total[i])\n\n print('Accuracy of the network on the 10000 test images: %f %%' % (\n 100.0 * correct / total))\n epoch_acc_rate[epoch] = 100.0 * correct / total\n\n\n # Plot the figures\n x = np.linspace(1, max_epoch, max_epoch)\n plt.plot(x, epoch_acc_rate, 'r-o', label='Test acc rate')\n plt.legend()\n plt.xlabel('epoch')\n plt.ylabel('acc rate')\n plt.title('acc rate Waves')\n plt.savefig(path + \"cnn2_test_acc_rate_\" + str(max_epoch) + \".jpg\")\n\n plt.figure()\n row = 1\n col = 1\n color = ['#FF0000', '#FF8000', '#FFFF00', '#80FF00', '#00FF00',\n '#00FF80', '#00FFFF', '#0080FF', '#0000FF', '#8000FF']\n for i in range(10):\n if i == 5:\n row += 1\n col = 1\n #plt.subplot(row,col,1)\n col += 1\n plt.plot(x, class_acc_rate[i], color=color[i],linestyle='-', marker='o', label=classes[i])\n plt.legend()\n plt.legend\n plt.xlabel('epoch')\n plt.ylabel('acc rate')\n plt.title('acc rate Waves')\n plt.savefig(path + \"cnn2_class_acc_rate_\" + str(max_epoch) + 
\".jpg\")\n plt.show()\n","sub_path":"cnn/test_complex_cnn.py","file_name":"test_complex_cnn.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311253765","text":"from torchvision import transforms\nfrom PIL import Image\n\ndef transform_image(path):\n\n image_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()])\n \n image = Image.open(path)\n image = image_transform(image)\n image = image.unsqueeze(0)\n\n return image.cpu()\n","sub_path":"predict/TRANSF.py","file_name":"TRANSF.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504603788","text":"# -*- coding: utf-8 -*-\r\n\r\nimport math\r\nimport numpy as np\r\nfrom .loader import tomatrix\r\nfrom .models import Recommender\r\n\r\n__all__ = [\"distance\", \"KDE\", \"KDEModel\"]\r\n\r\ndef distance(point_x, point_y): \r\n \"\"\"distance between two points, unit is meters\r\n usage:\r\n >>> print distance((1.0, 0.0), (0.0, 0.0)) \r\n 111319.490793\r\n \"\"\"\r\n EARTH_RADIUS = 6378137.0 \r\n radlat1 = point_x[0] * math.pi / 180.0 \r\n radlat2 = point_y[0] * math.pi / 180.0 \r\n\r\n x = radlat1 - radlat2 \r\n y = (point_x[1] - point_y[1]) * math.pi / 180.0\r\n c = math.sqrt((math.sin(x * 0.5) ** 2) + \r\n math.cos(radlat1) * math.cos(radlat2)* (math.sin(y * 0.5) ** 2))\r\n lenght = 2.0 * math.asin(c) * EARTH_RADIUS \r\n return math.fabs(lenght)\r\n\r\n\r\nclass KDE(object):\r\n \"\"\"Estimate the probability that a user shows up in a poi, using the KDE method\r\n usage:\r\n >>> cks = {0:[0]}\r\n >>> locations = {0:(0.0, 0.0), 1: (0.00014, 0.0), 2: (0.00028, 0.0)}\r\n >>> k = KDE(cks, locations)\r\n >>> print k.probility(0, 1) > k.probility(0, 2)\r\n True\r\n >>> print k.probility(0, 1) \r\n 0.398893835041\r\n \"\"\"\r\n def __init__(self, checkins, locations, smooth=1.0):\r\n \"\"\"\r\n matrix : user checkin data sparse matrix\r\n locations: poi latitude and longitude\r\n {\"loc1\": (20.0, 30.0), ...}\r\n \"\"\"\r\n self.matrix = tomatrix(checkins)\r\n self.locations = locations\r\n if smooth <= 0.0:\r\n raise ValueError(\"smooth should > 0.0\")\r\n self.smooth = smooth\r\n\r\n def probility(self, user, item):\r\n pois = set(np.nonzero(self.matrix[user])[1])\r\n if len(pois) == 0 or item in pois:\r\n return 1.0\r\n\r\n sum_prob = 0.0\r\n for poi in pois:\r\n loc_x = self.locations[poi]\r\n loc_y = self.locations[item]\r\n _dis = distance(loc_x, loc_y) / 1000.0 # to kilometer\r\n x = _dis / self.smooth\r\n prob = math.pow(math.e, -0.5 * math.pow(x, 2))\r\n sum_prob += prob\r\n return sum_prob / (math.sqrt(2.0 * math.pi) * self.smooth * len(pois))\r\n\r\n\r\nclass KDEModel(Recommender):\r\n def __init__(self, checkins, locations, smooth=1.0):\r\n super(KDEModel, self).__init__(checkins)\r\n self.kde = KDE(checkins, locations, smooth)\r\n\r\n def predict(self, user, item):\r\n return self.kde.probility(user, item)\r\n\r\n","sub_path":"poi/kde.py","file_name":"kde.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"553601417","text":"def solution(number):\n l = [x for x in range(number) if x%3 == 0 or x%5 == 0]\n return sum(l)\n\nprint(solution(10))\n\n\ndef solutio(number):\n l = [x for x in range(number) if x%3 == 0 or x%5 == 0] \n a = 0\n for x in l:\n a +=x \n return a 
\nprint(solutio(10))","sub_path":"Codewars/Multiples_3_5.py","file_name":"Multiples_3_5.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433637642","text":"#!/usr/bin/env python3\n\nimport random\nimport json\nimport time\n\nfrom argparse import ArgumentParser\nfrom sidecar_client import connect_url\n\n# in seconds\ndefault_cycle = 1\ndefault_runs = 0\ndefault_sidecar_url = \"https://r2lab.inria.fr:999/\"\n\nnode_ids = range(1, 38)\ndefault_max_nodes_impacted = 10\n\n######## helper to create float ranges\ndef drange(start, stop, step):\n result = []\n r = start\n while r < stop:\n result.append(r)\n r += step\n return result\n\nwlan_rates_range = drange(0., 20. * 10**6, 6. * 10**5)\n\n######## valid values for initializing\nnodes_field_possible_values = {\n 'available' : [ None, 'ko'] + 3*['ok'],\n 'cmc_on_off' : [ 'fail' ] + 3 * [ 'on', 'off' ],\n 'control_ping' : [ 'on', 'off' ],\n 'control_ssh' : ['on', 'off'],\n 'os_release' : [ 'fedora-21', 'ubuntu-15.04', 'other', ],\n 'gnuradio_release' : ['3.7.10', '', None],\n 'uname' : [ 'foo', '4.2.300-generic' ],\n 'image_radical' : [ 'ubuntu-15.04', 'oai-scrambler', '', None ],\n 'usrp_type' : 12 * [ None ] + [ 'b210', 'n210', 'usrp1', 'usrp2'],\n 'usrp_on_off' : 3 * ['on'] + ['off'],\n# 'wlan0_rx_rate' : wlan_rates_range,\n# 'wlan0_tx_rate' : wlan_rates_range,\n# 'wlan1_rx_rate' : wlan_rates_range,\n# 'wlan1_tx_rate' : wlan_rates_range,\n}\n\nphones_field_possible_values = {\n 'wifi_on_off' : [ 'on', 'off' ],\n 'airplane_mode' : [ 'on', 'off'],\n }\n\n#################### \ndef random_ids(max_nodes_impacted):\n how_many = random.randint(1, max_nodes_impacted)\n return [ random.choice(node_ids) for i in range(how_many)]\n\n# heuristics to avoid too inconsistent data\ndef normalize_status(node_info):\n # None means do not mention this key at all\n none_keys = { k for k in node_info if node_info[k] is None }\n for k in none_keys:\n del node_info[k]\n # avoid producing too inconsistent data\n if 'control_ssh' in node_info and node_info['control_ssh'] != 'off':\n node_info.update({'cmc_on_off' : 'on',\n 'control_ping' : 'on',\n })\n return node_info\n\ndef random_node_status(id, index=0):\n # for testing incomplete news on the livemap side\n # we expose one or the other or both\n # however the default always expose the full monty\n node_info = { 'id' : id }\n # fill node_info with all known keys\n node_info.update( { field : random.choice(values) \n for field, values in nodes_field_possible_values.items() })\n # make sure this is mostly consistent\n normalize_status(node_info)\n # index == 0 means we need a complete record\n # otherwise let's remove some\n items_to_remove = index % len(nodes_field_possible_values)\n keys_to_remove = random.sample(nodes_field_possible_values.keys(), items_to_remove)\n for field in keys_to_remove:\n if field in node_info:\n del node_info[field]\n return node_info\n\ndef random_phone_status(id):\n phone_info = { 'id' : id }\n # fill phone_info with all known keys\n phone_info.update( { field : random.choice(values) \n for field, values in phones_field_possible_values.items() })\n return phone_info\n\n# too lazy to get this properly (need to turn off server auth)\nleases_url = \"https://faraday.inria.fr:12346/resources/leases\";\nleases_file = \"LEASES\"\n\ndef get_leases():\n try:\n with open(leases_file) as input:\n string = input.read()\n obj = json.loads(string)\n resources = obj['resource_response']['resources']\n 
return resources\n except:\n print(\"WARNING: unable to read leases file {} - not sending leases\".format(leases_file))\n return []\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('-c', '--cycle', dest='cycle', default=default_cycle,\n type=float,\n help=\"Cycle duration in seconds (default={})\".format(default_cycle))\n parser.add_argument('-r', '--runs', dest='runs', default=default_runs,\n type=int,\n help=\"How many runs (default={}; 0 means forever)\".format(default_runs))\n parser.add_argument('-n', '--nodes', dest='max_nodes_impacted', default=default_max_nodes_impacted,\n type=int,\n help=\"Maximum number of nodes impacted by each cycle\")\n parser.add_argument('-l', '--live', dest='live', action='store_true', default=False,\n help=\"If set, only rx/tx data are animated\")\n parser.add_argument('-p', '--phone-cycle', default=5, type=int,\n help='send a random phone status every n cycles')\n parser.add_argument(\"-u\", \"--sidecar-url\", dest=\"sidecar_url\",\n default=default_sidecar_url,\n help=\"url for the sidecar server (default={})\"\n .format(default_sidecar_url))\n parser.add_argument('-v', '--verbose', action='store_true', default=False)\n args = parser.parse_args()\n\n cycle = args.cycle\n\n if args.live:\n to_remove = [ k for k in nodes_field_possible_values if 'rx' not in k and 'tx' not in k]\n for k in to_remove:\n del nodes_field_possible_values[k]\n\n if args.verbose:\n print(\"Using cycle {}s\".format(cycle))\n\n url = args.sidecar_url\n print(\"Connecting to sidecar at {}\".format(url))\n socketio = connect_url(url)\n\n counter = 0\n while True:\n news_infos = [ random_node_status(id, index)\n for index, id in enumerate(random_ids(args.max_nodes_impacted)) ]\n if args.verbose:\n print(\"{} -- on {} nodes (id, len(fields)) : {}\"\n .format(counter, len(news_infos),\n [ (info['id'], len(info)-1) for info in news_infos]))\n print(news_infos[0])\n socketio.emit('info:nodes', json.dumps(news_infos), None)\n\n # only one phone\n if counter % args.phone_cycle == 0:\n phone_infos = [ random_phone_status(id) for id in [1]]\n if args.verbose:\n print(\"phone: emitting {}\".format(phone_infos[0]))\n socketio.emit('info:phones', json.dumps(phone_infos), None)\n\n leases = get_leases()\n if leases:\n socketio.emit('info:leases', json.dumps(leases), None)\n counter += 1\n if args.runs and counter >= args.runs:\n break\n time.sleep(cycle)\n\n \n # xxx should probably clean up the socket io client\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"sidecar/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":6518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591940262","text":"from iworld import *\n\nclass findpath():\n __instance = None\n @staticmethod\n def get_instance():\n if findpath.__instance is None:\n findpath.__instance = findpath()\n return findpath.__instance\n\n def __init__(self):\n self.used=[]\n self.path1=[]\n self.m=getmap.get_instance()\n self.sta=state.get_instance()\n def change(self, x, y, f_x, f_y):\n self.x=x\n self.y=y\n self.f_x=f_x\n self.f_y=f_y\n self.used=[]\n self.path1=[]\n self.ret_path(self.x,self.y)\n return self.path1\n\n def manhattan_dist(self,tx,ty,f_x,f_y):\n return abs(tx - f_x) + abs(ty - f_y)\n def near(self,tx,ty):\n greedy=[]\n if (self.m.map[tx + 1][ty] == self.m.map[tx][ty] and [tx + 1, ty] not in self.used):\n greedy.append([self.manhattan_dist(self.f_x, self.f_y, tx + 1, ty),tx + 1, ty ])\n if (self.m.map[tx][ty - 1] == self.m.map[tx][ty] and 
[tx, ty - 1] not in self.used):\n greedy.append([self.manhattan_dist(self.f_x, self.f_y, tx, ty - 1),tx, ty - 1 ])\n if (self.m.map[tx - 1][ty] == self.m.map[tx][ty] and [tx - 1, ty] not in self.used):\n greedy.append([self.manhattan_dist(self.f_x, self.f_y, tx - 1, ty), tx - 1, ty])\n if (self.m.map[tx][ty + 1] == self.m.map[tx][ty] and [tx, ty + 1] not in self.used):\n greedy.append([self.manhattan_dist(self.f_x, self.f_y, tx, ty + 1),tx, ty + 1 ])\n return greedy\n def ret_path(self,tx,ty):\n self.path1.append([tx,ty])\n self.used.append([tx,ty])\n if([tx-1,ty]==[self.f_x,self.f_y] or [tx,ty+1]==[self.f_x,self.f_y]\n or [tx+1,ty]==[self.f_x,self.f_y] or [tx,ty-1]==[self.f_x,self.f_y]):\n self.path1.append([self.f_x,self.f_y])\n return True\n greedy = self.near(tx,ty)\n temp = sorted(greedy, reverse=True)\n while temp:\n yun=temp.pop()\n if( self.ret_path(yun[1],yun[2])):\n return True\n else:\n self.path1.pop()\n return False\n\n def next_step(self,x, y, dx, dy, action=''):\n dd = 0\n op = [' RDL','L RD','DL R','RDL ']\n if (x == dx and y == dy):\n return ''\n if (x == dx +1 and y == dy):\n dd = 1\n if (x == dx and y == dy -1):\n dd = 2\n if (x == dx and y == dy + 1):\n dd = 0\n if (x == dx -1 and y == dy):\n dd = 3\n temp=op[self.sta.dirn][dd]\n if(temp=='D'):\n temp='LL'\n elif(temp==' '):\n temp=''\n return temp + action + 'F'\n","sub_path":"9414_3/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275424985","text":"# ActivitySim\n# See full license in LICENSE.txt.\nfrom __future__ import (absolute_import, division, print_function, )\nfrom future.standard_library import install_aliases\ninstall_aliases() # noqa: E402\n\nimport logging\n\nfrom activitysim.core import simulate\nfrom activitysim.core import tracing\nfrom activitysim.core import config\n\nfrom activitysim.core.assign import evaluate_constants\n\nfrom .mode import tour_mode_choice_spec\nfrom .mode import tour_mode_choice_coeffecients_spec\n\n\nfrom . 
import expressions\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_logsum_spec(model_settings):\n\n return tour_mode_choice_spec(model_settings)\n\n\ndef get_coeffecients_spec(model_settings):\n return tour_mode_choice_coeffecients_spec(model_settings)\n\n\ndef filter_chooser_columns(choosers, logsum_settings, model_settings):\n\n chooser_columns = logsum_settings.get('LOGSUM_CHOOSER_COLUMNS', [])\n\n if 'CHOOSER_ORIG_COL_NAME' in model_settings:\n chooser_columns.append(model_settings['CHOOSER_ORIG_COL_NAME'])\n\n missing_columns = [c for c in chooser_columns if c not in choosers]\n if missing_columns:\n logger.debug(\"logsum.filter_chooser_columns missing_columns %s\" % missing_columns)\n\n # ignore any columns not appearing in choosers df\n chooser_columns = [c for c in chooser_columns if c in choosers]\n\n choosers = choosers[chooser_columns]\n return choosers\n\n\ndef compute_logsums(choosers,\n tour_purpose,\n logsum_settings, model_settings,\n skim_dict, skim_stack,\n chunk_size, trace_hh_id, trace_label):\n \"\"\"\n\n Parameters\n ----------\n choosers\n tour_purpose\n logsum_settings\n model_settings\n skim_dict\n skim_stack\n chunk_size\n trace_hh_id\n trace_label\n\n Returns\n -------\n logsums: pandas series\n computed logsums with same index as choosers\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 'compute_logsums')\n\n logsum_spec = get_logsum_spec(logsum_settings)\n\n omnibus_coefficient_spec = get_coeffecients_spec(logsum_settings)\n coefficient_spec = omnibus_coefficient_spec[tour_purpose]\n\n # compute_logsums needs to know name of dest column in interaction_sample\n orig_col_name = model_settings['CHOOSER_ORIG_COL_NAME']\n dest_col_name = model_settings['ALT_DEST_COL_NAME']\n\n # FIXME - are we ok with altering choosers (so caller doesn't have to set these)?\n assert ('in_period' not in choosers) and ('out_period' not in choosers)\n choosers['in_period'] = expressions.skim_time_period_label(model_settings['IN_PERIOD'])\n choosers['out_period'] = expressions.skim_time_period_label(model_settings['OUT_PERIOD'])\n\n assert ('duration' not in choosers)\n choosers['duration'] = model_settings['IN_PERIOD'] - model_settings['OUT_PERIOD']\n\n nest_spec = config.get_logit_model_settings(logsum_settings)\n constants = config.get_model_constants(logsum_settings)\n\n logger.debug(\"Running compute_logsums with %d choosers\" % choosers.shape[0])\n\n # setup skim keys\n odt_skim_stack_wrapper = skim_stack.wrap(left_key=orig_col_name, right_key=dest_col_name,\n skim_key='out_period')\n dot_skim_stack_wrapper = skim_stack.wrap(left_key=dest_col_name, right_key=orig_col_name,\n skim_key='in_period')\n od_skim_stack_wrapper = skim_dict.wrap(orig_col_name, dest_col_name)\n\n skims = {\n \"odt_skims\": odt_skim_stack_wrapper,\n \"dot_skims\": dot_skim_stack_wrapper,\n \"od_skims\": od_skim_stack_wrapper,\n 'orig_col_name': orig_col_name,\n 'dest_col_name': dest_col_name\n }\n\n locals_dict = evaluate_constants(coefficient_spec, constants=constants)\n locals_dict.update(constants)\n locals_dict.update(skims)\n\n # - run preprocessor to annotate choosers\n # allow specification of alternate preprocessor for nontour choosers\n preprocessor = model_settings.get('LOGSUM_PREPROCESSOR', 'preprocessor')\n preprocessor_settings = logsum_settings[preprocessor]\n\n if preprocessor_settings:\n\n simulate.set_skim_wrapper_targets(choosers, skims)\n\n expressions.assign_columns(\n df=choosers,\n model_settings=preprocessor_settings,\n locals_dict=locals_dict,\n 
trace_label=trace_label)\n\n logsums = simulate.simple_simulate_logsums(\n choosers,\n logsum_spec,\n nest_spec,\n skims=skims,\n locals_d=locals_dict,\n chunk_size=chunk_size,\n trace_label=trace_label)\n\n return logsums\n","sub_path":"activitysim/abm/models/util/logsums.py","file_name":"logsums.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"336589417","text":"import argparse\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport time\nimport sys\nimport os\n\n# Constants\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'\n}\n\n\n######### Parser #########\n\n\n# Fetch the search results page\ndef get_hh_resume_search_page(job_name, page_num):\n global HEADERS\n\n request_addr = 'https://hh.ru/search/resume?clusters=true&exp_period=all_time&logic=normal&no_magic=false&order_by=relevance&pos=full_text&text={0}&page={1}'.format(\n job_name, page_num)\n response = requests.get(request_addr, headers=HEADERS)\n\n return response\n\n\ndef get_res_pages_num(parsed_page):\n return int(parsed_page.findAll('a', {'data-qa': 'pager-page'})[-1].text)\n\n\ndef get_resume_url(resume_card):\n return 'https://hh.ru' + resume_card.find('a', {'class': 'resume-search-item__name'})['href']\n\n\n# Build an object with the resume data from the parser results\ndef parse_resume(resume_url, search_phrase):\n res = {}\n try:\n response = requests.get(resume_url, headers=HEADERS)\n parsed_resume = BeautifulSoup(response.text)\n\n res['url'] = resume_url\n resume_name = parsed_resume.find('span', {'class': 'resume-block__title-text'})\n if resume_name != None:\n res['resume_name'] = resume_name.text\n res['profession'] = search_phrase\n\n specialization_name = parsed_resume.find('span', {'data-qa': 'resume-block-specialization-category'})\n if specialization_name != None:\n res['specialization'] = {}\n res['specialization']['name'] = specialization_name.text\n\n splecializations_list = parsed_resume.findAll('li', {'data-qa': 'resume-block-position-specialization'})\n if splecializations_list != None:\n res['specialization']['list'] = []\n for spec in splecializations_list:\n res['specialization']['list'].append(spec.text)\n\n all_p = parsed_resume.findAll('p')\n for item in all_p:\n if item.text.startswith('График работы'):\n res['time'] = item.text.split(': ')[1]\n\n salary = parsed_resume.find('span', {'class', 'resume-block__salary'})\n if salary != None:\n res['salary'] = salary.text\n\n meta_info = parsed_resume.find('div', {'class': 'resume-header-block'})\n if meta_info != None:\n sex = meta_info.find('span', {'data-qa': 'resume-personal-gender'})\n if sex != None:\n res['sex'] = 0 if sex.text == 'Женщина' else 1\n\n age = meta_info.find('span', {'data-qa': 'resume-personal-age'})\n if age != None:\n res['age'] = int(age.text.split()[0])\n\n address = meta_info.find('span', {'data-qa': 'resume-personal-address'})\n if address != None:\n res['address'] = address.text.split()[0]\n\n about = parsed_resume.find('div', {'data-qa': 'resume-block-skills'})\n if about != None:\n res['about'] = about.text\n\n key_skills = parsed_resume.find('div', {'data-qa': 'skills-table'})\n if key_skills != None:\n res['key_skills'] = list(map(lambda x: x.text, key_skills.findAll('span', {'data-qa': 'bloko-tag__text'})))\n\n education = parsed_resume.find('div', {'data-qa': 'resume-block-education'}).find('div', {\n 
'class': 'resume-block-item-gap'}).find('div', {'class': 'resume-block-item-gap'})\n if education != None:\n res['education'] = []\n for education_item in education.findAll('div', {'class': 'bloko-columns-row'}):\n item_content = education_item.findAll('div')\n res['education'].append({'end_date': item_content[0].text, 'name': item_content[1].text})\n\n experience = parsed_resume.find('div', {'data-qa': 'resume-block-experience'})\n if experience != None:\n res['experience'] = {\n 'total': experience.find('span', {'class': 'resume-block__title-text resume-block__title-text_sub'}).text}\n res['experience']['list'] = []\n for item in experience.find('div', {'class': 'resume-block-item-gap'}).findAll('div', {'itemprop': 'worksFor'}):\n tmp = {}\n tmp['company_name'] = item.find('div', {'itemprop': 'name'}).text\n tmp['time'] = item.find('div', {'class': 'resume-block__experience-timeinterval'}).text.replace(u'\\xa0',\n u' ')\n tmp['position'] = item.find('div', {'data-qa': 'resume-block-experience-position'}).text\n tmp['duties'] = item.find('div', {'data-qa': 'resume-block-experience-description'}).text\n res['experience']['list'].append(tmp)\n\n languages = parsed_resume.find('div', {'data-qa': 'resume-block-languages'})\n if languages != None:\n res['languages'] = []\n for language in languages.findAll('p', {'data-qa': 'resume-block-language-item'}):\n data = language.text.split(' — ')\n res['languages'].append({data[0]: data[1]})\n except:\n res['msg'] = 'Fail. Caught exception'\n\n return res\n\n# Print iterations progress\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', help='Input file names', nargs='+', required=True)\nparser.add_argument('-o', help='Output dir', required=True)\nparser.add_argument('-j', help='Job name', required=True)\nargs = parser.parse_args()\n\nfor input_path in args.i:\n total_urls = 0\n with open(input_path, 'r') as input:\n total_urls = sum(1 for line in input)\n\n with open(input_path, 'r') as input:\n progress_bar_len = 100\n printProgressBar(0, total_urls, prefix='Progress:', suffix='Complete', length=progress_bar_len)\n\n i = 0\n dir_path = '{0}/{1}'.format(args.o, args.j)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n for link in input:\n link = link[:-1]\n id = link.replace('https://hh.ru/resume/', '')\n target_path = '{0}/{1}.json'.format(dir_path, id)\n if not os.path.exists(target_path):\n res = parse_resume(link, args.j)\n with open(target_path, 'w', encoding='utf8') as f:\n json.dump(res, f, ensure_ascii=False, indent=4)\n i += 1\n printProgressBar(i, total_urls, prefix='Progress:', suffix='Complete', 
length=progress_bar_len)\n\n","sub_path":"parse_urls.py","file_name":"parse_urls.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"21505249","text":"\r\nimport os\r\nimport os.path\r\nimport re\r\nimport sys\r\n\r\ndef format_str(str):\r\n str = str.replace(\"\\r\\n\", \"\\n\")\r\n r = re.compile(r\"^\\[[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\\]\", re.MULTILINE)\r\n str = r.sub(\"\", str)\r\n # [2016-07-09T11:58:56] INFO -- : End test..... (2.593000s)\r\n r = re.compile(r\"End .* \\([0-9]\\.[0-9]+s\\)$\", re.MULTILINE)\r\n str = r.sub(\"\", str)\r\n r = re.compile(r\"^ Ran [0-9]+ tests in [0-9]+\\.[0-9]*s\", re.MULTILINE)\r\n str = r.sub(\"\", str)\r\n # 0:[ 4.1s] \r\n r = re.compile(r\"^(\\s*[0-9]+:)\\[\\s*[0-9]+\\.[0-9]*s\\] \", re.MULTILINE)\r\n str = r.sub(r\"\\1\", str)\r\n return str\r\n\r\nos.chdir(os.path.normpath(os.path.join(__file__, os.path.pardir)))\r\n\r\ndef check(expected_result, actual_result):\r\n with open(expected_result) as f:\r\n expected = format_str(f.read()).split(\"\\n\")\r\n expected.sort()\r\n\r\n with open(actual_result) as f:\r\n actual = format_str(f.read()).split(\"\\n\")\r\n actual.sort()\r\n\r\n if actual != expected:\r\n print(\"ERROR\")\r\n exit(1)\r\n\r\nif sys.version_info[0] == 3:\r\n check(\"expected_test_result_py3.txt\", \"test_result.txt\")\r\n check(\"expected_test_result_info_py3.txt\", \"test_result_info.txt\")\r\nelse:\r\n check(\"expected_test_result.txt\", \"test_result.txt\")\r\n check(\"expected_test_result_info.txt\", \"test_result_info.txt\")\r\n","sub_path":"test/check_result.py","file_name":"check_result.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"420800359","text":"import sys\nimport argparse\nimport configparser\n\ndef str2bool(v):\n '''Convert a string to a boolean value'''\n if v == 'True':\n return True\n elif v == 'False':\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\ndef str2FloatList(x):\n\n '''Convert a formated string to a list of float value'''\n if len(x.split(\",\")) == 1:\n return float(x)\n else:\n return [float(elem) for elem in x.split(\",\")]\ndef strToStrList(x):\n if x == \"None\":\n return []\n else:\n return x.split(\",\")\n\ndef str2StrList(x):\n '''Convert a string to a list of string value'''\n return x.split(\" \")\n\nclass ArgReader():\n \"\"\"\n This class build a namespace by reading arguments in both a config file\n and the command line.\n\n If an argument exists in both, the value in the command line overwrites\n the value in the config file\n\n This class mainly comes from :\n https://stackoverflow.com/questions/3609852/which-is-the-best-way-to-allow-configuration-options-be-overridden-at-the-comman\n Consulted the 18/11/2018\n\n \"\"\"\n\n def __init__(self,argv):\n ''' Defines the arguments used in several scripts of this project.\n It reads them from a config file\n and also add the arguments found in command line.\n\n If an argument exists in both, the value in the command line overwrites\n the value in the config file\n '''\n\n # Do argv default this way, as doing it in the functional\n # declaration sets it at compile time.\n if argv is None:\n argv = sys.argv\n\n # Parse any conf_file specification\n # We make this parser with add_help=False so that\n # it doesn't parse -h and print help.\n conf_parser = argparse.ArgumentParser(\n 
description=__doc__, # printed with -h/--help\n # Don't mess with format of description\n formatter_class=argparse.RawDescriptionHelpFormatter,\n # Turn off help, so we print all options in response to -h\n add_help=False\n )\n conf_parser.add_argument(\"-c\", \"--conf_file\",\n help=\"Specify config file\", metavar=\"FILE\")\n args, self.remaining_argv = conf_parser.parse_known_args()\n\n defaults = {}\n\n if args.conf_file:\n config = configparser.SafeConfigParser()\n config.read([args.conf_file])\n defaults.update(dict(config.items(\"default\")))\n\n # Parse rest of arguments\n # Don't suppress add_help here so it will handle -h\n self.parser = argparse.ArgumentParser(\n # Inherit options from config_parser\n parents=[conf_parser]\n )\n self.parser.set_defaults(**defaults)\n\n # Training settings\n #parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n\n self.parser.add_argument('--stop_crit', type=float, metavar='M',\n help='The threshold value under which the training stops')\n self.parser.add_argument('--epochs', type=int, metavar='N',\n help='number of epochs to train')\n\n self.parser.add_argument('--lr', type=str2FloatList,metavar='LR',\n help='learning rate (it can be a schedule : --lr 0.01,0.001,0.0001)')\n self.parser.add_argument('--num_workers', type=int,metavar='NUMWORKERS',\n help='the number of processes to load the data. num_workers equal 0 means that it’s \\\n the main process that will do the data loading when needed, num_workers equal 1 is\\\n the same as any n, but you’ll only have a single worker, so it might be slow')\n self.parser.add_argument('--momentum', type=float, metavar='M',\n help='SGD momentum')\n self.parser.add_argument('--seed', type=int, metavar='S',\n help='Seed used to initialise the random number generator.')\n self.parser.add_argument('--log_interval', type=int, metavar='N',\n help='how many epochs to train before logging training status')\n\n self.parser.add_argument('--ind_id', type=int, metavar='IND_ID',\n help='the id of the individual')\n self.parser.add_argument('--exp_id', type=str, metavar='EXP_ID',\n help='the id of the experiment')\n self.parser.add_argument('--dataset', type=str, metavar='N',help='the dataset to use. Can be \\'NETFLIX\\', \\'IRCCYN\\' or \\'VQEG\\'.')\n\n self.parser.add_argument('--cuda', type=str2bool, metavar='S',\n help='To run computations on the gpu')\n self.parser.add_argument('--optim', type=str, metavar='OPTIM',\n help='the optimizer to use (default: \\'GD\\')')\n\n self.parser.add_argument('--start_mode', type=str,metavar='SM',\n help='The mode to use to initialise the model. Can be \\'base_init\\', \\'iter_init\\' or \\'fine_tune\\'.')\n\n self.parser.add_argument('--true_scores_init', type=str,metavar='SM',\n help='The function name to use to init the true scores when using approximate init. Can only be \\'base\\'')\n self.parser.add_argument('--bias_init', type=str,metavar='SM',\n help='The function name to use to init the biases when using approximate init. Can only be \\'base\\'')\n self.parser.add_argument('--diffs_init', type=str,metavar='SM',\n help='The function name to use to init the difficulties when using approximate init. Can only be \\'base\\'')\n self.parser.add_argument('--incons_init', type=str,metavar='SM',\n help='The function name to use to init the inconsistencies when using approximate init. Can only be \\'base\\', \\'use_diffs\\'.')\n\n self.parser.add_argument('--train_mode', type=str,metavar='TM',\n help='The mode to use to train the model. 
Can be \\'joint\\' or \\'alternate\\'.')\n self.parser.add_argument('--alt_epoch_nb', type=int,metavar='TM',\n help='The number of epoch during which train each parameter. Ignored if using \\'joint\\' training mode.')\n\n self.parser.add_argument('--noise', type=float, metavar='NOISE',\n help='the amount of noise to add in the gradient of the model (as a percentage of the norm)(default: 0.1)')\n\n self.parser.add_argument('--prior', type=str,metavar='S',\\\n help='The prior to use. Can be \\'uniform\\' or \\'oracle\\'.')\n self.parser.add_argument('--prior_weight', type=float,metavar='S',\\\n help='The weight of the prior term in the loss function')\n\n self.parser.add_argument('--param_to_opti', type=strToStrList,metavar='V',\n help=\"The parameters to optimise. Can be a list with elements among 'bias','incons','diffs','trueScores'\")\n self.parser.add_argument('--param_not_gt',type=strToStrList,metavar='V',\n help=\"The parameters to set to ground truth when not starting the training witha pre-trained net \\\n (i.e. choosing option 'init' for --start_mode). Can be a list (possibly empty) with elements among 'bias','incons','diffs','trueScores'\")\n\n self.parser.add_argument('--note', type=str,metavar='NOTE',\n help=\"A note on the model\")\n\n self.parser.add_argument('--score_dis', type=str, metavar='S',\n help='The distribution to use to model the scores')\n\n self.parser.add_argument('--score_min', type=int, metavar='S',\n help='The minimum score that can be given by an annotator')\n self.parser.add_argument('--score_max', type=int, metavar='S',\n help='The maximum score that can be given by an annotator')\n self.parser.add_argument('--div_beta_var', type=float, metavar='S',\n help='The coefficient with which to rescale down the variances (difficulties and inconsistencies) \\\n sampled from the beta distribution')\n\n self.parser.add_argument('--prior_update_frequ', type=int, metavar='S',\n help='The number of epoch to wait before updating the empirical prior. Ignored if other prior is used.')\n\n self.parser.add_argument('--extr_sco_dep', type=str2bool, metavar='S',\n help='Whether or not to add a dependency between the variance and the mean of videos. If true, raw score variance of videos with very high or very low scores\\\n will be lower.')\n\n self.parser.add_argument('--truescores_tanh', type=str2bool, metavar='S',\n help='To pass the true scores through a tanh during optimisation.')\n\n self.parser.add_argument('--bias_tanh', type=str2bool, metavar='S',\n help='To pass the bias through a tanh during optimisation.')\n self.parser.add_argument('--bias_ampl', metavar='STD',type=float,help='The amplitude of the bias gaussian distribution. Ignored if bias are sampled from a normal distribution')\n\n self.parser.add_argument('--bias_dis', metavar='STD',type=str,default='Beta',help='The bias distribution. 
Can be \\'Beta\\' or \\'Normal\\'')\n\n self.args = None\n\n def getRemainingArgs(self):\n ''' Reads the comand line arg'''\n\n self.args = self.parser.parse_args(self.remaining_argv)\n\n def writeConfigFile(self,filePath):\n \"\"\" Writes a config file containing all the arguments and their values\"\"\"\n\n config = configparser.SafeConfigParser()\n config.add_section('default')\n\n for k, v in vars(self.args).items():\n config.set('default', k, str(v))\n\n with open(filePath, 'w') as f:\n config.write(f)\n","sub_path":"code/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":10002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297609976","text":"# -*- coding: utf-8 -*-\n# created on 2018/11/5\n# __author__ = 'XiaoHuang'\n\nimport sympy\nfrom sympy.printing.str import StrPrinter\nfrom latex2check.latex_parser.gen.primaryparserLexer import primaryparserLexer\n\n\ndef convert_relation(rel):\n if rel.expr():\n return convert_expr(rel.expr())\n\n lh = convert_relation(rel.relation(0))\n rh = convert_relation(rel.relation(1))\n return lh, rh\n\n\ndef convert_expr(expr):\n return convert_add(expr.additive())\n\n\ndef convert_add(add):\n if add.ADD():\n lh = convert_add(add.additive(0))\n rh = convert_add(add.additive(1))\n return sympy.Add(lh, rh, evaluate=False)\n elif add.SUB():\n lh = convert_add(add.additive(0))\n rh = convert_add(add.additive(1))\n return sympy.Add(lh, -1 * rh, evaluate=False)\n else:\n return convert_mp(add.mp())\n\n\ndef convert_mp(mp):\n if hasattr(mp, 'mp'):\n mp_left = mp.mp(0)\n mp_right = mp.mp(1)\n else:\n mp_left = mp.mp_nofunc(0)\n mp_right = mp.mp_nofunc(1)\n\n if mp.MUL() or mp.CMD_TIMES() or mp.CMD_CDOT():\n lh = convert_mp(mp_left)\n rh = convert_mp(mp_right)\n return sympy.Mul(lh, rh, evaluate=False)\n elif mp.DIV() or mp.CMD_DIV() or mp.COLON():\n lh = convert_mp(mp_left)\n rh = convert_mp(mp_right)\n return sympy.Mul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)\n else:\n if hasattr(mp, 'unary'):\n return convert_unary(mp.unary())\n else:\n return convert_unary(mp.unary_nofunc())\n\n\ndef convert_unary(unary):\n if hasattr(unary, 'unary'):\n nested_unary = unary.unary()\n else:\n nested_unary = unary.unary_nofunc()\n if hasattr(unary, 'postfix_nofunc'):\n first = unary.postfix()\n tail = unary.postfix_nofunc()\n postfix = [first] + tail\n else:\n postfix = unary.postfix()\n\n if unary.ADD():\n return convert_unary(nested_unary)\n elif unary.SUB():\n return sympy.Mul(-1, convert_unary(nested_unary), evaluate=False)\n elif postfix:\n return convert_postfix_list(postfix)\n\n\ndef convert_postfix_list(arr, i=0):\n if i >= len(arr):\n raise Exception(\"Index out of bounds\")\n\n res = convert_postfix(arr[i])\n if isinstance(res, sympy.Expr):\n if i == len(arr) - 1:\n return res # nothing to multiply by\n else:\n if i > 0:\n left = convert_postfix(arr[i - 1])\n right = convert_postfix(arr[i + 1])\n if isinstance(left, sympy.Expr) and isinstance(right, sympy.Expr):\n left_syms = convert_postfix(arr[i - 1]).atoms(sympy.Symbol)\n right_syms = convert_postfix(arr[i + 1]).atoms(sympy.Symbol)\n # if the left and right sides contain no variables and the\n # symbol in between is 'x', treat as multiplication.\n if len(left_syms) == 0 and len(right_syms) == 0 and str(res) == \"x\":\n return convert_postfix_list(arr, i + 1)\n # multiply by next\n return sympy.Mul(res, convert_postfix_list(arr, i + 1), evaluate=False)\n else: # must be derivative\n wrt = res[0]\n if i == len(arr) - 1:\n raise 
Exception(\"Expected expression for derivative\")\n else:\n expr = convert_postfix_list(arr, i + 1)\n return sympy.Derivative(expr, wrt)\n\n\ndef do_subs(expr, at):\n if at.expr():\n at_expr = convert_expr(at.expr())\n syms = at_expr.atoms(sympy.Symbol)\n if len(syms) == 0:\n return expr\n elif len(syms) > 0:\n sym = next(iter(syms))\n return expr.subs(sym, at_expr)\n elif at.equality():\n lh = convert_expr(at.equality().expr(0))\n rh = convert_expr(at.equality().expr(1))\n return expr.subs(lh, rh)\n\n\ndef convert_postfix(postfix):\n if hasattr(postfix, 'exp'):\n exp_nested = postfix.exp()\n else:\n exp_nested = postfix.exp_nofunc()\n\n exp = convert_exp(exp_nested)\n for op in postfix.postfix_op():\n if op.BANG():\n if isinstance(exp, list):\n raise Exception(\"Cannot apply postfix to derivative\")\n exp = sympy.factorial(exp, evaluate=False)\n elif op.eval_at():\n ev = op.eval_at()\n at_b = None\n at_a = None\n if ev.eval_at_sup():\n at_b = do_subs(exp, ev.eval_at_sup())\n if ev.eval_at_sub():\n at_a = do_subs(exp, ev.eval_at_sub())\n if at_b != None and at_a != None:\n exp = sympy.Add(at_b, -1 * at_a, evaluate=False)\n elif at_b != None:\n exp = at_b\n elif at_a != None:\n exp = at_a\n\n return exp\n\n\ndef convert_exp(exp):\n if hasattr(exp, 'exp'):\n exp_nested = exp.exp()\n else:\n exp_nested = exp.exp_nofunc()\n\n if exp_nested:\n base = convert_exp(exp_nested)\n if isinstance(base, list):\n raise Exception(\"Cannot raise derivative to power\")\n if exp.atom():\n exponent = convert_atom(exp.atom())\n elif exp.expr():\n exponent = convert_expr(exp.expr())\n return sympy.Pow(base, exponent, evaluate=False)\n else:\n if hasattr(exp, 'comp'):\n return convert_comp(exp.comp())\n else:\n return convert_comp(exp.comp_nofunc())\n\n\ndef convert_comp(comp):\n if comp.group():\n return convert_expr(comp.group().expr())\n elif comp.abs_group():\n return sympy.Abs(convert_expr(comp.abs_group().expr()), evaluate=False)\n elif comp.atom():\n return convert_atom(comp.atom())\n elif comp.frac():\n return convert_frac(comp.frac())\n elif comp.func():\n return convert_func(comp.func())\n\n\ndef convert_atom(atom):\n if atom.LETTER():\n subscriptName = ''\n if atom.subexpr():\n subscript = None\n if atom.subexpr().expr(): # subscript is expr\n subscript = convert_expr(atom.subexpr().expr())\n else: # subscript is atom\n subscript = convert_atom(atom.subexpr().atom())\n subscriptName = '_{' + StrPrinter().doprint(subscript) + '}'\n return sympy.Symbol(atom.LETTER().getText() + subscriptName)\n elif atom.SYMBOL():\n s = atom.SYMBOL().getText()[1:]\n if s == \"infty\":\n return sympy.oo\n else:\n if atom.subexpr():\n subscript = None\n if atom.subexpr().expr(): # subscript is expr\n subscript = convert_expr(atom.subexpr().expr())\n else: # subscript is atom\n subscript = convert_atom(atom.subexpr().atom())\n subscriptName = StrPrinter().doprint(subscript)\n s += '_{' + subscriptName + '}'\n return sympy.Symbol(s)\n elif atom.NUMBER():\n s = atom.NUMBER().getText().replace(\",\", \"\")\n return sympy.Number(s)\n elif atom.DIFFERENTIAL():\n var = get_differential_var(atom.DIFFERENTIAL())\n return sympy.Symbol('d' + var.name)\n elif atom.mathit():\n text = rule2text(atom.mathit().mathit_text())\n return sympy.Symbol(text)\n elif atom.Percent():\n return sympy.Mul(1, sympy.Pow(100, -1, evaluate=False), evaluate=False)\n\n\ndef rule2text(ctx):\n stream = ctx.start.getInputStream()\n # starting index of starting token\n startIdx = ctx.start.start\n # stopping index of stopping token\n stopIdx = 
ctx.stop.stop\n\n return stream.getText(startIdx, stopIdx)\n\n\ndef convert_frac(frac):\n diff_op = False\n partial_op = False\n lower_itv = frac.lower.getSourceInterval()\n lower_itv_len = lower_itv[1] - lower_itv[0] + 1\n if (frac.lower.start == frac.lower.stop and frac.lower.start.type == primaryparserLexer.DIFFERENTIAL):\n wrt = get_differential_var_str(frac.lower.start.text)\n diff_op = True\n elif (lower_itv_len == 2 and frac.lower.start.type == primaryparserLexer.SYMBOL and frac.lower.start.text == '\\\\partial' and (frac.lower.stop.type == primaryparserLexer.LETTER or frac.lower.stop.type == primaryparserLexer.SYMBOL)):\n partial_op = True\n wrt = frac.lower.stop.text\n if frac.lower.stop.type == primaryparserLexer.SYMBOL:\n wrt = wrt[1:]\n\n expr_top = convert_expr(frac.upper)\n expr_bot = convert_expr(frac.lower)\n if frac.NUMBER() != None:\n number = sympy.sympify(frac.NUMBER().getText())\n _frac = sympy.Mul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)\n return sympy.Add(number, _frac, evaluate=False)\n else:\n return sympy.Mul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)\n\n\n\ndef convert_func(func):\n if func.func_normal():\n if func.L_PAREN(): # function called with parenthesis\n arg = convert_func_arg(func.func_arg())\n else:\n arg = convert_func_arg(func.func_arg_noparens())\n\n name = func.func_normal().start.text[1:]\n\n # change arc -> a\n if name in [\"arcsin\", \"arccos\", \"arctan\", \"arccsc\", \"arcsec\",\n \"arccot\"]:\n name = \"a\" + name[3:]\n expr = getattr(sympy.functions, name)(arg, evaluate=False)\n if name in [\"arsinh\", \"arcosh\", \"artanh\"]:\n name = \"a\" + name[2:]\n expr = getattr(sympy.functions, name)(arg, evaluate=False)\n\n if (name == \"log\" or name == \"ln\"):\n if func.subexpr():\n base = convert_expr(func.subexpr().expr())\n elif name == \"log\":\n base = 10\n elif name == \"ln\":\n base = sympy.E\n expr = sympy.log(arg, base, evaluate=False)\n\n func_pow = None\n should_pow = True\n if func.supexpr():\n if func.supexpr().expr():\n func_pow = convert_expr(func.supexpr().expr())\n else:\n func_pow = convert_atom(func.supexpr().atom())\n\n if name in [\"sin\", \"cos\", \"tan\", \"csc\", \"sec\", \"cot\", \"sinh\", \"cosh\", \"tanh\"]:\n if func_pow == -1:\n name = \"a\" + name\n should_pow = False\n expr = getattr(sympy.functions, name)(arg, evaluate=False)\n\n if func_pow and should_pow:\n expr = sympy.Pow(expr, func_pow, evaluate=False)\n\n return expr\n elif func.LETTER() or func.SYMBOL():\n if func.LETTER():\n fname = func.LETTER().getText()\n elif func.SYMBOL():\n fname = func.SYMBOL().getText()[1:]\n fname = str(fname) # can't be unicode\n if func.subexpr():\n subscript = None\n if func.subexpr().expr(): # subscript is expr\n subscript = convert_expr(func.subexpr().expr())\n else: # subscript is atom\n subscript = convert_atom(func.subexpr().atom())\n subscriptName = StrPrinter().doprint(subscript)\n fname += '_{' + subscriptName + '}'\n input_args = func.args()\n output_args = []\n while input_args.args(): # handle multiple arguments to function\n output_args.append(convert_expr(input_args.expr()))\n input_args = input_args.args()\n output_args.append(convert_expr(input_args.expr()))\n return sympy.Function(fname)(*output_args)\n elif func.FUNC_INT():\n return handle_integral(func)\n elif func.FUNC_SQRT():\n expr = convert_expr(func.base)\n if func.root:\n r = convert_expr(func.root)\n return sympy.root(expr, r)\n else:\n return sympy.sqrt(expr)\n elif func.FUNC_SUM():\n return 
handle_sum_or_prod(func, \"summation\")\n elif func.FUNC_PROD():\n return handle_sum_or_prod(func, \"product\")\n elif func.FUNC_LIM():\n return handle_limit(func)\n\n\ndef convert_func_arg(arg):\n if hasattr(arg, 'expr'):\n return convert_expr(arg.expr())\n else:\n return convert_mp(arg.mp_nofunc())\n\n\ndef handle_integral(func):\n if func.additive():\n integrand = convert_add(func.additive())\n elif func.frac():\n integrand = convert_frac(func.frac())\n else:\n integrand = 1\n\n int_var = None\n if func.DIFFERENTIAL():\n int_var = get_differential_var(func.DIFFERENTIAL())\n else:\n for sym in integrand.atoms(sympy.Symbol):\n s = str(sym)\n if len(s) > 1 and s[0] == 'd':\n if s[1] == '\\\\':\n int_var = sympy.Symbol(s[2:])\n else:\n int_var = sympy.Symbol(s[1:])\n int_sym = sym\n if int_var:\n integrand = integrand.subs(int_sym, 1)\n else:\n # Assume dx by default\n int_var = sympy.Symbol('x')\n\n if func.subexpr():\n if func.subexpr().atom():\n lower = convert_atom(func.subexpr().atom())\n else:\n lower = convert_expr(func.subexpr().expr())\n if func.supexpr().atom():\n upper = convert_atom(func.supexpr().atom())\n else:\n upper = convert_expr(func.supexpr().expr())\n return sympy.Integral(integrand, (int_var, lower, upper))\n else:\n return sympy.Integral(integrand, int_var)\n\n\ndef handle_sum_or_prod(func, name):\n val = convert_mp(func.mp())\n iter_var = convert_expr(func.subeq().equality().expr(0))\n start = convert_expr(func.subeq().equality().expr(1))\n if func.supexpr().expr(): # ^{expr}\n end = convert_expr(func.supexpr().expr())\n else: # ^atom\n end = convert_atom(func.supexpr().atom())\n\n if name == \"summation\":\n return sympy.Sum(val, (iter_var, start, end))\n elif name == \"product\":\n return sympy.Product(val, (iter_var, start, end))\n\n\ndef handle_limit(func):\n sub = func.limit_sub()\n if sub.LETTER():\n var = sympy.Symbol(sub.LETTER().getText())\n elif sub.SYMBOL():\n var = sympy.Symbol(sub.SYMBOL().getText()[1:])\n else:\n var = sympy.Symbol('x')\n if sub.SUB():\n direction = \"-\"\n else:\n direction = \"+\"\n approaching = convert_expr(sub.expr())\n content = convert_mp(func.mp())\n\n return sympy.Limit(content, var, approaching, direction)\n\n\ndef get_differential_var(d):\n text = get_differential_var_str(d.getText())\n return sympy.Symbol(text)\n\n\ndef get_differential_var_str(text):\n for i in range(1, len(text)):\n c = text[i]\n if not (c == \" \" or c == \"\\r\" or c == \"\\n\" or c == \"\\t\"):\n idx = i\n break\n text = text[idx:]\n if text[0] == \"\\\\\":\n text = text[1:]\n return text\n\n\ndef _long_num_mul_unidigit_1(left, right, rh):\n for i in range(1, len(str(left))):\n _left = round(left, -i)\n if sympy.Eq(_left * right, int(rh)):\n return True\n return False\n\n\ndef _long_num_mul_unidigit(left, right, rh):\n if right == 9:\n if sympy.Eq(left * 10, int(rh)):\n return True\n elif _long_num_mul_unidigit_1(left, 10, rh):\n return True\n else:\n return _long_num_mul_unidigit_1(left, 9, rh)\n else:\n return _long_num_mul_unidigit_1(left, right, rh)\n\n\n\n\n\n","sub_path":"g4/latex2check-v6/latex_parser/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":15229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"494003992","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 15 05:27:04 2019\n\n@author: Phuong\n\"\"\"\n\nimport numpy as np\nimport re\nimport csv\nfrom nltk.stem import PorterStemmer\n\n\ndef getVocab():\n \"\"\"Return a dictionary of vocab.\"\"\"\n with 
open('../input/vocab.csv') as f:\n vocabList = csv.reader(f)\n return list(vocabList)[0]\n\n\ndef removeHeader(email: str):\n index = email.find(\"\\n\\n\")\n if (index == -1):\n index = 0\n email = email[index:]\n return email\n\n\ndef processEmail(email, indices=True):\n \"\"\"Tokenize and Porter Stem emails.\"\"\"\n email = removeHeader(email)\n words = []\n\n email = email.lower()\n # Remove HTML elements.\n email = re.sub(r'<[^<>]+>', ' ', email)\n # Change digits to \"number\"\n email = re.sub(r'[0-9]+', 'number', email)\n # Change web address to \"httpaddr\"\n email = re.sub(r'(http+s*)://[^\\s]*', 'httpaddr', email)\n # Change email address to \"emailaddr\"\n email = re.sub(r'[^\\s]+@[^\\s]+', 'emailaddr', email)\n # Change currency to \"dollar\"\n email = re.sub(r'[$]+', 'dollar', email)\n # Change letters that are repeated multiple times to one occurence.\n email = re.sub(r'([a-zA-Z])\\1{3,}', r'\\1', email)\n\n # ========================== Tokenize Email ===========================\n tokens = re.sub(r'[@$/#.-:&*+=\\[\\]?!(){},\\'\\'\\\">_<;%]+\\s*|\\n+|\\s+', ' ',\n email).strip().split()\n\n if indices:\n vocabList = getVocab()\n\n for word in tokens:\n word = re.sub('[^a-zA-Z0-9]', '', word)\n try:\n word = PorterStemmer().stem(word)\n except (word == ''):\n continue\n if len(word) < 1:\n continue\n if indices:\n try:\n words.append(vocabList.index(word))\n except ValueError:\n pass\n continue\n words.append(word)\n return words\n\n\ndef indiceWords(words: list, vocabList: list) -> int:\n indices_lst = []\n for word in words:\n try:\n indices_lst.append(vocabList.index(word))\n except ValueError:\n pass\n return indices_lst\n\n\ndef featurizeEmail(indices):\n new_indices = []\n if (type(indices) == str):\n new_indices = processEmail(indices, indices=True)\n\n elif (type(indices) == list and type(indices[0]) == str):\n new_indices = indiceWords(indices, getVocab())\n\n elif (type(indices) == list and type(indices[0]) == int):\n new_indices = indices\n\n n = 10_000\n x = np.zeros((1, n))\n x[0, new_indices] = 1\n return x\n","sub_path":"model/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"371419056","text":"from django.forms import ModelForm, ValidationError, DateInput, ModelChoiceField, HiddenInput, NumberInput, DateField\n\n# Models\nfrom .models import Payment\nfrom personal_details.models import User\n\n# Crispy Forms\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Submit, HTML, Row, Field, Column\nfrom crispy_forms.bootstrap import AppendedText, PrependedText\n\n# Utils\nimport datetime\nimport calendar\n\n\nclass PaymentForm(ModelForm):\n user = ModelChoiceField(label='Employee', queryset=User.objects.filter(\n is_staff=False, is_active=True))\n date_joined = DateField(\n disabled=True, required=False, widget=HiddenInput())\n\n class Meta:\n model = Payment\n fields = '__all__'\n widgets = {\n 'period_start': DateInput(attrs={'readonly': True}),\n 'period_end': DateInput(attrs={'readonly': True}),\n 'unused_leave_days': HiddenInput(),\n 'unused_leave_pay': HiddenInput(),\n 'is_last': HiddenInput(),\n }\n\n def __init__(self, *args, **kwargs):\n super(PaymentForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n HTML('''\n
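A hypothetical end-to-end use of the email-processing helpers above (processor.py). It assumes '../input/vocab.csv' exists as getVocab expects and that the 10_000-wide feature vector matches the vocabulary size; neither is verified here.
if __name__ == '__main__':
    sample = ("From: a@b.com\n\n"
              "Visit http://example.com and win $1000 now!!!")
    x = featurizeEmail(sample)   # one-hot bag-of-words row, shape (1, 10000)
    print(x.shape, int(x.sum()))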
\n            \n            \n            '''),\n            Field('user', css_class='form-group col-md-12'),\n            Row(\n                Column('date_joined', css_class='form-group col-md-6'),\n                Column(AppendedText('unused_leave_days', 'Days', readonly=True),\n                       css_class='col-md-6'),\n                css_class='form-row'\n            ),\n            Row(\n                Column('period_start', css_class='form-group col-md-6'),\n                Column('period_end', css_class='form-group col-md-6'),\n                css_class='form-row'\n            ),\n            Row(\n                Column('pay_date', css_class='form-group col-md-6'),\n                Column('method', css_class='form-group col-md-6'),\n                css_class='form-row'\n            ),\n            Row(\n                Column('status', css_class='form-group col-md-6'),\n                Column(PrependedText(\n                    'net_pay', '$', css_class=\"font-weight-bold two-decimal\", readonly=True), css_class='form-group col-md-6'),\n                css_class='form-row'\n            ),\n            HTML('''\n            \n            \n            \n            \n            \n            \n            '''),\n            PrependedText('basic_salary', '$',\n                          css_class=\"payment two-decimal\", readonly=True),\n            PrependedText('unused_leave_pay', '$',\n                          css_class=\"payment two-decimal\"),\n            PrependedText('allowance', '$', css_class=\"payment two-decimal\"),\n            PrependedText('other_payments', '$',\n                          css_class=\"payment two-decimal\"),\n            PrependedText('total_payments', '$',\n                          css_class=\"two-decimal\", readonly=True),\n            HTML('''\n            \n            \n            \n             \n\n            '''),\n            PrependedText('mpf_employee', '$',\n                          css_class=\"deduction two-decimal\", readonly=True),\n            PrependedText('np_leave', '$', css_class=\"deduction two-decimal\"),\n            PrependedText('other_deductions', '$',\n                          css_class=\"deduction two-decimal\"),\n            PrependedText('total_deductions', '$',\n                          css_class=\"two-decimal\", readonly=True),\n            HTML('''\n            \n            \n            \n            '''),\n            PrependedText('mpf_employer', '$',\n                          css_class=\"two-decimal\", readonly=True),\n            HTML('''\n            \n            \n            \n            
\n '''),\n Field('is_last'),\n Submit('submit', 'Save', css_class=\"btn-outline-primary\"),\n HTML(\n 'Back'),\n )\n\n # Init field data\n date = datetime.date.today() # datetime.date(2018, 12, 18)\n period_start = date.replace(day=1)\n period_end = date.replace(day=calendar.monthrange(\n period_start.year, period_start.month)[1])\n self.fields['period_start'].initial = period_start\n self.fields['period_end'].initial = period_end\n self.fields['pay_date'].initial = date\n\n self.fields['status'].disabled = True\n\n\nclass PaymentCreateForm(PaymentForm):\n def clean(self):\n data = super().clean()\n # Check for overlapping payment\n payment = Payment.objects.filter(user=data['user'],\n period_start__lte=data['period_end'],\n period_end__gte=data['period_start']\n ).exclude(status=\"CC\").first()\n if (payment):\n raise ValidationError(\n f'Payment overlapping with {payment}')\n return data\n\n\nclass PaymentUpdateForm(PaymentForm):\n def __init__(self, *args, **kwargs):\n super(PaymentUpdateForm, self).__init__(*args, **kwargs)\n self.helper.layout.pop(-2)\n # self.helper.layout.insert(-1, HTML(\n # 'Export PDF '))\n self.helper.layout.insert(-1, Submit('cancel',\n 'Cancel', css_class='btn-outline-danger'))\n for name, field in self.fields.items():\n field.disabled = True\n\n\nclass PaymentDetailForm(PaymentUpdateForm):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop(\"user\")\n super(PaymentDetailForm, self).__init__(*args, **kwargs)\n if not self.user.is_superuser:\n self.fields['user'].queryset = User.objects.filter(id=self.user.id)\n self.fields['user'].initial = self.user.id\n self.fields['user'].disabled = True\n\n self.helper.layout.pop(-2)\n\n\nclass LastPaymentForm(PaymentForm):\n def __init__(self, *args, **kwargs):\n super(LastPaymentForm, self).__init__(*args, **kwargs)\n self.fields['period_end'].widget = DateInput(\n attrs={'readonly': False})\n self.fields['unused_leave_days'].widget = NumberInput(\n attrs={'class': 'one-decimal'})\n self.fields['unused_leave_pay'].widget = NumberInput(\n attrs={'class': 'two-decimal'})\n self.fields['date_joined'].widget = DateInput()\n\n self.fields['is_last'].initial = True\n\n\nclass LastPaymentCreateForm(LastPaymentForm):\n def clean(self):\n data = super().clean()\n # Check for overlapping payment\n payment = Payment.objects.filter(user=data['user'],\n period_start__lte=data['period_end'],\n period_end__gte=data['period_start'])\n if (payment):\n raise ValidationError(\n 'Payment overlapping with [%(payment)s]', params={'payment': payment[0]})\n return data\n\n\nclass LastPaymentUpdateForm(LastPaymentForm):\n def __init__(self, *args, **kwargs):\n super(LastPaymentUpdateForm, self).__init__(*args, **kwargs)\n self.helper.layout.pop(-2) # Save btn\n # self.helper.layout.insert(-1, HTML(\n # 'Export PDF '))\n self.helper.layout.insert(-1, Submit('cancel',\n 'Cancel', css_class='btn-outline-danger'))\n for name, field in self.fields.items():\n field.disabled = True\n\n\nclass LastPaymentDetailForm(LastPaymentUpdateForm):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop(\"user\")\n super(LastPaymentDetailForm, self).__init__(*args, **kwargs)\n if not self.user.is_superuser:\n self.fields['user'].queryset = User.objects.filter(id=self.user.id)\n self.fields['user'].initial = self.user.id\n self.fields['user'].disabled = True\n\n 
self.helper.layout.pop(-2)\n","sub_path":"payroll/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324919972","text":"import google.protobuf.wrappers_pb2 as wrappers_pb2\nfrom dialog_api import messaging_pb2, media_and_files_pb2\nimport mimetypes\n\n\ndef get_str_val(s):\n \"\"\"Return obj google.protobuf.StringValue\n\n :param s: string\n :return: StringValue\n \"\"\"\n return wrappers_pb2.StringValue(value=s)\n\n\ndef get_webpage(url, title=None, description=None, image_location=None):\n \"\"\"Return MessageMedia with WebpageMedia for messaging.send_media\n\n :param url: url (str)\n :param title: title (str)\n :param description: description (str)\n :param image_location: image (ImageLocation)\n :return: MessageMedia obj\n \"\"\"\n return messaging_pb2.MessageMedia(\n webpage=messaging_pb2.WebpageMedia(\n url=get_str_val(url),\n title=get_str_val(title),\n description=get_str_val(description),\n image=image_location\n )\n )\n\n\ndef get_image_location(bot, file, width=100, height=100):\n \"\"\"Return obj ImageLocation\n\n :param bot: DialogBot\n :param file: image's file\n :param width: image's width\n :param height: image's height\n :return: ImageLocation\n \"\"\"\n location = bot.internal.uploading.upload_file(file)\n return media_and_files_pb2.ImageLocation(file_location=location, width=width, height=height)\n\n\ndef get_image(bot, file, width=100, height=100):\n \"\"\"Return MessageMedia with ImageMedia for messaging.send_media\n\n :param bot: DialogBot\n :param file: image's file\n :param width: subj\n :param height: subj\n :return: MessageMedia obj\n \"\"\"\n image_location = get_image_location(bot, file, width, height)\n return messaging_pb2.MessageMedia(image=messaging_pb2.ImageMedia(image=image_location))\n\n\ndef get_audio(bot, file, duration=0):\n \"\"\"Return MessageMedia with AudioMedia for messaging.send_media\n\n :param bot: DialogBot\n :param file: audio's file\n :param duration: duration audio\n :return: MessageMedia obj\n \"\"\"\n mime_type = mimetypes.guess_type(file)[0]\n file_location = bot.internal.uploading.upload_file(file)\n return messaging_pb2.MessageMedia(\n audio=messaging_pb2.AudioMedia(\n audio=media_and_files_pb2.AudioLocation(\n file_location=file_location, mime_type=mime_type, duration=duration))\n )\n","sub_path":"dialog_bot_sdk/utils/get_media.py","file_name":"get_media.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"187373242","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\nFunction:\nThis class is used to om fusion parser.\nCopyright Information:\nHuawei Technologies Co., Ltd. 
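A standalone sketch of the mime-type lookup that get_audio in get_media.py relies on; the file names here are made up for illustration and only the standard library is needed.
import mimetypes

for fname in ('voice.mp3', 'note.ogg', 'clip.wav'):
    mime, _ = mimetypes.guess_type(fname)
    print(fname, '->', mime)   # e.g. voice.mp3 -> audio/mpeg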
All Rights Reserved © 2021\n\"\"\"\nimport itertools\nimport json\nimport numpy as np\n\nfrom common.dump_data import DumpData\nfrom common import utils\nfrom common.utils import AccuracyCompareException\n\nGRAPH_OBJECT = \"graph\"\nOP_OBJECT = \"op\"\nNAME_OBJECT = \"name\"\nTYPE_OBJECT = \"type\"\nINPUT_DESC_OBJECT = \"input_desc\"\nATTR_OBJECT = \"attr\"\nSHAPE_OBJECT = \"shape\"\nSHAPE_RANGE_OBJECT = \"shape_range\"\nDIM_OBJECT = \"dim\"\nDATA_OBJECT = \"Data\"\nNET_OUTPUT_OBJECT = \"NetOutput\"\nATC_CMDLINE_OBJECT = \"atc_cmdline\"\nINPUT_SHAPE_RANGE = \"--input_shape_range\"\nLIST_LIST_INT_OBJECT = 'list_list_int'\nLIST_LIST_I_OBJECT = 'list_list_i'\nLIST_I_OBJECT = 'list_i'\nKEY_OBJECT = \"key\"\nVALUE_OBJECT = \"value\"\nSUBGRAPH_NAME = 'subgraph_name'\nS_OBJECT = \"s\"\nDTYPE_OBJECT = \"dtype\"\nDTYPE_MAP = {\"DT_FLOAT\": np.float32, \"DT_FLOAT16\": np.float16, \"DT_DOUBLE\": np.float64, \"DT_INT8\": np.int8,\n \"DT_INT16\": np.int16, \"DT_INT32\": np.int32, \"DT_INT64\": np.int64, \"DT_UINT8\": np.uint8,\n \"DT_UINT16\": np.uint16, \"DT_UINT32\": np.uint32, \"DT_UINT64\": np.uint64, \"DT_BOOL\": np.bool}\n\n\nclass OmParser:\n \"\"\"\n This class is used to parse om model.\n \"\"\"\n\n def __init__(self, output_json_path):\n self.json_object = self._load_json_file(output_json_path)\n self.subgraph_name = self._get_sub_graph_name()\n self.shape_range = self._is_input_shape_range()\n self.contain_negative_1 = False\n\n def _get_sub_graph_name(self):\n subgraph_name = []\n for graph in self.json_object.get(GRAPH_OBJECT):\n for operator in graph.get(OP_OBJECT):\n if SUBGRAPH_NAME in operator:\n subgraph_name += operator.get(SUBGRAPH_NAME)\n return subgraph_name\n\n def get_shape_size(self):\n \"\"\"\n Get shape size for input\n \"\"\"\n input_desc_array = self._get_data_input_desc()\n # extracts the input shape value\n return self._process_inputs(input_desc_array)\n\n @staticmethod\n def _load_json_file(json_file_path):\n \"\"\"\n Function Description:\n load json file\n Parameter:\n json_file_path: json file path\n Return Value:\n json object\n Exception Description:\n when invalid json file path throw exception\n \"\"\"\n try:\n with open(json_file_path, \"r\") as input_file:\n try:\n return json.load(input_file)\n except Exception as load_input_file_except:\n print(str(load_input_file_except))\n raise AccuracyCompareException(utils.ACCURACY_COMPARISON_PARSER_JSON_FILE_ERROR)\n except IOError as input_file_open_except:\n utils.print_error_log('Failed to open\"' + json_file_path + '\", ' + str(input_file_open_except))\n raise AccuracyCompareException(utils.ACCURACY_COMPARISON_OPEN_FILE_ERROR)\n\n def _get_data_input_desc(self):\n input_desc_list = []\n for graph in self.json_object.get(GRAPH_OBJECT):\n if graph.get(NAME_OBJECT) in self.subgraph_name:\n continue\n for operator in graph.get(OP_OBJECT):\n if DATA_OBJECT == operator.get(TYPE_OBJECT):\n if len(operator.get(INPUT_DESC_OBJECT)) != 0:\n for item in operator.get(INPUT_DESC_OBJECT):\n input_desc_list.append(item)\n return input_desc_list\n\n def get_net_output_count(self):\n \"\"\"\n Get net output count\n \"\"\"\n count = 0\n for graph in self.json_object.get(GRAPH_OBJECT):\n if graph.get(NAME_OBJECT) in self.subgraph_name:\n continue\n for operator in graph.get(OP_OBJECT):\n if NET_OUTPUT_OBJECT == operator.get(TYPE_OBJECT) and INPUT_DESC_OBJECT in operator:\n count += len(operator.get(INPUT_DESC_OBJECT))\n return count\n\n def _is_input_shape_range(self):\n if ATTR_OBJECT not in self.json_object:\n return False\n for attr 
in self.json_object.get(ATTR_OBJECT):\n if KEY_OBJECT in attr and attr.get(KEY_OBJECT) == ATC_CMDLINE_OBJECT:\n if VALUE_OBJECT in attr and S_OBJECT in attr.get(VALUE_OBJECT):\n if INPUT_SHAPE_RANGE in attr.get(VALUE_OBJECT).get(S_OBJECT):\n return True\n return False\n\n def _get_range_shape_size_list(self, input_object):\n range_shape_size_list = []\n if ATTR_OBJECT not in input_object:\n return\n shape_list = []\n for attr in input_object.get(ATTR_OBJECT):\n if KEY_OBJECT in attr and attr.get(KEY_OBJECT) == SHAPE_RANGE_OBJECT:\n if VALUE_OBJECT in attr and attr.get(VALUE_OBJECT) and LIST_LIST_INT_OBJECT in attr.get(VALUE_OBJECT):\n list_list_int_object = attr.get(VALUE_OBJECT).get(LIST_LIST_INT_OBJECT)\n if LIST_LIST_I_OBJECT in list_list_int_object:\n for list_list_i in list_list_int_object.get(LIST_LIST_I_OBJECT):\n if LIST_I_OBJECT in list_list_i:\n list_i = list_list_i.get(LIST_I_OBJECT)\n if -1 in list_i:\n self.contain_negative_1 = True\n return []\n if len(list_i) != 2:\n continue\n shape_list.append(list(range(list_i[0], list_i[1] + 1)))\n shape_list_all = list(itertools.product(*shape_list))\n for item in shape_list_all:\n item_sum = 1\n for num in item:\n item_sum *= num\n range_shape_size_list.append(item_sum)\n return range_shape_size_list\n\n def _process_inputs(self, input_desc_array):\n value = []\n for input_object in input_desc_array:\n if SHAPE_OBJECT not in input_object:\n value.append(0)\n continue\n data_type = DTYPE_MAP.get(input_object.get(DTYPE_OBJECT))\n if not data_type:\n utils.print_error_log(\n \"The dtype attribute does not support {} value.\".format(input_object[DTYPE_OBJECT]))\n raise AccuracyCompareException(utils.ACCURACY_COMPARISON_INVALID_KEY_ERROR)\n data_type_size = np.dtype(data_type).itemsize\n if self.shape_range:\n range_shape_size_list = self._get_range_shape_size_list(input_object)\n for item in range_shape_size_list:\n value.append(item * data_type_size)\n else:\n item_sum = 1\n for num in input_object.get(SHAPE_OBJECT).get(DIM_OBJECT):\n item_sum *= num\n value.append(item_sum * data_type_size)\n return value\n","sub_path":"msquickcmp/npu/om_parser.py","file_name":"om_parser.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"590126372","text":"def _hide_all(app):\n for x in app.buttons_dict:\n exec('app.%s.hide()' % x)\n\n app.wpnbox.hide()\n app.armorbox.hide()\n\n app.mapbox.hide()\n\n app.markettab.hide()\n app.marketbox.hide()\n\n\ndef main_mode(app):\n _hide_all(app)\n\n app.savebtn.show()\n app.invbtn.show()\n app.fndbtn.show()\n app.srbtn.show()\n app.mapbtn.show()\n\n\ndef fight_mode(app):\n _hide_all(app)\n\n app.escbtn.show()\n app.atkbtn.show()\n\n\ndef inv_mode(app):\n _hide_all(app)\n\n app.cngarmorbtn.show()\n app.armorbox.show()\n app.extinvbtn.show()\n app.wpnbox.show()\n app.cngwpnbtn.show()\n\n\ndef dead_mode(app):\n _hide_all(app)\n\n app.startbtn.show()\n app.loadbtn.show()\n\n\ndef map_mode(app):\n _hide_all(app)\n\n app.mapbox.show()\n app.extmapbtn.show()\n app.cnglocbtn.show()\n app.marketbtn.show()\n\n\ndef market_mode(app):\n _hide_all(app)\n\n app.markettab.show()\n app.buybtn.show()\n app.extmarket.show()\n app.marketbox.show()\n app.sellbtn.show()\n","sub_path":"mode_select.py","file_name":"mode_select.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119717752","text":"import os\nfrom os.path import exists\nimport 
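A minimal sketch of the size computation in _process_inputs above: the bytes needed for one static-shape input are product(shape) * dtype itemsize. The shape and dtype below are arbitrary examples, assuming numpy.
import numpy as np

shape = [1, 3, 224, 224]    # example static shape
dtype = np.float16          # e.g. mapped from "DT_FLOAT16" via DTYPE_MAP
size = 1
for dim in shape:
    size *= dim
print(size * np.dtype(dtype).itemsize)   # 301056 bytes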
pandas as pd\nfrom . import basic\nfrom .amino_acids import amino_acids\nfrom .tcr_distances_blosum import blosum\nfrom . import translation\n\ncdrs_sep = ';'\ngap_character = '.'\n\nall_genes = {}\n\nclass TCR_Gene:\n def __init__( self, l ):\n # l comes from pandas dataframe.itertuples\n self.id = l.id\n self.organism = l.organism\n self.chain = l.chain\n self.region = l.region\n self.nucseq = l.nucseq\n self.alseq = l.aligned_protseq\n if pd.isna(l.cdrs):\n self.cdrs = []\n self.cdr_columns = []\n else:\n self.cdrs = l.cdrs.split(cdrs_sep)\n ## these are still 1-indexed !!!!!!!!!!!!!!\n self.cdr_columns = [ list(map( int,x.split('-'))) for x in l.cdr_columns.split(cdrs_sep) ]\n frame = l.frame\n #assert frame in ['+1','+2','+3','1','2','3']\n assert frame in [1,2,3] # now parsed by pandas, so string converted to int\n #self.nucseq_offset = int( frame[-1] )-1 ## 0, 1 or 2 (0-indexed for python)\n self.nucseq_offset = frame-1 ## 0, 1 or 2 (0-indexed for python)\n self.protseq = translation.get_translation( self.nucseq, f'+{frame}' )\n assert self.protseq == self.alseq.replace(gap_character,'')\n # sanity check\n if self.cdrs:\n assert self.cdrs == [ self.alseq[ x[0]-1 : x[1] ] for x in self.cdr_columns ]\n\ndef trim_allele_to_gene( id ):\n return id[: id.index('*') ] #will fail if id doesn't contain '*'\n\ndb_file = os.path.dirname(os.path.realpath(__file__))+'/db/'+basic.db_file\nassert exists(db_file)\n\ndf = pd.read_csv(db_file, sep='\\t')\n\nfor l in df.itertuples():\n g = TCR_Gene( l )\n if g.organism not in all_genes:\n all_genes[g.organism] = {} # map from id to TCR_Gene objects\n all_genes[g.organism][g.id] = g\n\n\nverbose = ( __name__ == '__main__' )\n\nfor organism,genes in all_genes.items():\n\n for ab in 'AB':\n org_merged_loopseqs = {}\n for id,g in genes.items():\n if g.chain == ab and g.region == 'V':\n loopseqs = g.cdrs[:-1] ## exclude CDR3 Nterm\n org_merged_loopseqs[id] = ' '.join( loopseqs )\n\n all_loopseq_nbrs = {}\n all_loopseq_nbrs_mm1 = {}\n for id1,seq1 in org_merged_loopseqs.items():\n g1 = genes[id1]\n cpos = g1.cdr_columns[-1][0] - 1 #0-indexed\n alseq1 = g1.alseq\n minlen = cpos+1\n assert len(alseq1) >= minlen\n if alseq1[cpos] != 'C' and verbose:\n print('funny cpos:',id1,alseq1,g1.cdrs[-1])\n\n all_loopseq_nbrs[id1] = []\n all_loopseq_nbrs_mm1[id1] = []\n for id2,seq2 in org_merged_loopseqs.items():\n g2 = genes[id2]\n alseq2 = g2.alseq\n assert len(alseq2) >= minlen\n assert len(seq1) == len(seq2)\n if seq1 == seq2:\n all_loopseq_nbrs[id1].append( id2 )\n all_loopseq_nbrs_mm1[id1].append( id2 )\n continue\n\n ## count mismatches between these two, maybe count as an \"_mm1\" nbr\n loop_mismatches = 0\n loop_mismatches_cdrx = 0\n loop_mismatch_seqs =[]\n spaces=0\n for a,b in zip( seq1,seq2):\n if a==' ':\n spaces+=1\n continue\n if a!= b:\n if a in '*.' or b in '*.':\n loop_mismatches += 10\n break\n else:\n if not (a in amino_acids and b in amino_acids):\n print( id1,id2,a,b)\n assert a in amino_acids and b in amino_acids\n if spaces<=1:\n loop_mismatches += 1\n loop_mismatch_seqs.append( ( a,b ) )\n else:\n assert spaces==2\n loop_mismatches_cdrx += 1\n if loop_mismatches>1:\n break\n if loop_mismatches <=1:\n all_mismatches = 0\n for a,b in zip( alseq1[:cpos+2],alseq2[:cpos+2]):\n if a!= b:\n if a in '*.' 
or b in '*.':\n all_mismatches += 10\n else:\n if not (a in amino_acids and b in amino_acids):\n print( id1,id2,a,b)\n assert a in amino_acids and b in amino_acids\n all_mismatches += 1\n #dist = tcr_distances.blosum_sequence_distance( seq1, seq2, gap_penalty=10 )\n if loop_mismatches<=1 and loop_mismatches + loop_mismatches_cdrx <= 2 and all_mismatches<=10:\n if loop_mismatches == 1:\n blscore= blosum[(loop_mismatch_seqs[0][0],loop_mismatch_seqs[0][1])]\n else:\n blscore = 100\n if blscore>=1:\n all_loopseq_nbrs_mm1[id1].append( id2 )\n if loop_mismatches>0 and verbose:\n mmstring = ','.join(['%s/%s'%(x[0],x[1]) for x in loop_mismatch_seqs])\n gene1 = trim_allele_to_gene( id1 )\n gene2 = trim_allele_to_gene( id2 )\n if gene1 != gene2 and verbose:\n print('v_mismatches:',organism,mmstring,blscore,id1,id2,\\\n loop_mismatches,loop_mismatches_cdrx,all_mismatches,seq1)\n print('v_mismatches:',organism,mmstring,blscore,id1,id2,\\\n loop_mismatches,loop_mismatches_cdrx,all_mismatches,seq2)\n\n\n for id in all_loopseq_nbrs:\n rep = min( all_loopseq_nbrs[id] )\n assert org_merged_loopseqs[id] == org_merged_loopseqs[ rep ]\n genes[id].rep = rep\n if verbose:\n print('vrep %s %15s %15s %s'%(organism, id, rep, org_merged_loopseqs[id]))\n\n\n ## merge mm1 nbrs to guarantee transitivity\n while True:\n new_nbrs = False\n for id1 in all_loopseq_nbrs_mm1:\n new_id1_nbrs = False\n for id2 in all_loopseq_nbrs_mm1[id1]:\n for id3 in all_loopseq_nbrs_mm1[id2]:\n if id3 not in all_loopseq_nbrs_mm1[id1]:\n all_loopseq_nbrs_mm1[id1].append( id3 )\n if verbose:\n print('new_nbr:',id1,'<--->',id2,'<--->',id3)\n new_id1_nbrs = True\n break\n if new_id1_nbrs:\n break\n if new_id1_nbrs:\n new_nbrs = True\n if verbose:\n print('new_nbrs:',ab,organism,new_nbrs)\n if not new_nbrs:\n break\n\n for id in all_loopseq_nbrs_mm1:\n rep = min( all_loopseq_nbrs_mm1[id] )\n genes[id].mm1_rep = rep\n if verbose:\n print('mm1vrep %s %15s %15s %s'%(organism, id, rep,org_merged_loopseqs[id]))\n\n\n ## setup Jseq reps\n for ab in 'AB':\n jloopseqs = {}\n for id,g in genes.items():\n if g.chain == ab and g.region == 'J':\n num = len( g.cdrs[0].replace( gap_character, '' ) )\n jloopseq = g.protseq[:num+3] ## go all the way up to and including the GXG\n jloopseqs[id] = jloopseq\n all_jloopseq_nbrs = {}\n for id1,seq1 in jloopseqs.items():\n all_jloopseq_nbrs[id1] = []\n for id2,seq2 in jloopseqs.items():\n if seq1 == seq2:\n all_jloopseq_nbrs[id1].append( id2 )\n for id in all_jloopseq_nbrs:\n rep = min( all_jloopseq_nbrs[id] )\n genes[id].rep = rep\n genes[id].mm1_rep = rep # just so we have an mm1_rep field defined...\n assert jloopseqs[id] == jloopseqs[ rep ]\n if verbose:\n print('jrep %s %15s %15s %15s'%(organism, id, rep, jloopseqs[id]))\n\n\n\n ## setup a mapping that we can use for counting when allowing mm1s and also ignoring alleles\n\n # allele2mm1_rep_gene_for_counting = {}\n # def get_mm1_rep_ignoring_allele( gene, organism ): # helper fxn\n # rep = get_mm1_rep( gene, organism )\n # rep = rep[:rep.index('*')]\n # return rep\n\n #allele2mm1_rep_gene_for_counting[ organism ] = {}\n\n if not basic.CLASSIC_COUNTREPS:\n # simpler scheme for choosing the 'count_rep' field\n for id, g in all_genes[organism].items():\n g.count_rep = trim_allele_to_gene(id)\n else:\n for chain in 'AB':\n for vj in 'VJ':\n allele_gs = [ (id,g) for (id,g) in all_genes[organism].items() if g.chain==chain and g.region==vj]\n\n gene2rep = {}\n gene2alleles = {}\n rep_gene2alleles = {}\n\n for allele,g in allele_gs:\n #assert allele[2] == chain\n gene = 
trim_allele_to_gene( allele )\n rep_gene = trim_allele_to_gene( g.mm1_rep )\n if rep_gene not in rep_gene2alleles:\n rep_gene2alleles[ rep_gene ] = []\n rep_gene2alleles[ rep_gene ].append( allele )\n\n if gene not in gene2rep:\n gene2rep[gene] = set()\n gene2alleles[gene] = []\n gene2rep[ gene ].add( rep_gene )\n gene2alleles[gene].append( allele )\n\n merge_rep_genes = {}\n for gene,reps in gene2rep.items():\n if len(reps)>1:\n if verbose:\n print('multireps:',organism, gene, reps)\n for allele in gene2alleles[gene]:\n print(' '.join(all_genes[organism][allele].cdrs), allele, \\\n all_genes[organism][allele].rep, \\\n all_genes[organism][allele].mm1_rep)\n assert vj=='V'\n\n ## we are going to merge these reps\n ## which one should we choose?\n l = [ (len(rep_gene2alleles[rep]), rep ) for rep in reps ]\n l.sort()\n l.reverse()\n assert l[0][0] > l[1][0]\n toprep = l[0][1]\n for (count,rep) in l:\n if rep in merge_rep_genes:\n # ACK need to think more about this, should probably just kill this logic!\n assert rep == toprep and merge_rep_genes[rep] == rep\n merge_rep_genes[ rep ] = toprep\n\n\n for allele,g in allele_gs:\n count_rep = trim_allele_to_gene( g.mm1_rep ) #get_mm1_rep_ignoring_allele( allele, organism )\n if count_rep in merge_rep_genes:\n count_rep = merge_rep_genes[ count_rep ]\n g.count_rep = count_rep #allele2mm1_rep_gene_for_counting[ organism ][ allele] = count_rep\n if verbose:\n print('countrep:',organism, allele, count_rep)\n\n\nif __name__ == '__main__':\n for org, genes in all_genes.items():\n for id, g in genes.items():\n print( org, id, g.cdrs )\n\n","sub_path":"tcrdock/tcrdist/all_genes.py","file_name":"all_genes.py","file_ext":"py","file_size_in_byte":11930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438330540","text":"import cv2\nimport numpy as np\nimport imutils\nfrom PIL import Image\nimport collections\nimport random\nimport sys\n\nnumber = 1\n\ndef getColorList(flag):#flag=1表示模板颜色匹配\n dict = collections.defaultdict(list)\n\n #黑色\n\n lower_black = np.array([0, 0, 0])\n upper_black = np.array([180, 255, 46])\n color_list = []\n color_list.append(lower_black)\n color_list.append(upper_black)\n dict['black'] = color_list\n\n # #灰色\n\n lower_gray = np.array([0, 0, 46])\n upper_gray = np.array([180, 43, 220])\n color_list = []\n color_list.append(lower_gray)\n color_list.append(upper_gray)\n dict['gray']=color_list\n\n #白色\n if flag==0:\n lower_white = np.array([0, 0, 221])\n upper_white = np.array([180, 30, 255])\n color_list = []\n color_list.append(lower_white)\n color_list.append(upper_white)\n dict['white'] = color_list\n if flag==1:\n lower_pink = np.array([160, 86, 178])\n upper_pink = np.array([180, 165, 255])\n color_list = []\n else:\n #粉色\n lower_pink=np.array([170,70,230])\n upper_pink=np.array([180,130,255])\n lower_pink1=np.array([0,76,170])\n upper_pink1=np.array([7,140,255])\n color_list = []\n color_list.append(lower_pink1)\n color_list.append(upper_pink1)\n color_list.append(lower_pink)\n color_list.append(upper_pink)\n dict['pink']=color_list\n\n # 红色\n\n lower_red = np.array([166, 166, 80])\n upper_red = np.array([180, 255, 255])\n color_list = []\n color_list.append(lower_red)\n color_list.append(upper_red)\n dict['red'] = color_list\n\n # 红色2\n lower_red = np.array([0, 140, 130])\n upper_red = np.array([6, 255, 255])\n color_list = []\n color_list.append(lower_red)\n color_list.append(upper_red)\n dict['red2'] = color_list\n\n #橙色\n if flag:\n lower_orange = np.array([6, 43, 46])\n 
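A self-contained sketch of the transitivity merge applied to all_loopseq_nbrs_mm1 above: keep folding in neighbors-of-neighbors until the sets stop growing. The ids are toy strings, not real gene names.
nbrs = {'a': {'a', 'b'}, 'b': {'b', 'c'}, 'c': {'c'}}
changed = True
while changed:
    changed = False
    for k in nbrs:
        merged = set(nbrs[k])
        for other in list(nbrs[k]):
            merged |= nbrs[other]   # pull in the neighbor's neighbors
        if merged != nbrs[k]:
            nbrs[k] = merged
            changed = True
print(nbrs['a'])   # {'a', 'b', 'c'}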
upper_orange = np.array([24, 255, 255])\n else:\n lower_orange = np.array([7, 102, 125])\n upper_orange = np.array([15, 230, 255])\n color_list = []\n color_list.append(lower_orange)\n color_list.append(upper_orange)\n dict['orange'] = color_list\n\n #黄色\n if flag:\n lower_yellow = np.array([25, 43, 46])\n upper_yellow = np.array([34, 255, 255])\n else:\n lower_yellow = np.array([25, 43, 180])\n upper_yellow = np.array([33, 255, 255])\n color_list = []\n color_list.append(lower_yellow)\n color_list.append(upper_yellow)\n dict['yellow'] = color_list\n\n #绿色\n\n lower_green = np.array([35, 43, 46])\n upper_green = np.array([77, 255, 255])\n color_list = []\n color_list.append(lower_green)\n color_list.append(upper_green)\n dict['green'] = color_list\n\n #青色\n\n lower_cyan = np.array([78, 43, 46])\n upper_cyan = np.array([99, 255, 255])\n color_list = []\n color_list.append(lower_cyan)\n color_list.append(upper_cyan)\n dict['cyan'] = color_list\n\n #蓝色\n\n lower_blue = np.array([100, 43, 46])\n upper_blue = np.array([124, 255, 255])\n color_list = []\n color_list.append(lower_blue)\n color_list.append(upper_blue)\n dict['blue'] = color_list\n\n # 紫色\n if flag:\n lower_purple = np.array([125, 43, 46])\n upper_purple = np.array([159, 255, 255])\n else:\n lower_purple = np.array([125, 50, 115])\n upper_purple = np.array([180, 153, 216])\n #lower_purple1=np.array([170,])\n color_list = []\n color_list.append(lower_purple)\n color_list.append(upper_purple)\n dict['purple'] = color_list\n\n return dict\ndef contrast_brightness_image(src1, a, g):\n h, w, ch = src1.shape # 获取shape的数值,height和width、通道\n # 新建全零图片数组src2,将height和width,类型设置为原图片的通道类型(色素全为零,输出为全黑图片)\n src2 = np.zeros([h, w, ch], src1.dtype)\n dst = cv2.addWeighted(src1, a, src2, 1 - a, g) # addWeighted函数说明如下\n # cv2.imshow(\"con-bri-demo\", dst)\n # cv2.waitKey(0)\n return dst\nclass Analysis:\n def __init__(self):\n self.shapes = {'triangle': 0, 'square': 0, 'polygons': 0, 'circles': 0,'parallelogram':0}\n def draw_text_info(self, image):\n c1 = self.shapes['triangle']\n c2 = self.shapes['square']\n c3 = self.shapes['polygons']\n c4 = self.shapes['circles']\n c5=self.shapes['parallelogram']\n cv2.putText(image, \"triangle: \"+str(c1), (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.2, (255, 0, 0), 1)\n cv2.putText(image, \"square: \" + str(c2), (10, 40), cv2.FONT_HERSHEY_PLAIN, 1.2, (255, 0, 0), 1)\n cv2.putText(image, \"parallelogram: \" + str(c5), (10, 60), cv2.FONT_HERSHEY_PLAIN, 1.2, (255, 0, 0), 1)\n return image\n def analy(self, color,shape,image):\n # print('analy ' +color + ' ' + shape)\n y,x=image.shape[:2]\n center_p=(x/2,y/2)\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n dict = getColorList(0)\n num = 0\n for d in dict:\n if d==color:\n # print(d)\n mask = cv2.inRange(hsv, dict[d][0], dict[d][1])\n # cv2.imshow(d,mask)\n # cv2.waitKey(0)\n blurred = cv2.GaussianBlur(mask, (5, 5), 0)\n thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n closed = cv2.erode(closed, None, iterations=5)\n thresh = cv2.dilate(closed, None, iterations=5)\n # find contours in the thresholded image\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n # print(len(cnts))\n # loop over the contours\n for c in cnts:\n # 轮廓逼近\n epsilon = 0.05 * cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, epsilon, True)\n # 分析几何形状\n corners = len(approx)\n # 
print(\"KKK\" + str(corners))\n shape_type = \"\"\n if corners == 3:\n count = self.shapes['triangle']\n count = count + 1\n self.shapes['triangle'] = count\n shape_type = \"triangle\"\n if corners == 4:\n # 用红色表示有旋转角度的矩形框架\n rect = cv2.minAreaRect(approx)\n w=rect[1][0]\n h=rect[1][1]\n ar=w/float(h)\n # print(ar)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n cv2.drawContours(image, [box], 0, (0, 0, 255), 2)\n if ar>=0.70 and ar<=2.00:\n count = self.shapes['square']\n count = count + 1\n self.shapes['square'] = count\n shape_type = \"square\"\n else:\n count=self.shapes['parallelogram']\n count=count+1\n self.shapes['parallelogram']=count\n shape_type=\"parallelogram\"\n if corners >= 10:\n count = self.shapes['circles']\n count = count + 1\n self.shapes['circles'] = count\n shape_type = \"circles\"\n if 4 < corners < 10:\n count = self.shapes['polygons']\n count = count + 1\n self.shapes['polygons'] = count\n shape_type = \"polygons\"\n cX=0\n cY=0\n if shape_type==shape:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n # print((cX,cY))\n print('object center pos: ({0}, {1})'.format(cX, cY))\n # cv2.drawContours(image, [c], -1, (0, 255, 0), 1)\n # cv2.circle(image, (cX, cY), 2, (255, 0, 0), 2)\n\n\n # global number\n # print('write image {}'.format(number))\n # cv2.imwrite(str(number) + '.jpg', image)\n # number += 1\n\n else:\n print('no shape!!!')\n # # global number\n # print('write image {}'.format(number))\n # cv2.imwrite(str(1000 + number) + '.jpg', image)\n # print('No shape error !!!!!!')\n # # sys.exit()\n # number += 1\n\n im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n # draw the contour and center of the shape on the image\n\n # print('show image...')\n # cv2.imshow(\"Analysis Result\", self.draw_text_info(image))\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n # print('end analy!')\n return (center_p[0]-cX,center_p[1]-cY)\n\n\ndef main():\n image = cv2.imread(\"image/02.jpg\")\n # image = cv2.imread(\"error.jpg\")\n\n\n print(image.shape)\n size = image.shape\n # 获取图像的高、宽\n h, w = image.shape[:2]\n # 调整图像亮度、对比度\n img = contrast_brightness_image(image, 1.3, -10)\n # 高斯模糊去噪\n blured = cv2.GaussianBlur(img, (5, 5), 0)\n # # cv2.imshow(\"blured\",blured)\n # # 进行泛洪填充,处理背景\n mask = np.zeros((h + 2, w + 2), np.uint8) # 掩码长和宽都比输入图像多两个像素点,满水填充不会超出掩码的非零边缘\n cv2.floodFill(blured, mask, (w - 1, h - 1), (0, 0, 0), (50, 35, 50), (185, 190, 190), cv2.FLOODFILL_FIXED_RANGE)\n #\n # cv2.imshow(\"floodFill\",blured)\n # cv2.waitKey(0)\n instance=Analysis()\n # m=instance.analy('green','triangle',blured)\n org =instance.analy('yellow', 'parallelogram', image)\n # org =instance.analy('blue', 'square', image)\n\n print(org)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Color_pos.py","file_name":"Color_pos.py","file_ext":"py","file_size_in_byte":9918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365872724","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom .config import PROXY_SLEEP\nfrom datetime import datetime\nfrom random import choices\nimport redis\nimport time\n\n\nclass ProxyFilter(object):\n \"\"\"代理过滤器,被使用过的代理自动被登记\n 在配置参数指定的时间内将无法使用\n \"\"\"\n\n def __init__(self):\n self.register_table = {}\n self.pool = ProxyPool()\n\n def register(self, proxy):\n \"\"\"登记代理,标记目前的时间\"\"\"\n self.register_table[proxy] = datetime.now()\n\n def remove_expired(self):\n \"\"\"检查并去除登记过期的代理\"\"\"\n for k, v in self.register_table.copy().items():\n if 
(datetime.now() - v).seconds > PROXY_SLEEP:\n self.register_table.pop(k)\n\n def get_usable(self, total):\n \"\"\"返回没有登记过的代理,并自动登记\n :param total:\n :return:\n \"\"\"\n # 先检查过期时间\n self.remove_expired()\n # 如果是代理列表,可以引入替换这里的`pool.get_all()`\n usable = list(set(self.pool.get_all()) ^\n set(self.register_table.keys()))\n if len(usable) < total:\n print('当前无可用代理,将在5分钟后重试')\n time.sleep(300)\n return self.get_usable(total)\n chosen = choices(usable, k=total)\n for each in chosen:\n self.register(each)\n return chosen\n\n\nclass ProxyPool(object):\n \"\"\"这里用的是作者实现的代理IP池:\n https://github.com/zkqiang/ProxyPool\n 如果有固定的代理IP可加在`config.py`里然后引用\n \"\"\"\n\n def __init__(self):\n \"\"\"初始化 Redis 连接\"\"\"\n self.redis = redis.StrictRedis(host='localhost', port=6379,\n decode_responses=True)\n self.key_name = 'proxies'\n\n def get_all(self):\n \"\"\"返回所有可用代理\n \"\"\"\n return self.redis.zrevrangebyscore(self.key_name, 100, 40)\n","sub_path":"webtraffic/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"244045012","text":"import requests\nimport csv\n\nurl = \"https://pomber.github.io/covid19/timeseries.json\"\ncovid = requests.get(url).json()\nexporter = csv.writer(open(\"../Newcases_Data.csv\", \"w\"), lineterminator ='\\n')\nexporter.writerow(['Country','Date','Confirmed','Deaths','Recovered'])\nfor country in covid:\n previous_day_confirmed = 0\n previous_day_deaths = 0\n previous_day_recovered = 0\n for item in covid[country]:\n if country == \"Taiwan*\":\n country = \"Taiwan\"\n elif country == \"US\":\n country = \"USA\"\n confirmed_cases = item[\"confirmed\"] - previous_day_confirmed\n previous_day_confirmed = item[\"confirmed\"]\n death_cases = item[\"deaths\"] - previous_day_deaths\n previous_day_deaths = item[\"deaths\"]\n recovered_cases = item[\"recovered\"] - previous_day_recovered\n previous_day_recovered = item[\"recovered\"]\n exporter.writerow([\n country,\n item['date'],\n confirmed_cases,\n death_cases,\n recovered_cases\n ])","sub_path":"Data_Extraction/NonCumulative_Data.py","file_name":"NonCumulative_Data.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"420250079","text":"class Solution(object):\n def toHex(self, num):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n mask = 0xffffffff\n mask_l = 0x0000000f\n T = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']\n ans = \"\"\n num = num & mask\n if num == 0:\n return '0'\n while num != 0:\n ans = T[num & mask_l] + ans\n num = num >> 4 & mask\n return ans\n","sub_path":"convertAnumberToHexadecimal.py","file_name":"convertAnumberToHexadecimal.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595446253","text":"'''\nFaça um programa que leia o nome e peso de várias pessoas, guardando tudo em uma lista. 
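A quick check of the two's-complement masking idea in toHex above: and-ing a negative int with 0xffffffff yields its unsigned 32-bit form, which is then read off one nibble at a time. The last line assumes the Solution class defined above.
print(hex(-1 & 0xffffffff))    # 0xffffffff
print(hex(-26 & 0xffffffff))   # 0xffffffe6
print(Solution().toHex(-1))    # 'ffffffff'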
No final, mostre:\nQuantas pessoas foram cadastradas\nUma listagem com as pessoas mais pesadas (mais pesado e mais leve são definidos de acordo com dados recebidos)\nUma listagem com as pessoas mais leves\n'''\nlpessoas = []\npessoa = []\nmaiorp = menorp = c = 0\nlispesados = []\nlisleve = []\nwhile True:\n test = \"erro\"\n v = 0\n pessoa.clear()\n while not isinstance(v, str):\n v = input(\"Digite o Nome: \")\n try:\n v = float(v)\n v = int(v)\n print(\"Nome não pode ser um número, tente novamente.\")\n except ValueError:\n v = str(v)\n pessoa.append(v)\n while not isinstance(v, float):\n v = input(\"Digite o Peso: \")\n try:\n v = float(v)\n except ValueError:\n print(\"Você digitou um peso inválido, tente novamente.\")\n pessoa.append(v)\n lpessoas.append(pessoa[:])\n if c != 0:\n if v > lpessoas[maiorp][1]:\n maiorp = c\n if v < lpessoas[menorp][1]:\n menorp = c\n while test not in \"SN\":\n test = input(\"Deseja continuar [S/N]? \").strip().upper()[0]\n if test in 'N':\n break\n c += 1\nfor i in lpessoas:\n if i[1] == lpessoas[maiorp][1]:\n lispesados.append(i[0])\n if i[1] == lpessoas[menorp][1]:\n lisleve.append(i[0])\nprint(\"=-\"*35)\nprint(f\"Você realizou {len(lpessoas)} novos cadastros.\")\nprint(f\"As pessoas pesadas são {lispesados} e elas pesam {lpessoas[maiorp][1]}Kg\")\nprint(f\"O menor peso foi {lpessoas[menorp][1]}Kg. O peso de: \", end=\"\")\nfor i in lpessoas:\n if i[1] == lpessoas[menorp][1]:\n print(f\" [{i[0]}] \", end=\"\")\nprint()\nprint(\"=-\"*35)\n","sub_path":"ex084 LISTA COMPOSTA recebe lista e mostra mais pesados e leves.py","file_name":"ex084 LISTA COMPOSTA recebe lista e mostra mais pesados e leves.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466686666","text":"#!python\n# -*- coding: utf-8 -*-\n\"\"\"\n\t@package:\tcmd.@os\n\t@author:\tKRZYSZTOF \"@K0FF.EU\" K0FF\n\t@version:\t2.17.12\n\"\"\"\nimport bx\nimport sys\nimport os\n\n#\ndef OS():\n\tif sys.platform in ('linux','linux2'): return 'linux'\n\tif sys.platform in ('win32','win64','cygwin'): return 'windows'\n\tif sys.platform == 'darwin': return 'darwin'\n\treturn os.name\n\n#\nbx.var._set('@os',OS())\n\n#\ndef OS_if( name, value ):\n\tOS = bx.var._get('@os')\n\tif value is True: return True\n\tif value is False: return False\n\tvalue = bx.var._str(value).lower()\n\n\tif value in ('windows','win'): return OS == 'windows'\n\tif value in ('darwin','macos','mac'): return OS == 'darwin'\n\tif value == 'linux': return OS == 'linux'\n\n\tif value == os.name: return True\n\tif value == sys.platform: return True\n\treturn False\n\n#\nbx.reg.ex('bx.vars.set',{\n\t\t'@os': bx.var.readonly,\n\t})\n\n#\ndef __blox__():\n\tbx.reg.ex('cmd.if',{\n\t\t\t'@os': OS_if\n\t\t})\n","sub_path":"cmds/_os.py","file_name":"_os.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"380324496","text":"import websocket, json, pprint, talib, numpy\nimport config\nfrom binance.client import Client\nfrom binance.enums import *\n## is where you put the asset you want to trade, in this instance its ETHUSD\n## is the time interval between prices \n\nSOCKET = \"wss://stream.binance.com:9443/ws/ethusdt@kline_1m\"\nRSI_Period = 14\n# RSI_Period = 30 , for BTC in a previous test, RSI 30 was the strongest correlated predictable variable\n\nRSI_OVERBOUGHT = 70\nRSI_OVERSOLD = 30\nTRADE_SYMBOL = 'ETHUSD'\nTRADE_QUANTITY = 0.05\n\ncloses = 
[]\nin_position = False\n\nclient = Client(config.API_KEY, config.API_SECRET, tld='cad')\n## btcusdt is bitcoin, ltcusdt is litecoin, \n\ndef order(side, quantity, symbol, order_type=ORDER_TYPE_MARKET):\n    try:\n        print(\"sending order\")\n        order = client.create_order(symbol=symbol,\n                                    side=side,\n                                    type=order_type,\n                                    quantity=quantity)\n        print(order)\n    except Exception as e:\n        return False\n    \n    return True\n\ndef on_open(ws):\n    print('opened connection')\n\ndef on_close(ws):\n    print('closed_connection')\n\ndef on_message(ws, message):\n    global in_position  # module-level position flag is reassigned below\n    print('received message')\n    json_message = json.loads(message)\n    pprint.pprint(json_message)\n\n    candle = json_message['k']\n    is_candle_closed = candle['x']\n    close = candle['c']\n\n\n    if is_candle_closed:\n        print('candle closed at {}'.format(close))\n        closes.append(float(close))\n        print('closes')\n        print(closes)\n\n        if len(closes) > RSI_Period:\n            np_closes = numpy.array(closes)  # was numpy.arry, which raises AttributeError\n            rsi = talib.RSI(np_closes, RSI_Period)\n            print(\"all rsis calculated so far\")\n            print(rsi)\n            last_rsi = rsi[-1]\n            print(\"the current rsi is {}\".format(last_rsi))\n\n            if last_rsi > RSI_OVERBOUGHT:\n                # only sell when a position is actually held\n                if in_position:\n                    print(\"Sell! Sell! Sell!\")\n                    order_succeeded = order(SIDE_SELL, TRADE_QUANTITY, TRADE_SYMBOL)\n                    if order_succeeded:\n                        in_position = False\n                else: \n                    print(\"It is overbought, but we dont own any.\")\n\n            if last_rsi < RSI_OVERSOLD:\n                if in_position:\n                    print(\"It is oversold, but you already own it, nothing to do\")\n                else:\n                    print(\"Buy! Buy! Buy!\")\n                    order_succeeded = order(SIDE_BUY, TRADE_QUANTITY, TRADE_SYMBOL)\n                    if order_succeeded:\n                        in_position = True\n            \n\n\n\n\n\n\n\nws = websocket.WebSocketApp(SOCKET, on_open=on_open, on_close=on_close, on_message=on_message)\nws.run_forever()\n\n\n\n","sub_path":"Binance_bot/Eth_model/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"146434674","text":"import datetime\n\nfrom django.core.exceptions import ValidationError\nfrom dateutil.relativedelta import relativedelta\nfrom rest_framework import generics, status\nfrom rest_framework.response import Response\n\nfrom .models import CardSeries, CardNumber\nfrom .serializers import (\n    SeriesSerializer,\n    CardSerializer,\n    CreateCardsSerializer,\n)\n\n\nclass SeriesDetailView(generics.RetrieveAPIView):\n    serializer_class = SeriesSerializer\n    lookup_field = 'series'\n    queryset = CardSeries.objects.all()\n\n\nclass CardListView(generics.ListAPIView):\n    serializer_class = CardSerializer\n    queryset = CardNumber.objects.all()\n\n    def get_serializer_class(self):\n        if self.request.method == 'POST':\n            return CreateCardsSerializer\n        return CardSerializer\n\n    def post(self, request, *args, **kwargs):\n        serializer = CreateCardsSerializer(\n            data=request.data,\n            *args,\n            **kwargs\n        )\n        serializer.is_valid(raise_exception=True)\n        now_date = datetime.datetime.now().date()\n        months = serializer.validated_data['validity']\n        end_date = now_date + relativedelta(months=+months)\n        try:\n            CardNumber.objects.cards_bulk_create(\n                series=serializer.validated_data['series'],\n                cards_quantity=serializer.validated_data['cards_quantity'],\n                end_date=end_date\n            )\n            status_code = status.HTTP_201_CREATED\n            message = None\n        except ValidationError as e:\n            status_code = status.HTTP_400_BAD_REQUEST\n            message = {\n                'cards_quantity': (e.message, )\n            }\n\n        return Response(\n            message,\n            status=status_code,\n        
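A sketch of the expiry computation in CardListView.post above: dateutil's relativedelta adds calendar months rather than fixed 30-day blocks, clamping to the end of shorter months. The dates are arbitrary examples.
import datetime
from dateutil.relativedelta import relativedelta

start = datetime.date(2021, 1, 31)
print(start + relativedelta(months=+1))   # 2021-02-28 (clamped to month end)
print(start + relativedelta(months=+12))  # 2022-01-31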
)\n","sub_path":"backend/cards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556797436","text":"# The arduino accepts commands in 1/10 millimeters\nUNIT = 10\n\nclass Line:\n def __init__(self, x1, y1, x2, y2):\n self.x1 = float(x1)\n self.y1 = float(y1)\n self.x2 = float(x2)\n self.y2 = float(y2)\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) \\\n and (self.x1 == other.x1) \\\n and (self.y1 == other.y1) \\\n and (self.x2 == other.x2) \\\n and (self.y2 == other.y2)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def instruction(self):\n return [\n 'M {0} {1}'.format(self.x1 * UNIT, self.y1 * UNIT),\n 'L {0} {1}'.format(self.x2 * UNIT, self.y2 * UNIT)\n ]\n\n\nclass Rect:\n def __init__(self, x, y, width, height):\n self.x = float(x)\n self.y = float(y)\n self.width = float(width)\n self.height = float(height)\n\n # Translate a rectangle to 4 lines\n def instruction(self):\n instructions = []\n instructions.append('M ' + str(float(self.x) * UNIT) + ' ' + str(float(self.y) * UNIT))\n instructions.append('L ' + str(float(self.x + self.width) * UNIT) + ' ' + str(float(self.y) * UNIT)) \n instructions.append('L ' + str(float(self.x + self.width) * UNIT) + ' ' + str(float(self.y + self.height) * UNIT))\n instructions.append('L ' + str(float(self.x) * UNIT) + ' ' + str(float(self.y + self.height) * UNIT)) \n instructions.append('L ' + str(float(self.x) * UNIT) + ' ' + str(float(self.y) * UNIT))\n return instructions\n\nclass Polygon:\n # Example value: points=\"750,600 629.7,659.4 600,792.9 683.2,900 816.8,900 900,792.9 870.3,659.4 \"\n def __init__(self, points):\n self.points = points.strip().split()\n\n # Add starting point to the back to get the last line\n self.points.append(self.points[0])\n self.points = list(map(lambda x: x.split(','), self.points))\n\n def instruction(self):\n instructions = []\n instructions.append('M ' + str(float(self.points[0][0]) * UNIT) + ' ' + str(float(self.points[0][1]) * UNIT))\n for i, point in enumerate(self.points[1:]):\n instructions.append('L ' + str(float(self.points[i][0]) * UNIT) + ' ' + str(float(self.points[i][1]) * UNIT))\n instructions.append('L ' + str(float(self.points[0][0]) * UNIT) + ' ' + str(float(self.points[0][1]) * UNIT))\n\n return instructions\n","sub_path":"shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"185722074","text":"# Implementation of bubble sort\ndef bubblesort(lst):\n n = len(lst)\n\n for i in range(n):\n for j in range(0, n-i-1):\n if lst[j] > lst[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n\n return lst\n","sub_path":"sorting/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"339225759","text":"class Solution(object):\n def scheduleCourse(self, courses):\n courses.sort(key=lambda x: x[1])\n longest = [-1]\n start = 0\n for i in range(len(courses)):\n start = courses[i][0] + start\n if start <= courses[i][1]:\n longest.append(courses[i][0])\n\n elif start > courses[i][1]:\n max_value = max(longest)\n if max_value == -1:\n start -= courses[i][0]\n elif max_value <= courses[i][0]:\n #delete current course\n start -= courses[i][0]\n else:\n #delete previous longese course\n start -= 
max_value\n longest.remove(max_value)\n longest.append(courses[i][0])\n return len(longest) - 1\n \n","sub_path":"python/630 Course Schedule III.py","file_name":"630 Course Schedule III.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"349344417","text":"\"\"\"tutorial URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom tutorial.quickstart import views as quickstart_views\nfrom tutorial.api_root import views as root_views\nfrom tutorial.snippets import views as snippets_views\n\nurlpatterns = [\n # url(r'^', include(router.urls)),\n # url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n url(r'^admin/', admin.site.urls),\n url(r'^$', root_views.api_root),\n url(r'^snippets/$',\n snippets_views.SnippetViewSet.as_view({\n 'get': 'list'\n }),\n name='snippet-list'),\n url(r'^snippets/(?P[0-9]+)/$',\n snippets_views.SnippetViewSet.as_view({\n 'get': 'retrieve'\n }),\n name='snippet-detail'),\n url(r'^snippets/(?P[0-9]+)/highlight/$',\n snippets_views.SnippetViewSet.as_view(),\n name='snippet-highlight'),\n url(r'^users/$',\n quickstart_views.UserViewSet.as_view({\n 'get': 'list'\n }),\n name='user-list'),\n url(r'^users/(?P[0-9]+)/$',\n quickstart_views.UserViewSet.as_view({\n 'get': 'retrieve'\n }),\n name='user-detail'),\n url(r'^groups/$',\n quickstart_views.GroupViewSet.as_view(),\n name='group-list'),\n url(r'^groups/(?P[0-9]+)/$',\n quickstart_views.GroupViewSet.as_view({\n 'get': 'list'\n }),\n name='group-detail'),\n]\n","sub_path":"tutorial/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302957400","text":"import pandas as pd\nfrom math import trunc\nimport itertools\n\nBINS_PER_DAY = 24\nMINUTES_IN_DAY = 1440\n\n# open csv file and read out time and washers lists\ndataSheet = pd.read_csv('/Users/jonathanconroy/Documents/laundryDataProject/data.csv')\ntimeInHours = dataSheet['TimeInHours']\nwashers = dataSheet['Washers Used']\ndryers = dataSheet['Dryers Used']\n\n# create a dictionary that associates a time (in minutes since 00:00) with the number of washers in use\nwasherDict = dict()\ndryerDict = dict()\nfor i in range(0,len(washers)):\n currTime = int(timeInHours[i][-2:]) + int(timeInHours[i][:-3])*60 #currentTimeInMinutes\n if(currTime in washerDict):\n washerDict[currTime] = washerDict[currTime] + [washers[i]]\n dryerDict[currTime] = dryerDict[currTime] + [dryers[i]]\n else:\n washerDict[currTime] = [washers[i]]\n dryerDict[currTime] = [dryers[i]]\nprint('done with first dict')\n\n# create a dictionary that associates each minute in the day with a washer number, relying on the last known washer number\nminutesDict = dict.fromkeys(range(0,MINUTES_IN_DAY))\ncurrWasherTime = 
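A common heap-based variant of the same greedy (not the author's code): scan courses by deadline, and whenever the running total overshoots, drop the longest course taken so far. Shown as a sketch for comparison with the list-scanning version above.
import heapq

def schedule_course(courses):
    courses.sort(key=lambda c: c[1])
    taken, total = [], 0
    for duration, deadline in courses:
        heapq.heappush(taken, -duration)   # max-heap via negation
        total += duration
        if total > deadline:
            total += heapq.heappop(taken)  # removes the largest duration
    return len(taken)

print(schedule_course([[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]))  # 3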
min(washerDict.keys()) #start with the closest washer number to 00:00\nfor minute in range(0,MINUTES_IN_DAY):\n if(minute in washerDict):\n currWasherTime = minute\n minutesDict[minute] = {'washers':washerDict[currWasherTime],'dryers':dryerDict[currWasherTime]}\nprint('done with minutes dict')\n\n# get the data into bins that contain the average washer number\nbinSize = MINUTES_IN_DAY/BINS_PER_DAY\navgWashersInBins = [0] * BINS_PER_DAY\navgDryersInBins = [0] * BINS_PER_DAY\nfor minute,data in minutesDict.items():\n avgWasherNumAtMinute = sum(data['washers'])*1.0/len(data['washers'])\n avgDryerNumAtMinute = sum(data['dryers'])/len(data['dryers'])\n binIndex = int(trunc(minute/binSize))\n avgWashersInBins[binIndex] = avgWashersInBins[binIndex] + avgWasherNumAtMinute/binSize\n avgDryersInBins[binIndex] = avgDryersInBins[binIndex] + avgDryerNumAtMinute/binSize\nprint('done with bins')\n\n#print the data in a new csv file\nnewDataFile = open('processedData.csv','w+')\nnewDataFile.write('Time,Washers Used,,Time,Dryers Used\\n')\nfor binNum in range(0,BINS_PER_DAY):\n # find the timestamp and washerNumber for each bin and write it to the new file\n timeInMinutes = binSize*binNum\n timeStamp = str(trunc(timeInMinutes/60)) + ':' + str(timeInMinutes%60)\n washerNum = avgWashersInBins[binNum]\n dryerNum = avgDryersInBins[binNum]\n newDataFile.write(timeStamp + ',' + str(washerNum)+',,' + timeStamp + ',' + str(dryerNum)+'\\n')\nnewDataFile.close()\nprint('done')\n","sub_path":"processCsv.py","file_name":"processCsv.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"185341295","text":"#!/usr/bin/env python3\n\"\"\" Train \"\"\"\nimport tensorflow.keras as K\n\n\ndef train_model(network, data, labels, batch_size, epochs,\n validation_data=None, early_stopping=False, patience=0,\n learning_rate_decay=False, alpha=0.1, decay_rate=1,\n verbose=True, shuffle=False):\n \"\"\"\n Trains a model using mini-batch gradient descent\n network is the model to train\n data is a numpy.ndarray of shape (m, nx) containing the input data\n labels is a one-hot numpy.ndarray of shape (m, classes) containing the\n labels of data\n batch_size is the size of the batch used for mini-batch gradient descent\n epochs is the number of passes through data for mini-batch gradient descent\n verbose is a boolean that determines if output should be printed during\n training\n shuffle is a boolean that determines whether to shuffle the batches every\n epoch. 
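A sketch of the binning arithmetic in processCsv above: with 24 bins over 1440 minutes, each bin covers 60 minutes, and the per-minute contributions are scaled by 1/binSize to form the bin average. The sample minutes are arbitrary.
from math import trunc

BINS_PER_DAY, MINUTES_IN_DAY = 24, 1440
binSize = MINUTES_IN_DAY / BINS_PER_DAY      # 60.0 minutes per bin
for minute in (0, 59, 60, 725):
    print(minute, '-> bin', int(trunc(minute / binSize)))
# 0 and 59 land in bin 0, 60 in bin 1, 725 (12:05) in bin 12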
Normally, it is a good idea to shuffle, but for reproducibility,\n        we have chosen to set the default to False.\n    validation_data is the data to validate the model with, if not None\n    learning_rate_decay is a boolean that indicates whether learning rate decay\n        should be used\n    learning rate decay should only be performed if validation_data exists\n    the decay should be performed using inverse time decay\n    the learning rate should decay in a stepwise fashion after each epoch\n    each time the learning rate updates, Keras should print a message\n    alpha is the initial learning rate\n    decay_rate is the decay rate\n    Returns: the History object generated after training the model\n    \"\"\"\n    callbacks = []\n\n    if validation_data is not None:\n        if early_stopping is True:\n            callbacks.append(K.callbacks.EarlyStopping(\n                monitor=\"val_loss\",\n                patience=patience\n            ))\n        if learning_rate_decay is True:\n            callbacks.append(K.callbacks.LearningRateScheduler(\n                lambda x: alpha / (1 + decay_rate * x),\n                verbose=1\n            ))\n\n    history = network.fit(\n        x=data,\n        y=labels,\n        batch_size=batch_size,\n        epochs=epochs,\n        verbose=verbose,\n        shuffle=shuffle,\n        validation_data=validation_data,\n        callbacks=callbacks\n    )\n    return history\n","sub_path":"supervised_learning/0x06-keras/7-train.py","file_name":"7-train.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"605542400","text":"#!/usr/bin/env python\nimport sqlite3\nimport collections\n\ndef map_samples_to_indicies(c):\n    \"\"\"Return a dict mapping sample names (key)\n    to sample indices in the numpy genotype arrays (value).\n    \"\"\"\n    sample_to_idx = {}\n    c.execute(\"select sample_id, name from samples\")\n    for row in c:\n        name = str(row['name'])\n        idx = row['sample_id'] - 1\n        sample_to_idx[name] = idx\n    return sample_to_idx\n\n\ndef map_indicies_to_samples(c):\n    \"\"\"Return a dict mapping sample indices in the\n    numpy arrays (key) to sample names.\n    \"\"\"\n    idx_to_sample = {}\n    c.execute(\"select sample_id, name from samples\")\n    for row in c:\n        name = str(row['name'])\n        idx = row['sample_id'] - 1\n        idx_to_sample[idx] = name\n    return idx_to_sample\n\n\ndef get_col_names_and_indices(sqlite_description, ignore_gt_cols = False):\n    \"\"\"Return a list of column names and a list of the row indices.\n    Optionally exclude gt_* columns.\n    \"\"\"\n    col_indices = []\n    col_names = []\n    for idx, col_tup in enumerate(sqlite_description):\n        # e.g., each col in sqlite desc is a tuple like:\n        # ('variant_id', None, None, None, None, None, None)\n        col_name = col_tup[0]\n        if ((not ignore_gt_cols) or \\\n           (ignore_gt_cols and not col_name.startswith('gt'))):\n            col_indices.append(idx)\n            col_names.append(col_name)\n    return col_names, col_indices\n\n\n# http://code.activestate.com/recipes/576694/\nclass OrderedSet(collections.abc.MutableSet):\n\n    def __init__(self, iterable=None):\n        self.end = end = []\n        end += [None, end, end]  # sentinel node for doubly linked list\n        self.map = {}  # key --> [key, prev, next]\n        if iterable is not None:\n            self |= iterable\n\n    def __len__(self):\n        return len(self.map)\n\n    def __contains__(self, key):\n        return key in self.map\n\n    def add(self, key):\n        if key not in self.map:\n            end = self.end\n            curr = end[1]\n            curr[2] = end[1] = self.map[key] = [key, curr, end]\n\n    def discard(self, key):\n        if key in self.map:\n            key, prev, next = self.map.pop(key)\n            prev[2] = next\n            next[1] = prev\n\n    def __iter__(self):\n        end = self.end\n        curr = end[2]\n        while curr is not 
end:\n yield curr[0]\n curr = curr[2]\n\n def __reversed__(self):\n end = self.end\n curr = end[1]\n while curr is not end:\n yield curr[0]\n curr = curr[1]\n\n def pop(self, last=True):\n if not self:\n raise KeyError('set is empty')\n key = self.end[1][0] if last else self.end[2][0]\n self.discard(key)\n return key\n\n def __repr__(self):\n if not self:\n return '%s()' % (self.__class__.__name__,)\n return '%s(%r)' % (self.__class__.__name__, list(self))\n\n def __eq__(self, other):\n if isinstance(other, OrderedSet):\n return len(self) == len(other) and list(self) == list(other)\n return set(self) == set(other)\n\n ","sub_path":"gemini/gemini_utils.py","file_name":"gemini_utils.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"231805692","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase, Client\nfrom django.urls import reverse\n\nfrom projects.models import Project, ProjectMembership\n\n\nclass SampleTestCase(TestCase):\n def setUp(self):\n self.user = User.objects.create_user('test_user', 'user@projects.com', 'SecretPassword')\n self.project1 = Project.objects.create(name='first sample project')\n self.project2 = Project.objects.create(name='second sample project')\n\n def test_sample_1(self):\n c = Client()\n c.force_login(user=self.user)\n res = c.get(reverse('index'))\n self.assertEquals(res.status_code, 404)\n self.assertTrue('no projects found' in res.content.decode().lower())\n\n membership1 = ProjectMembership.objects.create(user=self.user, project=self.project1, role='RD')\n self.assertFalse(membership1.is_current)\n\n res = c.get(reverse('index'))\n self.assertEquals(res.status_code, 200)\n self.assertFalse('no projects found' in res.content.decode().lower())\n membership1.refresh_from_db()\n self.assertTrue(membership1.is_current)\n\n membership2 = ProjectMembership.objects.create(user=self.user, project=self.project2, role='RO')\n self.assertFalse(membership2.is_current)\n\n res = c.get(reverse('active_project', args=[self.project2.id]), follow=True)\n self.assertEquals(res.status_code, 200)\n self.assertFalse('no projects found' in res.content.decode().lower())\n\n membership1.refresh_from_db()\n membership2.refresh_from_db()\n\n self.assertFalse(membership1.is_current)\n self.assertTrue(membership2.is_current)\n\n def test_sample_2(self):\n membership = ProjectMembership.objects.create(user=self.user, project=self.project2, role='RD')\n self.assertTrue(membership.has_permission('pull_project_code'))\n self.assertTrue(membership.has_permission('create_new_branches'))\n self.assertFalse(membership.has_permission('add_new_team_members'))\n self.assertFalse(membership.has_permission('remove_project'))\n\n membership.role = 'RO'\n membership.save()\n\n self.assertTrue(membership.has_permission('remove_project'))\n self.assertFalse(membership.has_permission('force_push_to_protected_branches'))\n\n def test_sample_3(self):\n c = Client()\n c.force_login(user=self.user)\n membership1 = ProjectMembership.objects.create(user=self.user, project=self.project1, role='RD')\n res = c.get(reverse('index'))\n self.assertEquals(res.status_code, 200)\n membership1.refresh_from_db()\n self.assertTrue(membership1.is_current)\n\n res = c.get(reverse('remove_project'))\n self.assertTrue(res.status_code, 
403)\n","sub_path":"projects/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"341936557","text":"#%%\n\nimport pandas as pd\nimport numpy as np\nimport pymysql\nimport math\nimport statistics\nimport time\nimport datetime\nfrom itertools import combinations, permutations\nfrom scipy.special import comb, perm\nfrom dateutil.relativedelta import relativedelta\n\n#%%\ntoday = datetime.date.today()\n# starttime = datetime.datetime.now()\n# db = pymysql.connect(\"localhost\", \"root\", \"esfortest\", \"etf\")\n\n# name = 'max'\n# password = '1234'\nimport sys\nusername = sys.argv[1]\npassword = sys.argv[2]\n\n\n\ndb = pymysql.connect(\"localhost\", \"root\", \"esfortest\", \"etf\")\ncursor = db.cursor()\nsql = \"select * from `user_datat` where (name = '\"+ str(username) +\"' and id = '\" + str(password) +\"')\"\n\ncursor.execute(sql)\nresult_select = cursor.fetchall()\ndb.commit()\n\nstart_date = result_select[0][2]\nvary = relativedelta(today,start_date)\n\nevery_asset_money = result_select[0][6].split(' ')\ntotal_money = 0\nfor i in range(len(every_asset_money)):\n total_money = total_money + float(every_asset_money[i])\n\n\nchoose = result_select[0][4].split(' ')\nweight = result_select[0][5].split(' ')\nwant_t = vary.months + (vary.years*12)\nper_in_money = float(result_select[0][10])\ninput_money = want_t*(per_in_money/12) + float(result_select[0][9])\nwant_type = 2\n\nprint(input_money)\nprint(total_money)\n\nsql_del1 = \"TRUNCATE TABLE `user_datat`\"\nsql_del2 = \"TRUNCATE TABLE `user_datatr`\"\ncursor.execute(sql_del1)\ncursor.execute(sql_del2)\ndb.commit()\n\ndb.close()\n# %%\n","sub_path":"functions/db_flush.py","file_name":"db_flush.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634647896","text":"import logging\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nfrom requests_toolbelt.utils import dump\nimport datetime\nfrom PIL import Image\nfrom io import BytesIO\nfrom resizeimage import resizeimage\nimport pkg_resources\nimport math\nimport os\nfrom time import gmtime, strftime\nimport re\n\nLOGGING_TRACE_LVL = 5\nlogger = logging.getLogger('postcard_creator')\nlogging.addLevelName(LOGGING_TRACE_LVL, 'TRACE')\nsetattr(logger, 'trace', lambda *args: logger.log(LOGGING_TRACE_LVL, *args))\n\n\ndef _trace_request(response):\n data = dump.dump_all(response)\n try:\n logger.trace(data.decode())\n except Exception:\n data = str(data).replace('\\\\r\\\\n', '\\r\\n')\n logger.trace(data)\n\n\nclass PostcardCreatorException(Exception):\n server_response = None\n\n\nclass Token(object):\n def __init__(self, _protocol='https://'):\n self.protocol = _protocol\n self.base = '{}account.post.ch'.format(self.protocol)\n self.swissid = '{}login.swissid.ch'.format(self.protocol)\n self.token_url = '{}postcardcreator.post.ch/saml/SSO/alias/defaultAlias'.format(self.protocol)\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0.1; wv) AppleWebKit/537.36 (KHTML, like Gecko) ' +\n 'Version/4.0 Chrome/52.0.2743.98 Mobile Safari/537.36',\n 'Origin': '{}account.post.ch'.format(self.protocol)\n }\n\n # cache_filename = 'pcc_cache.json'\n\n self.token = None\n self.token_type = None\n self.token_expires_in = None\n self.token_fetched_at = None\n self.cache_token = False\n\n def _create_session(self):\n return requests.Session()\n\n def 
has_valid_credentials(self, username, password):\n try:\n self.fetch_token(username, password)\n return True\n except PostcardCreatorException:\n return False\n\n # def store_token_to_cache(self, key, token):\n #\n # def check_token_in_cache(self, username, password):\n # tmp_dir = tempfile.gettempdir()\n # tmp_path = os.path.join(tmp_dir, self.cache_filename)\n # tmp_file = Path(tmp_path)\n #\n # if tmp_file.exists():\n # cache_content = open(tmp_file, \"r\").read()\n # cache = []\n # try:\n # cache = json.load(cache_content)\n # except Exception:\n # return None\n #\n\n def fetch_token(self, username, password):\n logger.debug('fetching postcard account token')\n\n if username is None or password is None:\n raise PostcardCreatorException('No username/ password given')\n\n # if self.cache_token:\n # self.check_token_in_cache(username, password)\n\n # try first to authenticate with Post account, if it fails, try SwissID\n session = None\n saml_response = None\n try:\n session = self._create_session()\n saml_response = self._get_saml_response(session, username, password)\n except PostcardCreatorException:\n session = self._create_session()\n saml_response = self._swissid_get_saml_response(session, username, password)\n \n payload = {\n 'RelayState': '{}postcardcreator.post.ch?inMobileApp=true&inIframe=false&lang=en'.format(self.protocol),\n 'SAMLResponse': saml_response\n }\n\n response = session.post(url=self.token_url, headers=self.headers, data=payload)\n logger.debug(' post {}'.format(self.token_url))\n _trace_request(response)\n\n try:\n if response.status_code is not 200:\n raise PostcardCreatorException()\n\n access_token = json.loads(response.text)\n self.token = access_token['access_token']\n self.token_type = access_token['token_type']\n self.token_expires_in = access_token['expires_in']\n self.token_fetched_at = datetime.datetime.now()\n\n except PostcardCreatorException:\n e = PostcardCreatorException(\n 'Could not get access_token. Something broke. '\n 'set increase debug verbosity to debug why')\n e.server_response = response.text\n raise e\n\n logger.debug('username/password authentication was successful')\n\n def _get_saml_response(self, session, username, password):\n url = '{}/SAML/IdentityProvider/'.format(self.base)\n query = '?login&app=pcc&service=pcc&targetURL=https%3A%2F%2Fpostcardcreator.post.ch' + \\\n '&abortURL=https%3A%2F%2Fpostcardcreator.post.ch&inMobileApp=true'\n data = {\n 'isiwebuserid': username,\n 'isiwebpasswd': password,\n 'confirmLogin': ''\n }\n response1 = session.get(url=url + query, headers=self.headers)\n _trace_request(response1)\n logger.debug(' get {}'.format(url))\n\n response2 = session.post(url=url + query, headers=self.headers, data=data)\n _trace_request(response2)\n logger.debug(' post {}'.format(url))\n\n response3 = session.post(url=url + query, headers=self.headers)\n _trace_request(response3)\n logger.debug(' post {}'.format(url))\n\n if any(e.status_code is not 200 for e in [response1, response2, response3]):\n raise PostcardCreatorException('Wrong user credentials')\n\n soup = BeautifulSoup(response3.text, 'html.parser')\n saml_response = soup.find('input', {'name': 'SAMLResponse'})\n\n if saml_response is None or saml_response.get('value') is None:\n raise PostcardCreatorException('Username/password authentication failed. 
'\n 'Are your credentials valid?.')\n\n return saml_response.get('value')\n\n def _swissid_get_saml_response(self, session, username, password):\n url = '{}/SAML/IdentityProvider/'.format(self.base)\n query = '?login&app=pcc&service=pcc&targetURL=https%3A%2F%2Fpostcardcreator.post.ch' + \\\n '&abortURL=https%3A%2F%2Fpostcardcreator.post.ch&inMobileApp=true'\n\n response1 = session.get(url=url + query)\n logger.debug(' step 1, GET {}'.format(url + query))\n\n data2 = {\n 'isPilotPhase': 'true',\n 'isiwebuserid': '',\n 'isiwebpasswd': '',\n 'externalIDP': 'externalIDP',\n 'nevisdialog': 'password'\n }\n response2 = session.post(url=url + query, data=data2)\n logger.debug(' step 2, POST {}'.format(url + query))\n\n # extract this goto parameter from the previous redirection to generate\n # the next url request\n goto_param = re.search('&goto=([^&]+)', response2.history[3].url).group(1)\n newurl = 'https://login.swissid.ch/idp/json/authenticate?realm=/SESAM&locale=en&service=Sesam-LDAP&goto={}&authIndexType=service&authIndexValue=Sesam-LDAP'.format(goto_param)\n response3 = session.post(newurl)\n logger.debug(' step 3, POST {}'.format(newurl))\n\n # get the JSON blob, update the username and send back\n data4 = response3.json()\n data4['callbacks'][2]['input'][0]['value'] = username\n json_type = {'Content-Type': 'application/json'}\n response4 = session.post(newurl, headers=json_type, data=json.dumps(data4))\n logger.debug(' step 4, POST {}'.format(newurl))\n\n # get the new JSON blob, update the password and send back\n data5 = response4.json()\n try:\n data5['callbacks'][3]['input'][0]['value'] = password\n except KeyError:\n raise PostcardCreatorException('Oops, is your email valid?')\n response5 = session.post(newurl, headers=json_type, data=json.dumps(data5))\n logger.debug(' step 5, POST {}'.format(newurl))\n\n # update session with the token we receive and request successUrl\n try:\n session.cookies.update({'swissid': response5.json()['tokenId']})\n success_url = response5.json()['successUrl']\n except KeyError:\n raise PostcardCreatorException('Oops, is your password valid?')\n response6 = session.get(success_url)\n logger.debug(' step 6, GET {}'.format(success_url))\n\n # final POST request to get the SAMLResponse\n response7 = session.post(url=url + query)\n logger.debug(' step 7, POST {}'.format(url + query))\n\n if any(e.status_code is not 200 for e in [response1, response2,\n response3, response4, response5, response6, response7]):\n raise PostcardCreatorException('Issue during authentication process, wrong credentials?')\n\n soup = BeautifulSoup(response7.text, 'html.parser')\n saml_response = soup.find('input', {'name': 'SAMLResponse'})\n\n if saml_response is None or saml_response.get('value') is None:\n raise PostcardCreatorException('Username/password authentication failed. 
'\n 'Are your credentials valid?.')\n\n return saml_response.get('value')\n\n def to_json(self):\n return {\n 'fetched_at': self.token_fetched_at,\n 'token': self.token,\n 'expires_in': self.token_expires_in,\n 'type': self.token_type,\n }\n\n\nclass Sender(object):\n def __init__(self, prename, lastname, street, zip_code, place, company='', country=''):\n self.prename = prename\n self.lastname = lastname\n self.street = street\n self.zip_code = zip_code\n self.place = place\n self.company = company\n self.country = country\n\n def is_valid(self):\n return all(field for field in [self.prename, self.lastname, self.street, self.zip_code, self.place])\n\n\nclass Recipient(object):\n def __init__(self, prename, lastname, street, zip_code, place, company='', company_addition='', salutation=''):\n self.salutation = salutation\n self.prename = prename\n self.lastname = lastname\n self.street = street\n self.zip_code = zip_code\n self.place = place\n self.company = company\n self.company_addition = company_addition\n\n def is_valid(self):\n return all(field for field in [self.prename, self.lastname, self.street, self.zip_code, self.place])\n\n def to_json(self):\n return {'recipientFields': [\n {'name': 'Salutation', 'addressField': 'SALUTATION'},\n {'name': 'Given Name', 'addressField': 'GIVEN_NAME'},\n {'name': 'Family Name', 'addressField': 'FAMILY_NAME'},\n {'name': 'Company', 'addressField': 'COMPANY'},\n {'name': 'Company', 'addressField': 'COMPANY_ADDITION'},\n {'name': 'Street', 'addressField': 'STREET'},\n {'name': 'Post Code', 'addressField': 'ZIP_CODE'},\n {'name': 'Place', 'addressField': 'PLACE'}],\n 'recipients': [\n [self.salutation, self.prename,\n self.lastname, self.company,\n self.company_addition, self.street,\n self.zip_code, self.place]]}\n\n\nclass Postcard(object):\n def __init__(self, sender, recipient, picture_stream, message=''):\n self.recipient = recipient\n self.message = message\n self.picture_stream = picture_stream\n self.sender = sender\n self.frontpage_layout = pkg_resources.resource_string(__name__, 'page_1.svg').decode('utf-8')\n self.backpage_layout = pkg_resources.resource_string(__name__, 'page_2.svg').decode('utf-8')\n\n def is_valid(self):\n return self.recipient is not None \\\n and self.recipient.is_valid() \\\n and self.sender is not None \\\n and self.sender.is_valid()\n\n def validate(self):\n if self.recipient is None or not self.recipient.is_valid():\n raise PostcardCreatorException('Not all required attributes in recipient set')\n if self.recipient is None or not self.recipient.is_valid():\n raise PostcardCreatorException('Not all required attributes in sender set')\n\n def get_frontpage(self, asset_id):\n return self.frontpage_layout.replace('{asset_id}', str(asset_id))\n\n def get_backpage(self):\n svg = self.backpage_layout\n return svg \\\n .replace('{first_name}', self.recipient.prename) \\\n .replace('{last_name}', self.recipient.lastname) \\\n .replace('{company}', self.recipient.company) \\\n .replace('{company_addition}', self.recipient.company_addition) \\\n .replace('{street}', self.recipient.street) \\\n .replace('{zip_code}', str(self.recipient.zip_code)) \\\n .replace('{place}', self.recipient.place) \\\n .replace('{sender_company}', self.sender.company) \\\n .replace('{sender_name}', self.sender.prename + ' ' + self.sender.lastname) \\\n .replace('{sender_address}', self.sender.street) \\\n .replace('{sender_zip_code}', str(self.sender.zip_code)) \\\n .replace('{sender_place}', self.sender.place) \\\n .replace('{sender_country}', 
self.sender.country) \\\n .replace('{message}',\n self.message.encode('ascii', 'xmlcharrefreplace').decode('utf-8')) # escape umlaute\n\n\ndef _send_free_card_defaults(func):\n def wrapped(*args, **kwargs):\n kwargs['image_target_width'] = kwargs.get('image_target_width') or 154\n kwargs['image_target_height'] = kwargs.get('image_target_height') or 111\n kwargs['image_quality_factor'] = kwargs.get('image_quality_factor') or 20\n kwargs['image_rotate'] = kwargs.get('image_rotate') or True\n kwargs['image_export'] = kwargs.get('image_export') or False\n return func(*args, **kwargs)\n\n return wrapped\n\n\nclass PostcardCreator(object):\n def __init__(self, token=None, _protocol='https://'):\n if token.token is None:\n raise PostcardCreatorException('No Token given')\n self.token = token\n self.protocol = _protocol\n self.host = '{}postcardcreator.post.ch/rest/2.1'.format(self.protocol)\n self._session = self._create_session()\n\n def _get_headers(self):\n return {\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0.1; wv) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Version/4.0 Chrome/52.0.2743.98 Mobile Safari/537.36',\n 'Authorization': 'Bearer {}'.format(self.token.token)\n }\n\n def _create_session(self):\n return requests.Session()\n\n def _do_op(self, method, endpoint, **kwargs):\n url = self.host + endpoint\n if 'headers' not in kwargs or kwargs['headers'] is None:\n kwargs['headers'] = self._get_headers()\n\n logger.debug('{}: {}'.format(method, url))\n response = self._session.request(method, url, **kwargs)\n _trace_request(response)\n\n if response.status_code not in [200, 201, 204]:\n e = PostcardCreatorException('error in request {} {}. status_code: {}'\n .format(method, url, response.status_code))\n e.server_response = response.text\n raise e\n return response\n\n def get_user_info(self):\n logger.debug('fetching user information')\n endpoint = '/users/current'\n return self._do_op('get', endpoint).json()\n\n def get_billing_saldo(self):\n logger.debug('fetching billing saldo')\n\n user = self.get_user_info()\n endpoint = '/users/{}/billingOnlineAccountSaldo'.format(user[\"userId\"])\n return self._do_op('get', endpoint).json()\n\n def get_quota(self):\n logger.debug('fetching quota')\n\n user = self.get_user_info()\n endpoint = '/users/{}/quota'.format(user[\"userId\"])\n return self._do_op('get', endpoint).json()\n\n def has_free_postcard(self):\n return self.get_quota()['available']\n\n @_send_free_card_defaults\n def send_free_card(self, postcard, mock_send=False, **kwargs):\n if not self.has_free_postcard():\n raise PostcardCreatorException('Limit of free postcards exceeded. 
Try again tomorrow at '\n + self.get_quota()['next'])\n if not postcard:\n raise PostcardCreatorException('Postcard must be set')\n\n postcard.validate()\n user = self.get_user_info()\n user_id = user['userId']\n card_id = self._create_card(user)\n\n picture_stream = self._rotate_and_scale_image(postcard.picture_stream, **kwargs)\n asset_response = self._upload_asset(user, picture_stream=picture_stream)\n self._set_card_recipient(user_id=user_id, card_id=card_id, postcard=postcard)\n self._set_svg_page(1, user_id, card_id, postcard.get_frontpage(asset_id=asset_response['asset_id']))\n self._set_svg_page(2, user_id, card_id, postcard.get_backpage())\n\n if mock_send:\n response = False\n logger.debug('postcard was not sent because flag mock_send=True')\n else:\n response = self._do_order(user_id, card_id)\n logger.debug('postcard sent for printout')\n\n return response\n\n def _create_card(self, user):\n endpoint = '/users/{}/mailings'.format(user[\"userId\"])\n\n mailing_payload = {\n 'name': 'Mobile App Mailing {}'.format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")),\n 'addressFormat': 'PERSON_FIRST',\n 'paid': False\n }\n\n mailing_response = self._do_op('post', endpoint, json=mailing_payload)\n return mailing_response.headers['Location'].partition('mailings/')[2]\n\n def _upload_asset(self, user, picture_stream):\n logger.debug('uploading postcard asset')\n endpoint = '/users/{}/assets'.format(user[\"userId\"])\n\n files = {\n 'title': (None, 'Title of image'),\n 'asset': ('asset.png', picture_stream, 'image/jpeg')\n }\n headers = self._get_headers()\n headers['Origin'] = 'file://'\n response = self._do_op('post', endpoint, files=files, headers=headers)\n asset_id = response.headers['Location'].partition('user/')[2]\n\n return {\n 'asset_id': asset_id,\n 'response': response\n }\n\n def _set_card_recipient(self, user_id, card_id, postcard):\n logger.debug('set recipient for postcard')\n endpoint = '/users/{}/mailings/{}/recipients'.format(user_id, card_id)\n return self._do_op('put', endpoint, json=postcard.recipient.to_json())\n\n def _set_svg_page(self, page_number, user_id, card_id, svg_content):\n logger.debug('set svg template ' + str(page_number) + ' for postcard')\n endpoint = '/users/{}/mailings/{}/pages/{}'.format(user_id, card_id, page_number)\n\n headers = self._get_headers()\n headers['Origin'] = 'file://'\n headers['Content-Type'] = 'image/svg+xml'\n return self._do_op('put', endpoint, data=svg_content, headers=headers)\n\n def _do_order(self, user_id, card_id):\n logger.debug('submit postcard to be printed and delivered')\n endpoint = '/users/{}/mailings/{}/order'.format(user_id, card_id)\n return self._do_op('post', endpoint, json={})\n\n def _rotate_and_scale_image(self, file, image_target_width=154, image_target_height=111,\n image_quality_factor=20, image_rotate=True, image_export=False):\n\n with Image.open(file) as image:\n if image_rotate and image.width < image.height:\n image = image.rotate(90, expand=True)\n logger.debug('rotating image by 90 degrees')\n\n if image.width < image_quality_factor * image_target_width \\\n or image.height < image_quality_factor * image_target_height:\n factor_width = math.floor(image.width / image_target_width)\n factor_height = math.floor(image.height / image_target_height)\n factor = min([factor_height, factor_width])\n\n logger.debug('image is smaller than default for resize/fill. 
'\n 'using scale factor {} instead of {}'.format(factor, image_quality_factor))\n image_quality_factor = factor\n\n width = image_target_width * image_quality_factor\n height = image_target_height * image_quality_factor\n logger.debug('resizing image from {}x{} to {}x{}'\n .format(image.width, image.height, width, height))\n\n cover = resizeimage.resize_cover(image, [width, height], validate=True)\n with BytesIO() as f:\n cover.save(f, 'PNG')\n scaled = f.getvalue()\n\n if image_export:\n name = strftime(\"postcard_creator_export_%Y-%m-%d_%H-%M-%S.jpg\", gmtime())\n path = os.path.join(os.getcwd(), name)\n logger.info('exporting image to {} (image_export=True)'.format(path))\n cover.save(path)\n\n return scaled\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(name)s (%(levelname)s): %(message)s')\n logging.getLogger('postcard_creator').setLevel(logging.DEBUG)\n","sub_path":"postcard_creator/postcard_creator.py","file_name":"postcard_creator.py","file_ext":"py","file_size_in_byte":21150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"484328282","text":"\nmaxx = 0\nmaxy = 0\nlines = []\nfor str in open('day14.txt'):\n line_strs = str.strip().split(' -> ')\n line_coords = [[int(crd) for crd in s.split(',')] for s in line_strs]\n for i in range(len(line_coords) - 1):\n lines.append((line_coords[i], line_coords[i+1]))\n maxx = max(maxx, line_coords[i][0], line_coords[i+1][0])\n maxy = max(maxy, line_coords[i][1], line_coords[i+1][1])\n# print(lines, maxx, maxy, len(lines))\n\ngrid = [[' '] * (maxy + 1) for i in range(maxx + 1)]\ndef sign(i):\n if i > 0:\n return 1\n if i == 0:\n return 0\n return -1\n\ndef add_line(grid, line):\n dx, dy = sign(line[1][0] - line[0][0]), sign(line[1][1] - line[0][1])\n x, y = line[0]\n print(line, maxx, maxy, line[1][1], line[0][1], line[1][1] - line[0][1], len(grid), len(grid[0]))\n while [x, y] != line[1]:\n #print(x, y)\n grid[x][y] = '#'\n x += dx\n y += dy\n grid[x][y] = '#'\n\nfor line in lines:\n add_line(grid, line)\n\ndef drop_sand(grid):\n x, y = 500, 0\n while True:\n if y >= maxy or x < 0 or x > maxx:\n return False\n if grid[x][y+1] == ' ':\n y += 1\n continue\n if grid[x-1][y+1] == ' ':\n x -= 1\n y += 1\n continue\n if grid[x+1][y+1] == ' ':\n x += 1\n y += 1\n continue\n grid[x][y] = '.'\n return True\n\ncount = 0\nwhile drop_sand(grid):\n count += 1\nprint('Part 1', count)","sub_path":"2022/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"560143362","text":"import requests\nimport random\nimport time\nfrom bs4 import BeautifulSoup\nimport settings\nfrom mongomanager import KeywordsMongo\n'''\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\nAccept-Encoding: gzip, deflate, br\nAccept-Language: en-US,en;q=0.9\nCache-Control: max-age=0\nConnection: keep-alive\nCookie: BAIDUID=4598A5614DC49B579474B573322C3794:FG=1; SE_LAUNCH=5%3A26385780; delPer=0; H_WISE_SIDS=141002_142063_135847_142208_122158_142115_141125_142019_141838_140853_142514_138878_140989_142918_142390_142779_142285_136862_131861_140174_131246_137745_138165_140324_138883_133847_140259_141941_127969_140065_142907_140595_143056_138425_141009_141191_141926_131423_141706_107318_142345_138596_142271_140367_141103_110085; 
rsv_i=c6429MaTEvyEgjsiMRH%2FsyZ0S523c14NqeTUffYy6TTkL77fKalL%2Fm1cpjrDbfO5Uher9QSRxYGja1%2BGI2SKVmorTw%2F5lEk; PSINO=7; BIDUPSID=4598A5614DC49B579474B573322C3794; PSTM=1583153681; BD_HOME=0; BD_UPN=123353; BD_CK_SAM=1; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=30972_1435_21109_30824_22158; H_PS_645EC=b34fSQC0yXmy5A%2BjNvraDpq4o0YkbcXrHWYTw6NvJQikEa1gCDBmRcYuB1I\nHost: www.baidu.com\nSec-Fetch-Dest: document\nSec-Fetch-Mode: navigate\nSec-Fetch-Site: none\nSec-Fetch-User: ?1\nUpgrade-Insecure-Requests: 1\nUser-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) snap Chromium/80.0.3987.122 Chrome/80.0.3987.122 Safari/537.36\n'''\n\n\nclass KeywordsSpider(object):\n def __init__(self, keywords):\n self.user_agents = settings.USER_AGENTS[random.randint(0,19)]\n self.headers = settings.USER_HEADERS\n self.headers['User-Agent'] = self.user_agents\n self.keywords = keywords\n # self.url = url\n self.baidu_url = 'https://www.baidu.com/s?wd='\n self.keywords = keywords\n self.PROXY_POOL_URL = 'http://localhost:5555/random'\n\n def get_proxy(self):\n try:\n res = requests.get(self.PROXY_POOL_URL)\n if res.status_code == 200:\n return res.text\n except ConnectionError:\n return None\n\n def get_baidu_cookie(self):\n url = 'https://www.baidu.com'\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400 QQBrowser/9.7.12661.400'\n headers = {'User-Agent':user_agent}\n\n res = requests.Session().get(url=url, headers=headers)\n print(\"res.cookies = \", res.cookies)\n return res.cookies\n\n\n def get_new_headers(self):\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Upgrade-Insecure-Requests': '1',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Connection': 'keep-alive',\n 'Cookies': self.get_baidu_cookie(),\n 'User-Agent': settings.USER_AGENTS[random.randint(0,19)]\n }\n return headers\n\n def get_info_request(self, url, headers):\n\n # proxy = self.get_proxy()\n # proxies = {\n # 'http': 'http://' + proxy,\n # 'https': 'https://' + proxy,\n # }\n # page = requests.get(url=url, headers=self.headers, proxies=proxies)\n try:\n page = requests.get(url=url, headers=headers, timeout=10)\n except:\n page =None\n\n\n if page == None:\n headers=self.get_new_headers()\n try:\n page = requests.get(url=url, headers=headers, timeout=10)\n except:\n print(\"这个链接已经失效,请查看\")\n return\n\n page.encoding = 'utf8'\n soup = BeautifulSoup(page.text, \"html.parser\")\n title = None\n try:\n title = soup.title.string\n print(title)\n except :\n title = None\n print(\"没有关键词为title的描述\")\n print(title)\n\n description = None\n try:\n description = soup.find(attrs={\"name\":\"description\"})['content']\n print(description)\n except :\n description = None\n print(\"没有关键词为description的描述\")\n if description == None:\n try:\n description = soup.find(attrs={\"name\":\"Description\"})['content']\n print(description)\n except :\n description = None\n print(\"没有关键词为description的描述\")\n\n if description == None:\n try:\n description = soup.find(attrs={\"name\":\"DESCRIPTION\"})['content']\n print(description)\n except :\n description = None\n print(\"没有关键词为description的描述\")\n\n keywords = None\n try:\n keywords = soup.find(attrs={\"name\":\"keywords\"})['content']\n print(keywords)\n except :\n keywords = None\n print(\"没有关键词为description的描述\")\n\n if keywords == None:\n try:\n 
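# fall back to the capitalised Keywords meta tag that some sites use instead\n                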
keywords = soup.find(attrs={\"name\":\"Keywords\"})['content']\n print(keywords)\n except :\n keywords = None\n print(\"没有关键词为description的描述\")\n\n if keywords == None:\n try:\n keywords = soup.find(attrs={\"name\":\"KEYWORDS\"})['content']\n print(keywords)\n except :\n keywords = None\n print(\"没有关键词为description的描述\")\n\n if title != None and description != None and keywords != None and description != '' and keywords != '':\n context = {\n \"title\": title,\n \"description\": description,\n \"keywords\": keywords\n }\n return context\n else:\n context = None\n return context\n\n\n\n def get_urls_from_baidu(self):\n return_context = []\n for i in range(0, 60):\n url = self.baidu_url + self.keywords + '&pn=' + str(10*i)\n print(url)\n # headers = self.get_new_headers()\n headers = self.headers\n page = requests.get(url=url, headers=headers)\n time.sleep(random.uniform(1.1,5.4))\n # print(page.text)\n page.encoding = 'utf8'\n # print(page.text)\n print(page.status_code)\n soup = BeautifulSoup(page.text, \"html5lib\")\n tagh3 = soup.find_all('h3')\n print(tagh3)\n print(len(tagh3))\n for h3 in tagh3:\n print(\"HI\")\n try:\n href = h3.a.attrs['href']\n except:\n href = None\n print(\"读取百度页面失败\")\n continue\n print(href)\n result = self.get_info_request(href, headers)\n if result == None:\n pass\n else:\n print(\"result = \", result)\n result[\"url\"] = href\n keywordsmogodb = KeywordsMongo()\n keywordsmogodb.test_insert(self.keywords ,result)\n return_context.append(result)\n\n return return_context\n\n # try:\n # href = h3.a.attrs['href']\n # print(href)\n # print(type(href))\n # self.get_request(href)\n # except :\n # time.sleep(10)\n # print(\"Error\")\n # break\n\n\n # if real_url.startswith('http'):\n # all.write(real_url + '\\n')\n\n\n\n\n# def main():\n# url = 'http://www.wuhanzhengtian.com/'\n# keswords = '在线无码av高清毛片'\n# keywordsspider= KeywordsSpider(keswords)\n# keywordsspider.get_urls_from_baidu()\n#\n# if __name__ == '__main__':\n# main()\n","sub_path":"9999_some_tiny_program/keywordsspider/keywordsspider.py","file_name":"keywordsspider.py","file_ext":"py","file_size_in_byte":8039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"205171406","text":"\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework.parsers import JSONParser\n\nfrom metrics.models import Metric, ComputeType\n\n\nclass metrics(APIView):\n\n permission_classes = (IsAuthenticatedOrReadOnly,)\n parser_classes = [JSONParser]\n\n def get(self, request):\n metrics = []\n for metric_object in Metric.objects.all():\n metrics.append({'id': metric_object.id,\n 'name': metric_object.name,\n 'display_name': metric_object.display_name,\n 'description_short': metric_object.description_short,\n 'description_long': metric_object.description_long,\n 'compute_type': metric_object.compute_type.name,\n 'parent': metric_object.parent.name if metric_object.parent else None,\n 'units': metric_object.units})\n output = {'metrics': {'data': metrics, 'count': len(metrics)}}\n return Response(output)\n\n def post(self, request):\n if ComputeType.objects.all().count() < 1:\n return Response({'status': 'error', 'message': 'Metrics require compute type'})\n for value in request.data['data']:\n m_object, _ = Metric.objects.get_or_create(id=value['id'],\n name=value['name'],\n display_name=value['display_name'],\n description_short=value['description_short'],\n 
description_long=value['description_long'],\n compute_type=ComputeType.objects.get(name=value['compute_type']),\n units=value['units']\n )\n return Response({'status': 'ok', 'message': 'Metrics loaded'})\n","sub_path":"metrics/views/api/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313761654","text":"__author__ = 'Mohammad Yousuf Ali, aliyyousuf@gmail.com'\n\n# Write a function that receives a positive integer as function parameter and returns True if\n# the integer is a perfect number, False otherwise. A perfect number is a number whose sum of\n# the all the divisors (excluding itself) is equal to itself. For example: divisors of 6\n# (excluding 6 are) : 1, 2, 3 and their sum is 1+2+3 = 6. Therefore, 6 is a perfect number.\ndef common_divisor(n):\n output = False\n summ = 0\n for i in range(1,n):\n if n % i == 0:\n summ += i\n if summ == n:\n output = True\n return output\n\n\n","sub_path":"UTAx-CSE1309X/Q3P5.py","file_name":"Q3P5.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401719145","text":"import time\n\n\ndef utc_mktime(DATE, TIME):\n \"\"\"Returns number of seconds elapsed since epoch\n \n Note that no timezone are taken into consideration.\n \n utc tuple must be: (year, month, day, hour, minute, second)\n \"\"\"\n if 170101 < DATE < 180101: # 年月日\n utc_tuple = (int(DATE / 10000) + 2000,\n int((DATE - int(DATE / 10000) * 10000) / 100),\n int(DATE - int(DATE / 100) * 100),\n int(TIME / 10000),\n int((TIME - int(TIME / 10000) * 10000) / 100),\n int(TIME - int(TIME / 100) * 100),\n 0, 0, 0)\n else: # 日月年\n utc_tuple = (int(DATE - int(DATE / 100) * 100) + 2000,\n int((DATE - int(DATE / 10000) * 10000) / 100),\n int(DATE / 10000),\n int(TIME / 10000),\n int((TIME - int(TIME / 10000) * 10000) / 100),\n int(TIME - int(TIME / 100) * 100),\n 0, 0, 0)\n return int(time.mktime(utc_tuple))\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Converts a datetime object to UTC timestamp\"\"\"\n return int(utc_mktime(dt.timetuple()))\n\n\ndef normal_time_stamp(DATE):\n if 170101 < DATE < 180101: # 年月日\n normal_time_stamp = (int(DATE / 10000) + 2000)*10000+(int((DATE - int(DATE / 10000) * 10000) / 100))*100+int(DATE - int(DATE / 100) * 100)\n\n else: # 日月年\n normal_time_stamp = ((int(DATE - int(DATE / 100) * 100) + 2000))*10000+(int((DATE - int(DATE / 10000) * 10000) / 100))*100+int(DATE / 10000)\n\n return normal_time_stamp\n","sub_path":"Time_to_UTC.py","file_name":"Time_to_UTC.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"131276709","text":"from p5 import *\nimport random, pygame\n\n\ndef make2DArray(boardwidth, boardheight):\n #arr = np.array([range(boardwidth) for row in range(boardheight)])\n #arr = np.zeros(shape = (boardheight,boardwidth))\n arr = [[0 for _i in range(boardwidth)] for _j in range(boardheight)]\n return arr\n\n\ndef setup():\n global cellsize\n global totalMines\n global grid\n global boardheight\n global boardwidth\n global boardrects\n global windowheight\n global windowwidth\n \n cellsize = 20\n totalMines = 30\n windowwidth = 600\n windowheight = 600\n \n size(windowwidth, windowheight)\n \n boardwidth = int(windowwidth // cellsize)\n boardheight = int(windowheight // cellsize)\n \n xmargin = int((windowwidth - cellsize * boardwidth) / 2)\n 
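# margins centre the grid: half of whatever space is left after whole cells are laid out\n    # e.g. windowwidth=600 and cellsize=20 give boardwidth=30, so xmargin works out to 0\n    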
ymargin = int((windowwidth - cellsize * boardheight) / 2)\n \n #grid = make2DArray(boardwidth, boardheight)\n \n # fill every \"cell\" with a cell object\n #for col in range(len(grid)):\n # for row in range(len(grid[0])):\n # grid[col][row] = Cell(col, row, cellsize)\n # Create pygame.Rect objects for each board space to\n # do board-coordinate-to-pixel-coordinate conversions.\n grid = []\n for x in range(boardwidth):\n grid.append([])\n for y in range(boardheight):\n r = pygame.Rect((xmargin + (x*cellsize),\n ymargin + (y*cellsize),\n cellsize,\n cellsize))\n grid[x].append(r)\n \n \n \n \n # Pick totalMines spots\n options = []\n for i in range(boardwidth):\n for j in range(boardheight):\n options.append([i, j])\n \n \n for n in range(totalMines):\n index = int(random.randint(1, len(options)+1))\n print(\"index\", index)\n choice = options[index]\n print(\"choice\", choice)\n print(\"index\", index)\n \n i = choice[0]\n j = choice[1]\n # Delete that spot so its no longer an option\n options.remove(choice)\n grid[i][j].__mine = True\n \n for i in range(boardwidth):\n for j in range(boardheight):\n c = grid[i][j]\n c.countMines()\n \n \ndef draw():\n background(255)\n \n for i in range(boardwidth):\n for j in range(boardheight):\n grid[i][j].show()\n\n\ndef gameOver():\n for i in range(boardwidth):\n for j in range(boardheight):\n grid[i][j].__revealed == True\n\n\ndef mousePressed():\n for i in range(boardwidth):\n for j in range(boardheight):\n if grid[i][j].contains(mouse_x, mouse_y):\n grid[i][j].reveal()\n \n if grid[i][j].mine:\n gameOver()\n\n\n\nclass Cell:\n def __init__(self, i, j, cellsize):\n self.__i = i\n self.__j = j\n self.__x = i * cellsize\n self.__y = j * cellsize\n self.__cellsize = cellsize\n self.__neighborCount = 0\n \n self.__mine = False\n self.__revealed = False\n \n def show(self):\n stroke(0)\n fill.enabled = False\n #nofill()\n _rect_mode = 'CENTER'\n print(self.__x, self.__y)\n rect(self.__x, self.__y, self.__cellsize, self.__cellsize) #removed , self.__cellsize, self.__cellsize from the parantheses\n if self.__revealed:\n if self.__mine:\n fill(127)\n ellipse(self.__x + self.__cellsize * 0.5, self.__y + self.__y * 0.5, self.__cellsize * 0.5)\n else:\n fill(200)\n rect(self.__x, self.__y, self.__cellsize, self.__cellsize)\n if self.__neighborCount > 0:\n # textAlign is not integrated yet in p5.py \n textAlign(CENTER)\n fill(0)\n # text is not integrated yet in p5.py\n text(self.__neighborCount, self.__x + self.__cellsize *0.5, self.__y + self.__cellsize - 6)\n\n\n def countMines(self):\n if self.__mine:\n self.__neighborCount = -1\n return\n \n total = 0\n for xoff in range(-1, 2):\n i = self.__i + xoff\n if i < 0 or i >= boardwidth:\n continue\n for yoff in range(-1, 2):\n j = self.__y + yoff\n if j < 0 or j >= boardheight:\n continue\n neighbor = grid[i][j]\n if neighbor.__mine:\n total =+1\n self.__neighborCount = total\n \n \n def contains(self, x, y):\n return x > self.__x and x < self.__x + self.__cellsize and y > self.__y and y < self.__y + self.__cellsize\n \n \n def reveal(self):\n self.__revealed = True\n if self.__neighborCount == 0:\n # floodFill time\n self.floodFill()\n \n def floodFill(self):\n for xoff in range(-1, 2):\n i = self.__i + xoff\n if i < 0 or i >= boardwidth:\n continue\n for yoff in range(-1, 2):\n j = self.__y + yoff\n if j < 0 or j >= boardheight:\n continue\n \n neighbor = grid[i][j]\n if not neighbor.__revealed:\n neighbor.reveal()\n\n\n\n ###### to be continued here \n 
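# a quick, commented-out sanity check of the grid helper (illustrative sizes only):\n# assert make2DArray(3, 2) == [[0, 0, 0], [0, 0, 0]]\n\n# run() hands control to p5: setup() runs once, then draw() repeats each frame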
\nrun()","sub_path":"game/game/p5test.py","file_name":"p5test.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"478240979","text":"#!/usr/bin/env python\n\nimport os\nimport csv\nimport io\ntry:\n import statistics\nexcept:\n import statistics_for_py2 as statistics\n\nfrom data_types import Purchase\n\ndef main():\n print_header()\n filename = get_data_file()\n data = load_file(filename)\n query_data(data)\n\ndef print_header():\n print('---------------------------') \n print('REAL ESTATE DATA MINING APP') \n print('---------------------------')\n print('')\n\ndef get_data_file():\n base_folder = os.path.dirname(__file__)\n return os.path.join(base_folder,'data','file.csv')\n\ndef load_file(filename):\n with io.open(filename, 'r', encoding='utf-8') as fin:\n\n reader = csv.DictReader(fin)\n purchases = []\n for row in reader:\n p = Purchase.create_from_dict(row)\n purchases.append(p)\n\n return purchases\n\ndef announce(item,msg):\n print(\"Pulling item {} for {}\".format(item, msg))\n return item\n\ndef query_data(data):\n data.sort(key=lambda p: p.price)\n\n high_purchase = data[-1]\n print(\"The most expensive house is ${:,} with {} beds and {} baths\".format(\n high_purchase.price, high_purchase.beds, high_purchase.baths))\n low_purchase = data[0]\n print(\"The lease expensive house is ${:,} with {} beds and {} baths\".format(\n low_purchase.price, low_purchase.beds, low_purchase.baths))\n \n two_bedroom_homes = (\n p\n for p in data\n if announce(p, \"2-bedrooms, found {}\".format(p.beds)) and p.beds == 2 \n )\n\n homes = []\n for h in two_bedroom_homes:\n if len(homes) > 5:\n break\n homes.append(h)\n\n ave_price = statistics.mean((announce(p.price, \"price\") for p in homes))\n ave_baths = statistics.mean((p.baths for p in homes))\n ave_sqft = statistics.mean((p.sq__ft for p in homes))\n print(\"The average 2-bedrooms home price is ${:,},bath={},sq ft={:,}\".format(int(ave_price),round(ave_baths,1),round(ave_sqft,1)))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"375364335","text":"import datetime\nfrom dayangpy.api.base import BaseAPI\nfrom dayangpy.utils import handle_args\n\n\nclass DaYangHotspot(BaseAPI):\n \"\"\"\n 热点事件查询相关接口\n \"\"\"\n\n API_BASE_URL = \"http://gateway.bigdata.cloud.dayang.com.cn:8088\"\n\n def list(self, sort_name, sort_type, latest_days, **kwargs):\n \"\"\"\n 获取热点列表根据时间或者热度排序,查询条件支持地域、分类、时间\n \"\"\"\n\n data = handle_args(kwargs)\n data.update(sorts=[{\"sortName\": sort_name, \"sortType\": sort_type}])\n time_end = datetime.datetime.now()\n time_start = time_end - datetime.timedelta(days=int(latest_days))\n data.update(timeStart=time_start.strftime(\"%Y-%m-%d %H:%M:%S\"))\n data.update(timeEnd=time_end.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n result = self._post(\"/openapi4hoge/v1/events/list\", json=data)\n return result.get(\"result\", None).get(\"resources\", None)\n\n def get_classify(self):\n \"\"\"\n 获取热点类型\n \"\"\"\n result = self._get(f\"/openapi4hoge/v1/events/classify/list\")\n return result.get(\"resultList\", None)\n\n def get_area_heat(self, latest_days, **kwargs):\n \"\"\"\n 地域热点地图统计\n \"\"\"\n data = handle_args(kwargs)\n time_end = datetime.datetime.now()\n time_start = time_end - datetime.timedelta(days=int(latest_days))\n 
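# serialise the rolling-window endpoints in the format the upstream gateway expects\n        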
data.update(timeStart=time_start.strftime(\"%Y-%m-%d %H:%M:%S\"))\n data.update(timeEnd=time_end.strftime(\"%Y-%m-%d %H:%M:%S\"))\n result = self._get(f\"/openapi4hoge/v1/events/areaHeat\", params=data)\n return result.get(\"result\", None).get(\"subAreas\", None)\n\n def get_hotwords(self, latest_days, **kwargs):\n \"\"\"\n 热词排行列表\n \"\"\"\n data = handle_args(kwargs)\n time_end = datetime.datetime.now()\n time_start = time_end - datetime.timedelta(days=int(latest_days))\n data.update(timeStart=time_start.strftime(\"%Y-%m-%d %H:%M:%S\"))\n data.update(timeEnd=time_end.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n result = self._get(f\"/openapi4hoge/v1/hotword/list\", params=data)\n return result.get(\"result\", None).get(\"list\", None)\n","sub_path":"dayangpy/api/hotspot.py","file_name":"hotspot.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39855178","text":"from riotwatcher import LolWatcher\nimport discord\n\n#currently can return rank, tier, and winrate for summoner name via discord command using this module below to interface with league API \n# https://riot-watcher.readthedocs.io/en/latest/riotwatcher/LeagueOfLegends/index.html\n# https://www.youtube.com/watch?v=ml0lKDU5JvY could be helpful (using requests and json with league API raw to print out stats)\n\nkey=\"RGAPI-16bce2cc-2dd9-44b1-92a2-d4bddf46fa3b\"\nwatcher = LolWatcher(key)\n\n\ndef printStats(summonerName):\n summoner = watcher.summoner.by_name(\"na1\", summonerName)\n summonerID = summoner['id']\n stats=watcher.league.by_summoner(\"na1\", summonerID)\n\n tier = stats[0]['tier']\n rank = stats[0]['rank']\n lp = stats[0]['leaguePoints']\n wins = int(stats[0]['wins'])\n losses = int(stats[0]['losses'])\n winrate= int(wins/(wins+losses)*100)\n #print(tier, rank, lp)\n #print(\"winrate: \" + str(winrate)+\"% \")\n #print(summonerName + \" is currently ranked in \" + str(tier), str(rank) + \" with \" + str(lp) + \" LP and a \" + str(winrate) + \"% winrate.\")\n userData = summonerName + \" is currently ranked in \" + str(tier) + \", \" + str(rank) + \" with \" + str(lp) + \" LP and a \" + str(winrate) + \"% winrate.\"\n return userData\n\n#serData = printStats(\"EddieTGH12\")\n#print(userData)\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if message.content.startswith('$stats'):\n summonerName = message.content.split(\"$stats \",1)[1]\n #print(\"summonername: \" + summonerName)\n userData = printStats(summonerName)\n await message.channel.send(userData)\n\nclient.run(\"ODgyOTkxNTMxOTY1OTQ3OTY0.YTDb8g.8nrYM2nYkK_uGnufbL4tvZaWpf8\")\n ","sub_path":"riotapi.py","file_name":"riotapi.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"512132293","text":"# 这是自己建的urls.py,方便在py_server的urls.py文件中调用\nfrom django.urls import path\nfrom backend import views\n\nurlpatterns = [\n path(r'test_view_function', views.test_view_function, name='test_view_function'),\n path(r'test_struct_function', views.test_struct_function, name='test_struct_function'),\n path(r'cofactor_analysis', views.cofactor_analysis, name='cofactor_analysis'),\n path(r'FBA_caculation', views.FBA_caculation, name='FBA_caculation'),\n path(r'FVA_caculation', views.FVA_caculation, name='FVA_caculation'),\n 
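# the remaining routes follow the same convention: the route name mirrors its view function\n    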
path(r'full_analysis', views.full_analysis, name='full_analysis'),\n path(r'model_info', views.model_info, name='model_info'),\n path(r'out_image', views.out_image, name='out_image'),\n path(r'mat_info', views.mat_info, name='mat_info'),\n]\n","sub_path":"mst_py/backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"520876332","text":"# Example1 :\n\ndata = lambda x:x**2\nprint(data(5))\n\n#\nlist2 = [3, 5, 6, 7]\n\nresult = list(map((lambda X : X**2), list2))\nprint(result)\n\n# map example2\n\nlist_str = ['hello', 'geek', 'ogo', ]\nresult_str = list(map(lambda x: str(x).upper(), list_str))\nprint(result_str)\n#----------\nlist_str2 = ['hello', 'geek', 'ogo', 'ata' ]\nresult_str2 = list(filter(lambda x: (str(x)[::-1] == x), list_str2))\nprint(result_str2)\n\n\n\n#----------------------\ndef fun2(list):\n for i in list:\n print(i**2, end=\" \")\n\nfun2([3, 5, 7, 8])\n\ndef fun(n):\n return n**2\n# using map function can eterate through any list of data\nresult3 = list(map(lambda x : fun(x), list2))\nprint(result3)\n\n#######################################\n# Filter : It will return list of true values:\n\nList3 = [1, 3, 6, 34, 65, 73]\n\nfilter_result = list(filter(lambda x : (x>2), List3))\nfilter_result1 = list(filter(lambda x : (x%2 == 0), List3))\nfilter_result3 = list(filter(lambda x : (x%2 != 0), List3))\n\n\nprint(filter_result)\nprint(filter_result1)\nprint(filter_result3)\n\n\n\n\n\n","sub_path":"PythonPractice/Lambda_function/lambda_example2.py","file_name":"lambda_example2.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"199313536","text":"message = input(\"Tell me something: \")\nprint(message)\n# When you use the input() function, Python interprets everything the user enters as a string\n# % modulo operator - returns remainder\n\n# 7-1\nrental_car = input(\"What kind of rental car would you like? \")\nprint(\"Let me see if I can find you a/an \" + rental_car.title())\n\n# 7-2\ndinner_group = input(\"How many people are in your group? \")\n# Textbook Answer - party_size = int(party_size)\nif int(dinner_group) > 8:\n print(\"Fucking Wait then!\")\nelse:\n print(\"Go to that table Ahole\")\n\n# 7-3\nuser_number = input(\"Input a number: \")\nif int(user_number) % 10 == 0:\n print(\"That's multiple of 10!\")\nelse:\n print(\"not multiple of 10. Fuck off\")\n\n# != not equal to\n\nprompt = \"\\nTell me something, and I will repeat it back to you: \"\nprompt += \"\\nEnter 'quit' to end the program.\"\n# Python must have something(initial value) to check the first time it reaches the while line\nmessage = \"\"\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\n\n# flag - set any name as a variable to determine whether or not the entire program is active\nactive = True\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n\n# 7-4\ntoppings = True\nwhile toppings:\n pizza_toppings = input(\"do you wannna add anything toppings? \" + \"\\nif not type in 'quit'\")\n if pizza_toppings != 'quit':\n #pizza_toppings = input(\"do you wannna add anything toppings? 
\" + \"\\nif not type in 'quit'\") - NameError: name 'pizza_toppings' is not defined\n print(pizza_toppings.title() + \" has been added.\")\n if pizza_toppings == 'quit':\n print('okay, we are preparing your pizza!')\n break\n# Textbook answer\n #else:\n #break\n\n# 7-5\nactive = True\nwhile active:\n age = input(\"Enter your age\" + \"\\nOr enter 'quit' \")\n if age == 'quit':\n break\n #elif age == str:\n #print(\"enter a number\")\n elif int(age) <= 3:\n print(\"Your ticket is free\")\n elif int(age) <= 12:\n print(\"Please pay $10\")\n elif int(age) > 12:\n print(\"Please pay $15\")\n # How can I print if input is any string\n #elif type(age) == str: - ValueError: invalid literal for int() with base 10: 'sdgdsg'\n #elif age == str:\n #elif age == 'quit': - this must go to the first because if 'quit' is input it then it cause error cause 'quit' is converted to integer\nprompt = \"How old are you?\"\nprompt += \"\\nEnter 'quit' when you are finished. \"\n# Textbook answer\nwhile True:\n age = input(prompt)\n if age == 'quit':\n break\n age = int(age)\n\n if age < 3:\n print(\" You get in free!\")\n elif age < 13:\n print(\" Your ticket is $10.\")\n else:\n print(\" Your ticket is $15.\")\n\n# 7-8\nsandwich_orders = ['russian', 'the dude', 'the bomb']\nfinished_sandwiches = []\nwhile sandwich_orders:\n filled_orders = sandwich_orders.pop()\n print(\"Making \" + filled_orders)\n finished_sandwiches.append(filled_orders)\n\nfor finished in finished_sandwiches:\n print(finished + \" is done!!\")\n\n# 7-9\nsandwich_orders = ['pastrami', 'russian', 'pastrami', 'the dude', 'the bomb', 'pastrami']\nfinished_sandwiches = []\nprint('Sorry, We are out of pastrami')\nwhile 'pastrami' in sandwich_orders:\n sandwich_orders.remove('pastrami')\n\nwhile sandwich_orders:\n filled_orders = sandwich_orders.pop()\n print(\"Making \" + filled_orders)\n finished_sandwiches.append(filled_orders)\n\nfor finished in finished_sandwiches:\n print(finished + \" is done!!\")\n\n# 7-10\nreponses = {}\nactive = True\n\nwhile active:\n name = input(\"Enter your name: \")\n place = input(\"If you could visit on place in the world,\" + \"\\n Where would you go? \")\n\n reponses[name] = place\n\n repeat = input(\"Does your friend wanna response? Yes/No \")\n if repeat == 'no':\n active = False\n # you don't need to set up 'yes' separately since while is a loop it will continue to run\n\nfor names, places in reponses.items():\n print(names + ' would like to go to ' + places)","sub_path":"Ch7. Input&While.py","file_name":"Ch7. 
Input&While.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"434713533","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport time\nimport unittest\nimport sys\nimport requests\nfrom base_test_class import BaseTestCase\n\n\nclass Login(BaseTestCase):\n\n def get_api_key(self):\n driver = self.login_page()\n driver.get(self.base_url + \"api/key\")\n time.sleep(3)\n api_text = driver.find_element_by_tag_name(\"BODY\").text\n r_pattern = re.compile('Your current API key is (\\\\w+)')\n r_match = r_pattern.search(api_text)\n return r_match.group(1)\n\n def test_engagement_status(self):\n api_key = self.get_api_key()\n api_url = self.base_url + \"api/v1/engagements\"\n user = os.environ['DD_ADMIN_USER']\n headers = {'content-type': 'application/json',\n 'Authorization': 'ApiKey %s:%s' % (user, api_key)}\n r = requests.get(api_url, headers=headers, verify=False)\n self.assertEqual(r.status_code, 200)\n\n def test_finding_status(self):\n api_key = self.get_api_key()\n api_url = self.base_url + \"api/v1/findings\"\n user = os.environ['DD_ADMIN_USER']\n headers = {'content-type': 'application/json',\n 'Authorization': 'ApiKey %s:%s' % (user, api_key)}\n\n r = requests.get(api_url, headers=headers, verify=False)\n self.assertEqual(r.status_code, 200)\n\n def test_product_status(self):\n api_key = self.get_api_key()\n api_url = self.base_url + \"api/v1/products\"\n user = os.environ['DD_ADMIN_USER']\n headers = {'content-type': 'application/json',\n 'Authorization': 'ApiKey %s:%s' % (user, api_key)}\n r = requests.get(api_url, headers=headers, verify=False)\n self.assertEqual(r.status_code, 200)\n\n def test_t_status(self):\n api_key = self.get_api_key()\n api_url = self.base_url + \"api/v1/tests\"\n user = os.environ['DD_ADMIN_USER']\n headers = {'content-type': 'application/json',\n 'Authorization': 'ApiKey %s:%s' % (user, api_key)}\n r = requests.get(api_url, headers=headers, verify=False)\n self.assertEqual(r.status_code, 200)\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(Login('setUp'))\n suite.addTest(Login('login_page'))\n return suite\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2)\n ret = not runner.run(suite()).wasSuccessful()\n BaseTestCase.tearDownDriver()\n sys.exit(ret)\n","sub_path":"tests/check_status.py","file_name":"check_status.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"314478280","text":"#!usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAsynchronous coroutine crawler with multi-threading.\n\nCombination:\nThread pool + asyncio + requests\n\"\"\"\n\n__author__ = 'Ziang Lu'\n\nimport asyncio\nimport concurrent.futures as cf\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nTEMPLATE_URL = 'https://movie.douban.com/top250?start={}&filter='\n\n\ndef crawl_title(i: int) -> None:\n \"\"\"\n Crawls the movie titles of a Douban top250 page.\n :param i: int\n :return: None\n \"\"\"\n url = TEMPLATE_URL.format(i * 25)\n\n r = requests.get(url)\n print(r.status_code) # 200\n soup = BeautifulSoup(r.text, 'lxml')\n\n title_elems = soup.find('ol', class_='grid_view').find_all('li')\n for title_elem in title_elems:\n title = title_elem.find('span', class_='title').text\n print(title)\n\n\nasync def main():\n # Create a thread pool with 8 threads\n with cf.ThreadPoolExecutor(max_workers=8) 
as pool:\n loop = asyncio.get_event_loop()\n tasks = [\n loop.run_in_executor(pool, crawl_title, i) for i in range(10)\n ]\n await asyncio.gather(*tasks, return_exceptions=True)\n\n\nasyncio.run(main())\n","sub_path":"Concurrent Crawlers/Async Crawler/multi-threading_with_asyncio_requests.py","file_name":"multi-threading_with_asyncio_requests.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"300404422","text":"# Copyright (C) 2020 University of Glasgow\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom pathlib import Path\n\nimport ietfdata.datatracker as dt\nimport ietfdata.mailarchive as mailarchive\n\nfrom ietfdata.mailhelper_headerdata import *\nfrom ietfdata.mailhelper_datatracker import *\n\ndatatracker = dt.DataTracker(cache_dir=Path(\"cache\"))\narchive = mailarchive.MailArchive(cache_dir=Path(\"cache\"), helpers=[HeaderDataMailHelper(), DatatrackerMailHelper()])\n\ndef pretty_print_message_metadata(msg: mailarchive.MailingListMessage):\n subject = msg.message[\"Subject\"].replace('\\n', \"\\\\n\")\n string = f\"{msg.metadata('from_name'):50s} | {msg.metadata('from_addr'):30s} | {str(msg.metadata('from_person').id) if msg.metadata('from_person') is not None else '--':6s} | {msg.metadata('timestamp'):%Y-%m-%d %H:%M} | {subject:30s}\"\n for document in msg.metadata(\"related_docs\"):\n name = document.name\n if document.rfc is not None:\n name = f\"RFC{document.rfc}\"\n string += f\"\\n\\tRelated Document: {document.title} ({name})\"\n return string\n\nfor ml_name in [\"rfced-future\"]:\n ml = archive.mailing_list(ml_name)\n ml.update()\n print(ml_name)\n \n for thread in ml.threads():\n first_index, first_message = thread.messages[0]\n print(\"--|\", pretty_print_message_metadata(ml.message(first_index)))\n for index, message in thread.messages[1:]:\n print(\" |\", pretty_print_message_metadata(ml.message(index)))\n print()\n\n print()\n\n # filter by Person\n print(\"Filter by Person\")\n for index, im in ml.messages(from_person=datatracker.person_from_email(\"csp@csperkins.org\")):\n print(f\" {pretty_print_message_metadata(im)}\")\n 
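# messages() also accepts a related_doc filter; the next block demonstrates it\n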
print()\n\n # filter by Document\n print(\"Filter by Document\")\n for index, im in ml.messages(related_doc=datatracker.document_from_draft(\"draft-carpenter-rfc-principles\")):\n print(f\" {pretty_print_message_metadata(im)}\")\n\nprint()\n\n# archive-wide searching\nml = archive.mailing_list(\"fdt\")\nml.update()\nml = archive.mailing_list(\"abnf-discuss\")\nml.update()\n\nprint(\"Archive-wide searching\")\nfor msg_id, msg in archive.messages(from_addr=\"Stephen.McQuistin@glasgow.ac.uk\"):\n list_name, msg_index = msg_id\n print(f\"{list_name:15s} | {pretty_print_message_metadata(msg)}\")\n","sub_path":"examples/mailing-lists.py","file_name":"mailing-lists.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"195595986","text":"from wolff.optimization.gradient_descent_solver \\\n import GradientDescentSolver\n\n\nclass AdamOptimizer(GradientDescentSolver):\n def __init__(self,\n mu=10e-8,\n beta_1=0.9,\n beta_2=0.999):\n super(AdamOptimizer, self).__init__()\n\n self.mu = mu\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self._constants = [0, 0] # first and second moment accumulators, read by next_theta\n\n @property\n def mu(self):\n return self._mu\n\n @mu.setter\n def mu(self, value):\n if value <= 0:\n raise ValueError(\"mu must be greater than zero\")\n self._mu = value\n\n @property\n def beta_1(self):\n return self._beta_1\n\n @beta_1.setter\n def beta_1(self, value):\n if value <= 0:\n raise ValueError('First momentum rate must be greater than zero')\n self._beta_1 = value\n\n @property\n def beta_2(self):\n return self._beta_2\n\n @beta_2.setter\n def beta_2(self, value):\n if value <= 0:\n raise ValueError('Second momentum rate must be greater than zero')\n self._beta_2 = value\n\n def next_theta(self):\n\n current_m = (1-self._beta_1)*self._model.H.data \\\n + self._beta_1 * self._constants[0]\n current_v = (1-self._beta_2)*(self._model.H.data)**2 \\\n + self._beta_2 * self._constants[1]\n\n m_hat = current_m / (1-self._beta_1**self._iters)\n v_hat = current_v / (1-self._beta_2**self._iters)\n\n self._theta = self._theta - \\\n (self._learning_rate * m_hat) / (v_hat**(1/2) + self._mu)\n\n self._constants[0] = current_m\n self._constants[1] = current_v\n","sub_path":"wolff/optimization/adam_optimizer.py","file_name":"adam_optimizer.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526197729","text":"import sys\r\nimport threading\r\nimport time\r\nimport random\r\n\r\nimport socket\r\n\r\nDNS_table = dict();\r\n\r\n\r\ndef topserver(port):\r\n try:\r\n ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n print(\"[S]: Server socket created\")\r\n except socket.error as err:\r\n print(\"socket open error: {}\\n\".format(err))\r\n exit()\r\n\r\n server_binding = ('', port)\r\n ss.bind(server_binding)\r\n ss.listen(1)\r\n\r\n while True:\r\n conn, addr = ss.accept();\r\n revbuf = \"\";\r\n\r\n while True:\r\n try:\r\n data = conn.recv(1024);\r\n\r\n if data == b\"\":\r\n break;\r\n revbuf += data.decode();\r\n\r\n try:\r\n while True:\r\n pos = revbuf.find('\\n')\r\n\r\n if pos < 0:\r\n break;\r\n\r\n if pos > 0:\r\n domain = revbuf[0:pos].replace(' ', '').replace('\\r', '');\r\n revbuf = revbuf[pos + 1:]\r\n\r\n if domain.lower() in DNS_table:\r\n conn.send((domain + \" \" + DNS_table[domain.lower()] + \" A\\n\").encode())\r\n else:\r\n conn.send((domain + \" - Error:HOST NOT FOUND\\n\").encode())\r\n except:\r\n pass\r\n except socket.error as e:\r\n
break;\r\n\r\n conn.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n with open(\"PROJI-DNSTS.txt\", \"r\") as f:\r\n for line in f:\r\n arr = line.replace('\\r', '').replace('\\n', '').split(' ');\r\n\r\n if len(arr) == 3:\r\n if arr[2] == \"A\":\r\n DNS_table[arr[0].lower()] = arr[1]\r\n\r\n topserver(int(sys.argv[1]))\r\n except:\r\n pass","sub_path":"project1/ts.py","file_name":"ts.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"164357215","text":"#coding=utf-8\n\nfrom setuptools import setup, find_packages\n\nPACKAGE = 'pycapture'\nname = 'pycapture'\ndescription = 'Parse pcap file with python'\nauthor = 'xiaxiaocao'\nauthor_email = 'dongliu@live.cn'\nurl = 'https://github.com/xiaxiaocao/pycapture'\nversion = __import__(PACKAGE).__version__\n\ntry:\n with open('README.rst', 'rb') as f:\n long_description = f.read()\nexcept:\n long_description = description\n\nsetup(\n name=name,\n version=version,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n license='Apache Software License',\n url=url,\n packages=find_packages(exclude=['tests.*', 'tests']),\n include_package_data=True,\n classifiers=[\n 'Programming Language :: Python',\n 'Development Status :: 4 - Beta',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n ],\n zip_safe=False,\n scripts=['parse_pcap', 'proxy_cap'],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"123465668","text":"\"\"\"\nMatplotlib plotting tools.\n\"\"\"\n\ndef text(ax, x, y, s, edgecolor=None, edgealpha=0.1, edgewidth=0.75, npmb=16, **kwargs):\n \"\"\"\n Matplotlib text command augmented with poor man's bold.\n \"\"\"\n import math\n h = [ax.text(x, y, s, **kwargs)]\n h[0].zorder += 1\n if edgecolor is not None:\n if 'bbox' in kwargs:\n del(kwargs['bbox'])\n kwargs['color'] = edgecolor\n kwargs['alpha'] = edgealpha\n aspect = ax.get_aspect()\n dx, dy = ax.get_position().size * ax.figure.get_size_inches() * 72.0\n x1, x2 = ax.get_xbound()\n y1, y2 = ax.get_ybound()\n dx = edgewidth * (x2 - x1) / dx\n dy = edgewidth * (y2 - y1) / dy\n if aspect == 'equal':\n dx = dy\n m = math.sqrt(0.5)\n dx = dx / m\n dy = dy / m\n for i in range(npmb):\n phi = 2.0 * math.pi * (i + 0.5) / npmb\n x_ = x + dx * math.cos(phi)\n y_ = y + dy * math.sin(phi)\n h += [ax.text(x_, y_, s, **kwargs)]\n return h\n\ndef colormap(*args, **kwargs):\n \"\"\"\n Matplotlib colormap. See viz.colormap for details.\n \"\"\"\n import numpy as np\n from matplotlib.colors import LinearSegmentedColormap\n from . 
import viz\n v, r, g, b, a = viz.colormap(*args, **kwargs)\n n = 2001\n cmap = { 'red':np.c_[v, r, r],\n 'green':np.c_[v, g, g],\n 'blue':np.c_[v, b, b] }\n cmap = LinearSegmentedColormap('cmap', cmap, n)\n return cmap\n\ndef colorbar(fig, cmap, ticks, title=None, rect=None, contours=None,\n ticklabels=None, edgecolor=None, **kwargs):\n \"\"\"\n Matplotlib enhanced colorbar.\n \"\"\"\n import numpy as np\n import matplotlib.pyplot as plt\n if rect is None:\n rect = 0.25, 0.08, 0.5, 0.02\n ax = fig.add_axes(rect)\n if len(ticks) == 2:\n ticks = ticks[0], 0.5 * (ticks[0] + ticks[1]), ticks[1]\n x1, x2 = ticks[0], ticks[-1]\n y1, y2 = -0.5, 0.5\n extent = x1, x2, y1, y2\n x = np.linspace(x1, x2, 1001)\n if contours:\n ax.contourf(x, [y1, y2], [x, x], contours, cmap=cmap, **kwargs)\n ax.contourf(x, [y1, y2], [x, x], contours, cmap=cmap, **kwargs)\n else:\n ax.imshow([x.T], cmap=cmap, extent=extent, **kwargs)\n x, y = x2 - x1, y2 - y1\n c = plt.rcParams['axes.edgecolor']\n lw = plt.rcParams['axes.linewidth'] * 2\n ec = edgecolor\n if ec:\n r = plt.Rectangle([x1, y1], x, y, fc='none', lw=lw, ec=ec,\n alpha=0.4, clip_on=False)\n ax.add_patch(r)\n r = plt.Rectangle([x1, y1], x, y, fc='none', lw=lw, ec=c)\n ax.add_patch(r)\n ax.axis('off')\n ax.axis('tight')\n ax.axis(extent)\n if ticklabels is None:\n ticklabels = ticks\n for i, x in enumerate(ticks):\n s = '%s' % ticklabels[i]\n text(ax, x, -1, s, ha='center', va='top', edgecolor=ec)\n if title:\n x = 0.5 * (x1 + x2)\n text(ax, x, 1, title, ha='center', va='baseline', edgecolor=ec)\n\n return ax\n\ndef lengthscale(ax, x, y, w=None, label='%s', style='k-', **kwargs):\n \"\"\"\n Draw a length scale bar between the points (x[0], y[0]) and (x[1], y[1]).\n \"\"\"\n import math\n import numpy as np\n x0 = 0.5 * (x[0] + x[1])\n y0 = 0.5 * (y[0] + y[1])\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n l = math.sqrt(dx*dx + dy*dy)\n if not w:\n x = ax.get_xlim()\n y = ax.get_ylim()\n x = abs(x[1] - x[0])\n y = abs(y[1] - y[0])\n if ax.get_aspect() == 'equal':\n w = 0.005 * (y + x)\n else:\n w = 0.01 / l * (y * abs(dx) + x * abs(dy))\n try:\n label = label % l\n except TypeError:\n pass\n rot = (dx, -dy), (dy, dx)\n x = -l, l, float('nan'), -l, -l, float('nan'), l, l\n y = 0, 0, float('nan'), -w, w, float('nan'), -w, w\n x, y = 0.5 / l * np.dot(rot, [x, y])\n theta = math.atan2(dy, dx) * 180.0 / math.pi\n h1 = ax.plot(x0 + x, y0 + y, style, clip_on=False)\n h2 = text(ax, x0, y0, label, ha='center', va='center', rotation=theta, **kwargs)\n return h1, h2\n\ndef compass_rose(ax, x, y, r, style='k-', **kwargs):\n import math\n theta = 0.0\n if 'rotation' in kwargs:\n theta = kwargs['rotation']\n kwargs.update(rotation_mode='anchor')\n c = r * math.cos(theta / 180.0 * math.pi)\n s = r * math.sin(theta / 180.0 * math.pi)\n x_ = (x+c, x+s), (x-c, x-s)\n y_ = (y+s, y-c), (y-s, y+c)\n h = [ax.plot(x_, y_, style, clip_on=False)]\n c *= 1.3\n s *= 1.3\n x_ = (x+c, x-c), (x+s, x-s)\n y_ = (y+s, y-s), (y-c, y+c)\n h += [\n text(ax, x_[0][0], y_[0][0], 'E', ha='left', va='center', **kwargs),\n text(ax, x_[0][1], y_[0][1], 'W', ha='right', va='center', **kwargs),\n text(ax, x_[1][0], y_[1][0], 'S', ha='center', va='top', **kwargs),\n text(ax, x_[1][1], y_[1][1], 'N', ha='center', va='bottom', **kwargs),\n ]\n return h\n\ndef savefig(fig, fh=None, format=None, distill=False, **kwargs):\n \"\"\"\n Enhanced version of Matplotlib savefig command.\n\n Takes the same arguments as savefig. Saves to disk if a filename is\n given. 
Otherwise return a StringIO file descriptor, or a numpy array. PDF is\n distilled using Ghostscript to produce smaller files.\n \"\"\"\n import os, cStringIO\n import numpy as np\n from . import viz\n if isinstance(fh, basestring):\n if format is None:\n format = fh.split('.')[-1]\n fh = open(os.path.expanduser(fh), 'wb')\n else:\n if format is None:\n format = 'array'\n out = cStringIO.StringIO()\n if format == 'array':\n if 'dpi' not in kwargs:\n kwargs['dpi'] = fig.dpi\n dpi = kwargs['dpi']\n n = fig.get_size_inches()\n n = int(n[1] * dpi), int(n[0] * dpi), 4\n fig.savefig(out, format='raw', **kwargs)\n out = np.fromstring(out.getvalue(), 'u1').reshape(n)\n elif distill and format == 'pdf':\n fig.savefig(out, format='eps', **kwargs)\n out = viz.distill_eps(out)\n else:\n fig.savefig(out, format=format, **kwargs)\n out.reset()\n if fh is None:\n return out\n else:\n with fh:\n fh.write(out.getvalue())\n return\n\ndef digitize(img, xlim=(-1, 1), ylim=(-1, 1), color='r'):\n \"\"\"\n Digitize points on an image and rectify to a rectangular coordinate system.\n \"\"\"\n import matplotlib.pyplot as plt\n from . import coord\n fig = plt.gcf()\n fig.clf()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.imshow(img)\n ax.axis('tight')\n ax.axis('off')\n plt.draw()\n plt.show()\n ax.hold(True)\n xx, yy = [], []\n for j in 0, 1:\n for k in 0, 1:\n print('Left-click %r' % [xlim[j], ylim[k]])\n x, y = fig.ginput(1, -1)[0]\n xx += [x]\n yy += [y]\n ax.plot([x], [y], '+' + color)\n plt.draw()\n\n xx = xx[:2], xx[2:]\n yy = yy[:2], yy[2:]\n print(\"\"\"\n Left-click, space: add point\n Right-click, delete: cancel last point\n Enter: new line segment\n Enter twice: finish\n \"\"\")\n x0 = 0.5 * (xlim[1] + xlim[0])\n y0 = 0.5 * (ylim[1] + ylim[0])\n dx = 0.5 * (xlim[1] - xlim[0])\n dy = 0.5 * (ylim[1] - ylim[0])\n xr, yr = [], []\n while 1:\n xy = fig.ginput(-1, -1)\n if len(xy) == 0:\n break\n x, y = zip(*xy)\n ax.plot(x, y, '+-'+color)\n plt.draw()\n x, y = coord.ibilinear(xx, yy, x, y)\n x = x0 + dx * x\n y = y0 + dy * y\n print(x)\n print(y)\n xr += [x]\n yr += [y]\n return xr, yr\n\ndef contour(*args, **kwargs):\n \"\"\"\n Extract contour polygons using matplotlib.\n \"\"\"\n import numpy as np\n import matplotlib.pyplot as plt\n concat = True\n pp = []\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if concat:\n for ccc in ax.contour(*args, **kwargs).collections:\n p = []\n for cc in ccc.get_paths():\n p += cc.to_polygons() + [[[float('nan'), float('nan')]]]\n #for c in cc.to_polygons():\n # p += [c, [[float('nan'), float('nan')]]]\n if p:\n del p[-1]\n pp += [np.concatenate(p).T]\n else:\n pp += [None]\n else:\n for cc in ax.contour(*args, **kwargs).collections:\n p = []\n for c in cc.get_paths():\n p += c.to_polygons()\n pp += [p]\n plt.close(fig)\n return pp\n\n\n","sub_path":"cst/plt.py","file_name":"plt.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157903272","text":"from tkinter import *\nimport Helpers.Modules.Weather.WeatherInformation as WI\n\nclass Application(Frame):\n\n weatherinfo = WI.WeatherInfo()\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.grid()\n self.master.title(\"Weather module\")\n\n for r in range(8):\n self.master.rowconfigure(r, weight=1)\n for c in range(15):\n self.master.columnconfigure(c, weight=1)\n\n FrameCurrent = Frame(master, bg=\"white\")\n FrameCurrent.grid(row = 0, column = 0, rowspan = 5, columnspan = 5, sticky = W+E+N+S)\n FrameGraph 
= Frame(master, bg=\"blue\")\n FrameGraph.grid(row = 5, column = 0, rowspan = 5, columnspan = 10, sticky = W+E+N+S)\n FrameD1 = Frame(master, bg=\"green\")\n FrameD1.grid(row = 5, column = 0, rowspan = 3, columnspan = 3, sticky = W+E+N+S)\n FrameD2 = Frame(master, bg=\"green\")\n FrameD2.grid(row=5, column=3, rowspan=3, columnspan=3, sticky=W + E + N + S)\n FrameD3 = Frame(master, bg=\"green\")\n FrameD3.grid(row=5, column=6, rowspan=3, columnspan=3, sticky=W + E + N + S)\n FrameD4 = Frame(master, bg=\"green\")\n FrameD4.grid(row=5, column=9, rowspan=3, columnspan=3, sticky=W + E + N + S)\n FrameD5 = Frame(master, bg=\"green\")\n FrameD5.grid(row=5, column=12, rowspan=3, columnspan=3, sticky=W + E + N + S)\n\n Label(FrameCurrent, text=self.weatherinfo.currentInfo[\"Day\"], bg = \"white\", fg = \"blue\", font=(\"Helvetica\",10)).grid(column=0, row=0, columnspan=5, sticky = W + E)\n Label(FrameCurrent, text=str(self.weatherinfo.currentInfo[\"Temp\"]) + \" °C\", bg = \"white\", fg = \"blue\", font=(\"Helvetica\",16)).grid(column=3, row=1, columnspan=2, rowspan=2, sticky=S)\n\nroot = Tk()\napp = Application(master=root)\napp.mainloop()","sub_path":"Modules/Weather.py","file_name":"Weather.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"159567497","text":"# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance\n# with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"LICENSE.txt\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, express or implied. 
See the License for the specific language governing permissions and\n# limitations under the License.\nimport os as os_lib\n\nimport pytest\nfrom assertpy import assert_that, soft_assertions\n\nfrom pcluster.aws.aws_resources import ImageInfo, InstanceTypeInfo\nfrom pcluster.aws.common import AWSClientError\nfrom pcluster.aws.ec2 import Ec2Client\nfrom pcluster.config.cluster_config import AmiSearchFilters, Tag\nfrom pcluster.constants import OS_TO_IMAGE_NAME_PART_MAP\nfrom pcluster.utils import get_installed_version\nfrom tests.pcluster.aws.dummy_aws_api import mock_aws_api\nfrom tests.utils import MockedBoto3Request\n\n\n@pytest.fixture()\ndef boto3_stubber_path():\n return \"pcluster.aws.common.boto3\"\n\n\n@pytest.mark.parametrize(\n \"region, free_tier_instance_type, default_instance_type, stub_boto3\",\n [\n (\"us-east-1\", \"t2.micro\", \"t2.micro\", True),\n (\"eu-north-1\", \"t3.micro\", \"t3.micro\", True),\n (\"us-gov-east-1\", None, \"t3.micro\", True),\n ],\n)\n@pytest.mark.nomockdefaultinstance\ndef test_get_default_instance(boto3_stubber, region, free_tier_instance_type, default_instance_type, stub_boto3):\n os_lib.environ[\"AWS_DEFAULT_REGION\"] = region\n if free_tier_instance_type:\n response = {\"InstanceTypes\": [{\"InstanceType\": free_tier_instance_type}]}\n else:\n response = {\"InstanceTypes\": []}\n if stub_boto3:\n mocked_requests = [\n MockedBoto3Request(\n method=\"describe_instance_types\",\n response=response,\n expected_params={\n \"Filters\": [\n {\"Name\": \"free-tier-eligible\", \"Values\": [\"true\"]},\n {\"Name\": \"current-generation\", \"Values\": [\"true\"]},\n ]\n },\n )\n ]\n\n boto3_stubber(\"ec2\", mocked_requests)\n assert_that(Ec2Client().get_default_instance_type()).is_equal_to(default_instance_type)\n\n\n@pytest.mark.parametrize(\"generate_error\", [True, False])\ndef test_list_instance_types(boto3_stubber, generate_error):\n \"\"\"Verify that list_instance_types behaves as expected.\"\"\"\n dummy_message = \"dummy error message\"\n dummy_instance_types = [\"c5.xlarge\", \"m6g.xlarge\"]\n mocked_requests = [\n MockedBoto3Request(\n method=\"describe_instance_type_offerings\",\n expected_params={},\n response=dummy_message\n if generate_error\n else {\"InstanceTypeOfferings\": [{\"InstanceType\": instance_type} for instance_type in dummy_instance_types]},\n generate_error=generate_error,\n )\n ]\n boto3_stubber(\"ec2\", mocked_requests)\n if generate_error:\n with pytest.raises(AWSClientError, match=dummy_message):\n Ec2Client().list_instance_types()\n else:\n return_value = Ec2Client().list_instance_types()\n assert_that(return_value).is_equal_to(dummy_instance_types)\n\n\n@pytest.mark.parametrize(\n \"instance_type, supported_architectures, error_message\",\n [\n (\"t2.micro\", [\"x86_64\", \"i386\"], None),\n (\"a1.medium\", [\"arm64\"], None),\n (\"valid.exotic.arch.instance\", [\"exoticArch\"], None),\n ],\n)\ndef test_get_supported_architectures(mocker, instance_type, supported_architectures, error_message):\n \"\"\"Verify that get_supported_architectures_for_instance_type behaves as expected for various cases.\"\"\"\n mock_aws_api(mocker)\n get_instance_types_info_patch = mocker.patch(\n \"pcluster.aws.ec2.Ec2Client.get_instance_type_info\",\n return_value=InstanceTypeInfo({\"ProcessorInfo\": {\"SupportedArchitectures\": supported_architectures}}),\n )\n observed_architectures = Ec2Client().get_supported_architectures(instance_type)\n expected_architectures = list(set(supported_architectures) & set([\"x86_64\", \"arm64\"]))\n 
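# only x86_64 and arm64 are valid cluster architectures, so the expected list is the intersection computed above\n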
assert_that(observed_architectures).is_equal_to(expected_architectures)\n\n get_instance_types_info_patch.assert_called_with(instance_type)\n\n\n@pytest.mark.parametrize(\n \"os_part, expected_os\",\n [\n (\"amzn2-hvm\", \"alinux2\"),\n (\"centos7-hvm\", \"centos7\"),\n (\"ubuntu-2004-lts-hvm\", \"ubuntu2004\"),\n (\"nonexistant-hvm\", \"linux\"),\n (\"nonexistant\", \"linux\"),\n ],\n)\ndef test_extract_os_from_official_image_name(os_part, expected_os):\n name = f\"aws-parallelcluster-3.0.0-{os_part}-otherstuff\"\n os = Ec2Client.extract_os_from_official_image_name(name)\n assert_that(os).is_equal_to(expected_os)\n\n\n@pytest.mark.parametrize(\n \"os, architecture, boto3_response, expected_response, error_message\",\n [\n pytest.param(\n None,\n None,\n {\n \"Images\": [\n {\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"},\n {\"Name\": \"ami-parallelcluster-3.0.0-centos7-hvm-x86_64-other\"},\n ]\n },\n [\n ImageInfo({\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"}),\n ImageInfo({\"Name\": \"ami-parallelcluster-3.0.0-centos7-hvm-x86_64-other\"}),\n ],\n None,\n id=\"test with no filter\",\n ),\n pytest.param(\n \"alinux2\",\n None,\n {\"Images\": [{\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"}]},\n [ImageInfo({\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"})],\n None,\n id=\"test with os\",\n ),\n pytest.param(\n None,\n \"x86_64\",\n {\"Images\": [{\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"}]},\n [ImageInfo({\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"})],\n None,\n id=\"test with architecture\",\n ),\n pytest.param(\n \"alinux2\",\n \"x86_64\",\n {\"Images\": [{\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"}]},\n [ImageInfo({\"Name\": \"aws-parallelcluster-3.0.0-amzn2-hvm-x86_64-other\"})],\n None,\n id=\"test with os and architecture\",\n ),\n pytest.param(\"alinux2\", \"arm64\", Exception(\"error message\"), None, \"error message\", id=\"test with boto3 error\"),\n ],\n)\ndef test_get_official_images(boto3_stubber, os, architecture, boto3_response, expected_response, error_message):\n filter_version = get_installed_version()\n filter_os = OS_TO_IMAGE_NAME_PART_MAP[os] if os else \"*\"\n filter_arch = architecture or \"*\"\n expected_params = {\n \"Filters\": [\n {\"Name\": \"name\", \"Values\": [f\"aws-parallelcluster-{filter_version}-{filter_os}-{filter_arch}*\"]},\n ],\n \"ImageIds\": [],\n \"Owners\": [\"amazon\"],\n }\n mocked_requests = [\n MockedBoto3Request(\n method=\"describe_images\",\n expected_params=expected_params,\n response=str(boto3_response) if isinstance(boto3_response, Exception) else boto3_response,\n generate_error=isinstance(boto3_response, Exception),\n )\n ]\n boto3_stubber(\"ec2\", mocked_requests)\n\n if error_message:\n with pytest.raises(AWSClientError, match=error_message):\n Ec2Client().get_official_images(os, architecture)\n else:\n response = Ec2Client().get_official_images(os, architecture)\n with soft_assertions():\n assert_that(len(response)).is_equal_to(len(expected_response))\n for i in range(len(response)):\n assert_that(response[i].name).is_equal_to(expected_response[i].name)\n\n\n@pytest.mark.parametrize(\n \"os, architecture, filters, boto3_response, error_message\",\n [\n (\n \"alinux2\",\n \"arm64\",\n None,\n {\"Images\": [{\"ImageId\": \"ami-00e87074e52e6\", \"CreationDate\": \"2018-11-09T01:21:00.000Z\"}]},\n None,\n ),\n (\n \"alinux2\",\n \"x86_64\",\n AmiSearchFilters(owner=\"self\"),\n {\"Images\": [{\"ImageId\": 
\"ami-00e87074e52e6\", \"CreationDate\": \"2018-11-09T01:21:00.000Z\"}]},\n None,\n ),\n (\n \"alinux2\",\n \"x86_64\",\n AmiSearchFilters(owner=\"self\", tags=[Tag(\"key1\", \"value1\"), Tag(\"key2\", \"value2\")]),\n {\"Images\": [{\"ImageId\": \"ami-00e87074e52e6\", \"CreationDate\": \"2018-11-09T01:21:00.000Z\"}]},\n None,\n ),\n (\"alinux2\", \"arm64\", None, Exception(\"error message\"), \"error message\"),\n (\"alinux2\", \"arm64\", None, {\"Images\": []}, \"Cannot find official ParallelCluster AMI\"),\n (\n \"alinux2\",\n \"arm64\",\n None,\n {\n \"Images\": [\n {\"ImageId\": \"ami-older-1\", \"CreationDate\": \"2018-11-09T01:21:00.000Z\"},\n {\"ImageId\": \"ami-00e87074e52e6\", \"CreationDate\": \"2018-11-09T01:22:00.000Z\"},\n {\"ImageId\": \"ami-older-2\", \"CreationDate\": \"2017-11-09T01:21:00.000Z\"},\n ]\n },\n None,\n ),\n ],\n ids=[\"no filtering\", \"filtering owner\", \"filtering full\", \"error from boto3\", \"empty ami list\", \"multiple results\"],\n)\ndef test_get_official_image_id(boto3_stubber, os, architecture, filters, boto3_response, error_message):\n expected_ami_id = \"ami-00e87074e52e6\"\n expected_params = {\n \"Filters\": [\n {\"Name\": \"name\", \"Values\": [f\"aws-parallelcluster-{get_installed_version()}-amzn2-hvm-{architecture}*\"]},\n ],\n \"Owners\": [filters.owner if filters and filters.owner else \"amazon\"],\n }\n if filters and filters.tags:\n expected_params[\"Filters\"].extend([{\"Name\": f\"tag:{tag.key}\", \"Values\": [tag.value]} for tag in filters.tags])\n mocked_requests = [\n MockedBoto3Request(\n method=\"describe_images\",\n expected_params=expected_params,\n response=str(boto3_response) if isinstance(boto3_response, Exception) else boto3_response,\n generate_error=isinstance(boto3_response, Exception),\n )\n ]\n boto3_stubber(\"ec2\", mocked_requests)\n\n if error_message:\n with pytest.raises(AWSClientError, match=error_message):\n Ec2Client().get_official_image_id(os, architecture, filters)\n else:\n ami_id = Ec2Client().get_official_image_id(os, architecture, filters)\n assert_that(ami_id).is_equal_to(expected_ami_id)\n\n\n@pytest.mark.parametrize(\n \"snapshot_id, error_message\",\n [(\"snap-1234567890abcdef0\", None), (\"snap-1234567890abcdef0\", \"Some error message\")],\n)\ndef test_get_ebs_snapshot_info(boto3_stubber, snapshot_id, error_message):\n \"\"\"Verify that get_snapshot_info makes the expected API call.\"\"\"\n response = {\n \"Description\": \"This is my snapshot\",\n \"Encrypted\": False,\n \"VolumeId\": \"vol-049df61146c4d7901\",\n \"State\": \"completed\",\n \"VolumeSize\": 120,\n \"StartTime\": \"2014-02-28T21:28:32.000Z\",\n \"Progress\": \"100%\",\n \"OwnerId\": \"012345678910\",\n \"SnapshotId\": \"snap-1234567890abcdef0\",\n }\n describe_snapshots_response = {\"Snapshots\": [response]}\n\n mocked_requests = [\n MockedBoto3Request(\n method=\"describe_snapshots\",\n response=describe_snapshots_response if error_message is None else error_message,\n expected_params={\"SnapshotIds\": [\"snap-1234567890abcdef0\"]},\n generate_error=error_message is not None,\n )\n ]\n boto3_stubber(\"ec2\", mocked_requests)\n if error_message is None:\n assert_that(Ec2Client().get_ebs_snapshot_info(snapshot_id)).is_equal_to(response)\n elif error_message:\n with pytest.raises(AWSClientError, match=error_message) as clienterror:\n Ec2Client().get_ebs_snapshot_info(snapshot_id)\n assert_that(clienterror.value.code).is_not_equal_to(0)\n\n\n@pytest.mark.parametrize(\n \"error_code, raise_exception\",\n [(\"DryRunOperation\", False), 
(\"UnsupportedOperation\", True)],\n)\ndef test_run_instances_dryrun(boto3_stubber, error_code, raise_exception):\n \"\"\"Verify that if run_instance doesn't generate exception if the error code is DryRunOperation.\"\"\"\n error_message = \"fake error message\"\n mocked_requests = [\n MockedBoto3Request(\n method=\"run_instances\",\n response=error_message,\n expected_params=None,\n generate_error=True,\n error_code=error_code,\n )\n ]\n boto3_stubber(\"ec2\", mocked_requests)\n kwargs = {\"MaxCount\": 10, \"MinCount\": 0, \"DryRun\": True}\n if raise_exception:\n with pytest.raises(AWSClientError, match=error_message) as clienterror:\n Ec2Client().run_instances(**kwargs)\n assert_that(clienterror.value.code).is_not_equal_to(0)\n else:\n Ec2Client().run_instances(**kwargs)\n","sub_path":"cli/tests/pcluster/aws/test_ec2.py","file_name":"test_ec2.py","file_ext":"py","file_size_in_byte":13146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148766720","text":"#!\n#! Class.\n#!\n\nclass Person:\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return \"[Person] My name is \" + self.name + \".\"\n\nclass Human(Person):\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n def __str__(self):\n return \"[Human] My name is \" + self.name + \".\"\\\n \"I am \" + str(self.age) + \" years old.\"\n\nob = Person(\"Taro\")\nprint(ob)\n\nob2 = Human(\"Hanako\", 28)\nprint(ob2)\n\na = isinstance(ob2, Person)\nprint(a)\n\na = isinstance(ob, Human)\nprint(a)\n","sub_path":"14. class/class007_override.py","file_name":"class007_override.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392115902","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#a = -1\na = 0\nb = 1\nn = 150\n#Ua = 1\n#Ub = 2\nUa = 0\nUb =0\nh = (b - a)/(n-1)\nk1 = 29\nk_placer = {a:k1}\nc_placer = dict()\n\n\ndef k(x0, x1, x2,h):\n k_last = 0 # k before this point\n for x_i, k_i in k_placer.items():\n if (x0 < x_i) and (x1 > x_i):\n return h/((x_i - x0)/k_last + (x1 - x_i)/k_i), k_i # a, b\n if(x1 < x_i) and (x2 > x_i):\n return k_last, h/((x_i - x1)/k_last + (x2 - x_i)/k_i)\n if x2 < x_i:\n return k_last, k_last\n k_last = k_i\n return k_last, k_last\n\n\ndef f(x1, x2,h):\n for x_i, c_i in c_placer.items():\n if (x_i >= x1) and (x_i <= x2):\n return -c_i*h/2\n return 0\n\n\ndef get_matrix():\n matrix = np.zeros((n, n))\n x = [a + i*h for i in range(n)]\n i = 1\n matrix[0][0] = matrix[n-1][n-1] = 1\n while i < n-1:\n A, B = k(x[i-1], x[i], x[i+1])\n matrix[i][i-1] = A\n matrix[i][i] = -A - B\n matrix[i][i+1] = B\n i += 1\n return matrix\n\n\ndef get_vector():\n result = [f(a + i*h, a + 2*h + i*h) for i in range(n)]\n result[0] = Ua\n result[n-1] = Ub\n return result\n\n\ndef graph_builder(y_list):\n x_list = [(a + i*h) for i in range(n)]\n plt.plot(x_list, y_list)\n plt.show()\n\n\ndef get_solution(matrix, vector):\n return np.linalg.solve(matrix, vector)\n\n\nn_list = [90,100,150,200,250]\nc_placer[a+(b-a)/2] = 1\nglobal h\nfor i in n_list:\n h = (b - a)/(i-1)\n matrix = get_matrix()\n vector = get_vector()\n solution = get_solution(matrix, vector)\n graph_builder(solution)\n\n\n","sub_path":"lab3/lab3.1.py","file_name":"lab3.1.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569197520","text":"#!/usr/bin/env python3\n# 
-*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 16 17:26:08 2019\n\n@author: angelo\n\nWrite a function that takes a string of space-separated words as input and returns the set of distinct words. The string must also be cleaned of all punctuation marks (i.e. commas, periods, exclamation marks, question marks, semicolons, colons, apostrophes, quotation marks)\n\nexample: \"ciao ciao se beccamo\" {\"ciao\",\"se\",\"beccamo\"}\n\"\"\"\n\ndef parole_distinte(stringa):\n s = set()\n stringa = stringa.replace(\"'\",\" \")\n for parola in stringa.split():\n parola = parola.strip(\",.!?;:'\\\"\")\n s.add(parola)\n return s","sub_path":"programming_lab/lab161019/parole_distinte.py","file_name":"parole_distinte.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119498587","text":"import numpy as np\nimport pandas as pd\nimport pandas_datareader as pdr\nfrom datetime import datetime\nfrom sklearn.ensemble import AdaBoostRegressor, BaggingRegressor\nfrom pyearth import Earth\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom statsmodels.tsa.stattools import adfuller\nimport xgboost as xgb\n\nmodwt_mra = pd.read_csv('modwt_mra.csv')\ndel modwt_mra['Unnamed: 0']\n\nD1, D2, D3, S3 = modwt_mra['D1'], modwt_mra['D2'], modwt_mra['D3'], modwt_mra['S3']\n\nticker = 'SPY'\nstock = pdr.get_data_yahoo(ticker.upper(), start='2009-01-01', end=str(datetime.now())[0:11])\nstock = stock.Close\nreturns = np.log(stock).diff().dropna()\n\n\ndef MODWT_MARS_TRAIN(series, regressors=4, delay=1, N=2000):\n\tseries = series[len(series)-N:]\n\tseries = np.array(series)\n\tseries = series.reshape(-1, 1)\n\n\tD = regressors # number of regressors\n\tT = delay # delay\n\tN = N\n\tseries = series[500:]\n\tdata = np.zeros((N - 500 - T - (D - 1) * T, D))\n\tlbls = np.zeros((N - 500 - T - (D - 1) * T,))\n\n\tfor t in range((D - 1) * T, N - 500 - T):\n\t\tdata[t - (D - 1) * T, :] = [series[t - 3 * T], series[t - 2 * T], series[t - T], series[t]]\n\t\tlbls[t - (D - 1) * T] = series[t + T]\n\ttrnData = data[:lbls.size - round(lbls.size * 0.3), :]\n\ttrnLbls = lbls[:lbls.size - round(lbls.size * 0.3)]\n\n\tmars = Earth()\n\tmars.fit(trnData, trnLbls)\n\tboosted_mars = AdaBoostRegressor(base_estimator=mars, n_estimators=25, learning_rate=0.01, loss='exponential')\n\tboosted_mars.fit(trnData, trnLbls)\n\tpreds = boosted_mars.predict(trnData)\n\n\treturn preds\n\n\nseries = returns[len(returns)-2000:]\nseries = np.array(series)\nseries = series.reshape(-1, 1)\n\nD = 4\nT = 1\nN = 2000\nseries = series[500:]\nlbls = np.zeros((N - 500 - T - (D - 1) * T,))\n\nfor t in range((D - 1) * T, N - 500 - T):\n\tlbls[t - (D - 1) * T] = series[t + T]\ntrnLbls = lbls[:lbls.size - round(lbls.size * 0.3)]\nchkLbls = lbls[lbls.size - round(lbls.size * 0.3):]\n\n\n# MRA\nD1_train = pd.DataFrame(MODWT_MARS_TRAIN(D1))\nD2_train = pd.DataFrame(MODWT_MARS_TRAIN(D2))\nD3_train = pd.DataFrame(MODWT_MARS_TRAIN(D3))\nS3_train = pd.DataFrame(MODWT_MARS_TRAIN(S3))\n\nD1_train = D1_train.rename(columns={0: 'D1'})\nD2_train = D2_train.rename(columns={0: 'D2'})\nD3_train = D3_train.rename(columns={0: 'D3'})\nS3_train = S3_train.rename(columns={0: 'S3'})\n\nD1_train = pd.concat([D1_train, D2_train], axis=1)\nD1_train = pd.concat([D1_train, D3_train], axis=1)\nD1_train = pd.concat([D1_train, S3_train], axis=1)\n\nD1_train['sum'] = 
D1_train.sum(axis=1)\nplt.plot(np.array(D1_train['sum']), color='g')\nplt.plot(trnLbls)\n\t\n\t\nDA_train = pd.DataFrame(trnLbls)\nDA_train['com'] = DA_train[0].shift(1)\nDA_train['ACC'] = DA_train[0] - DA_train['com']\nDA_train['ACC'] = DA_train['ACC'].mask(DA_train['ACC'] > 0 , 1)\nDA_train['ACC'] = DA_train['ACC'].mask(DA_train['ACC'] < 0 , 0)\n\nDA_pred = pd.DataFrame(D1_train['sum'])\nDA_pred['com'] = DA_pred['sum'].shift(1)\nDA_pred['ACC2'] = DA_pred['sum'] - DA_pred['com']\nDA_pred['ACC2'] = DA_pred['ACC2'].mask(DA_pred['ACC2'] > 0 , 1)\nDA_pred['ACC2'] = DA_pred['ACC2'].mask(DA_pred['ACC2'] < 0 , 0)\n\nDA = pd.DataFrame(DA_train['ACC'])\nDA = DA.join(DA_pred['ACC2'])\nDA['score'] = 0\nDA['score'] = DA['score'].mask(DA['ACC'] == DA['ACC2'], 1)\n\nAC = DA['score'].value_counts()\nACC = round((AC[1] / len(trnLbls)) * 100, 3)\n\nprint('Directional Accuracy: ' + str(ACC) + ' %')\n\n\ndef MODWT_MARS_TEST(series, regressors=4, delay=1, N=2000):\n series = series[len(series)-2000:]\n series = np.array(series)\n series = series.reshape(-1, 1)\n\n D = regressors # number of regressors\n T = delay # delay\n N = N\n series = series[500:]\n data = np.zeros((N - 500 - T - (D - 1) * T, D))\n lbls = np.zeros((N - 500 - T - (D - 1) * T,))\n\n for t in range((D - 1) * T, N - 500 - T):\n data[t - (D - 1) * T, :] = [series[t - 3 * T], series[t - 2 * T], series[t - T], series[t]]\n lbls[t - (D - 1) * T] = series[t + T]\n trnData = data[:lbls.size - round(lbls.size * 0.3), :]\n trnLbls = lbls[:lbls.size - round(lbls.size * 0.3)]\n chkData = data[lbls.size - round(lbls.size * 0.3):, :]\n chkLbls = lbls[lbls.size - round(lbls.size * 0.3):]\n\n aa = np.array(chkLbls[-4:]).reshape(1, -1)\n chkData = np.append(chkData, aa, axis=0)\n\n mars = Earth()\n mars.fit(trnData, trnLbls)\n boosted_mars = AdaBoostRegressor(base_estimator=mars, n_estimators=50, learning_rate=0.1, loss='exponential')\n bag = BaggingRegressor(base_estimator=mars, n_estimators=50)\n bag.fit(trnData, trnLbls)\n boosted_mars.fit(trnData, trnLbls)\n pred2 = bag.predict(chkData)\n oos_preds = boosted_mars.predict(chkData)\n \n stack_predict = np.vstack([oos_preds, pred2]).T\n \n params_xgd = {\n 'max_depth': 7,\n 'objective': 'reg:linear',\n 'learning_rate': 0.05,\n 'n_estimators': 10000\n }\n clf = xgb.XGBRegressor(**params_xgd)\n clf.fit(stack_predict[:-1,:], chkLbls, eval_set=[(stack_predict[:-1,:], chkLbls)], \n eval_metric='rmse', early_stopping_rounds=20, verbose=False)\n\n xgb_pred = clf.predict(stack_predict)\n\n return xgb_pred\n\n\n# MRA\nD1_test = pd.DataFrame(MODWT_MARS_TEST(D1))\nD2_test = pd.DataFrame(MODWT_MARS_TEST(D2))\nD3_test = pd.DataFrame(MODWT_MARS_TEST(D3))\nS3_test = pd.DataFrame(MODWT_MARS_TEST(S3))\n\nD1_test = D1_test.rename(columns={0: 'D1'})\nD2_test = D2_test.rename(columns={0: 'D2'})\nD3_test = D3_test.rename(columns={0: 'D3'})\nS3_test = S3_test.rename(columns={0: 'S3'})\n\nD1_test = pd.concat([D1_test, D2_test], axis=1)\nD1_test = pd.concat([D1_test, D3_test], axis=1)\nD1_test = pd.concat([D1_test, S3_test], axis=1)\n\nD1_test['sum'] = D1_test.sum(axis=1)\nplt.plot(np.array(D1_test['sum']), color='g')\nplt.plot(chkLbls)\nplt.legend(['Pred','Actual'])\n\t\n\t\nDA_test = pd.DataFrame(chkLbls)\nDA_test['com'] = DA_test[0].shift(1)\nDA_test['ACC'] = DA_test[0] - DA_test['com']\nDA_test['ACC'] = DA_test['ACC'].mask(DA_test['ACC'] > 0 , 1)\nDA_test['ACC'] = DA_test['ACC'].mask(DA_test['ACC'] < 0 , 0)\n\nDA_pred = pd.DataFrame(D1_test['sum'])\nDA_pred['com'] = DA_pred['sum'].shift(1)\nDA_pred['ACC2'] = DA_pred['sum'] 
- DA_pred['com']\nDA_pred['ACC2'] = DA_pred['ACC2'].mask(DA_pred['ACC2'] > 0 , 1)\nDA_pred['ACC2'] = DA_pred['ACC2'].mask(DA_pred['ACC2'] < 0 , 0)\n\nDA = pd.DataFrame(DA_test['ACC'])\nDA = DA.join(DA_pred['ACC2'])\nDA['score'] = 0\nDA['score'] = DA['score'].mask(DA['ACC'] == DA['ACC2'], 1)\n\nAC = DA['score'].value_counts()\nACC = round((AC[1] / len(chkLbls)) * 100, 3)\n\nprint('Directional Accuracy: ' + str(ACC) + ' %')\n\npred = D1_test['sum']\npred = pred[:-1]\n\ndef rmse(predictions, targets):\n return np.sqrt(((predictions - targets) ** 2).mean())\n\nprint('MSE: ' + str(mean_squared_error(chkLbls, pred)))\nprint('RMSE: ' + str(rmse(np.array(pred), chkLbls)))\nprint('R Squared: ' + str(r2_score(chkLbls, pred)))\n\nDirection = D1_test['sum']\n\nif Direction[449] > Direction[448]:\n print(\"UP\")\nelse:\n print(\"DOWN\")\n\nprevious = stock[-1:]\nret = np.array(Direction)\nret = ret[-1:]\nprice = np.array((np.exp(ret)) * previous)\nprint('Price: ' + str(np.round(price[-1:], 2)))\n","sub_path":"MODWT-MARS.py","file_name":"MODWT-MARS.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"213602939","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 6 12:04:35 2018\n\n@author: Alex\n\"\"\"\n\nfrom flask import (\n Flask, Blueprint, flash, g, redirect, render_template, request, session, url_for\n)\n\napp = Flask(__name__)\nbp = Blueprint('main', __name__, url_prefix='/')\n \n@app.route(\"/\")\ndef index():\n return render_template(\n 'index.html',**locals())\n \n \n@bp.route('/run', methods=('GET', 'POST'))\ndef run():\n print(\"running\")\n file=open(\"test.txt\", \"w\")\n file.close()\n return render_template(\n 'index.html',**locals())\n \n# register the blueprint so its /run route is actually served\napp.register_blueprint(bp)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=80)","sub_path":"test_scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"150178849","text":"import jax \nfrom jax import lax\nimport jax.numpy as np\nimport numpy as onp\nfrom jax.config import config; config.update(\"jax_enable_x64\", True)\nfrom jax.experimental import loops\n\n#@jax.jit\ndef binom(x,y):\n C = factorial(x) / (factorial(x-y) * factorial(y))\n return C\n\n#@jax.jit\ndef factorial(n):\n n = n.astype(float)\n return jax.lax.exp(jax.lax.lgamma(n + 1))\n\n#@jax.jit\ndef theta(l,lA,lB,PA,PB,r,g):\n \"\"\"\n Calculate the theta factor of the gi term.\n (Handout 4, Eq. 23)\n \"\"\"\n theta = ck(l,lA,lB,PA,PB) * factorial(l) * g**(r-l) / (factorial(r) * factorial(l-2*r)) # real\n return theta\n\n#@jax.jit\ndef ck(j,l,m,a,b):\n '''\n Proves you can jit-compile a function which takes integer arguments and modifies them in while loops\n This means you can in principle convert to while loops for whatever you need\n This is probably the best way to do recursion but it's clunky\n '''\n with loops.Scope() as s:\n s.coefficient = 0.0\n s.k = l \n s.i = m \n s.j = j\n s.l = l\n s.m = m\n s.a = a\n s.b = b\n for _ in s.while_range(lambda: s.k > -1):\n s.i = s.m\n for _ in s.while_range(lambda: s.i > -1):\n for _ in s.cond_range(s.k + s.i == s.j): \n s.coefficient += binom(s.l,s.k) * binom(s.m,s.i) * s.a**(s.l-s.k) * s.b**(s.m-s.i)\n s.i -= 1\n s.k -= 1\n return s.coefficient\n\n#@jax.jit\ndef gi(l,lp,r,rp,i, lA,lB,Ai,Bi,Pi,gP, lC,lD,Ci,Di,Qi,gQ):\n \"\"\"\n Calculate the i-th coordinate component of the integral over primitives.\n (Handout 4, Eq. 
22)\n \"\"\"\n delta = 1/(4*gP) + 1/(4*gQ)\n gi = (-1)**l \n gi *= theta(l,lA,lB,Pi-Ai,Pi-Bi,r,gP) * theta(lp,lC,lD,Qi-Ci,Qi-Di,rp,gQ)\n gi *= (-1)**i * (2 * delta)**(2 * (r + rp))\n gi *= factorial(l - 2 * r + lp - 2 * rp) * delta**i\n gi *= (Pi - Qi)**(l - 2 * r + lp - 2 * (rp + i))\n gi /= (4 * delta)**(l) * (4 * delta)**(lp) * factorial(i) # this guy\n gi /= factorial(l - 2 * r + lp - 2 * (rp + i))\n return gi\n\n#@jax.jit\ndef gaussian_product(alpha_bra,alpha_ket,A,C):\n '''Gaussian product theorem. Returns center and coefficient of product'''\n R = (alpha_bra * A + alpha_ket * C) / (alpha_bra + alpha_ket)\n return R\n\n#@jax.jit\ndef boys(n,x):\n #TODO vmap with boys function not working, just use s function form\n #return 0.88622692545275798 * jax.lax.rsqrt(x + 1e-10) * jax.lax.erf(jax.lax.sqrt(x + 1e-10))\n return x + 1.0\n #return 0.5 * (x + 1e-11)**(-(n + 0.5)) * jax.lax.igamma(n + 0.5, x + 1e-11) * np.exp(jax.lax.lgamma(n + 0.5))\n\n#@jax.jit\ndef cartesian_product(*arrays):\n '''JAX-friendly version of cartesian product. Same order as other function, more memory requirements though.'''\n tmp = np.asarray(np.meshgrid(*arrays, indexing='ij')).reshape(len(arrays),-1).T\n return np.asarray(tmp)\n\ndef np_cartesian_product(*arrays): \n '''Generalized cartesian product of any number of arrays''' \n la = len(arrays) \n dtype = onp.find_common_type([a.dtype for a in arrays], []) \n arr = onp.empty([len(a) for a in arrays] + [la], dtype=dtype) \n for i, a in enumerate(onp.ix_(*arrays)): \n arr[...,i] = a \n return arr.reshape(-1, la) \n\n#@jax.jit\ndef prefactor(a,b,c,d,gP,gQ,ABsq,CDsq):\n f = ( 2 * np.pi**2 ) / ( gP * gQ ) * np.sqrt( np.pi / ( gP + gQ ) ) * np.exp( -(a*b*ABsq)/gP ) * np.exp(-(c*d*CDsq)/gQ)\n return f\n\n\nvmapped_gaussian_product = jax.vmap(gaussian_product, (0,0,None,None))\nvmapped_prefactor = jax.vmap(prefactor, (0,0,0,0,0,0,None,None))\n#vmapped_gi = jax.vmap(gi, (None,None,None,None,None,None,None,None,None,None,0,None,None,None,None,None,0))\nvmapped_gi = jax.vmap(gi, (None,None,None,None,None,None,None,None,None,0,0,None,None,None,None,0,0))\n #(s.n,s.np,s.t,s.tp,s.k, s.nA,s.nB,s.RA[2],s.RB[2],s.RP[2],s.gP, s.nC,s.nD,s.RC[2],s.RD[2],s.RQ[2],s.gQ)\n\n#(l,lp,r,rp,i, lA,lB,Ai,Bi,Pi,gP, lC,lD,Ci,Di,Qi,gQ)\n\n\ndef contracted_eri(L,a,b,c,d,RA,RB,RC,RD,contraction):\n#def primitive_eri(lA,mA,nA,lB,mB,nB,lC,mC,nC,lD,mD,nD,a,b,c,d,RA,RB,RC,RD,contraction):\n \"\"\"\n Computes a single ERI primitive using highly-inefficient scheme of \n Taketa, O-ohata, and Hunzinaga 1968, or Kevin Murphy, H.F. 
Schaefer 2018.\n Parameters\n ----------\n L : Vector of size 12, (x,y,z)-components of angular momentum on center A,B,C,D\n a,b,c,d : All Gaussian exponents on center (A,B,C,D) for the contraction\n RA,RB,RC,RD : Cartesian-coordinate vector of center (A,B,C,D)\n contraction : The fused normalization constant/contraction coefficients for this primitive N_contract*Na*Nb*Nc*Nd*coeffa*coeffb*coeffc*coeffd for all primitives in contraction\n\n Returns\n -------\n A single contracted ERI.\n \"\"\"\n with loops.Scope() as s:\n #s.lA,s.mA,s.nA,s.lB,s.mB,s.nB,s.lC,s.mC,s.nC,s.lD,s.mD,s.nD,s.a,s.b,s.c,s.d,s.RA,s.RB,s.RC,s.RD = lA,mA,nA,lB,mB,nB,lC,mC,nC,lD,mD,nD,a,b,c,d,RA,RB,RC,RD\n # Unpack arguments into scope variable\n s.lA,s.mA,s.nA,s.lB,s.mB,s.nB,s.lC,s.mC,s.nC,s.lD,s.mD,s.nD = L\n s.a,s.b,s.c,s.d = a, b, c, d \n s.RA,s.RB,s.RC,s.RD = RA, RB, RC, RD \n s.gP = s.a + s.b\n s.gQ = s.c + s.d\n s.delta = 1/(4*s.gP) + 1/(4*s.gQ)\n s.RP = vmapped_gaussian_product(s.a,s.b,s.RA,s.RB) # (K,3)\n s.RQ = vmapped_gaussian_product(s.c,s.d,s.RC,s.RD) # (K,3)\n s.ABsq = np.dot(s.RA-s.RB,s.RA-s.RB)\n s.CDsq = np.dot(s.RC-s.RD,s.RC-s.RD)\n #s.PQsq = np.dot(s.RP-s.RQ,s.RP-s.RQ)\n s.PQsq = np.einsum('pq,pq->p', s.RP-s.RQ,s.RP-s.RQ) # shape (K,) is this einsum right?\n s.ssss = vmapped_prefactor(s.a,s.b,s.c,s.d,s.gP,s.gQ,s.ABsq,s.CDsq) # shape (K,)\n s.boysarg = s.PQsq / (4 * s.delta) # shape (K,)\n\n # Initialize loop variables\n #s.Gxyz = 0.0\n s.Gxyz = 0.0\n #s.Gxyz = np.zeros_like(\n s.l = s.lA + s.lB \n s.r = np.floor(s.l/2) \n s.lp = s.lC + s.lD \n s.rp = np.floor(s.lp/2) \n s.i = np.floor((s.l - 2 * s.r + s.lp - 2 * s.rp) / 2)\n\n s.m = s.mA + s.mB \n s.s = np.floor(s.m/2) \n s.mp = s.mC + s.mD \n s.sp = np.floor(s.mp/2) \n s.j = np.floor((s.m - 2 * s.s + s.mp - 2 * s.sp) / 2)\n\n s.n = s.nA + s.nB \n s.t = np.floor(s.n/2) \n s.np = s.nC + s.nD \n s.tp = np.floor(s.np/2) \n s.k = np.floor((s.n - 2 * s.t + s.np - 2 * s.tp) / 2)\n \n # Loop over angular momentum and accumulate contributions to primitive. See Taketa, O-ohata, and Hunzinaga 1968, or Kevin Murphy, H.F. 
Schaefer 2018\n for _ in s.while_range(lambda: s.l > -1): # X\n s.r = np.floor(s.l/2)\n for _ in s.while_range(lambda: s.r > -1):\n s.lp = s.lC + s.lD \n for _ in s.while_range(lambda: s.lp > -1):\n s.rp = np.floor(s.lp/2)\n for _ in s.while_range(lambda: s.rp > -1):\n s.i = np.floor((s.l - 2 * s.r + s.lp - 2 * s.rp) / 2) # This works, compiles fine since loop variables are added in order\n for _ in s.while_range(lambda: s.i > -1):\n gx = vmapped_gi(s.l,s.lp,s.r,s.rp,s.i, s.lA,s.lB,s.RA[0],s.RB[0],s.RP[:,0],s.gP, s.lC,s.lD,s.RC[0],s.RD[0],s.RQ[:,0],s.gQ)\n s.m = s.mA + s.mB \n for _ in s.while_range(lambda: s.m > -1): # Y\n s.s = np.floor(s.m/2)\n for _ in s.while_range(lambda: s.s > -1):\n s.mp = s.mC + s.mD \n for _ in s.while_range(lambda: s.mp > -1):\n s.sp = np.floor(s.mp/2)\n for _ in s.while_range(lambda: s.sp > -1):\n s.j = np.floor((s.m - 2 * s.s + s.mp - 2 * s.sp) / 2)\n for _ in s.while_range(lambda: s.j > -1):\n gy = vmapped_gi(s.m,s.mp,s.s,s.sp,s.j, s.mA,s.mB,s.RA[1],s.RB[1],s.RP[:,1],s.gP, s.mC,s.mD,s.RC[1],s.RD[1],s.RQ[:,1],s.gQ)\n s.n = s.nA + s.nB \n for _ in s.while_range(lambda: s.n > -1): # Z\n s.t = np.floor(s.n/2)\n for _ in s.while_range(lambda: s.t > -1):\n s.np = s.nC + s.nD \n for _ in s.while_range(lambda: s.np > -1):\n s.tp = np.floor(s.np/2)\n for _ in s.while_range(lambda: s.tp > -1):\n s.k = np.floor((s.n - 2 * s.t + s.np - 2 * s.tp) / 2)\n for _ in s.while_range(lambda: s.k > -1):\n gz = vmapped_gi(s.n,s.np,s.t,s.tp,s.k, s.nA,s.nB,s.RA[2],s.RB[2],s.RP[:,2],s.gP, s.nC,s.nD,s.RC[2],s.RD[2],s.RQ[:,2],s.gQ)\n nu = s.l - 2 * s.r + s.lp - 2 * s.rp - s.i + \\\n s.m - 2 * s.s + s.mp - 2 * s.sp - s.j + \\\n s.n - 2 * s.t + s.np - 2 * s.tp - s.k #THIS ORDER MATTERS (UGH)\n F = boys(nu, s.boysarg) #TODO fix this when JAX fixes the vmap issue with igamma\n #s.Gxyz += F * gx * gy * gz # this is working\n s.Gxyz += np.sum(F * gx * gy * gz * contraction * s.ssss) #TODO something is definitely wrong here\n #s.Gxyz += np.sum(F * gx * gy * gz) \n\n s.k -= 1 \n s.tp -= 1 \n s.np -= 1 \n s.t -= 1 \n s.n -= 1 \n s.j -= 1 \n s.sp -= 1 \n s.mp -= 1 # Decrement all loop variables until 0\n s.s -= 1 \n s.m -= 1 \n s.i -= 1\n s.rp -= 1\n s.lp -= 1\n s.r -= 1\n s.l -= 1\n\n #s.Gxyz *= s.ssss\n #s.Gxyz *= contraction \n #TODO this contraction order may be wrong, you are summing along the contraction dimension in the loop then multiplying by all con\n return s.Gxyz\n\n# TEST\n#primitive_eri(L,a,b,c,d,RA,RB,RC,RD,contraction):\n#L = [1,0,0,1,0,0,1,0,0,1,0,0]\n#a,b,c,d = np.array([0.5,0.4]),np.array([0.5,0.4]), np.array([0.5,0.4]), np.array([0.5,0.4])\n#RA,RB,RC,RD = np.array([0.0,0.0,0.9]), np.array([0.0,0.0,-0.9]), np.array([0.0,0.0,0.9]), np.array([0.0,0.0,-0.9])\n#contraction = np.array([0.75,0.25])\n#\n#print(contracted_eri(L,a,b,c,d,RA,RB,RC,RD,contraction))\n\n\n\n\n\n\n\n","sub_path":"Quax_dev_archive/integrals_dev/tei_trials/teis_trial7/kmurph_trial2/integrals_utils.py","file_name":"integrals_utils.py","file_ext":"py","file_size_in_byte":10393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"358344707","text":"from . 
import go, make, run, send\n\n\ndef test_send_on_nil_channel():\n ch = None\n go(lambda: send(ch, 5, lambda: None))\n raised = False\n try:\n run()\n except:\n raised = True\n assert raised\n","sub_path":"python/stubs/test_channel.py","file_name":"test_channel.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"415963995","text":"import dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom treatment_calculator.utils import langs, get_title_mapping, labs_ques, oxygen, oxygen_vals\n\n\n\ndef map_feat_vals(x, name, language):\n if name == \"Gender\":\n return langs[language].get_gender(x == 1)\n else:\n return name\n\n\ndef build_dropdown_card(_id, m, content_dict, language, feature_name, readable_name):\n \"\"\"Makes feature card with dropdown data\"\"\"\n insert_data = [\n dbc.Col(\n children=[\n html.H5(readable_name, className=\"input-label\"),\n html.Div(\n id='calc-categorical-{}-wrapper'.format(_id),\n children=dcc.Dropdown(\n id={\n 'type': 'treatments',\n 'index': 'calc-categorical-{}'.format(_id),\n 'f_idx': content_dict[\"index\"],\n 'feature': feature_name,\n 'f_rng': repr((None, content_dict[\"default\"], None))\n },\n options=[{'label': map_feat_vals(x, readable_name, language), 'value': x}\n for x in content_dict['vals']],\n value=1,\n className=\"dcc_dropdown feature-dropdown\",\n clearable=False,\n ),\n ),\n ]\n ),\n ]\n card = [\n dbc.Row(\n insert_data,\n no_gutters=True,\n style={\"width\": \"100%\"}\n ),\n dbc.Tooltip(\n content_dict['explanation'],\n target='calc-categorical-{}-wrapper'.format(_id),\n ),\n ]\n return card\n\n\ndef build_input_card(_id, m, content_dict, feature_name, readable_name):\n is_temp = content_dict[\"name\"] == \"Body Temperature\"\n insert_data = [\n dbc.Col([\n html.H5(readable_name + \" (\" + content_dict[\"units\"] + \")\", className=\"input-label\"),\n html.Div(\n id=\"calc-numeric-{}-wrapper\".format(_id),\n children=dbc.Input(\n id={\n 'type': 'treatments',\n 'index': \"calc-numeric-{}\".format(_id),\n 'f_idx': content_dict[\"index\"],\n 'feature': readable_name,\n 'f_rng': str((content_dict[\"min_val\"], content_dict[\"default\"], content_dict[\"max_val\"])),\n },\n type=\"number\",\n placeholder=\"e.g. 
{}\".format(int(content_dict['default'])),\n className=\"numeric-input \" + \"temp-input\" if is_temp else \"\",\n bs_size=\"lg\",\n min=content_dict[\"min_val\"],\n max=content_dict[\"max_val\"],\n ),\n ),\n ], align=\"stretch\"\n ),\n ]\n if is_temp:\n insert_data.append(\n dcc.Dropdown(\n id={\n 'type': 'temperature',\n 'index': \"units\",\n },\n options=[{'label': x, 'value': x} for x in [\"°F\", \"°C\"]],\n value=\"°F\",\n className=\"dcc_dropdown temp-dropdown\",\n clearable=False\n ),\n\n )\n card = [\n dbc.Row(\n insert_data,\n align=\"end\",\n no_gutters=True,\n style={\"width\": \"100%\"}\n ),\n dbc.Tooltip(\n content_dict['explanation'],\n target=\"calc-numeric-{}-wrapper\".format(_id),\n ),\n ]\n return card\n\n\ndef build_checkbox_card(_id, feature_name, feature_index, readable_name, explanation):\n item = dbc.Row(\n no_gutters=True,\n style={\"width\": \"100%\"},\n children=[\n html.H5(readable_name.split(\"(\")[0], className=\"input-label\", style={\"max-width\": \"100%\"}),\n html.Div(\n id='bin-{}-wrapper'.format(feature_index),\n style={\"width\": \"100%\", \"display\": \"flex\", \"paddingLeft\": \"10px\"},\n children=[\n dbc.Checkbox(\n id={\n 'type': 'treatments-checkbox',\n 'index': 'calc-checkbox-{}'.format(_id),\n 'f_idx': feature_index,\n 'feature': feature_name\n },\n checked=False\n ),\n html.H5(readable_name.split(\"(\")[1][0:-1], className=\"input-label\",\n style={\"marginBottom\": \"0px\", \"marginTop\": \"0px\", \"marginLeft\": \"20px\",\n \"color\": \"#495057\", \"fontSize\": \"15px\", \"opacity\": \"1\"}),\n ]\n ),\n dbc.Tooltip(\n explanation,\n target=\"bin-{}-wrapper\".format(feature_index)\n )\n ])\n return item\n\n\ndef build_multidrop_card(_id, show_name, content_dict, language, feature_name):\n \"\"\"Used to select multiple from chronic diseases at bottom of mortality calculator\"\"\"\n title_mapping = get_title_mapping()\n options = []\n for i in range(len(content_dict[\"index\"])):\n options.append({'label': title_mapping[language][content_dict['vals'][i]],\n 'value': content_dict['index'][i]})\n return dbc.Col([\n html.H5(content_dict[\"name\"], className=\"input-label\",\n style={\"display\": \"inline-block\" if show_name else \"none\"}),\n dcc.Dropdown(\n options=options,\n value=[] if feature_name != \"Race\" else None,\n id={\n 'type': 'treatments-multi',\n 'index': \"calc-multidrop-{}\".format(_id),\n 'feature': feature_name\n },\n # Classname needed for tooltip target\n className=\"dcc_dropdown feature-dropown calc-multidrop-{}\".format(_id),\n style={\"width\": \"100%\"},\n multi=True if feature_name != \"Race\" else False,\n placeholder=\"Default: Other\" if feature_name == \"Race\" else \"Select...\"\n ),\n dbc.Tooltip(\n content_dict['explanation'],\n target=\".calc-multidrop-{}\".format(_id)\n ),\n ])\n\n\n# TODO: Dropdown tooltips are not translated\ndef build_feature_cards(features, m=True, labs=False, language=0):\n \"\"\"This builds all the feature cards\"\"\"\n inputs = features[\"numeric\"]\n dropdowns = features[\"categorical\"]\n multidrop = features[\"multidrop\"]\n checkboxes = features[\"checkboxes\"]\n title_mapping = get_title_mapping()\n\n # The scaffold that will hold ordered feature cards\n feature_scaffold = [\n {\n \"group\": \"Demographics\",\n \"features\": [\"age\", \"gender\", \"race\", \"temperature\"],\n \"mortality\": {\n \"layout\": \"2x2\",\n \"layout_m\": \"1x3\"\n },\n },\n {\n \"group\": \"Metabolic Panel\",\n \"features\": [\"alanine amino\", \"aspartate amino\", \"bilirubin\", \"calcium\",\n \"creatin\", 
\"sodium\", \"urea nitro\", \"potas\", \"glyc\"],\n \"mortality\": {\n \"layout\": \"3x1\",\n \"layout_m\": \"4x2\",\n \"expanded\": {\n \"alanine amino\": 2,\n \"glyc\": 2\n }\n },\n \"infection\": {\n \"expanded\": {\n \"alanine amino\": [(\"lg\", 2), (\"md\", 2)], #scale by 2 for large and medium devices\n \"urea nitro\": [(\"lg\", 2), (\"sm\", 2)],\n }\n }\n },\n {\n \"group\": \"Abnormal Labs and Vitals\",\n \"features\": [],\n \"mortality\": {\n \"layout\": \"2x3\",\n \"vertical_expanded\": {\n \"checkboxes\": 0.75,\n }\n }\n },\n {\n \"group\": \"Blood Counts\",\n # Note: red cell does not exist in mortality calculator, that's why the different dimens\n \"features\": [\"hemoglobin\", \"lympho\", \"platelet\", \"leucocyte\"],\n \"mortality\": {\n \"layout\": \"2x2\",\n \"layout_m\": \"2x2\",\n \"expanded\": {\n \"red cell\": 2,\n }\n }\n },\n {\n \"group\": \"Other Lab Values\",\n \"features\": [\"C-reactive protein\", \"prothrombin time\"],\n \"mortality\": {\n \"layout\": \"2x1\",\n \"layout_m\": \"1x2\",\n },\n \"infection\": {\n \"vertical_expanded\": {\n \"C-reactive protein\": 1.5,\n \"prothrombin time\": 1.5\n }\n }\n },\n {\n \"group\": \"Miscellaneous\",\n \"features\": [\"comorbid\", \"treatmen\"],\n \"mortality\": {\n \"layout\": \"2x1\",\n \"layout_m\": \"1x2\",\n \"expanded\": {\n \"comorbid\": 3\n },\n \"vertical_expanded\": {\n \"comorb\": 2\n }\n }\n },\n {\n \"group\": \"Unknown\",\n \"features\": [],\n \"mortality\": {\n \"layout\": \"3x3\",\n }\n }\n ]\n for group in feature_scaffold:\n group[\"cards\"] = [(None, [])] * len(group[\"features\"])\n feature_scaffold[-1][\"cards\"] = []\n\n # Add a card into its right place in the scaffold\n def add_feature(feature_name, feature_card):\n add_feature.count += 1\n # Try to add card to its appropraite group\n for grp in enumerate(feature_scaffold):\n # Check if name is in this group's features\n for fname in enumerate(grp[1][\"features\"]):\n if fname[1].lower() in feature_name.lower():\n feature_scaffold[grp[0]][\"cards\"][fname[0]] = (feature_name, feature_card)\n return\n if feature_name == \"checkboxes\":\n feature_scaffold[2][\"cards\"].append((feature_name, feature_card))\n return\n\n # Add card to default group\n feature_scaffold[-1][\"cards\"].append((feature_name, feature_card))\n\n add_feature.count = 0\n\n for _id, content_dict in enumerate(dropdowns):\n add_feature(\n content_dict['name'],\n build_dropdown_card(str(_id), m, content_dict, language, content_dict['name'],\n title_mapping[language][content_dict['name']])\n )\n\n for _id, content_dict in enumerate(checkboxes):\n for i in range(len(content_dict[\"vals\"])):\n add_feature(\n \"checkboxes\",\n build_checkbox_card(str(_id),\n title_mapping[language][content_dict[\"vals\"][i]],\n content_dict[\"index\"][i],\n title_mapping[language][content_dict[\"vals\"][i]],\n content_dict[\"explanation\"]\n )\n )\n\n for _id, content_dict in enumerate(inputs):\n add_feature(\n content_dict['name'],\n # Give different IDs to fix input box not clearing when change\n build_input_card(str(_id) + str(labs), m, content_dict, content_dict['name'],\n title_mapping[language][content_dict['name']])\n )\n for _id, content_dict in enumerate(multidrop):\n add_feature(\n content_dict['name'],\n build_multidrop_card(str(_id),\n True,\n content_dict, language, content_dict['name'])\n )\n\n # final card layout\n feature_content = []\n\n # card number to keep track of increasing delay\n card_num = 0\n\n # Loop through all the groups\n for grp in feature_scaffold:\n # Get the layout 
\n        r, c = [int(x) for x in grp[\"mortality\"][\"layout\"].split('x')]\n        r_m, c_m = r, c\n        if \"layout_m\" in grp[\"mortality\"]:\n            r_m, c_m = [int(x) for x in grp[\"mortality\"][\"layout_m\"].split('x')]\n\n        # If there are no cards, skip this group\n        if all([x[0] is None for x in grp[\"cards\"]]): continue\n\n        group_content = []\n\n        w = 12 / c\n        w_m = 12 / c_m\n\n        # Get all the correct horizontal expansion factors from group\n        expansions = {}\n        if m and \"expanded\" in grp[\"mortality\"]:\n            expansions = grp[\"mortality\"][\"expanded\"]\n        elif not m:\n            if \"infection\" in grp:\n                if \"expanded\" in grp[\"infection\"]:\n                    expansions = grp[\"infection\"][\"expanded\"]\n            elif \"expanded\" in grp[\"mortality\"]:\n                expansions = grp[\"mortality\"][\"expanded\"]\n\n        # Get all the correct vertical expansion factors from group\n        v_expansions = {}\n        if m and \"vertical_expanded\" in grp[\"mortality\"]:\n            v_expansions = grp[\"mortality\"][\"vertical_expanded\"]\n        elif not m:\n            if \"infection\" in grp:\n                if \"vertical_expanded\" in grp[\"infection\"]:\n                    v_expansions = grp[\"infection\"][\"vertical_expanded\"]\n            elif \"vertical_expanded\" in grp[\"mortality\"]:\n                v_expansions = grp[\"mortality\"][\"vertical_expanded\"]\n\n        # Loop through all the cards in this group\n        for name, card in grp[\"cards\"]:\n            if name is None:\n                continue\n\n            # get expansion factor of this card\n            f = {\"sm\": 1, \"md\": 1, \"lg\": 1}\n            for n in [ex for ex in expansions if ex.lower() in name.lower()]:\n                if type(expansions[n]) == list:\n                    for size, scale in expansions[n]:\n                        f[size] = scale\n                else:\n                    f[\"sm\"] = expansions[n]\n                    f[\"md\"] = expansions[n]\n                    f[\"lg\"] = expansions[n]\n\n            # get vertical expansion factor of this card\n            v_f = 1\n            for n in [ex for ex in v_expansions if ex.lower() in name.lower()]:\n                v_f = v_expansions[n]\n\n            # Create card content and add it to the group content\n            group_content.append(dbc.Col(\n                xs=12,\n                sm=w_m * f[\"sm\"],\n                md=w_m * f[\"md\"],\n                lg=w * f[\"lg\"],\n                style={\"padding\": \"0px\"},\n                children=dbc.Card(\n                    style={\"borderRadius\": \"0px\",\n                           \"height\": \"{}px\".format(str(150 * v_f)),\n                           \"borderWidth\": \"1px\",\n                           \"background\": \"rgba(0, 0, 0, 0)\"},\n                    children=[\n                        dbc.CardBody(card, className=\"feat-options-body\")\n                    ])\n            ))\n\n            card_num += 1\n\n        # Add the group content to the feature content\n        feature_content.append(dbc.Col(\n            style={\n                'paddingBottom': 30,\n                'borderColor': 'red',\n            },\n            xs=12,\n            sm=c_m * 6,\n            md=c_m * 6,\n            lg=c * 4,\n            children=[\n                html.Div(\n                    **{\"data-aos\": \"fade-up\", \"data-aos-delay\": str(card_num % 4 * 150)},\n                    # For overlapping dropdown problem\n                    style={\"transformStyle\": \"flat\",\n                           \"zIndex\": str(add_feature.count - card_num),\n                           \"position\": \"relative\"},\n                    className=\"aos-refresh-onload\",\n                    children=dbc.Card(\n                        className=\"elevation-3\",\n                        style={\"borderWidth\": \"0px\"},\n                        children=[\n                            dbc.CardHeader(grp[\"group\"],\n                                           style={\"fontWeight\": \"bold\"}),\n                            dbc.Row(group_content, style={\"margin\": \"0px\", \"borderWidth\": \"0px\"})\n                        ]\n                    )\n                )\n            ],\n        ))\n    return feature_content\n\n","sub_path":"treatment_calculator/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":16172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
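[editor's note] The features.py record above wires every input up with a dict-style component id such as {'type': 'treatments-checkbox', 'index': ..., 'feature': ...}; that is Dash's pattern-matching callback convention. The sketch below shows how such ids are typically consumed. It is an illustration only: the app object, the "calc-summary" output target, and the callback body are hypothetical and not part of the record (dash>=1.11 and the dbc.Checkbox "checked" prop are assumed).

import dash
from dash.dependencies import Input, Output, ALL

app = dash.Dash(__name__)  # hypothetical app; the record only defines layout builders

@app.callback(
    Output("calc-summary", "children"),  # hypothetical output target
    [Input({"type": "treatments-checkbox", "index": ALL,
            "f_idx": ALL, "feature": ALL}, "checked")],
)
def count_checked(checked):
    # Dash delivers one list entry per checkbox whose id matches the ALL pattern
    return "{} treatments selected".format(sum(bool(v) for v in checked))

With ALL, a single callback fires whenever any matching checkbox changes, which is why the cards can be generated dynamically without registering one callback per feature.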
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport functools\n\nfrom federatedml.protobuf.generated import sir_meta_pb2, sir_param_pb2\nfrom fate_flow.entity.metric import Metric, MetricMeta\nfrom federatedml.model_base import ModelBase\nfrom federatedml.param.sir_param import SecureInformationRetrievalParam\nfrom federatedml.util import abnormal_detection, LOGGER\nfrom federatedml.transfer_variable.transfer_class.secure_information_retrieval_transfer_variable import \\\n SecureInformationRetrievalTransferVariable\n\n\nMODEL_PARAM_NAME = 'SecureInformationRetrievalParam'\nMODEL_META_NAME = 'SecureInformationRetrievalMeta'\n\n\nclass BaseSecureInformationRetrieval(ModelBase):\n \"\"\"\n\n \"\"\"\n def __init__(self):\n super(BaseSecureInformationRetrieval, self).__init__()\n self.model_param = SecureInformationRetrievalParam()\n self.security_level = None\n self.commutative_cipher = None\n self.transfer_variable = None\n self.block_num = None # N in 1-N OT\n self.coverage = None # the percentage of transactions whose values are successfully retrieved\n\n # For callback\n self.metric_name = \"sir\"\n self.metric_namespace = \"train\"\n self.metric_type = \"SIR\"\n\n def _init_base_model(self, param: SecureInformationRetrievalParam):\n self.transfer_variable = SecureInformationRetrievalTransferVariable()\n self._init_transfer_variable()\n\n self.model_param = param\n self.security_level = self.model_param.security_level\n\n def _init_transfer_variable(self):\n self.transfer_variable.natural_indexation.disable_auto_clean()\n self.transfer_variable.id_blocks_ciphertext.disable_auto_clean()\n\n @staticmethod\n def _abnormal_detection(data_instances):\n \"\"\"\n Make sure input data_instances is valid.\n \"\"\"\n abnormal_detection.empty_table_detection(data_instances)\n abnormal_detection.empty_feature_detection(data_instances)\n\n def _encrypt_id(self, data_instance, mode):\n pass\n\n def _decrypt_id(self, data_instance, mode):\n pass\n\n def _sync_commutative_cipher_public_knowledge(self):\n \"\"\"\n guest -> host public knowledge\n :return:\n \"\"\"\n pass\n\n def _exchange_id_list(self, id_list):\n \"\"\"\n\n :param id_list: Table in the form (id, 0)\n :return:\n \"\"\"\n pass\n\n def _raw_information_retrieval(self, data_instance):\n \"\"\"\n If security_level == 0, then perform raw information retrieval\n :param data_instance:\n :return:\n \"\"\"\n pass\n\n def _parse_security_level(self, data_instance):\n \"\"\"\n Cooperatively parse the security level index\n :param data_instance:\n :return:\n \"\"\"\n pass\n\n def _sync_doubly_encrypted_id_list(self, id_list):\n \"\"\"\n host -> guest\n :param id_list:\n :return:\n \"\"\"\n pass\n\n def _sync_natural_index(self, id_list_arr):\n \"\"\"\n guest -> host\n :param id_list_arr:\n :return:\n \"\"\"\n\n def _sync_natural_indexation(self, id_list, time):\n \"\"\"\n guest -> host\n :param id_list:\n :param time\n :return:\n \"\"\"\n\n def _sync_block_num(self):\n \"\"\"\n guest -> host\n :param\n :return:\n \"\"\"\n\n 
def _transmit_value_ciphertext(self, id_block, time):\n \"\"\"\n host -> guest\n :param id_block:\n :param time: int\n :return:\n \"\"\"\n\n def _sync_intersect_cipher_cipher(self, id_list):\n \"\"\"\n guest -> host\n :param id_list:\n :return:\n \"\"\"\n\n def _sync_intersect_cipher(self, id_list):\n \"\"\"\n host -> guest\n :param id_list:\n :return:\n \"\"\"\n\n def _check_oblivious_transfer_condition(self):\n \"\"\"\n 1-N OT with N no smaller than 2 is supported\n :return:\n \"\"\"\n return self.block_num >= 2\n\n def _failure_response(self):\n \"\"\"\n If even 1-2 OT cannot be performed, make failure response\n :return:\n \"\"\"\n raise ValueError(\"Cannot perform even 1-2 OT, recommend use raw retrieval\")\n\n def _sync_coverage(self, data_instance):\n \"\"\"\n guest -> host\n :param data_instance:\n :return:\n \"\"\"\n pass\n\n def _sync_nonce_list(self, nonce, time):\n \"\"\"\n host -> guest\n :param nonce:\n :return:\n \"\"\"\n pass\n\n def export_model(self):\n if self.model_output is not None:\n return self.model_output\n\n meta_obj = self._get_meta()\n param_obj = self._get_param()\n result = {\n MODEL_META_NAME: meta_obj,\n MODEL_PARAM_NAME: param_obj\n }\n self.model_output = result\n return result\n\n def _get_meta(self):\n return sir_meta_pb2.SecureInformationRetrievalMeta(\n security_level=self.security_level,\n oblivious_transfer_protocol=self.model_param.oblivious_transfer_protocol,\n commutative_encryption=self.model_param.commutative_encryption,\n non_committing_encryption=self.model_param.non_committing_encryption,\n key_size=self.model_param.key_size,\n raw_retrieval=self.model_param.raw_retrieval\n )\n\n def _get_param(self):\n return sir_param_pb2.SecureInformationRetrievalParam(\n coverage=self.coverage,\n block_num=self.block_num\n )\n\n def _display_result(self, block_num=None):\n if block_num is None:\n self.callback_metric(metric_name=self.metric_name,\n metric_namespace=self.metric_namespace,\n metric_data=[Metric(\"Coverage\", self.coverage),\n Metric(\"Block number\", self.block_num)])\n self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,\n metric_name=self.metric_name,\n metric_meta=MetricMeta(self.metric_name, metric_type=\"INTERSECTION\"))\n else:\n self.callback_metric(metric_name=self.metric_name,\n metric_namespace=self.metric_namespace,\n metric_data=[Metric(\"Coverage\", self.coverage),\n Metric(\"Block number\", block_num)])\n self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,\n metric_name=self.metric_name,\n metric_meta=MetricMeta(self.metric_name, metric_type=\"INTERSECTION\"))\n\n @staticmethod\n def _set_schema(data_instance, id_name=None, label_name=None, feature_name=None):\n \"\"\"\n\n :param data_instance: Table\n :param id_name: str\n :param label_name: str\n :return:\n \"\"\"\n if id_name is not None:\n data_instance.schema['sid_name'] = id_name\n if label_name is not None:\n data_instance.schema['label_name'] = label_name\n if feature_name is not None:\n data_instance.schema['header'] = feature_name\n return data_instance\n\n @staticmethod\n def log_table(tab, mode=0):\n \"\"\"\n Print a small table\n mode = 0: k, v\n mode = 1: k, v.label\n :param mode:\n :param tab: Table\n :return:\n \"\"\"\n tab_col = tab.collect()\n if mode == 0:\n for k, v in tab_col:\n LOGGER.debug(\"k = {}\".format(k))\n LOGGER.debug(\"v = {}\".format(v))\n elif mode == 1:\n for k, v in tab_col:\n LOGGER.debug(\"k = {}\".format(k))\n LOGGER.debug(\"v.label = {}\".format(v.label))\n elif mode == 2:\n for k, v in tab_col:\n 
LOGGER.debug(\"k = {}\".format(k))\n LOGGER.debug(\"v.id = {}\".format(v.inst_id))\n LOGGER.debug(\"v.label = {}\".format(v.label))\n elif mode == 3:\n for k, v in tab_col:\n LOGGER.debug(\"k = {}\".format(k))\n LOGGER.debug(\"v.id = {}\".format(v.inst_id))\n LOGGER.debug(\"v.features = {}\".format(v.features))\n LOGGER.debug(\"v.label = {}\".format(v.label))\n\n @staticmethod\n def log_schema(tab):\n \"\"\"\n\n :param tab: Table\n :return:\n \"\"\"\n LOGGER.debug(\"tab schema = {}\".format(tab.schema))\n\n\nclass CryptoExecutor(object):\n def __init__(self, cipher_core):\n self.cipher_core = cipher_core\n\n def init(self):\n self.cipher_core.init()\n\n def renew(self, cipher_core):\n self.cipher_core = cipher_core\n\n def map_encrypt(self, plaintable, mode):\n \"\"\"\n Process the input Table as (k, v)\n (k, enc_k) for mode == 0\n (enc_k, -1) for mode == 1\n (enc_k, v) for mode == 2\n (k, (enc_k, v)) for mode == 3\n :param plaintable: Table\n :param mode: int\n :return: Table\n \"\"\"\n if mode == 0:\n return plaintable.map(lambda k, v: (k, self.cipher_core.encrypt(k)))\n elif mode == 1:\n return plaintable.map(lambda k, v: (self.cipher_core.encrypt(k), -1))\n elif mode == 2:\n return plaintable.map(lambda k, v: (self.cipher_core.encrypt(k), v))\n elif mode == 3:\n return plaintable.map(lambda k, v: (k, (self.cipher_core.encrypt(k), v)))\n else:\n raise ValueError(\"Unsupported mode for crypto_executor map encryption\")\n\n def map_values_encrypt(self, plaintable, mode):\n \"\"\"\n Process the input Table as v\n enc_v if mode == 0\n :param plaintable: Table\n :param mode: int\n :return:\n \"\"\"\n if mode == 0:\n return plaintable.mapValues(lambda v: self.cipher_core.encrypt(v))\n else:\n raise ValueError(\"Unsupported mode for crypto_executor map_values encryption\")\n\n def map_decrypt(self, ciphertable, mode):\n \"\"\"\n Process the input Table as (k, v)\n (k, dec_k) for mode == 0\n (dec_k, -1) for mode == 1\n (dec_k, v) for mode == 2\n (k, (dec_k, v)) for mode == 3\n :param ciphertable: Table\n :param mode: int\n :return: Table\n \"\"\"\n if mode == 0:\n return ciphertable.map(lambda k, v: (k, self.cipher_core.decrypt(k)))\n elif mode == 1:\n return ciphertable.map(lambda k, v: (self.cipher_core.decrypt(k), -1))\n elif mode == 2:\n return ciphertable.map(lambda k, v: (self.cipher_core.decrypt(k), v))\n elif mode == 3:\n return ciphertable.map(lambda k, v: (k, (self.cipher_core.decrypt(k), v)))\n else:\n raise ValueError(\"Unsupported mode for crypto_executor map decryption\")\n\n def map_values_decrypt(self, ciphertable, mode):\n \"\"\"\n Process the input Table as v\n dec_v if mode == 0\n decode(dec_v) if mode == 1\n :param ciphertable: Table\n :param mode: int\n :return:\n \"\"\"\n if mode == 0:\n return ciphertable.mapValues(lambda v: self.cipher_core.decrypt(v))\n elif mode == 1:\n f = functools.partial(self.cipher_core.decrypt, decode_output=True)\n return ciphertable.mapValues(lambda v: f(v))\n else:\n raise ValueError(\"Unsupported mode for crypto_executor map_values encryption\")\n\n def get_nonce(self):\n return self.cipher_core.get_nonce()\n","sub_path":"python/federatedml/secure_information_retrieval/base_secure_information_retrieval.py","file_name":"base_secure_information_retrieval.py","file_ext":"py","file_size_in_byte":12246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"261439396","text":"import numpy as np\nimport tvm\nfrom tvm import autotvm\nimport topi\nimport logging\nimport topi.testing\nfrom 
tvm.contrib.pickle_memoize import memoize\nfrom topi.util import get_const_tuple\nimport json\nimport os\nimport sys\nimport tensorflow as tf\n\nflags = tf.flags\nflags.DEFINE_string(\"input_path\", \"\", \"path of input file\")\nflags.DEFINE_string(\"autotvm_log\", \"\", \"path of autotvm tuning log\")\nflags.DEFINE_string(\"tvm_profile_log\",\n \"/tmp/tvm_profile.log\", \"path of tvm profile\")\nflags.DEFINE_string(\"output_path\", \"\", \"path of output file\")\n\nFLAGS = flags.FLAGS\n\n@autotvm.template\ndef tvm_conv2d_nchw_tune_op(input_shape, filter_shape, output_shape, strides, paddings, dilations):\n A = tvm.placeholder(input_shape, name='input0', dtype=\"float32\")\n W = tvm.placeholder(filter_shape, name='input1', dtype=\"float32\")\n C = topi.nn.conv2d_nchw(A, W, strides, paddings, dilations, out_dtype=\"float32\")\n cfg = autotvm.get_config()\n s = topi.cuda.conv2d.schedule_conv2d_nchw_cuda(cfg, C)\n return s, [A, W, C]\n\ndef search_conv2d_nchw_configs(input_shape, filter_shape, output_shape, strides, paddings, dilations, num_trials):\n input_n, input_c, input_h, input_w = input_shape\n filter_c, filter_input_c, filter_h, filter_w = filter_shape\n output_n, output_c, output_h, output_w = output_shape\n stride_h, stride_w = strides\n padding_h, padding_w = paddings\n dilation_h, dilation_w = dilations\n\n logging.getLogger('autotvm').setLevel(logging.DEBUG)\n logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))\n task = autotvm.task.create(tvm_conv2d_nchw_tune_op, args=(input_shape, filter_shape, output_shape, strides, paddings, dilations), target='cuda')\n print(task.config_space)\n measure_option = autotvm.measure_option(\n builder=autotvm.LocalBuilder(),\n runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)\n )\n\n op_name=\"tuned_convolution_op_float_i%d_%d_%d_%d_w%d_%d_%d_%d_o%d_%d_%d_%d_ws%d_%d_wd%d_%d_p%d_%d\" % (input_n, input_c, input_h, input_w, filter_c, filter_input_c, filter_h, filter_w, output_n, output_c, output_h, output_w, stride_h, stride_w, dilation_h, dilation_w, padding_h, padding_w)\n\n log_name = \"tuned_kernels/\"+op_name+\".log\"\n\n # if DO_TUNING:\n tuner = autotvm.tuner.XGBTuner(task)\n # set num of trial\n tuner.tune(n_trial=num_trials, measure_option=measure_option,\n callbacks=[autotvm.callback.log_to_file(log_name)])\n\n\ndef lookup_conv2d_config(input_shape, filter_shape, output_shape, strides, paddings, dilations, log_path):\n input_n, input_c, input_h, input_w = input_shape\n filter_c, filter_input_c, filter_h, filter_w = filter_shape\n output_n, output_c, output_h, output_w = output_shape\n stride_h, stride_w = strides\n padding_h, padding_w = paddings\n dilation_h, dilation_w = dilations\n\n op_name=\"tuned_convolution_op_float_i%d_%d_%d_%d_w%d_%d_%d_%d_o%d_%d_%d_%d_ws%d_%d_wd%d_%d_p%d_%d\" % (input_n, input_c, input_h, input_w, filter_c, filter_input_c, filter_h, filter_w, output_n, output_c, output_h, output_w, stride_h, stride_w, dilation_h, dilation_w, padding_h, padding_w)\n\n log_name = FLAGS.autotvm_log\n with open(log_name, \"r\") as fin:\n log_lines = fin.readlines()\n # log_records=tvm.autotvm.record.load_from_file(log_name)\n log_records = []\n for line in log_lines:\n line = line.rstrip('\\n')\n # print(line)\n record_json = json.loads(line)\n tm = record_json['r'][0][0]\n if tm > 10000000: # filter bad configs\n continue\n if record_json['i'][2][0] != input_shape or record_json['i'][2][1] != filter_shape or record_json['i'][2][2] != output_shape or record_json['i'][2][3] != strides or 
record_json['i'][2][4] != paddings or record_json['i'][2][5] != dilations: # filter other configs\n continue\n griddim_x = record_json['i'][5][\"e\"][2][2][0]\n if griddim_x == -1:\n griddim_x = int(output_w / record_json['i'][5][\"e\"][2][2][1] / record_json['i'][5][\"e\"][2][2][2] / record_json['i'][5][\"e\"][2][2][3])\n griddim_y = record_json['i'][5][\"e\"][1][2][0]\n if griddim_y == -1:\n griddim_y = int(output_h / record_json['i'][5][\"e\"][1][2][1] / record_json['i'][5][\"e\"][1][2][2] / record_json['i'][5][\"e\"][1][2][3])\n griddim_z = record_json['i'][5][\"e\"][0][2][0]\n if griddim_z == -1:\n griddim_z = int(output_n * output_c / record_json['i'][5][\"e\"][0][2][1] / record_json['i'][5][\"e\"][0][2][2] / record_json['i'][5][\"e\"][0][2][3])\n record = {\"time\": tm,\n \"grid\": [griddim_x, griddim_y, griddim_z],\n \"block\": [record_json['i'][5][\"e\"][2][2][2], record_json['i'][5][\"e\"][1][2][2], record_json['i'][5][\"e\"][0][2][2]],\n \"config\": line}\n if record[\"block\"][0] * record[\"block\"][1] * record[\"block\"][2] % 32 != 0:\n continue\n opt = tm * record[\"grid\"][0] * record[\"grid\"][1] * record[\"grid\"][2] * record[\"block\"][0] * record[\"block\"][1] * record[\"block\"][2]\n # opt = record[\"grid\"][0] * record[\"grid\"][1] * record[\"grid\"][2] * record[\"block\"][0] * record[\"block\"][1] * record[\"block\"][2]\n record.update({\"opt\": opt})\n log_records.append((tm, record))\n # print(log_records[-1])\n log_records.sort(key=lambda item: item[0])\n # print(\"available kernels:\", len(log_records))\n print(op_name)\n log_records_fast = log_records[0:100] # top fast kernels\n # log_records_fast = log_records\n log_records = []\n for i in range(len(log_records_fast)):\n log_records.append((log_records_fast[i][1][\"opt\"], log_records_fast[i][1]))\n log_records.sort(key=lambda item: item[0])\n print(\"fastest kernel:\",log_records_fast[0][1][\"time\"], \"grid:\", log_records_fast[0][1][\"grid\"], \"block:\", log_records_fast[0][1][\"block\"])\n print(\"efficient kernel:\",log_records[0][1][\"time\"], \"grid:\", log_records[0][1][\"grid\"], \"block:\", log_records[0][1][\"block\"])\n # for i in range(min(10, len(log_records))): # print top 100 entries\n # print(\"time:\", log_records[i][1][\"time\"], \"grid:\", log_records[i][1][\"grid\"], \"block:\", log_records[i][1][\"block\"])\n # print(log_records[i][1][\"config\"])\n with open(log_path, 'a') as fout:\n fout.write(log_records[0][1][\"config\"]+'\\n')\n # json.dump(log_records[0][1][\"config\"], fout)\n\ndef tune_conv2d_nchw_codegen(input_shape, filter_shape, output_shape, strides, paddings, dilations):\n input_n, input_c, input_h, input_w = input_shape\n filter_c, filter_input_c, filter_h, filter_w = filter_shape\n output_n, output_c, output_h, output_w = output_shape\n stride_h, stride_w = strides\n padding_h, padding_w = paddings\n dilation_h, dilation_w = dilations\n\n lookup_conv2d_config(input_shape, filter_shape, output_shape, strides, paddings, dilations)\n\n logging.getLogger('autotvm').setLevel(logging.DEBUG)\n logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))\n task = autotvm.task.create(tvm_conv2d_nchw_tune_op, args=(input_shape, filter_shape, output_shape, strides, paddings, dilations), target='cuda')\n\n op_name=\"tuned_convolution_op_float_i%d_%d_%d_%d_w%d_%d_%d_%d_o%d_%d_%d_%d_ws%d_%d_wd%d_%d_p%d_%d\" % (input_n, input_c, input_h, input_w, filter_c, filter_input_c, filter_h, filter_w, output_n, output_c, output_h, output_w, stride_h, stride_w, dilation_h, dilation_w, 
padding_h, padding_w)\n\n # log_name = \"tuned_kernels/\"+op_name+\".log\"\n log_name = \"tmp.log\"\n\n dispatch_context = autotvm.apply_history_best(log_name)\n best_config = dispatch_context.query(task.target, task.workload)\n\n with dispatch_context:\n with tvm.target.create('cuda'):\n s, arg_bufs = tvm_conv2d_nchw_tune_op(input_shape, filter_shape, output_shape, strides, paddings, dilations)\n func = tvm.build(s, arg_bufs, 'cuda', name=op_name)\n\n ctx = tvm.context('cuda', 0)\n\n a_np = np.random.uniform(size=input_shape).astype(\"float32\")\n w_np = np.random.uniform(size=filter_shape).astype(\"float32\")\n c_np = np.zeros(output_shape).astype(\"float32\")\n\n a = tvm.nd.array(a_np, ctx)\n w = tvm.nd.array(w_np, ctx)\n c = tvm.nd.array(c_np, ctx)\n\n kernel_code = func.imported_modules[0].get_source()\n\n func(a, w, c)\n\n return kernel_code\n\ndef extract_ops_from_log(log_path):\n conv_ops = []\n lines = open(log_path).readlines()\n deduped_lines = list(set(lines))\n print(\"#convs:\", len(lines), \"#deduped_convs:\", len(deduped_lines))\n for line in deduped_lines:\n # print(line.rstrip('\\n'))\n items = line.rstrip('\\n').split('|')\n\n tmp_items = items[1].split('+')[1].split('_')\n input_shape = [int(item) for item in tmp_items]\n\n tmp_items = items[2].split('+')[1].split('_')\n filter_shape = [int(item) for item in tmp_items]\n\n tmp_items = items[3].split('+')[1].split('_')\n output_shape = [int(item) for item in tmp_items]\n\n tmp_items = items[4].split('+')[1].split('_')\n window_movement_strides = [int(item) for item in tmp_items]\n\n tmp_items = items[5].split('+')[1].split('_')\n window_dilation_strides = [int(item) for item in tmp_items]\n\n tmp_items = items[6].split('+')[1].split('_')\n padding_below_diff = [int(item) for item in tmp_items]\n\n conv_params = {\n 'input_shape': input_shape,\n 'filter_shape': filter_shape,\n 'output_shape': output_shape,\n 'window_movement_strides': window_movement_strides,\n 'window_dilation_strides': window_dilation_strides,\n 'padding_below_diff': padding_below_diff,\n }\n conv_ops.append(conv_params)\n\n return conv_ops\n\n\ndef get_tvm_topi_func_name(input_shape, filter_shape, output_shape, strides, paddings, dilations):\n input_n, input_c, input_h, input_w = input_shape\n filter_c, filter_input_c, filter_h, filter_w = filter_shape\n output_n, output_c, output_h, output_w = output_shape\n stride_h, stride_w = strides\n padding_h, padding_w = paddings\n dilation_h, dilation_w = dilations\n func_name = \"tuned_convolution_op_float_i%d_%d_%d_%d_w%d_%d_%d_%d_o%d_%d_%d_%d_ws%d_%d_wd%d_%d_p%d_%d_kernel0\" % (\n input_n, input_c, input_h, input_w, filter_c, filter_input_c, filter_h, filter_w, output_n, output_c, output_h, output_w, stride_h, stride_w, dilation_h, dilation_w, padding_h, padding_w)\n return func_name\n\n\ndef extract_tvm_profiling_from_log(log_path):\n lines = open(log_path).readlines()\n deduped_lines = list(set(lines))\n # print(deduped_lines)\n # print(\"#convs:\", len(lines), \"#deduped_convs:\", len(deduped_lines))\n profiling_result = {}\n for line in deduped_lines:\n items = line.rstrip('\\n').split('|')\n profiling_data = {\n 'gridDim': [int(items[1]), int(items[2]), int(items[3])],\n 'blockDim': [int(items[4]), int(items[5]), int(items[6])]\n }\n profiling_result.update({items[0]: profiling_data})\n return profiling_result\n\n\ndef generate_db_topi_ops(conv_ops):\n topi_ops = []\n tvm_profiling_log_path = FLAGS.tvm_profile_log\n if os.path.exists(tvm_profiling_log_path):\n os.remove(tvm_profiling_log_path)\n\n for 
conv_op in conv_ops:\n topi_code = tune_conv2d_nchw_codegen(conv_op['input_shape'], conv_op['filter_shape'], conv_op['output_shape'],\n conv_op['window_movement_strides'], conv_op['padding_below_diff'], conv_op['window_dilation_strides'])\n topi_op = {\n 'tvm_func_name': get_tvm_topi_func_name(conv_op['input_shape'], conv_op['filter_shape'], conv_op['output_shape'],\n conv_op['window_movement_strides'], conv_op['padding_below_diff'], conv_op['window_dilation_strides']),\n 'op_type': 'Convolution',\n 'parameters': conv_op,\n 'code': topi_code\n }\n topi_ops.append(topi_op)\n\n profiling_result = extract_tvm_profiling_from_log(tvm_profiling_log_path)\n for topi_op in topi_ops:\n tvm_func_name = topi_op['tvm_func_name']\n topi_op.update(profiling_result[tvm_func_name])\n\n return topi_ops\n\n\n\noutput_log_file = FLAGS.output_path\n\nif os.path.exists(output_log_file):\n os.remove(output_log_file)\n\nconv_ops = extract_ops_from_log(FLAGS.input_path)\n# conv_op = conv_ops[0]\n# lookup_conv2d_config(conv_op['input_shape'], conv_op['filter_shape'], conv_op['output_shape'],\n# conv_op['window_movement_strides'], conv_op['padding_below_diff'], conv_op['window_dilation_strides'])\n# topi_ops = generate_db_topi_ops(conv_ops)\n\n\nfor conv_op in conv_ops:\n lookup_conv2d_config(conv_op['input_shape'], conv_op['filter_shape'], conv_op['output_shape'],\n conv_op['window_movement_strides'], conv_op['padding_below_diff'], conv_op['window_dilation_strides'], output_log_file)\n\n# with open('resnext-select-efficient_convolution_kernels.json', 'w') as fout:\n# json.dump(topi_ops, fout)","sub_path":"artifacts/kernel_db/autotvm_scripts/tune_topi_conv2d_select.py","file_name":"tune_topi_conv2d_select.py","file_ext":"py","file_size_in_byte":13079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97056359","text":"'''\nCreated on Jul 29, 2014\n\n@author: uduong\n\nThis class will create one instance of each object type.\n'''\n\nimport time\nimport unittest\n\nfrom helpers.Elements import Elements\nfrom helpers.Helpers import Helpers\nfrom helpers.WebdriverUtilities import WebdriverUtilities\nfrom helpers.testcase import *\n\n\nclass TestCreateOneObjectOfEachType(WebDriverTestCase):\n \n def testCreateOneObjectOfEachType(self):\n \n self.testname=\"TestCreateOneObjectOfEachType\"\n self.setup() \n util = WebdriverUtilities()\n util.setDriver(self.driver)\n element = Elements()\n do = Helpers(self)\n do.setUtils(util)\n do.login()\n \n object_list = [\"Contract\",\"Control\",\"DataAsset\",\"Facility\",\"Market\",\"Objective\",\"OrgGroup\",\"Policy\",\"Process\",\"Product\",\"Program\",\n \"Project\",\"Regulation\",\"System\",\"Standard\",\"Clause\"]\n \n for obj in object_list:\n do.createObject(obj)\n\n \nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Create/TestCreateOneObjectOfEachType.py","file_name":"TestCreateOneObjectOfEachType.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"614383810","text":"# IMPORTS=======================================================================\nfrom pylab import rcParams\nimport matplotlib.pyplot as plt\n\nimport nilmtk as ntk\nimport nilmtk.disaggregate as ntkd\nimport nilmtk.metrics as ntkm\n\nrcParams['figure.figsize'] = (14, 6)\nplt.style.use('ggplot')\n\n# CONSTANTS=====================================================================\nh5_path = 
'/home/t7/Dropbox/Documents/TUDelft/Thesis/Datasets/DRED/DRED.h5'\nh5_path = r'C:\\Users\\davwang\\Desktop\\nilmtk\\nilmtk\\dataset_converters\\dred\\DRED.h5'\n\n# Load Data=====================================================================\ndred = ntk.DataSet(h5_path)\n# dred.set_window(start=None, end='2015-07-10 00:00:00')\n\nelec = dred.buildings[1].elec\nmains = elec.mains()\n\n# Train==========================================================================\nco = ntk.disaggregate.CombinatorialOptimisation()\nco.train(elec)\n\n# Disaggregate====================================================================\noutput = ntk.HDFDataStore(h5_path + 'outputDRED.h5', 'w')\nco.disaggregate(mains, output)\noutput.close()\n\n# Metrics==========================================================================\ndisag = ntk.DataSet(h5_path + 'outputDRED.h5')\ndisag_elec = disag.buildings[1].elec\n\nf1 = ntk.metrics.f1_score(disag_elec, elec)\n","sub_path":"nilmtk/dataset_converters/dred/dred_nilmtk.py","file_name":"dred_nilmtk.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"269272842","text":"import yaml\nimport json\n\n\n\n\ndef output_yaml(data):\n with open('data.yaml', 'w') as outfile:\n yaml.dump(data, outfile, default_flow_style=False)\n \n \ndef output_json(data):\n with open('data.json', 'w') as outfile:\n json.dump(data, outfile)\n\n\n\n\ndef main():\n sample_data = [\n 1, 'two', 3,\n {'a': 1, 'b': 2}\n ]\n\n output_yaml(sample_data)\n output_json(sample_data)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"class1/exercise6.py","file_name":"exercise6.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"199539428","text":"import unittest\nimport pandas as pd\nfrom .. 
import StemmerPreprocessor\n\nclass TestStemmerPreprocessor(unittest.TestCase):\n\n    def test_stemmer(self):\n        dataframe = pd.DataFrame({\n            'text': [\n                'cars ponies caress',\n                \"cars car's car\",\n                \"the boy's cars are different colors\"\n            ],\n            'valid_text': [\n                'car poni caress',\n                'car car\\' car',\n                'the boy\\' car are differ color'\n            ]\n        })\n\n        StemmerPreprocessor('text').run(dataframe)\n\n        dataframe[['text', 'valid_text']].apply(lambda value: self.assertEqual(*value), axis=1)\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"src/preprocessors/test/test_stemmer.py","file_name":"test_stemmer.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"267179432","text":"n, m = [int(i) for i in input().split(' ')]\ns = input()\nfor i in range(m):\n    a, b, c, d = [int(j)-1 for j in input().split(' ')]\n    match = temp = 0\n    for j in range(a, b+1):\n        if temp == d-c+1:\n            break\n        elif s[j] == s[c+temp]:\n            temp += 1\n        else:\n            match = max(match, temp)\n            temp = 0\n    print(max(match, temp))","sub_path":"Code/CodeRecords/2179/60812/246734.py","file_name":"246734.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"65662189","text":"# coding: utf-8\n# This file is part of tofbot, a friendly IRC bot.\n# You may redistribute it under the Simplified BSD License.\n# If we meet some day, and you think this stuff is worth it,\n# you can buy us a beer in return.\n#\n# By TaTaaa 2018\n\nfrom toflib import Plugin\nimport time\nfrom unidecode import unidecode\nimport re\n\n\nclass PluginAmour(Plugin):\n    # determination of the win in the dating game\n\n    def handle_msg(self, msg_text, _chan, _nick):\n        # compute the odds when the message syntax\n        # matches: grande irma, name1<3name2?\n        chaine = msg_text.lower().strip()\n        if re.match(r\"(grande irma,)\\s*[a-zA-Z_]+.*<3(.)*[a-zA-Z_]+.*(\\?)+\",\n                    chaine):\n            # split out the two first names\n            chaine = chaine.replace(\" \", \"\")\n            chaine = chaine.replace('grandeirma,', '')\n            chaine = chaine[:-1]\n            amoureux1, amoureux2 = chaine.split(\"<3\")\n\n            # concatenate the two strings\n            chaine = amoureux1 + amoureux2\n\n            # drop every character that is not a letter\n            chaine = re.sub(\"[^a-zA-Z_]\", \"\", chaine)\n\n            # map each letter to its numeric code\n            chaine = [ord(lettre) for lettre in chaine.lower()]\n\n            # compute the theological sum (theonum) of all\n            # the numeric codes\n            theonum = sum(chaine)\n\n            # reduce theonum until the final sum falls\n            # between 1 and 9\n            while theonum > 9:\n                chaine = [int(i) for i in str(theonum)]\n                theonum = sum(chaine)\n            # love percentage, calibrated so that\n            # Vanessa and Erick = 100%\n            pourcentage = (11 - theonum) * 10\n            text = \" \" + str(pourcentage) + '% de réussite pour ' + \\\n                   amoureux1 + ' et ' + amoureux2\n\n            if pourcentage < 40:  # for the hookup tier\n                self.say(u\"»-(¯`·.·´¯)-> Irma prédit l'Amour <-(¯`·.·´¯)-«\")\n                self.say(\" \")\n                time.sleep(0.5)\n                self.say(text)\n                self.say(\" \")\n                self.say('8==D 8==D 8==D 8==D 8==D 8==D')\n\n            else:  # for the real win\n                self.say(u\"»-(¯`·.·´¯)-> Irma prédit L'amour <-(¯`·.·´¯)-«\")\n                self.say(\" \")\n                time.sleep(0.5)\n                self.say(text)\n                self.say(\" \")\n                self.say('<3 <3 <3 <3 <3 <3 <3 <3 <3 <3 <3 <3 <3 <3 
<3')\n","sub_path":"plugins/amour.py","file_name":"amour.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"221080539","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^cities/$', views.cities, name='cities'),\n url(r'^cities/([0-9]+)$', views.city_details, name='city_details'),\n url(r'^search/$', views.search, name='search')\n]","sub_path":"just_weather/weather/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"215732525","text":"\"\"\"\n23. Merge k Sorted Lists\nHard\n\nMerge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.\n\nExample:\n\nInput:\n[\n 1->4->5,\n 1->3->4,\n 2->6\n]\n\nOutput: 1->1->2->3->4->4->5->6\n\"\"\"\nfrom typing import List\nfrom linked_list import ListNode, build_ll\nimport heapq\n\n\"\"\"\nclass ListNode():\n def __init__(self, val=None, next=None, prev=None):\n self.val = val\n self.next = next\n self.prev = prev\n\n def __repr__(self):\n return f\"{self.val}, {self.next.__repr__()}\"\n\n # Needed for sorted merge of k sorted linked lists.\n def __lt__(self, other):\n return self.val < other.val\n\"\"\"\n\n###############################################################################\n\"\"\" OPTIMAL SOLUTION\nSolution 1: Use min heap to store heads of each list.\nAssume function __lt__() has been defined in class ListNode.\n\nNote: heappop() and heappush() are O(log k), where k is the number of lists.\n\nO(n log k) time, where n is total number of elements among all input lists.\nO(mk log k) time if all atomic lists have approx. 
length m.\n\nO(k) extra space for heap\n\"\"\"\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n heads = []\n for ll in lists:\n if ll:\n heapq.heappush(heads, ll)\n\n header = ListNode() # dummy header node\n tail = header\n\n while heads:\n m = heapq.heappop(heads) # temp var for readibility\n tail.next = m\n tail = tail.next\n\n if m.next:\n heapq.heappush(heads, m.next)\n \n return header.next # could return header.next, tail\n\n###############################################################################\n\"\"\"\nSolution 1b:\nSame as solution 1 using min heap, \nBUT don't assume function __lt__() has been defined in class ListNode.\nInstead, store tuples (node value, index in lists) in heap rather than nodes.\n\"\"\"\nclass Solution1b:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n heads = []\n\n for i in range(len(lists)):\n if lists[i]:\n heapq.heappush(heads, (lists[i].val, lists[i]))\n \n header = ListNode() # dummy header node\n tail = header\n\n while heads:\n _, node = heapq.heappop(heads)\n\n tail.next = node\n tail = tail.next\n\n if node.next:\n heapq.heappush(heads, (node.next.val, node.next))\n\n return header.next # could return header.next, tail\n\n###############################################################################\n\"\"\"\nSolution 2: \nUse array to store heads of each list.\nAssume __lt__() is defined in class ListNode, but don't have to if\nstore tuples (node value, index of lists, node) in array.\n\nCan either: \n(1) sort array vals and extract the min node each time, or\n(2) find min node each time.\n\nO(nk + k log k) time, where n = total number of elements among all input lists.\nO(k) extra space for array that stores heads of lists.\nO(1) extra space if reuse given lists.\n\"\"\"\nclass Solution2:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n heads = [ll for ll in lists if ll]\n \n header = ListNode() # dummy node\n tail = header\n\n while heads:\n # After first iteration, should be O(k) since we only appended one\n # item to sorted list. First iteration is O(k log k).\n # Sort in reverse to make it easy to remove the min node.\n heads = sorted(heads, reverse=True)\n \n tail.next = heads.pop()\n tail = tail.next\n if tail.next:\n heads.append(tail.next)\n\n return header.next # could return header.next, tail\n\n##############################################################################\n\"\"\"\nSolution 3: repeatedly merge 2 sorted linked lists at a time.\n\nO(m * k^2) = O(nk) time if all k sorted lists have approx. length m.\nO(1) extra space\n\nAssume each sorted list has length m.\nMerging sorted lists of lengths a and b is O(a+b).\nThe first merge is for None with the first list, so is O(1).\n\n2m + 3m + ... + km = m(2 + 3 + ... 
+ k) = m[k(k+1)/2 - 1] = O(m * k^2)\n\"\"\"\nclass Solution3:\n def merge_two_sorted_lists(self, l1: ListNode, l2: ListNode) -> ListNode:\n header = ListNode() # dummy header for merged list\n node = header\n\n while l1 and l2:\n if l1.val <= l2.val:\n node.next = l1\n l1 = l1.next\n else:\n node.next = l2\n l2 = l2.next\n\n node = node.next\n\n if l1: node.next = l1\n elif l2: node.next = l2\n\n return header.next\n\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n\n from functools import reduce\n return reduce(self.merge_two_sorted_lists, lists) if lists else None\n\n###############################################################################\n\"\"\"\nSolution 4: divide & conquer, merging 2 sorted lists at at time.\n\nO(n log k) time\nO(1) extra space\n\nFirst pass has n/2 merges of two 1-element lists.\nSecond pass has n/4 merges of two lists with 2 elements each\nLast pass has 2 merges of two lists with n/2 elements each.\nEach pass, the merges have total O(n) time.\nThere are log_2(k) passes, so overall time is O(n log k)\n\"\"\"\nclass Solution4:\n def merge_two_sorted_lists(self, l1: ListNode, l2: ListNode) -> ListNode:\n header = ListNode() # dummy header for merged list\n node = header\n\n while l1 and l2:\n if l1.val <= l2.val:\n node.next = l1\n l1 = l1.next\n else:\n node.next = l2\n l2 = l2.next\n\n node = node.next\n\n if l1: node.next = l1\n elif l2: node.next = l2\n\n return header.next\n\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n n = len(lists)\n interval = 1\n\n while interval < n:\n for i in range(0, n - interval, 2 * interval):\n lists[i] = self.merge_two_sorted_lists(lists[i], lists[i+interval])\n\n interval *= 2\n\n return lists[0] if n > 0 else None\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(arrays, comment=None):\n lists = []\n for arr in arrays:\n lists += [build_ll(arr)[0]]\n \n print(\"=\"*80)\n if comment:\n print(comment)\n\n # lists are modified after the merge\n print(\"\\nOriginal sorted lists:\") \n for ll in lists:\n print(ll)\n \n head = s.mergeKLists(lists)\n\n print(\"\\nSorted, merged linked list:\")\n print(head)\n print()\n\n #print(\"\\nVerify that it is actually sorted:\")\n\n def test_self_merge(arr):\n lst = build_ll(arr)[0]\n _ = s.mergeKLists([lst, lst]) # infinite loop\n\n\n #s = Solution() # min heap w/ __lt__() defined in ListNode\n s = Solution1b() # min heap storing (node.val, index, node)\n #s = Solution2() # use list to store heads, and repeatedly sort\n #s = Solution3() # merge 2 lists at a time\n #s = Solution4() # divide & comquer, merging 2 lists at a time\n\n comment = \"=== LC example\"\n arrays = [[1,4,5], [1,3,4], [2,6]]\n test(arrays, comment)\n\n #comment = \"=== Lists of unequal length\"\n #arrays = [[1, 2, 3, 17], [-5, 6, 13], [7, 8], [4]]\n #test(arrays, comment)\n\n ### Edge cases\n # test([], \"Empty list of lists\")\n # test([None], \"One list that is None\")\n # test([None, None, None], \"Three lists that are each None\")\n # test([[1,2,3]], \"Nonempty list by itself\")\n # test([[1,2,3], None], \"Nonempty list with list None\")\n # test([None, [1,2,3], None], \"Nonempty list with None lists before and after\")\n # test([[7],[0],[-5],[3]], \"Lists of single elements\")\n\n #test_self_merge([1,2,3,4,5]) # this will hang; infinite loop\n\n # import random\n # comment = \"=== Lists of randomly generated ints\"\n # arrays = [\n # sorted([random.randint(1, 100) for _ in range(5)]),\n # sorted([random.randint(1, 
100) for _ in range(3)]),\n    #     sorted([random.randint(1, 100) for _ in range(4)]),\n    #     ]\n    # test(arrays, comment)\n","sub_path":"linked_list/0078_merge_k_sorted_LL.py","file_name":"0078_merge_k_sorted_LL.py","file_ext":"py","file_size_in_byte":8280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"644925108","text":"# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 5\n_modified_time = 1430287673.6521449\n_template_filename='/usr/lib/python2.6/site-packages/wfbsh_dbinst/templates/environment.sh'\n_template_uri='/environment.sh'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\n_exports = []\n\n\ndef render_body(context,**pageargs):\n    context.caller_stack._push_frame()\n    try:\n        __M_locals = __M_dict_builtin(pageargs=pageargs)\n        c = context.get('c', UNDEFINED)\n        __M_writer = context.writer()\n        # SOURCE LINE 1\n        __M_writer(u'NFS_SHARED=')\n        __M_writer(unicode(c.nfs_shared))\n        __M_writer(u'\\nNFS_OPTION=')\n        # SOURCE LINE 2\n        __M_writer(unicode(c.nfs_option))\n        __M_writer(u'\\nSYSLOG_HOST=')\n        # SOURCE LINE 3\n        __M_writer(unicode(c.syslog_host))\n        __M_writer(u'\\nSYSLOG_PORT=')\n        # SOURCE LINE 4\n        __M_writer(unicode(c.syslog_port))\n        __M_writer(u'\\nINST_HOST=')\n        # SOURCE LINE 5\n        __M_writer(unicode(c.inst_host))\n        __M_writer(u'\\nINST_PORT=')\n        # SOURCE LINE 6\n        __M_writer(unicode(c.inst_port))\n        __M_writer(u'\\n')\n        return ''\n    finally:\n        context.caller_stack._pop_frame()\n\n\n","sub_path":"res/env/sit_emea/dbinst/data/templates/environment.sh.py","file_name":"environment.sh.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"121297309","text":"import datetime\r\nfrom reportlab.lib.pagesizes import letter\r\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image\r\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\r\n\r\n\r\ndoc = SimpleDocTemplate(\"Coverletter_Kewa_Mei.pdf\",pagesize=letter,\r\n                        rightMargin=36,leftMargin=36,\r\n                        topMargin=60,bottomMargin=0)\r\nStory=[]\r\ntoday = datetime.date.today()\r\nmyinfo = ['Kewa “Jeffery” Mei','West Lafayette, IN 47906','meik@purdue.edu']\r\ncompanyinfo = [input('Company name: '),input('Company address - Street: '),input('Company address - State: ')]\r\n\r\nstyles=getSampleStyleSheet()['Normal']\r\nstyles.leading = 15\r\n\r\n# sender info\r\nfor part in myinfo:\r\n    ptext = '%s' % part.strip()\r\n    Story.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\n# record today's date\r\nptext = '%s' % today\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\n# company info\r\nfor part in companyinfo:\r\n    ptext = '%s' % part.strip()\r\n    Story.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\nptext = 'Dear Hiring Manager:'\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\ngangwei = input('Position name: ')\r\npingtai = ['Purdue career website', 'LinkedIn']\r\np = int(input(\"1 = Purdue career website, 2 = LinkedIn search \"))\r\nleixing = ['business intelligence', 'data science', 'consulting']\r\nl = int(input(\"1 = business intelligence, 2 = data science, 3 = consulting \"))\r\n
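# (editor's note, added) Every Paragraph/Spacer appended to Story is a ReportLab\r\n# flowable; doc.build(Story) at the end lays the flowables out top to bottom,\r\n# and the %-placeholders below are filled from the answers collected above.\r\n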
ptext = \"I found the position %s for %s at %s and \\\r\n I am writing to apply for it. As an MBA Candidate in \\\r\n Management Information System from Krannert School \\\r\n of Management at Purdue University with a series of \\\r\n certificates from SAS and Teradata, I offer an experienced \\\r\n and knowledgeable perspective toward %s.\" % \\\r\n (gangwei.lower(),\r\n companyinfo[0].title(),\r\n pingtai[p-1],\r\n leixing[l-1])\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\nptext = 'Two years consecutively, I joined Teradata Partner Conference as competition \\\r\nfinalists to share my research on text mining. I won the analytics competition in 2015 for successfully \\\r\nquantifying the relationship between customers’ online comments and the actual performance of the company. \\\r\nMy most recent internship in Chicago helped me fully exercise my skills in business intelligence; I composed \\\r\nand published the first white paper for the aviation aftermarket industry, which was featured in the ACPC \\\r\nconference this summer. Being an active student in the Teradata University Network, I participated in the crowdsourcing data mining \\\r\nproject held by Hire Heroes USA, an NGO helping veterans get hired. During this project, I used my skills in natural language \\\r\nprocessing with Python, SQL, Tableau, and successfully identified the preferred method for the organization to better gain \\\r\nexposure to the social network. As an MBA-MIS student, my skills in Python programming are as good as mine in consulting. They help me solve most of the \\\r\n problems I come across, such as automatically generating this cover letter, which you are currently reading, according to the job descriptions.'\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\ngongsileixing = ['consulting', 'game', 'high-tech', 'media', 'hospitality']\r\ni = int(input(\"1 = consulting, 2 = game, 3 = high-tech, 4 = media, 5 = hospitality \"))\r\nptext = 'Thank you very much for your time. \\\r\n It is my personal goal to apply my skills and knowledge \\\r\n as %s with a %s company like %s. \\\r\n I would be happy to further discuss my experience and \\\r\n my qualifications with you. Please contact \\\r\n me through meik@purdue.edu or call me at ~.' 
% (gangwei,gongsileixing[i-1],companyinfo[0].title())\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\nptext = 'Regards,'\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\n\r\nptext = '%s' % myinfo[0]\r\nStory.append(Paragraph(ptext, styles))\r\nStory.append(Spacer(1, 14))\r\ndoc.build(Story)\r\n","sub_path":"Cover letter generator.py","file_name":"Cover letter generator.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209629170","text":"import random\nimport math\n\nr = random.random()\ntheta = random.random() * math.pi * 2\nprint (r, theta)\nprint (r, math.degrees(theta))\nx,y= (r*(math.cos(theta)), r*(math.sin(theta)))\nprint(x,y)\n\n\na, b, c = 2*x*math.sqrt(1-x**2-y**2), 2*y*math.sqrt(1-x**2-y**2), 1-2*(x**2+y**2)\n\nprint(a,b,c)\n\n","sub_path":"randomPointUnitDisk.py","file_name":"randomPointUnitDisk.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"578611959","text":"import requests\nfrom LinkedList import LinkedList, Node\n\n\nclass LRUCache(object):\n def __init__(self, size):\n self.max_cache_size = size\n self.current_cache_size = 0\n self.key_to_object = {}\n self.list_of_items = LinkedList()\n\n def get(self, url):\n if self.key_to_object.get(url, None):\n # update cache: move item to head\n self.list_of_items.update_item(self.key_to_object[url])\n else:\n res = requests.get(url)\n item = Node(url, res.text)\n self.key_to_object[url] = item\n if self.current_cache_size < self.max_cache_size:\n self.list_of_items.add(item)\n self.current_cache_size += 1\n else:\n # remove least recently used item\n key_to_remove = self.list_of_items.remove_tail()\n del self.key_to_object[key_to_remove]\n self.list_of_items.add(item)\n\n\nif __name__ == \"__main__\":\n from random import randint\n from time import sleep\n lru = LRUCache(3)\n base_url = \"https://ipinfo.io/\"\n urls = [\"ip\", \"city\", \"country\", \"timezone\", \"org\", \"loc\"]\n while True:\n i = randint(0, 5)\n url = base_url + urls[i]\n lru.get(url)\n lru.list_of_items.print()\n sleep(1)\n","sub_path":"lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"24154289","text":"\"\"\"empty message\n\nRevision ID: 657ee303bbe7\nRevises: 437ab0e9aa6a\nCreate Date: 2021-06-21 16:59:23.393508\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '657ee303bbe7'\ndown_revision = '437ab0e9aa6a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('garden', sa.Column('image', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('garden', 'image')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/657ee303bbe7_.py","file_name":"657ee303bbe7_.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"397991388","text":"import time\n\nclass Simple_search:\n    def __init__(self,key,text):\n        self.key = key\n        self.text = text\n        self.n = len(self.key)\n        self.m = len(self.text)\n\n\n    def search(self):\n        for i in range(self.m-self.n+1):\n            for j in range(self.n):\n                if self.text[i+j] != self.key[j]:\n                    break\n                if j == self.n-1:\n                    print(\"match at index {}\".format(i))\n\nif __name__ == '__main__':\n    f = open(\"sample.txt\",\"r\")\n    text = f.read()\n    Simple = Simple_search(\"C\",text)  # create an instance\n    Simple.search()\n","sub_path":"mult1/simple_search.py","file_name":"simple_search.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"574248295","text":"import numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport scipy.io.wavfile\nimport pyaudio\nimport wave\nimport threading\n\nwav_file_name = \"cw.wav\"\n\nMORSE_CODE_DICT = {'A': '.-', 'B': '-...',\n                   'C': '-.-.', 'D': '-..', 'E': '.',\n                   'F': '..-.', 'G': '--.', 'H': '....',\n                   'I': '..', 'J': '.---', 'K': '-.-',\n                   'L': '.-..', 'M': '--', 'N': '-.',\n                   'O': '---', 'P': '.--.', 'Q': '--.-',\n                   'R': '.-.', 'S': '...', 'T': '-',\n                   'U': '..-', 'V': '...-', 'W': '.--',\n                   'X': '-..-', 'Y': '-.--', 'Z': '--..',\n                   '1': '.----', '2': '..---', '3': '...--',\n                   '4': '....-', '5': '.....', '6': '-....',\n                   '7': '--...', '8': '---..', '9': '----.',\n                   '0': '-----', ', ': '--..--', '.': '.-.-.-',\n                   '?': '..--..', '/': '-..-.', '-': '-....-',\n                   '(': '-.--.', ')': '-.--.-'}\n\n\ndef morse_analysis():\n    rate, data = scipy.io.wavfile.read(wav_file_name)\n    N = data.shape[0]\n    envelope = np.abs(signal.hilbert(data))\n    list1 = []\n    for i in range(0, len(envelope), 300):\n        if abs(envelope[i]) > 650:\n            list1.append(i)\n    count = 0\n    num = 0\n    liat2 = []\n    into = \"\"\n    if len(list1) > 0:\n        for i in range(1, len(list1)):\n            if (list1[i] - list1[i - 1]) < 2000:\n                pass\n            else:\n                if (list1[i - 1] - list1[num]) > 5999:\n                    liat2.append(i - 1)\n                    into += \"-\"\n                    # print(\"-\", end=\"\")\n                elif (list1[i - 1] - list1[num]) > 2000:\n                    liat2.append(i - 1)\n                    into += \".\"\n                    # print(\"・\", end=\"\")\n                count += 1\n                num = i\n        if (list1[len(list1) - 1] - list1[num]) > 5999:\n            liat2.append(i - 1)\n            into += \"-\"\n            # print(\"-\")\n        elif (list1[len(list1) - 1] - list1[num]) > 2000:\n            liat2.append(i - 1)\n            into += \".\"\n            # print(\"・\")\n\n        n = 0\n        for i in range(0, len(liat2)):\n            if (list1[liat2[i] + 1] - list1[liat2[i]]) > 5999:\n                for k, v in MORSE_CODE_DICT.items():\n                    if v == into[n:i + 1]:\n                        print(k, end=\"\")\n                n = i + 1\n            if i == len(liat2) - 1:\n                for k, v in MORSE_CODE_DICT.items():\n                    if v == into[n:i + 1]:\n                        print(k, end=\"\")\n                n = i + 1\n\n    else:\n        pass\n        # print(\"No morse\")\n
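\n# (editor's note, added) Rough timing model used by morse_analysis, given the\n# 44100 Hz sample rate set in recording() below: the envelope is sampled every\n# 300 samples; gaps under 2000 samples (about 45 ms) are treated as part of the\n# same tone; a tone span over 5999 samples (about 136 ms) is decoded as a dash\n# and one over 2000 samples as a dot; envelope amplitude above 650 separates\n# tone from silence. These thresholds are the original author's empirical values.\n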
in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n\n # finished recording\n stream.stop_stream()\n stream.close()\n audio.terminate()\n waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n waveFile.setnchannels(CHANNELS)\n waveFile.setsampwidth(audio.get_sample_size(FORMAT))\n waveFile.setframerate(RATE)\n waveFile.writeframes(b''.join(frames))\n waveFile.close()\n\n\nrecording()\nth = threading.Thread(target=morse_analysis)\nth.start()\n\nwhile True:\n recording()\n th.join()\n th = threading.Thread(target=morse_analysis)\n th.start()\n","sub_path":"cw software .py","file_name":"cw software .py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"418272711","text":"#search algorithms\n\nimport math\nimport queue\nimport timeit\nimport random\n\n\nclass Node:\n def __init__(self, element, parent):\n self.element = element\n self.parent = parent\n\nclass NodeV:\n def __init__(self, element, parent, costSoFar):\n self.element = element\n self.parent = parent\n self.costSoFar = costSoFar\n\n# BFS\ndef BFS(Graph, initialState):\n\n if Graph.isGoal(initialState):\n return (initialState,0)\n\n toVisit = queue.Queue()\n\n init = Node(initialState, initialState)\n finished = {init.element}\n for i in Graph.successors(initialState):\n it = Node(i,init)\n toVisit.put(it)\n\n path = []\n cost = 0\n while not toVisit.empty():\n visitIt = toVisit.get()\n if visitIt.element not in finished:\n finished.add(visitIt.element)\n if Graph.isGoal(visitIt.element):\n while visitIt.element != initialState:\n path = [visitIt.element] + path\n visitIt = visitIt.parent\n cost += 1\n return ([initialState] + path, cost)\n else:\n for i in Graph.successors(visitIt.element):\n if i not in finished:\n it = Node(i, visitIt)\n toVisit.put(it)\n\"\"\"\n it = len(finished)\n goThrough = finished[it - 1]\n while goThrough.element != initialState:\n path = [goThrough.element] + path\n goThrough = goThrough.suc\n cost += 1\n return ([initialState] + path, cost)\n\"\"\"\n\n# DFS\ndef DFS(Graph, initialState):\n\n if Graph.isGoal(initialState):\n return (initialState,0)\n\n toVisit = queue.LifoQueue()\n init = Node(initialState,initialState)\n finished = {init.element}\n for i in Graph.successors(initialState):\n it = Node(i,init)\n toVisit.put(it)\n\n path = []\n cost = 0\n while not toVisit.empty():\n visitIt = toVisit.get()\n if Graph.isGoal(visitIt.element):\n while visitIt.element != initialState:\n path = [visitIt.element] + path\n visitIt = visitIt.parent\n cost += 1\n return ([initialState] + path, cost)\n else:\n if visitIt.element not in finished:\n finished.add(visitIt.element)\n for i in Graph.successors(visitIt.element):\n if i not in finished:\n it = Node(i, visitIt)\n toVisit.put(it)\n\n\n# DLS\ndef DLS(Graph, initialState, depthLimit):\n finished = {1} # here set as well ???\n finished.pop()\n path = []\n cost = 0\n found = False\n init = Node(initialState, initialState)\n (finished, found) = DLS_rec(Graph, init, depthLimit, finished, found)\n if found == True:\n # path finding by parents\n it = len(finished)\n goThrough = list(finished)[it-1]\n while goThrough.element != initialState:\n path = [goThrough.element] + path\n goThrough = goThrough.parent\n cost += 1\n return ([initialState] + path, cost)\n else:\n #print('Goal node not found (assuming its existence because of Depth Limit', depthLimit)\n return ([],0)\n\ndef DLS_rec(Graph, cur, depthLimit, finished, found):\n 
#print('Depthlimit:',depthLimit)\n if found == True:\n return (finished, True)\n else:\n if Graph.isGoal(cur.element):\n finished.add(cur)\n found = True\n else:\n if cur not in finished:\n finished.add(cur)\n if depthLimit > 0:\n depthLimit -= 1\n for suc in Graph.successors(cur.element):\n if suc not in [c.element for c in finished]:\n sucNode = Node(suc,cur)\n (path, found) = DLS_rec(Graph, sucNode, depthLimit, finished, found)\n\n return (finished, found)\n\n\n# IDS\nNRANGE = 1000\ndef IDS(Graph, initialState):\n\n for depth in range(1,NRANGE):\n (path, cost) = DLS(Graph, initialState, depth)\n end = timeit.default_timer()\n if len(path) != 0:\n #print('Depth:', depth)\n return (path, cost)\n\n\ndef gen_sucs(graph, cur):\n sucs = graph.successors(cur[1])\n for i in range(len(sucs)):\n yield sucs[i]\n\n# UCS\ndef UCS(valuedGraph, sInit):\n path = []\n pathCost = 0\n curCost = 0\n toAnalyse = queue.PriorityQueue()\n # toAnalyseDocu = []\n tupleInit = (0, sInit) # (cost, state)\n nodeInit = NodeV(tupleInit, NodeV(0, tupleInit, 0), 0)\n visited = {nodeInit.element} # set of visited Nodes\n countQ = 0 # counter for equal prio\n toAnalyse.put((0, countQ, nodeInit))\n # toAnalyseDocu.append(nodeInit)\n countQ += 1\n\n while toAnalyse.empty() == 0:\n current = toAnalyse.get()[2]\n # curCost += current.element[0]\n visited.add(current.element)\n SucsCur = [it for it in valuedGraph.successors(current.element[1])]\n for iter in SucsCur:\n nodeIter = NodeV(iter, current, current.costSoFar + iter[0])\n\n if valuedGraph.isGoal(nodeIter.element[1]):\n # path finding by parents\n goThrough = nodeIter\n while goThrough.element[1] != sInit:\n path = [goThrough.element[1]] + path\n pathCost += goThrough.element[0]\n goThrough = goThrough.parent\n return ([sInit] + path, pathCost)\n\n if iter not in visited or nodeIter.costSoFar < curCost: # or cost is better\n prio = nodeIter.costSoFar\n insert = (prio, countQ, nodeIter)\n toAnalyse.put(insert)\n visited.add(nodeIter.element)\n countQ += 1\n curCost = current.costSoFar\n\n return path, pathCost\n\n\n\n\ndef A_star(valuedGraph, sInit, heuristic):\n path = []\n pathCost = 0\n curCost = 0\n toAnalyse = queue.PriorityQueue()\n #toAnalyseDocu = []\n tupleInit = (0, sInit) # (cost, state)\n nodeInit = NodeV(tupleInit, NodeV(0, tupleInit, 0), 0)\n visited = {nodeInit.element} # set of visited Nodes\n countQ = 0 # counter for equal prio\n toAnalyse.put((0, countQ, nodeInit))\n #toAnalyseDocu.append(nodeInit)\n countQ += 1\n\n while toAnalyse.empty() == 0:\n current = toAnalyse.get()[2]\n #curCost += current.element[0]\n visited.add(current.element)\n SucsCur = [it for it in valuedGraph.successors(current.element[1])]\n for iter in SucsCur:\n nodeIter = NodeV(iter, current, current.costSoFar + iter[0])\n\n if valuedGraph.isGoal(nodeIter.element[1]):\n # path finding by parents\n goThrough = nodeIter\n while goThrough.element[1] != sInit:\n path = [goThrough.element[1]] + path\n pathCost += goThrough.element[0]\n goThrough = goThrough.parent\n return ([sInit] + path, pathCost)\n\n if iter not in visited or nodeIter.costSoFar < curCost: # or cost is better\n prio = nodeIter.costSoFar + heuristic(iter[1])\n insert = (prio, countQ, nodeIter)\n toAnalyse.put(insert)\n #visited.add(nodeIter.element)\n countQ += 1\n curCost = nodeIter.costSoFar\n\n return path, pathCost\n\n\n\nclass NodeT:\n def __init__(self, state, sucs, Q, N, parent, reward):\n self.state = state # State(value, blank position)\n self.sucs = sucs # tuple (cost, successor)\n self.Q = Q\n self.N = N\n 
self.parent = parent\n self.reward = reward\n\n def add_suc(self, sucState):\n self.sucState = sucState\n\n def update(self, reward):\n self.reward = reward\n\n\n\ndef MCTS(ValuedGraph, state, budget):\n initNodeState = NodeT(state, [], 0, 0, [], 0)\n #treeList = []\n for iter in range(int(budget)):\n leaf = treePolicy(ValuedGraph, initNodeState)\n reward = rolloutPolicy(ValuedGraph, leaf)\n backUp(ValuedGraph, leaf[1], reward)\n return bestChild(ValuedGraph, state) # reward\n\ndef treePolicy(ValuedGraph, nodeT):\n num = 0\n cur = (0, nodeT.state)\n while not ValuedGraph.isGoal(cur[1]) and num < 1000: #len(ValuedGraph.successors(suc[1])) != 0\n if len(nodeT.sucs) == 0:\n return expand(ValuedGraph, nodeT)\n else:\n cur = bestChild(ValuedGraph, nodeT)\n num += 1\n curNode = NodeT(cur[1],[],0,0,0,0)\n return (cur[0],curNode)\n\n\ndef bestChild(graph, nodeT):\n c = 1/math.sqrt(2)\n sucsCost = [0]*len(nodeT.sucs)\n i = 0\n for nC in nodeT.sucs:\n if nodeT.N != 0:\n sucsCost[i] = [(nodeT.Q / nodeT.N + c * math.sqrt(2*math.log(nC[1].N / nodeT.N))) for i in nodeT.sucs]\n else:\n for itSuc in nodeT.sucs:\n if itSuc[1].N == 0:\n sucsCost[i] = 99999 #never visited -> first priority\n else:\n sucsCost[i] = (c * math.sqrt(2 * math.log(itSuc.N / nodeT.N)))\n i += 1\n iSuc = sucsCost.index(max(sucsCost))\n suc = graph.successors(nodeT.state)[iSuc]\n return suc # tuple (cost, state)\n\ndef expand(ValuedGraph, nodeT):\n succs = [i for i in ValuedGraph.successors(nodeT.state) if i not in nodeT.sucs]\n toExpand = succs[0] # choose first element\n toExpandNode = NodeT(toExpand[1],[],0,0,nodeT,0)\n nodeT.sucs.append((toExpand[0], toExpandNode))\n expandNode = NodeT(toExpand[1],[],0,0,0,0)\n return (toExpand[0], expandNode) # (cost, state)\n\ndef rolloutPolicy(ValuedGraph, leaf):\n nodeG = Node(leaf[1], 0)\n cost = 0\n num = 1000\n visited = []\n while (not ValuedGraph.isGoal(nodeG.element)) and num != 0:\n simSuc = random.randint(0,len(ValuedGraph.successors(nodeG.element))-1)\n g = ValuedGraph.successors(nodeG.element)[simSuc]\n if g not in visited:\n nodeG = Node(g[1],nodeG)\n cost += g[0]\n visited.append(g)\n num -= 1\n if cost == 0:\n reward = 1\n else:\n reward = 1 / cost\n\n return reward\n\ndef backUp(ValuedGraph, nodeT, reward):\n while nodeT.parent != 0:\n nodeT.N += 1\n nodeT.Q += reward\n nodeT = nodeT.parent","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":10308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"639227875","text":"import string\r\nimport random\r\n# import sys\r\n# sys.setrecursionlimit(1000000)\r\n\r\nm = string.ascii_letters + string.digits\r\n\r\ndef get_key():\r\n key = ''\r\n for i in range(1 ,10):\r\n key += random.choice(m)\r\n if i % 3 == 0 and i != 9:\r\n key += '-'\r\n return key\r\n\r\ndef show_key():\r\n tmp = []\r\n for i in range(200):\r\n one_key = get_key()\r\n if one_key not in tmp:\r\n tmp.append(one_key)\r\n for key in tmp:\r\n with open('002.txt', 'a+') as f:\r\n f.write(key+' ')\r\n\r\nif __name__ == '__main__':\r\n show_key()","sub_path":"001.py","file_name":"001.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437242178","text":"import arcade\n\nclass Base_Unit(arcade.Sprite):\n def __init__(self, damage: int, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.unit_damage = damage\n\n\nclass Tower(Base_Unit):\n def __init__(self, attack_speed: int, attack_radius: 
int, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.unit_attack_radius = attack_radius\n        self.unit_attack_speed = attack_speed\n        self.modify = 1\n\n    def level_up(self, texture: str, modify: int, *args, **kwargs):\n        self.unit_damage = self.unit_damage * self.modify\n        self.unit_attack_speed = self.unit_attack_speed * self.modify\n        self.modify = modify\n        self.filename = texture\n\n\nclass Enemy(Base_Unit):\n    def __init__(self, health: int, speed: int, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.unit_health = health\n        self.unit_speed = speed","sub_path":"Source/Unit/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177227060","text":"import functools\nclass logging(object):\n    def __init__(self, level='INFO', desc=None):\n        self.level = level\n        self.desc = desc\n\n    def __call__(self, func): # receives the function to decorate\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            print(\"[{level}]: enter function {func}()\".format(level=self.level,func=func.__name__))\n            print(\"{desc}>>>>>>start\".format(desc=self.desc))\n            funReturn = func(*args, **kwargs)\n            print(\"{desc}<<<<<<done\".format(desc=self.desc))\n            if isinstance(funReturn, dict):\n                for key, value in funReturn.items():\n                    print(\"[{key}]:{value}\".format(key = key, value = value))\n            # else:\n            #     print('non-dict results are not printed')\n            return funReturn\n        return wrapper # return the decorated function\n\n# @logging(level='INFO')\n# def say(something):\n#     print(\"say {}!\".format(something))\n\n# def say(something):\n#     print(\"say {}!\".format(something))\n\n\n# class logging(object):\n#     def __init__(self, func):\n#         self.func = func\n#\n#     def __call__(self, *args, **kwargs):\n#         print(\"[DEBUG]: enter function {func}()\".format(func=self.func.__name__))\n#         return self.func(*args, **kwargs)\n# @logging(level='INFO')\n# def say(something):\n#     print(\"say {}!\".format(something))\n#     return 'a'\n#\n# a = say('0k')\n# print(a)\n","sub_path":"python_InterfaceAuto/baseLib/baseUtils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"317931388","text":"# Load the modules and setup our workspace\nimport pandas as pd\nimport helper_fxns as hf\n\n\n# A function to import and subset data to a unique patient ID\n# headerFile and detailFile should be strings containing full paths to the data files\ndef importPatientData(patientID, headerFile, detailFile, patientHeadIDColName, patientDetailIDColName):\n    \"\"\"\n    This function takes in a patient ID and information about where the data files are stored, subsets the\n    two data files (header & detail) to that specific patient ID, and then returns\n    the patient ID, the subset header data, and the subset detail data.\n    parameters:\n        patientID - The unique string patient ID\n        headerFile - The string file name for the file (.csv) containing header data about the patient's initial diagnosis.\n        detailFile - The string file name for the file (.csv) containing detail data about the patient's treatment history.\n        patientHeadIDColName - The string column name for the unique patient ID in the header file.\n        patientDetailIDColName - The string column name for the unique patient ID in the detail file.\n    \"\"\"\n\n    # Import the data\n    headerData = pd.read_csv(headerFile)\n    detailData = pd.read_csv(detailFile)\n\n    # Subset the data\n    patientHeaderData = headerData[headerData[patientHeadIDColName] == patientID]\n    patientDetailData = detailData[detailData[patientDetailIDColName] == patientID]\n\n    # Remove all data rows where the provider specialty is not provided (indicating machine billing).\n    pd.options.mode.chained_assignment = None  # Turn off the warning that doesn't apply in this case; default='warn'\n    patDetailData = patientDetailData[pd.notnull(patientDetailData['XSNACodeProviderSpecialty'])]\n    patDetailData.loc[:,'XCodeDate'] = patDetailData['XCodeDate'].apply(hf.dateParse)\n    patDetailData.sort_values('XCodeDate', axis=0, ascending=True, inplace=True)\n    patientHeaderData.loc[:,'XDateDX'] = patientHeaderData['XDateDX'].apply(hf.dateParse)\n    patientHeaderData.loc[:,'XDateFirstSurg'] = patientHeaderData['XDateFirstSurg'].apply(hf.dateParse)\n\n    return patientID, patientHeaderData, patDetailData, detailData\n\n\n# Import data\ndef import_data(headerFile, detailFile, rmDuplicates = True):\n    header_data = pd.read_csv(headerFile)\n    detail_data = pd.read_csv(detailFile)\n\n    if rmDuplicates:\n        # Remove rows where the provider specialty is NULL, indicating a billing code from a machine\n        detail_data = detail_data[pd.notnull(detail_data['XSNACodeProviderSpecialty'])]\n\n    return header_data, detail_data","sub_path":"data_import.py","file_name":"data_import.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516161339","text":"from django.shortcuts import render, redirect \nfrom vehicles.forms import VehicleForm \nfrom vehicles.models import Vehicle \n# Create your views here. 
\ndef new(request): \n if request.method == \"POST\": \n form = VehicleForm(request.POST) \n if form.is_valid(): \n try: \n form.save() \n return redirect('/vehicles/show') \n except: \n pass \n else: \n form = VehicleForm() \n return render(request,'new.html',{'form':form}) \ndef show(request): \n vehicles = Vehicle.objects.all() \n return render(request,\"show.html\",{'vehicles': vehicles}) \ndef edit(request, id): \n vehicle = Vehicle.objects.get(id=id) \n return render(request,'edit.html', {'vehicle': vehicle}) \ndef update(request, id): \n vehicle = Vehicle.objects.get(id=id) \n form = VehicleForm(request.POST, instance = vehicle) \n if form.is_valid(): \n form.save() \n return redirect(\"/vehicles/show\")\n print (form.is_valid())\n print (form.errors) \n return render(request, 'edit.html', {'vehicle': vehicle}) \ndef destroy(request, id): \n vehicle = Vehicle.objects.get(id=id) \n vehicle.delete() \n return redirect(\"/vehicles/show\") \n","sub_path":"vehicles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119870324","text":"# -*- coding:utf-8 -*-\n\nimport datetime\nimport time\n\nimport tornado\nimport tornado.escape\nfrom torcms.core import tools\nfrom torcms.model.core_tab import g_Wiki\nfrom torcms.model.supertable_model import MSuperTable\nimport peewee\n\nclass MPage(MSuperTable):\n def __init__(self):\n self.tab = g_Wiki\n self.kind = '2'\n try:\n g_Wiki.create_table()\n except:\n pass\n\n def update(self, slug, post_data):\n title = post_data['title'].strip()\n if len(title) < 2:\n return False\n\n entry = g_Wiki.update(\n title=title,\n date=datetime.datetime.now(),\n cnt_html=tools.markdown2html(post_data['cnt_md']),\n cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),\n time_update=time.time(),\n kind = '2',\n ).where(g_Wiki.uid == slug)\n entry.execute()\n\n def insert_data(self, post_data):\n title = post_data['title'].strip()\n if len(title) < 2:\n return False\n\n slug = post_data['slug']\n uu = self.get_by_uid(slug)\n if uu is None:\n pass\n else:\n return (False)\n\n try:\n g_Wiki.create(\n title=title,\n date=datetime.datetime.now(),\n uid=slug,\n cnt_html=tools.markdown2html(post_data['cnt_md']),\n time_create=time.time(),\n user_name=post_data['user_name'],\n cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),\n time_update=time.time(),\n view_count=1,\n kind = '2', # 2 for page\n )\n return slug\n except:\n return ''\n\n\n def query_all(self, kind = '2'):\n return self.tab.select().where(self.tab.kind == kind)\n\n def view_count_plus(self, slug):\n entry = g_Wiki.update(\n view_count=g_Wiki.view_count + 1,\n ).where(g_Wiki.uid == slug)\n entry.execute()\n\n\n def query_random(self, num=6):\n return self.tab.select().where(self.tab.kind == self.kind).order_by(peewee.fn.Random()).limit(num)\n\n\n def query_recent(self, num=8):\n return self.tab.select().where(self.tab.kind == self.kind).order_by(self.tab.time_update).limit(num)","sub_path":"torcms/model/page_model.py","file_name":"page_model.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"619577865","text":"import csv\n\ndef main():\n f = open('training_fake.csv', 'r')\n d = csv.DictReader(f)\n # r = d.__next__()\n # print (r)\n # print(r['domain_name'])\n for row in d:\n print(row)\n print(row[\"is_suspicious\"])\n return 
0\n\nmain()","sub_path":"ia/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370794961","text":"import os\nfrom mininet.net import Mininet\nfrom mininet.log import info, setLogLevel\nfrom mininet.node import OVSSwitch, Controller, RemoteController\nfrom mininet.link import TCLink\nfrom mininet.topo import Topo, SingleSwitchTopo\nimport subprocess\nfrom time import time, sleep\nimport psutil\nfrom mininet.cli import CLI\nfrom subprocess import Popen, PIPE, STDOUT\nfrom select import poll, POLLIN\nfrom controlador import RYU, POX\nfrom mininet.topolib import TreeTopo \n\nclass UnidadExperimental:\n\n    \"\"\"\n    Class that defines the experimental unit\n\n    ...\n\n    Attributes\n    ----------\n    topo : Topo\n        Topology to evaluate\n    controller : RYU or POX\n        Controller to use\n    atacante : str\n        Node of the topology to be used as the attacker\n    cliente : str\n        Node of the topology to be used as the client\n    victima : str\n        Node of the topology to be used as the victim\n\n    Methods\n    -------\n    setTopo(topo)\n        Assigns the topology to the experiment\n    setController(controller, appsController)\n        Assigns the controller (RYU or POX) and the application it will run in the experimental unit. \n    getTopo()\n        Gets the topology associated with the experimental unit\n    getController()\n        Gets the controller of the experimental unit\n    definirNodosClaves(A = None,C = None ,V = None)\n        Defines the topology nodes (h_i) that will act as attacker (A), client (C) and victim (V)\n    obtenerNodosClaves()\n        Gets the key nodes (attacker, client and victim)\n    \"\"\"\n\n\n    def __init__(self, topo = None, controller = None):\n        \"\"\"\n        Parameters\n        ----------\n        topo : Topo\n            Topology for the experimental unit\n        controller : RYU or POX\n            Controller for the experimental unit\n        \"\"\"\n        self.topo = topo\n        self.controller = controller\n        self.atacante = None\n        self.victima = None\n        self.cliente = None\n\n    def setTopo(self,topo):\n        \"\"\"\n        Assigns the topology to the experimental unit.\n\n        Parameters\n        ----------\n        topo: Topo\n            The topology to use\n\n        Returns\n        -------\n        None\n\n        Raises\n        ------\n        No error handling\n        \"\"\"\n        self.topo = topo\n\n    # This may need to be revised\n    def setController(self,controller,appsController):\n        \"\"\"\n        Assigns the controller to the experimental unit.\n\n        Parameters\n        ----------\n        controller: str\n            name of the controller to use\n        appsController:\n            application(s) for the controller to run\n        \n        Returns\n        -------\n        None\n\n        Raises\n        ------\n        No error handling\n        \"\"\"\n        if controller == 'ryu':\n            self.controller = RYU(name='c0',\n                                  ryuArgs =appsController)\n        else:\n            self.controller = POX(name = 'c0') # Improve ---------------\n\n    def getTopo(self):\n        \"\"\"\n        Returns the topology used in the experimental unit\n\n        Parameters\n        ----------\n        None\n\n        Returns\n        -------\n        Topo\n            Topology assigned to the experimental unit\n        \"\"\"\n        return self.topo\n\n    def getController(self):\n        \"\"\"\n        Returns the controller used in the experimental unit\n\n        Parameters\n        ----------\n        None\n\n        Returns\n        -------\n        RYU or POX controller\n            Controller used in the experimental unit\n        \"\"\"\n        return self.controller\n\n    def definirNodosClaves(self,A = None,C = None ,V = None):\n        \"\"\"\n        Assigns the attacker, client and victim nodes of the experimental unit.\n\n        Parameters\n        ----------\n        A: str\n            Name of the attacker node\n        C: str\n            Name of the client node\n        V: str\n            Name of the victim node\n        \n        Returns\n        -------\n        None\n\n        Raises\n        ------\n        No error handling\n        \"\"\"\n        self.atacante = A\n        self.cliente = C\n        self.victima = V\n\n    def obtenerNodosClaves(self):\n        \"\"\"\n        Returns the attacker, client and victim nodes of the experimental unit.\n\n        Parameters\n        ----------\n        None\n        \n        Returns\n        -------\n        The attacker, client and victim nodes configured in the experimental unit\n\n        Raises\n        ------\n        No error handling\n        \"\"\"\n        return [self.atacante, self.cliente, self.victima]\n\ndef test_ue1():\n    # Parameters of the experimental unit\n    setLogLevel(\"info\")\n    info(\"Setting up the experimental unit\\n\")\n    c_ryu = RYU('c0')\n    topo_tree = TreeTopo( depth=2, fanout=2 )\n    ue1 = UnidadExperimental(topo=topo_tree,controller=c_ryu)\n    ue1.definirNodosClaves('h1','h2','h3')\n    # Starting Mininet from the experimental unit\n    info(\"Configuring and initializing the network\\n\")\n    net = Mininet( topo=ue1.getTopo(), controller=ue1.getController())\n    net.start()\n    net.pingAll()\n    CLI( net )\n    net.stop()\n\ndef test_ue2():\n    # Parameters of the experimental unit\n    setLogLevel(\"info\")\n    info(\"Setting up the experimental unit\\n\")\n    ue2 = UnidadExperimental(topo=SingleSwitchTopo(k = 4),controller=POX('c0'))\n    ue2.definirNodosClaves('h1','h2','h3')\n    # Starting Mininet from the experimental unit\n    info(\"Configuring and initializing the network\\n\")\n    net = Mininet( topo=ue2.getTopo(), controller=ue2.getController())\n    net.start()\n    net.pingAll()\n    CLI( net )\n    net.stop()\n\n\nif __name__ == \"__main__\":\n    # test_ue1()\n    test_ue2()","sub_path":"marzo/9/fw/beta/unidadExperimental.py","file_name":"unidadExperimental.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"181070447","text":"import string\nimport random\ndef id_gen(size = 6, chars=string.ascii_uppercase + string.digits):\n\treturn ''.join(random.choice(chars) for _ in range(size))\n\nsamplesize = 100000\nf = open('random_grades' + str(samplesize) + '.txt', 'w')\nf.truncate()\n\nfor i in range(0, samplesize):\n\tname = id_gen(10)\n\tmidterm = int(random.uniform(40, 100))\n\tfinal = int(random.uniform(40, 100))\n\thw = [int(random.uniform(40, 100)) for a in range(4,int(random.uniform(6,10)))]\n\tf.write(name + ' ' + str(midterm) + ' ' + str(final))\n\tfor a in range(0, len(hw)):\n\t\tf.write(' ' + str(hw[a]))\n\tf.write('\\n')\n\nf.close()","sub_path":"src/data/generategrades.py","file_name":"generategrades.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"261412260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 27 17:10:56 2019\n\n@author: yzr\n\"\"\"\n\n\nimport re\nimport jieba\nimport pickle\nimport numpy as np\nimport jieba.posseg\nimport scipy.sparse as sp\nfrom itertools import chain\nfrom collections import Counter\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\n\n# strip the leading \\ufeff (BOM) by reading with encoding='utf-8-sig'\nwith open('.\\\\raw_data\\\\comment_all.txt', 'r', encoding='utf-8-sig') as f1:\n    comment = f1.read().split()\n\nwith open('.\\\\raw_data\\\\list_all.txt', encoding='utf-8') as f2:\n    data = f2.readlines()\n    list_all = []\n    for line in data:\n        a = re.split('[\\\\t\\\\n]', line)\n        del a[-1]\n        list_all.append(a)\n\nprint(comment[:100])\nprint(list_all[:100])\n\nprint(len(comment))\nprint(len(list_all))\n\n# drop the header row\ndel list_all[0]\nlist_name = []\nlist_typeid = 
[]\nlist_id = []\n\n# keep only name, type_id and id\nfor i in range(len(list_all)):\n    list_name.append(list_all[i][0])\n    list_typeid.append(list_all[i][1])\n    list_id.append(list_all[i][2])\n\n# name label for each POI\nPOI_name_dic = {}\nfor i in range(len(list_id)):\n    POI_name_dic[list_id[i]] = list_name[i]\n\nlist_file = open('POI_name_dic.pickle', 'wb')\npickle.dump(POI_name_dic, list_file)\nlist_file.close()\n\n# first use the ids to split the comment stream by user\nseg = []\nPOI = []\nfor i in range(len(comment)):\n    if comment[i] in list_id:\n        seg.append(i)\n        if comment[i] not in POI:\n            POI.append(comment[i])\n\nprint(len(POI))\n\n# join each user's comments with the corresponding id\ncomment_user = []\ncomment_id = []\n\nfor i in range(len(seg)):\n    comment_id.append(comment[seg[i]])\n    if i != len(seg) - 1:\n        if (i + 1) % 1000 == 0:\n            print('%d comments joined' % (i + 1))\n        start = seg[i] + 2\n        end = seg[i + 1]\n        user = []\n        for k in range(start, end):\n            user.append(comment[k])\n        user1 = ','.join(user)\n        comment_user.append(user1)\n    else:\n        start = seg[-1]\n        comment_user.append(comment[start + 2])\n\n# add new words to the jieba dictionary\nfor word in list_name:\n    jieba.add_word(word)\n\n# load the stop words\nwith open(r'D:\\科研\\knowledge graph\\实验\\NMF\\NMF1.0\\stop_words.txt') as f:\n    stopwords = f.read().split()\n\n# drop words whose POS tag is in ci\nci = ['c', 'e', 'y', 'u', 'r', 'q', 'p', 'o', 'm']\n\n\n# tokenize and tag the part of speech\ndef comment_user_cut(com):\n    user_cut = []\n    for i in range(len(com)):\n        if (i + 1) % 1000 == 0:\n            print('%d tokenized' % (i + 1))\n        line_cut = jieba.posseg.cut(com[i])\n        cixing = []\n        for t in line_cut:\n            cixing.append((t.word, t.flag))\n        list_w = []\n        for i in range(len(cixing)):\n            element = cixing[i]\n\n            # drop words with a POS tag in ci, words of length 1, and stop words\n            if element[1] not in ci:\n                if len(element[0]) > 1:\n                    if element[0] not in stopwords:\n                        list_w.append(element[0])\n        a = ' '.join(list_w)\n        user_cut.append(a)\n    return user_cut\n\n\nuser_cut = comment_user_cut(comment_user)\n\n# collect the index of every id, sorted in ascending order\nid_ind = []\nid_set = list(set(comment_id))\nfor i in id_set:\n    a = comment_id.index(i)\n    id_ind.append(a)\nid_ind.sort(reverse=False)\n\n# name label for each POI\nPOI_name = []\nfor i in id_ind:\n    POI_name.append(comment_id[i])\n\nlist_file = open('POI_name.pickle', 'wb')\npickle.dump(POI_name, list_file)\nlist_file.close()\n\n# concatenate the comments per POI\nPOI = []\nfor i in range(len(id_ind)):\n    if i != len(id_ind) - 1:\n        if (i + 1) % 50 == 0:\n            print('%d POIs joined' % (i + 1))\n        start = id_ind[i]\n        end = id_ind[i + 1]\n        poi = user_cut[start:end]\n        poi = ' '.join(poi)\n        POI.append(poi)\n    else:\n        start = id_ind[-1]\n        poi = user_cut[start:]\n        poi = ' '.join(poi)\n        POI.append(poi)\n\n\n# keep Chinese characters only\ndef is_uchar(uchar):\n    \"\"\"Check whether a unicode character is a Chinese character\"\"\"\n    if uchar >= u'\\\\u4e00' and uchar <= u'\\\\u9fa5' or uchar == u' ':\n        return True\n    return False\n\n\ndef is_ustr(in_str):\n    out_str = ''\n    for i in range(len(in_str)):\n        if is_uchar(in_str[i]):\n            out_str = out_str + in_str[i]\n    return out_str\n\n\nPOI_ch = []\nfor i in range(len(POI)):\n    if (i + 1) % 50 == 0:\n        print('%d filtered to Chinese' % (i + 1))\n    content = is_ustr(POI[i])\n    POI_ch.append(content)\n\n# save the POIs\nlist_file = open('POI_ch.pickle', 'wb')\npickle.dump(POI_ch, list_file)\nlist_file.close()\n\n## build the term-frequency matrix\n# vectorizer = CountVectorizer(min_df = 1)\n# POI_matrix = vectorizer.fit_transform(POI_cut)\n# POI_dictionary = vectorizer.get_feature_names()\n\n## build the TF-IDF matrix from the counts\n# transformer = TfidfTransformer()\n# POI_tfidf = transformer.fit_transform(POI_matrix)\n\n\n# build the TF-IDF matrix directly\ntfidf_vec = TfidfVectorizer(max_df=0.2, min_df=2)\nPOI_matrix = tfidf_vec.fit_transform(POI_ch)\nPOI_dictionary = tfidf_vec.get_feature_names()\n\nsp.save_npz('POI_matrix.npz', 
POI_matrix, True)\n\nlist_file = open('POI_dic.pickle', 'wb')\npickle.dump(POI_dictionary, list_file)\nlist_file.close()\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539896661","text":"class Numeros():\n @staticmethod\n def is_number(s,tipo=''):\n try:\n if tipo=='int':\n int(s)\n elif tipo=='float':\n float(s)\n return True\n except ValueError:\n return False\n\n @staticmethod\n def PedirEntero(mensaje=''):\n entero=''\n while not Numeros.is_number(entero,'int'):\n entero=input(mensaje)\n return int(entero)\n\n @staticmethod\n def PedirDecimal(mensaje=''):\n decimal=''\n while not Numeros.is_number(decimal,'float'):\n decimal=input(mensaje)\n return float(decimal)\n","sub_path":"Python/Practicas/Clases/FuncionesNumeros.py","file_name":"FuncionesNumeros.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"333099717","text":"import os\nimport cv2\nimport time \nimport shutil\nimport sys\nimport speech_recognition as sr \n\nif __name__ == \"__main__\":\n\n\n\tmode_type = sys.argv[1]\n\n\tif mode_type == 'speech':\n\t\tprint(\"Enter a paragraph:\")\n\t\tuser_input = input()\n\t\tdata = user_input.split(' ')\n\telif mode_type == 'voice':\n\t\tr = sr.Recognizer() \n\t\twith sr.Microphone(device_index = 2) as source: \n\t\t\tr.adjust_for_ambient_noise(source)\n\t\t\tprint(\"Speak:\") \n\t\t\taudio = r.listen(source) \n\t\t\n\t\ttry:\n\t\t\tprint(\"You said \" + r.recognize_google(audio))\n\t\t\tdata = r.recognize_google(audio).split(' ')\n\t\texcept sr.UnknownValueError:\n\t\t\tprint(\"Could not understand audio\")\n\t\texcept sr.RequestError as e:\n\t\t\tprint(\"Could not request results; {0}\".format(e))\n\n\n\tthresh = 20\n\timages = []\n\n\n\tcurrent_dir2 = os.path.dirname(os.path.realpath('__file__'))\n\tcurrent_dir = '/media/ubnutu/Windows/Users/hp/Documents/GitHub/AIhackathon/20 pic dataset'\n\n\n\tif os.path.isdir(current_dir2 + '/' + 'sentence'):\n\t\tshutil.rmtree(current_dir2 + '/' + 'sentence')\n\tos.mkdir(current_dir2 + '/' + 'sentence')\n\tcnt2 =0\n\tfor word in data:\n\t\tfor alphabet in word:\n\t\t\t#print(alphabet)\n\t\t\tfor folder_name in os.listdir(current_dir):\n\n\t\t\t\t#print(folder_name.lower() + alphabet) \n\t\t\t\tif folder_name.lower() == alphabet:\n\t\t\t\t \t#print(\"yes\")\n\t\t\t\t \t\n\t\t\t\t \tcnt = 0\n\t\t\t\t \tfor image_name in os.listdir(current_dir + '/' + folder_name):\n\n\t\t\t\t \t\tif cnt == 0:\n\t\t\t\t \t\t\timage_0 = image_name\n\t\t\t\t \t\tif cnt < thresh:\n\t\t\t\t \t\t\tframe = cv2.imread(current_dir + '/' + folder_name + '/' + image_name)\n\t\t\t\t \t\t\tcv2.imwrite(current_dir2 + '/sentence/' + image_name,frame)\n\t\t\t\t \t\t\timages.append(image_name)\n\t\t\t\t \t\t\t#print(image_name)\n\t\t\t\t \t\t\tcnt += 1\n\t\tfor image_name in os.listdir(current_dir + '/nothing'):\n\t\t\tframe = cv2.imread(current_dir + '/nothing/' + image_name)\n\t\t\tcv2.imwrite(current_dir2 + '/sentence/' + image_name,frame)\n\t\t\timages.append(image_name)\n\t\t\tcnt2 += 1\n\n\tframe = cv2.imread(current_dir2 + '/sentence/' + image_0)\n\tcv2.imshow('video',frame)\n\theight, width, channels = frame.shape\n\n\tfourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case\n\tout = cv2.VideoWriter('out', fourcc, 20.0, (width, height))\n\n\n\tfor image in images:\n\t image_path = current_dir2 + '/sentence/' 
+ image\n\t frame = cv2.imread(image_path)\n\t time.sleep(0.045)\n\t out.write(frame) # Write out frame to video\n\t print(image)\n\n\t cv2.imshow('video',frame)\n\t if (cv2.waitKey(1) & 0xFF) == ord('q'): # Hit `q` to exit\n\t break\n\n\t# Release everything if job is finished\n\tout.release()\n\tcv2.destroyAllWindows()\n\n\tprint(\"The output video is {}\".format('out'))","sub_path":"code/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541230772","text":"import deepthought3\nimport os\n\ndeepthought3.DATA_PATH = os.path.realpath(os.getcwd() + \"/../../..\")\n\nfrom deepthought3.datasets.openmiir.preprocessing.pipeline import Pipeline\n\nimport pandas as pd\nimport numpy as np\nimport os\n\nSETTINGS = dict(\n debug=False,\n mne_log_level=\"Info\",\n sfreq=64\n) # optional pipeline settings\n\n\ndef preprocess(subject):\n \"\"\"\n Preprocesses a single subject's eeg file.\n \"\"\"\n print(\"Preprocessing\", subject, \"!\")\n\n # create pipeline instance and load raw\n pipeline = Pipeline(subject, SETTINGS)\n pipeline.load_raw(interpolate_bad_channels=True)\n\n # check and merge trials\n pipeline.check_trial_events()\n pipeline.check_trial_audio_onset_merge(use_audio_onsets=True)\n pipeline.merge_trial_and_audio_onsets()\n\n # bandpass filtering\n pipeline.bandpass_filter()\n\n # beat & eog epoching\n pipeline.generate_beat_events() # Note: this includes cue-beats !!!\n # pipeline.beat_epochs.average().plot()\n pipeline.find_eog_events()\n\n # downsampling\n pipeline.downsample()\n pipeline.check_resampled_trial_events()\n\n # apply ica\n pipeline.load_ica(\"100p_64c\")\n pipeline.raw = pipeline.ica.apply(pipeline.raw, exclude=(pipeline.ica.exclude))\n if len(pipeline.raw.info[\"bads\"]) > 0:\n pipeline.raw.interpolate_bads() # interpolate bad channels afterwards as they are not processed by the ICA\n\n pipeline.raw.save(\"./output/{}-preprocessed.fif\".format(subject))\n del pipeline\n return\n\n\ndef main():\n if os.path.isdir(\"./output\"):\n os.system('rm -rf ./output')\n os.mkdir('./output')\n subjects = [\"P01\", \"P04\", \"P06\", \"P07\", \"P09\", \"P11\", \"P12\", \"P13\", \"P14\"]\n for subject in subjects:\n preprocess(subject)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"eeg/preprocessing/preprocess_openmiir.py","file_name":"preprocess_openmiir.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"545862887","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# d_base.py\n#\n# Copyright 2020 Andrew Taylor \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n#\n\n\"\"\"\nDatabase methods used for bip_btle program to set and get data.\nSQLITE3 Database schema\n\nCREATE TABLE `fitness` (\n `d_t`\tNUMERIC NOT NULL UNIQUE,\n `r_k`\tINTEGER NOT NULL,\n `r_i`\tINTEGER NOT NULL,\n `s_t`\tINTEGER NOT NULL,\n `h_r`\tINTEGER,\n PRIMARY KEY(`d_t`)\n);\nCREATE TABLE `parameters` (\n `id`\tINTEGER NOT NULL,\n `mac_add`\tNUMERIC,\n `battery`\tINTEGER,\n `soft_ver`\tNUMERIC,\n `hard_rev`\tNUMERIC,\n `ser_num`\tINTEGER,\n `u_time`\tNUMERIC NOT NULL,\n `hours`\tINTEGER NOT NULL,\n `s_image`\tBLOB NOT NULL,\n PRIMARY KEY(`id`)\n);\n\n\"\"\"\n\n# Library Imports\nimport sqlite3\nfrom sqlite3 import Error\nfrom datetime import datetime\nimport csv\nimport os\n\n# Variables\ntesting = False\napp_file_name = \"app.db\"\npwd = os.getcwd()\napp_db = pwd + '/' + app_file_name\ndata_file_name = \"fitness.db\"\ndata_db = pwd + '/' + data_file_name\n\n\ndef get_para(c_name):\n \"\"\"\n Connects to default Database and retreive application\n settings\n ARGS: c_name = Colomn to get data from\n RETURNS: row[0] = Id\n row[1] = appliaction db\n row[2] = table schema\n row[3] = MAC Address\n row[4] = battery percentage\n row[5] = software revision\n row[6] = hardware version\n row[7] = serial number\n row[8] = last update time\n row[9] = Hours Interval\n row[10] = Blank Image\n row[11] = App Icon\n \"\"\"\n result = []\n try:\n conn = connect_DB(app_db)\n sql = conn.cursor()\n if testing:\n print(\"Cursor Created\")\n sql_statement = \"SELECT {} FROM parameters WHERE id = 1\".format(c_name)\n sql.execute(sql_statement)\n row = sql.fetchone()\n result = row[0]\n except Error:\n print(\n \"Error with SQLite retrieving app data {}\".format(Error))\n finally:\n conn.close()\n return result\n\n\ndef set_para(c_name, n_value):\n \"\"\"\n Connects to default Database and updates application values\n ARGS: c_name = Column Name in DB\n n_value = value to place in colomn\n RETURNS None\n \"\"\"\n try:\n conn = connect_DB(app_db)\n sql = conn.cursor()\n if testing:\n print(\"Cursor Created\")\n sql_statement = (\n \"UPDATE parameters SET {} = (?) 
WHERE id = 1\".format(c_name))\n sql.execute(sql_statement, (n_value,))\n conn.commit()\n except Error:\n print(\"Error while working with SQLite \", Error)\n finally:\n conn.close()\n\n\ndef get_watch_data(hours):\n \"\"\"\n Method to return all data from Database from now back a number of hours\n ARGS: hours - Hours back to fetch data\n RETURNS: d_t - Unix time data\n r_k - Category data\n r_i - Intensity data\n s_t - Steps data\n h_r - Heart Rate data\n \"\"\"\n d_t, r_k, r_i, s_t, h_r = ([] for i in range(5))\n date_now = int(datetime.now().timestamp())\n date_before = date_now - (hours * 60 * 60)\n try:\n conn = connect_DB(data_db)\n sql = conn.cursor()\n if testing:\n print(\"Cursor Created\")\n sql_statement = \"\"\"SELECT * FROM fitness\n WHERE d_t > {}\"\"\".format(date_before)\n sql.execute(sql_statement)\n rows = sql.fetchall()\n for row in rows:\n d_t.append(row[0])\n r_k.append(row[1])\n r_i.append(row[2])\n s_t.append(row[3])\n h_r.append(row[4])\n except Error:\n print(\"Error while working with SQLite \", Error)\n finally:\n conn.close()\n return d_t, r_k, r_i, s_t, h_r\n\n\ndef store_watch_data(in_data):\n \"\"\"\n Method to store data in sqlite3 database\n ARGS: in_data = [(\n unix_time,\n category,\n intensity,\n steps,\n heart_rate)]\n RETURNS: None\n \"\"\"\n update_utime = None\n try:\n conn = connect_DB(data_db)\n sql = conn.cursor()\n if testing:\n print(\"Cursor Created\")\n sql_statement = \"\"\"INSERT or IGNORE into fitness\n (d_t, r_k, r_i, s_t, h_r) VALUES\n (?, ?, ?, ?, ?)\"\"\"\n sql.executemany(sql_statement, in_data)\n if testing:\n print(\"Data Inserted\")\n conn.commit()\n for item in in_data:\n update_utime = item[0]\n update_utime = int(update_utime) - 43200\n set_para('u_time', update_utime)\n except Error:\n print(\"Error while working with SQLite \", Error)\n finally:\n conn.close()\n\n\ndef get_all_data():\n \"\"\"\n Method to export all Database information to CSV File\n ARGS: None\n RETURNS: file_n - File Name Created under\n \"\"\"\n try:\n conn = connect_DB(data_db)\n sql = conn.cursor()\n if testing:\n print(\"Cursor Created\")\n file_n = \"export_{}.csv\".format(datetime.now())\n with open(file_n, \"w\") as f_w:\n csv_out = csv.writer(f_w)\n for row in sql.execute(\"SELECT * FROM fitness\"):\n csv_out.writerow(row)\n except Error:\n print(\"Error while working with SQLite \", Error)\n finally:\n conn.close()\n return file_n\n\n\ndef connect_DB(db_name):\n conn = sqlite3.connect(db_name)\n if testing:\n print(\"Connected to DB\")\n conn.text_factory = bytes\n return conn\n\n\nif testing:\n \"\"\"\n Testing of the individual methods\n \"\"\"\n set_para('mac_add', 'CA:0D:D7:A9:99:48')\n set_para(\"battery\", 100)\n set_para(\"soft_ver\", 100)\n set_para(\"hard_rev\", 100)\n set_para(\"ser_num\", 100)\n set_para(\"u_time\", 100)\n set_para(\"hours\", 100)\n print(\"*** Tested 'set_para' Method ***\")\n\n print(get_para('id'))\n print(get_para('mac_add'))\n print(get_para(\"battery\"))\n print(get_para(\"soft_ver\"))\n print(get_para(\"hard_rev\"))\n print(get_para(\"ser_num\"))\n print(get_para(\"u_time\"))\n print(get_para(\"hours\"))\n print(\"*** Tested 'get_para' Method ***\")\n\n store_watch_data([\n (1590408146, 1, 1, 1, 1),\n (1590408147, 2, 2, 2, 2),\n (1590408148, 3, 3, 3, 3)])\n print(\"*** Tested 'store_watch_data' Method ***\")\n\n d_t, r_k, r_i, s_t, h_r = get_watch_data(999)\n print(d_t, r_k, r_i, s_t, h_r)\n print(\"*** Tested 'get_watch_data' Method ***\")\n\n file_n = get_all_data()\n if os.path.isfile(file_n):\n print(\"*** Test 
'get_all_data' Method ***\")\n os.remove(file_n)\n","sub_path":"d_base.py","file_name":"d_base.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"521255704","text":"import numpy as np\nimport pandas as pd\nfrom statsmodels.tsa.stattools import adfuller\nimport argparse\nimport pickle\nimport os\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--path', type=str, required=True, help='The folder of raw yahoo dataset')\n parser.add_argument('-o', '--output', type=str, default='yahoo.pkl')\n args = parser.parse_args()\n\n all_train_data = {}\n all_train_labels = {}\n all_train_timestamps = {}\n all_test_data = {}\n all_test_labels = {}\n all_test_timestamps = {}\n\n for i in range(1, 368):\n with open(os.path.join(args.path, str(i)), 'rb') as f:\n ts = pickle.load(f)\n data = np.array(ts['value'])\n labels = np.array(ts['label'])\n timestamps = np.array(ts['timestamp'])\n k = 'yahoo_' + str(i)\n l = len(data) // 2\n\n n = 0\n while adfuller(data[:l], 1)[1] > 0.05 or adfuller(data[:l])[1] > 0.05:\n data = np.diff(data)\n labels = labels[1:]\n timestamps = timestamps[1:]\n n += 1\n l -= n\n\n all_train_data[k] = data[:l]\n all_test_data[k] = data[l:]\n all_train_labels[k] = labels[:l]\n all_test_labels[k] = labels[l:]\n all_train_timestamps[k] = timestamps[:l]\n all_test_timestamps[k] = timestamps[l:]\n\n mean, std = all_train_data[k].mean(), all_train_data[k].std()\n all_train_data[k] = (all_train_data[k] - mean) / std\n all_test_data[k] = (all_test_data[k] - mean) / std\n\n with open(args.output, 'wb') as f:\n pickle.dump({\n 'all_train_data': all_train_data,\n 'all_train_labels': all_train_labels,\n 'all_train_timestamps': all_train_timestamps,\n 'all_test_data': all_test_data,\n 'all_test_labels': all_test_labels,\n 'all_test_timestamps': all_test_timestamps,\n 'delay': 3\n }, f)\n ","sub_path":"datasets/preprocess_yahoo.py","file_name":"preprocess_yahoo.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"498406425","text":"#!/usr/bin/python3\nimport requests\n\n\ndef top_ten(subreddit):\n \"\"\"fetches top ten info from reddit API\"\"\"\n link = 'http://www.reddit.com/r/{}/hot.json'.format(subreddit)\n red = requests.get(link, headers={'User-Agent': 'tope628'}).json()\n try:\n subs = red.get('data').get('children')\n for x in range(0, 10):\n print(subs[x]['data'].get('title'))\n except:\n print(None)\n","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"250592740","text":"#==============================================================================#\n# Configuration Settings\n#==============================================================================#\n\n# General\nAPP_MODULE_DIRECTORY = 'cs191'\nDEBUG = True\n\n# Database\nDATABASE_NAME = \"test.db\"\nSQLALCHEMY_DATABASE_URI = \"sqlite:///\" + DATABASE_NAME\n\n# Uploading\nUPLOAD_FOLDER = 'static/uploads'\nALLOWED_EXTENSIONS = set(\n ['pdf', 'mp3', 'wav', 'mp4', 'jpg', 'jpeg', 'png', 'gif']\n)\nMAX_USER_IMAGE_SIZE = (200,200)\n\n# Security\nSECRET_KEY = 
\"hereismysecretkey\"\n","sub_path":"app_settings.py","file_name":"app_settings.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437738267","text":"import datetime as dt\n\nimport pytest\nimport pytz\n\nimport stix2\nfrom stix2.exceptions import InvalidValueError\n\nfrom .constants import (\n CAMPAIGN_ID, IDENTITY_ID, INDICATOR_ID, INDICATOR_KWARGS, RELATIONSHIP_ID,\n REPORT_ID,\n)\n\nEXPECTED = \"\"\"{\n \"type\": \"report\",\n \"spec_version\": \"2.1\",\n \"id\": \"report--84e4d88f-44ea-4bcd-bbf3-b2c1c320bcb3\",\n \"created_by_ref\": \"identity--311b2d2d-f010-4473-83ec-1edf84858f4c\",\n \"created\": \"2015-12-21T19:59:11.000Z\",\n \"modified\": \"2015-12-21T19:59:11.000Z\",\n \"name\": \"The Black Vine Cyberespionage Group\",\n \"description\": \"A simple report with an indicator and campaign\",\n \"report_types\": [\n \"campaign\"\n ],\n \"published\": \"2016-01-20T17:00:00Z\",\n \"object_refs\": [\n \"indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7\",\n \"campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f\",\n \"relationship--df7c87eb-75d2-4948-af81-9d49d246f301\"\n ]\n}\"\"\"\n\n\ndef test_report_example():\n report = stix2.v21.Report(\n id=REPORT_ID,\n created_by_ref=IDENTITY_ID,\n created=\"2015-12-21T19:59:11.000Z\",\n modified=\"2015-12-21T19:59:11.000Z\",\n name=\"The Black Vine Cyberespionage Group\",\n description=\"A simple report with an indicator and campaign\",\n published=\"2016-01-20T17:00:00Z\",\n report_types=[\"campaign\"],\n object_refs=[\n INDICATOR_ID,\n CAMPAIGN_ID,\n RELATIONSHIP_ID,\n ],\n )\n\n assert report.serialize(pretty=True) == EXPECTED\n\n\ndef test_report_example_objects_in_object_refs():\n report = stix2.v21.Report(\n id=REPORT_ID,\n created_by_ref=IDENTITY_ID,\n created=\"2015-12-21T19:59:11.000Z\",\n modified=\"2015-12-21T19:59:11.000Z\",\n name=\"The Black Vine Cyberespionage Group\",\n description=\"A simple report with an indicator and campaign\",\n published=\"2016-01-20T17:00:00Z\",\n report_types=[\"campaign\"],\n object_refs=[\n stix2.v21.Indicator(id=INDICATOR_ID, **INDICATOR_KWARGS),\n CAMPAIGN_ID,\n RELATIONSHIP_ID,\n ],\n )\n\n assert report.serialize(pretty=True) == EXPECTED\n\n\ndef test_report_example_objects_in_object_refs_with_bad_id():\n with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:\n stix2.v21.Report(\n id=REPORT_ID,\n created_by_ref=IDENTITY_ID,\n created=\"2015-12-21T19:59:11.000Z\",\n modified=\"2015-12-21T19:59:11.000Z\",\n name=\"The Black Vine Cyberespionage Group\",\n description=\"A simple report with an indicator and campaign\",\n published=\"2016-01-20T17:00:00Z\",\n report_types=[\"campaign\"],\n object_refs=[\n stix2.v21.Indicator(id=INDICATOR_ID, **INDICATOR_KWARGS),\n \"campaign-83422c77-904c-4dc1-aff5-5c38f3a2c55c\", # the \"bad\" id, missing a \"-\"\n RELATIONSHIP_ID,\n ],\n )\n\n assert excinfo.value.cls == stix2.v21.Report\n assert excinfo.value.prop_name == \"object_refs\"\n\n\n@pytest.mark.parametrize(\n \"data\", [\n EXPECTED,\n {\n \"created\": \"2015-12-21T19:59:11.000Z\",\n \"created_by_ref\": IDENTITY_ID,\n \"description\": \"A simple report with an indicator and campaign\",\n \"id\": REPORT_ID,\n \"report_types\": [\n \"campaign\",\n ],\n \"modified\": \"2015-12-21T19:59:11.000Z\",\n \"name\": \"The Black Vine Cyberespionage Group\",\n \"object_refs\": [\n INDICATOR_ID,\n CAMPAIGN_ID,\n RELATIONSHIP_ID,\n ],\n \"published\": \"2016-01-20T17:00:00Z\",\n \"spec_version\": \"2.1\",\n \"type\": 
\"report\",\n },\n ],\n)\ndef test_parse_report(data):\n rept = stix2.parse(data, version=\"2.1\")\n\n assert rept.type == 'report'\n assert rept.spec_version == '2.1'\n assert rept.id == REPORT_ID\n assert rept.created == dt.datetime(2015, 12, 21, 19, 59, 11, tzinfo=pytz.utc)\n assert rept.modified == dt.datetime(2015, 12, 21, 19, 59, 11, tzinfo=pytz.utc)\n assert rept.created_by_ref == IDENTITY_ID\n assert rept.object_refs == [\n INDICATOR_ID,\n CAMPAIGN_ID,\n RELATIONSHIP_ID,\n ]\n assert rept.description == \"A simple report with an indicator and campaign\"\n assert rept.report_types == [\"campaign\"]\n assert rept.name == \"The Black Vine Cyberespionage Group\"\n\n\ndef test_report_on_custom():\n with pytest.raises(InvalidValueError):\n stix2.v21.Report(\n name=\"my report\",\n published=\"2016-01-20T17:00:00Z\",\n object_refs=[\n \"indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7\",\n \"some-type--2672975a-ce1e-4473-a1c6-0d79868930c7\",\n ],\n )\n\n report = stix2.v21.Report(\n name=\"my report\",\n published=\"2016-01-20T17:00:00Z\",\n object_refs=[\n \"indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7\",\n \"some-type--2672975a-ce1e-4473-a1c6-0d79868930c7\",\n ],\n allow_custom=True,\n )\n\n assert \"some-type--2672975a-ce1e-4473-a1c6-0d79868930c7\" \\\n in report.object_refs\n","sub_path":"stix2/test/v21/test_report.py","file_name":"test_report.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35348083","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nimport logging\nimport base64\nimport json\nfrom odoo.exceptions import ValidationError, RedirectWarning, UserError\n\n_logger = logging.getLogger(__name__)\n\nclass almouggar_pos_cache (models.Model):\n _name = 'pos.cache'\n _inherit = 'pos.cache'\n\n time_format = \"%Y-%m-%d %H:%M:%S\"\n\n @api.model\n def refresh_all_caches(self):\n self.env['pos.cache'].search([]).update_cache()\n\n @api.one\n def update_cache(self):\n # We decode the existing cache\n decoded_cache = json.loads(base64.decodestring(self.cache).decode('utf-8'))\n decoded_cache = self._remove_unavailable_products_from_decoded_cache(decoded_cache)\n decoded_cache = self._add_or_update_products_in_decoded_cache(decoded_cache)\n datas = {\n 'cache': base64.encodestring(json.dumps(decoded_cache).encode('utf-8')),\n }\n self.write(datas)\n\n def _get_products_products_based_on_products_template(self, domain):\n products_template = self.env['product.template'].sudo(self.compute_user_id.id).search(domain)\n ids = list(map(lambda product_tml: product_tml.id,\n products_template))\n return self.env['product.product'].sudo(self.compute_user_id.id).search([('product_tmpl_id', 'in', ids)])\n\n def _remove_unavailable_products_from_decoded_cache(self, decoded_cache):\n products_to_remove = self._get_products_products_based_on_products_template([\n ('write_date', '>', self.write_date.strftime(self.time_format)),\n '!', ('available_in_pos', '=', 'True')\n ])\n for prod in products_to_remove:\n index = next((index for (index, cache_elem) in enumerate(decoded_cache) if cache_elem['id'] == prod.id),\n None)\n if index:\n decoded_cache.pop(index)\n return decoded_cache\n\n def _add_or_update_products_in_decoded_cache(self, decoded_cache):\n products = self._get_products_products_based_on_products_template(\n [['write_date', '>', self.write_date.strftime(self.time_format)]] +\n self.get_product_domain()\n )\n # prod_ctx.sudo(self.compute_user_id.id)\n prod_ctx = 
products.with_context(pricelist=self.config_id.pricelist_id.id, display_default_code=False,\n lang=self.compute_user_id.lang)\n prod_ctx.sudo(self.compute_user_id.id)\n for prod in prod_ctx:\n index = next((index for (index, cache_elem) in enumerate(decoded_cache) if cache_elem['id'] == prod.id),\n None)\n if index:\n decoded_cache[index] = prod.read(self.get_product_fields())[0]\n else:\n decoded_cache = decoded_cache + prod.read(self.get_product_fields())\n return decoded_cache\n","sub_path":"models/pos_cache.py","file_name":"pos_cache.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}