diff --git "a/3876.jsonl" "b/3876.jsonl" new file mode 100644--- /dev/null +++ "b/3876.jsonl" @@ -0,0 +1,1608 @@ +{"seq_id":"41692645734","text":"import sys\ninput=sys.stdin.readline\n\n# https://www.acmicpc.net/problem/6068\n\nN=int(input())\n\nworks=[tuple(map(int,input().split())) for _ in range(N)]\n# 늦게 끝내도 되는 일 먼저 확인하려고 내림차순 정렬\nworks.sort(key=lambda x:(x[1],x[0]),reverse=True)\n# works = [(5, 20), (1, 16), (8, 14), (3, 5)]\n\n# ans = 15\nans=works[0][1]-works[0][0]\n\nfor i in range(1,N):\n # 일어나야할 시간보다 끝내야 할 일의 데드라인이 더 이르면\n if (ans > works[i][1]):\n # 일어나야할 시간 = 데드라인 - 걸리는 시간\n ans = works[i][1] - works[i][0]\n # 일어나야할 시간이 끝내야할 일의 데드라인보다 이르면\n else:\n # 일어나야할 시간 -= 그 일을 마치는데 걸리는 시간\n ans -= works[i][0]\n # 답이 음수면 끝마칠 수 없음\n if (ans < 0):\n ans=-1\n break\n\nprint(ans)","repo_name":"AlmSmartDoctor/study-2023-03-algorithm-problem-solving","sub_path":"day04 Greedy/assignments/이재혁/6068.py","file_name":"6068.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"9340214887","text":"\nimport os.path\ndebug = os.path.exists(\"debug\")\n\nimport logging\nif debug:\n\tlogging.basicConfig(level=logging.DEBUG)\nelse:\n\tlogging.basicConfig(filename='app.log', filemode='w', level=logging.ERROR)\n\nimport photon\nif debug:\n\tbot = photon.Bot(\"305643264:AAGwALg3QDiH2OrNzqehgoPdeXwpIqY416c\")\nelse:\n\tbot = photon.Bot(\"1013987821:AAF8bmXQmkvDl2B4Hx5dxPzqmV1Q-H9xsew\")\n\n# import yaml\n# from photon.object_dict import objectify\n\n# languages = []\n# for x in ['language_uz.yaml']:\n# \tlanguages.append(objectify(yaml.load(open(x).read(), Loader=yaml.Loader)))\n","repo_name":"matrix1220/yetsum","sub_path":"sotuvchicom_bot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17136515367","text":"import os\nimport io\nimport shutil\nimport requests\n\nfrom github import Github\nfrom dulwich import porcelain\nfrom acscore.counter import Counter\nfrom acscore.analyzer import Analyzer\nfrom unidiff import PatchSet\nfrom tenacity import retry, stop_after_attempt, wait_exponential, RetryError\nfrom subprocess import Popen, PIPE, STDOUT\nfrom django.conf import settings\n\n\nSETTINGS = {\n 'name': 'CheckiePy',\n 'url': 'http://checkiepy.com',\n 'attempt': 3,\n 'multiplier': 2,\n 'max': 10,\n 'apply': '{0}/bash/apply_patch.sh'.format(settings.BASE_DIR),\n}\n\n\nclass Logger:\n def info(self, text):\n print(text)\n\n\nclass Requester:\n def __init__(self, access_token, bot_access_token=None):\n self.github = Github(access_token)\n if bot_access_token:\n self.bot = Github(bot_access_token)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def get_repositories(self, username):\n return self.github.get_user(username).get_repos()\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def create_pull_request_hook(self, username, repository_name, callback_url):\n self.github.get_user(username).get_repo(repository_name)\\\n .create_hook('web', {'url': callback_url, 'content_type': 'json'}, ['pull_request'], True)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def delete_pull_request_hook(self, username, 
repository_name, callback_url):\n hooks = self.github.get_user(username).get_repo(repository_name).get_hooks()\n for hook in hooks:\n if hook.config['url'] == callback_url:\n hook.delete()\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def clone_repository(self, clone_url, save_path, bytes_io):\n if os.path.exists(save_path):\n shutil.rmtree(save_path)\n porcelain.clone(clone_url, save_path, errstream=bytes_io)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def get_file(self, file_url):\n return requests.get(file_url)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def get_pull_request(self, username, repository_name, pull_request_number):\n return self.github.get_user(username).get_repo(repository_name).get_pull(pull_request_number)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def get_latest_commit_from_pull_request(self, pull_request):\n return pull_request.get_commits().reversed[0]\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def create_status(self, commit, state, target_url, description, context):\n commit.create_status(state, target_url, description, context)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def create_issue_comment(self, pull_request, text):\n pull_request.create_issue_comment(text)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def create_comment(self, pull_request, text, commit, file, line):\n pull_request.create_comment(text, commit, file, line)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def create_issue_comment_bot(self, username, repository_name, pull_request_number, text):\n self.bot.get_user(username).get_repo(repository_name).get_pull(pull_request_number).create_issue_comment(text)\n\n @retry(stop=stop_after_attempt(SETTINGS['attempt']),\n wait=wait_exponential(multiplier=SETTINGS['multiplier'], max=SETTINGS['max']))\n def create_comment_bot(self, username, repository_name, pull_request_number, text, commit, file, line):\n self.bot.get_user(username).get_repo(repository_name).get_pull(pull_request_number).create_comment(text, commit,\n file, line)\n\n\nclass Reviewer:\n def __init__(self, requester, logger):\n self.requester = requester\n self.logger = logger\n\n def run_command(self, command):\n p = Popen(command, stdout=PIPE, stderr=STDOUT)\n result = ''\n for line in p.stdout.readlines():\n result += line.decode() + '\\n'\n return result\n\n def get_repositories(self, username):\n self.logger.info(\"Obtaining of {0}'s repositories was started\".format(username))\n repositories = self.requester.get_repositories(username)\n self.logger.info('Repositories was obtained')\n return repositories\n\n def create_pull_request_hook(self, username, repository_name, callback_url):\n self.logger.info('Setting of pull request web hook was started')\n self.requester.create_pull_request_hook(username, repository_name, callback_url)\n 
self.logger.info('Pull request web hook was successfully set')\n\n def delete_pull_request_hook(self, username, repository_name, callback_url):\n self.logger.info('Deleting of pull request web hook for repository {0}/{1} was started'.format(username,\n repository_name))\n self.requester.delete_pull_request_hook(username, repository_name, callback_url)\n self.logger.info('Pull request web hook for repository {0}/{1} was successfully deleted'.format(username,\n repository_name))\n\n def clone_repository(self, clone_url, save_path):\n self.logger.info('Repository cloning from {0} to {1} was started'.format(clone_url, save_path))\n if os.path.exists(save_path):\n self.logger.info('Directory {0} already exists. It will be deleted'.format(save_path))\n shutil.rmtree(save_path)\n bytes_io = io.BytesIO()\n self.requester.clone_repository(clone_url, save_path, bytes_io)\n self.logger.info(bytes_io.getvalue().decode())\n self.logger.info('Repository was cloned successfully')\n\n def get_file(self, file_url, save_path):\n self.logger.info('File downloading from {0} to {1} was started'.format(file_url, save_path))\n file = self.requester.get_file(file_url)\n self.logger.info('File {0} was downloaded successfully'.format(file_url))\n with open(save_path, 'w') as f:\n f.write(file.content.decode())\n self.logger.info('File {0} was saved successfully'.format(save_path))\n\n def apply_patch(self, repository_path, patch_path):\n self.logger.info('Applying of the patch was started')\n message = self.run_command([SETTINGS['apply'], repository_path, patch_path])\n self.logger.info('{0}\\nApplying of the patch was completed'.format(message))\n\n def get_pull_request_and_latest_commit(self, username, repository_name, pull_request_number):\n self.logger.info('Obtaining of pull request with number {0} from repository {1}/{2} was started'.format(\n pull_request_number, username, repository_name))\n pull_request = self.requester.get_pull_request(username, repository_name, pull_request_number)\n self.logger.info('Pull request with name {0} was obtained\\nObtaining of latest commit was started'\n .format(pull_request.title))\n commit = self.requester.get_latest_commit_from_pull_request(pull_request)\n self.logger.info('Commit with sha {0} was obtained'.format(commit.sha))\n return pull_request, commit\n\n def review_pull_request(self, metrics, repository_path, diff_path, commit, username, repository_name,\n pull_request_number):\n self.logger.info('Review was started')\n self.requester.create_status(commit, 'pending', SETTINGS['url'], 'Review was started', SETTINGS['name'])\n analyzer = Analyzer(metrics)\n counter = Counter()\n with open(os.path.join(repository_path, diff_path), 'r') as f:\n patch_set = PatchSet(f)\n sent_inspection_count = 0\n sent_inspections = {}\n for patch in patch_set:\n file_metrics = counter.metrics_for_file(os.path.join(repository_path, patch.path), verbose=True)\n self.logger.info('Here are metrics for file {0}: {1}'.format(patch.path, file_metrics))\n inspections = analyzer.inspect(file_metrics)\n first_line_in_diff = patch[0][0].diff_line_no\n for hunk in patch:\n self.logger.info('Here are inspections for file {0}: {1}'.format(patch.path, inspections))\n for metric_name, inspection_value in inspections.items():\n for inspection, value in inspection_value.items():\n self.logger.info('Inspection {0} has value {1}'.format(inspection, value))\n if inspection in sent_inspections:\n continue\n elif 'lines' not in value:\n sent_inspections[inspection] = True\n self.logger.info('Issuing comment 
{0} for file {1}'.format(value['message'], patch.path))\n self.requester.create_issue_comment_bot(username, repository_name, pull_request_number,\n '{0}:\\n{1}'.format(patch.path, value['message']))\n sent_inspection_count += 1\n else:\n for line in value['lines']:\n if hunk.target_start <= line <= hunk.target_start + hunk.target_length:\n # 3 is offset for unidiff hunk header\n hunk_line = line - hunk.target_start + 3\n try:\n line_object = hunk[hunk_line]\n target_line = line_object.diff_line_no - first_line_in_diff\n self.logger.info('Line with number {0} and value {1} was calculated\\n'\n 'File {2} was commented on line {3} with message {4}\\n'\n 'Hunk is from line {5} to line {6}'\n .format(line_object.diff_line_no, line_object.value, patch.path,\n line, value['message'], hunk.target_start,\n hunk.target_start + hunk.target_length))\n self.requester.create_comment_bot(username, repository_name, pull_request_number,\n value['message'], commit, patch.path, target_line)\n sent_inspection_count += 1\n except Exception as e:\n self.logger.info('Hunk processing failed with exception {0} for hunk line {1} '\n '(source length {2}, target length {3})'\n .format(e, hunk_line, len(hunk.source), len(hunk.target)))\n if sent_inspection_count == 0:\n self.requester.create_status(commit, 'success', SETTINGS['url'], 'Review was completed. No issues found',\n SETTINGS['name'])\n else:\n self.requester.create_status(commit, 'error', SETTINGS['url'], 'Review was completed. Found some issues',\n SETTINGS['name'])\n self.logger.info('Review was completed. {0} issues found'.format(sent_inspection_count))\n\n def path_basename(self, path):\n return os.path.basename(os.path.normpath(path))\n\n def handle_hook(self, username, pull_request_number, metrics, base_path, clone_url, patch_url, diff_url):\n self.logger.info('Handling of hook for repository {0} was started'.format(clone_url))\n repository_name = os.path.splitext(self.path_basename(clone_url))[0]\n repository_path = os.path.join(base_path, repository_name)\n self.clone_repository(clone_url, repository_path)\n patch_path = os.path.join(repository_path, self.path_basename(patch_url))\n self.get_file(patch_url, patch_path)\n self.apply_patch(repository_path, patch_path)\n diff_path = os.path.join(repository_path, self.path_basename(diff_url))\n self.get_file(diff_url, diff_path)\n pull_request, commit = self.get_pull_request_and_latest_commit(username, repository_name, pull_request_number)\n self.review_pull_request(metrics, repository_path, diff_path, commit, username, repository_name,\n pull_request_number)\n self.logger.info('Hook for repository {0} was successfully processed'.format(clone_url))\n","repo_name":"CheckiePy/CheckiePyBackend","sub_path":"acs/repository/reviewer.py","file_name":"reviewer.py","file_ext":"py","file_size_in_byte":13845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38590543121","text":"################################################################\n\n# AUDIO & AV SURPRISAL PROJECT\n\n# This script combines EEG data (csv files, exported from ERP, audio, video and video replication) with: \n# 1. Design matrix: e.g. surprisal info per word\n# 2. Channel coordinate: x, y, z coordinates of each electrode, measuring topographic distribution of electrodes \n# 3. Baseline: EEG amplitude -100 to 0 before words, as the baseline correction was not performed during preprocessing (see Frank et al., 2015; Alday et al, 2017; Zhang et al., 2021)\n# 4. 
Artifact rejection label: artifact rejection is completed in ERPLAB, but somehow the information is not recorded correctly when exporting. Therefore, the eventlists are exported after artifact rejection and the rejection label per word is extracted and combined with the data.\n# 5. Sentence order: sentence order is extracted from the eventlist as a control variable in the analysis\n# Note that the pipeline did not include 4 and 5 for the video replication data. The error in 4 is not present for the replication data, and we didn't use sentence order (5) in the final analysis. \n\n################################################################\n\n# Importing modules and setting display\nimport pandas as pd\n\npd.set_option('display.width', 400)\npd.set_option('display.max_columns', 40)\npd.set_option('display.max_rows', 500)\npd.options.mode.chained_assignment = None  # default='warn'\n\n# Setting parameters based on audio/video data, change mode below according to need\n#mode = 'audio'\nmode = 'video'\n#mode = 'video_replication'\npath = '/Users/yezhang/Library/CloudStorage/OneDrive-UniversityCollegeLondon/surprisal_audio/data_' + mode + '/'\n\nif mode == 'audio':\n    part_num = 25\n    skipnum = 4733\n    # Reading design matrix according to data slice\n    info = pd.read_csv('/Users/yezhang/OneDrive - University College London/surprisal_audio/stimuli/word_merged_audio_seq.csv')\n    info['bin_id'] = info['bin_id_audio']\nelif mode == 'video':\n    part_num = 30\n    skipnum = 2674\n    info = pd.read_csv('/Users/yezhang/OneDrive - University College London/surprisal_audio/stimuli/word_merged_audio_seq.csv')\n    info['bin_id'] = info['bin_id_video']\nelif mode == 'video_replication':\n    info = pd.read_csv('/Users/yezhang/Library/CloudStorage/OneDrive-UniversityCollegeLondon/surprisal_audio/stimuli/word_merged_replication_surprisal.csv')\nelse:\n    print('Error in mode!')\n\n# Reading electrode positions\nelectrode = pd.read_csv('/Users/yezhang/OneDrive - University College London/surprisal_audio/stimuli/channel_coordinate.csv')\nelectrode.columns = ['electrode', 'x', 'y', 'z']\nelectrode['electrode'] = electrode['electrode'].str.strip(' ')\n\n# Loading data & baseline\nbaseline = pd.read_csv(path + 'lmer/baseline.txt', sep=\"\\t\",\n                       usecols=[' value', ' chlabel', ' bini', 'ERPset'])\nbaseline.columns = ['baseline', 'electrode', 'bin_id', 'part_id']\n\ndata = pd.read_csv(path + 'lmer/300-500.txt', sep=\"\\t\",\n                   usecols=[' value', ' chlabel', ' bini', 'ERPset'])\ndata.columns = ['ERP', 'electrode', 'bin_id', 'part_id']\n\n# Merging data with design matrix & baseline\ndata_baselined = pd.merge(data, baseline, on=['electrode', 'bin_id', 'part_id'])\ndata_baselined['electrode'] = data_baselined['electrode'].str.strip(' ')\ndata_baselined_removed = data_baselined[(data_baselined['ERP'] != 0) | (data_baselined['baseline'] != 0)]\n\ndata_baselined_removed_info = pd.merge(data_baselined_removed, info, on=['bin_id'])\n\ndata_baselined_removed_info_location = pd.merge(data_baselined_removed_info, electrode, on=['electrode'])\n\nif mode == 'audio' or mode == 'video':\n    # Extracting info from eventlist (AR, sentence sequence)\n    elist_full = []\n    info_filtered = info[info['bin_id'].notnull()]  # there are empty entries in video mode, filtering them out to make sure there is no bug in the mapping\n    sentence_dic = dict(zip(info_filtered.bin_id, info_filtered.sentence_id))\n    for i in range(1, part_num+1):\n        part_id = 'part' + str(i)\n        elist_slice = pd.read_csv(path + 'preprocessing/eventlist/export_ar/eventlist_export_AR_' + str(part_id) + '.txt',\n                                  
sep=\"\\t\", skiprows=skipnum, header=None, usecols=[2, 7])\n elist_slice.columns = ['bin_id', 'ar']\n elist_slice['part_id'] = part_id\n\n # Somehow there's error when syncing AR info with ERP in audio data. Therefore do it manually here. Remove if not needed\n elist_slice['ar_good'] = elist_slice['ar'].apply(lambda x: True if x == ' 00000000 00000000' else False)\n\n elist_slice['sentence_id'] = elist_slice['bin_id'].map(sentence_dic)\n sentence_order = elist_slice.groupby('sentence_id', sort=False).count().reset_index()['sentence_id'].reset_index()\n sentence_order.columns = ['sentence_order', 'sentence_id']\n elist_slice_order = pd.merge(elist_slice, sentence_order, on = 'sentence_id')\n\n elist_full.append(elist_slice_order)\n elist_info = pd.concat(elist_full)\n\n # Merging data with AR lable and sentence sequence\n data_baselined_removed_info_location_elist = pd.merge(data_baselined_removed_info_location, elist_info, on=['part_id', 'bin_id'])\n data_baselined_removed_info_location_elist.drop(columns=\n ['meaningful_gesture_prev', 'beat_gesture_prev', 'gesture_corres_prev', 'mouth_dist_prev', 'ar', 'sentence_id_y'], inplace=True\n )\n\n # Saving final file\n data_baselined_removed_info_location_elist.to_csv(path + 'lmer/300-500_info.csv')\n \nelif mode == 'video_replication':\n data_baselined_removed_info_location['ar_good'] = True # Replication data didn't have the artifact syncing error, so all data are good\n # sentence order was not used in the model, so the part is skipped for the replication data.\n # Saving final file\n data_baselined_removed_info_location.to_csv(path + 'lmer/300-500_info.csv')","repo_name":"pmadhyastha/multimodal_comprehension","sub_path":"code/preprocessing_variables.py","file_name":"preprocessing_variables.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71265508746","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib.ticker import AutoMinorLocator\nimport matplotlib.patches as mpatches\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\nfilepath = \"C:/Users/hanna/Documents/GitHub/Ar_dissolution_HvdM/Experiment_2/RADI\"\n\n#%% \n\n#Append data of average pH's per 'set'\nA_set = []\nfor i in range(0, 7):\n A_set.append(pd.read_excel(filepath + \"/RADI_results.xlsx\",\n header=0, sheet_name='Cuvette A'))\n\nB_set = []\nfor i in range(0, 7):\n B_set.append(pd.read_excel(filepath + \"/RADI_results.xlsx\",\n header=0, sheet_name='Cuvette B'))\nfor i in range(0, 7):\n A_set[i]['Depth (cm)'] = A_set[i]['Depth (cm)']*-1\n B_set[i]['Depth (cm)'] = B_set[i]['Depth (cm)']*-1\n\n#%% Creating plot\nfig, ax = plt.subplots(3, 2,\n dpi=300, \n figsize=(5, 14.5))\n\n#List colours for plot\ncolors = ['xkcd:bright lavender',\n 'xkcd:soft blue',\n 'xkcd:topaz',\n 'xkcd:lightish green',\n 'xkcd:yellowish',\n 'xkcd:tangerine',\n 'xkcd:watermelon']\n\n#List labels for plot\nlabels = ['1hr',\n '2hrs',\n '4hrs',\n '7hrs',\n '25hrs',\n '50hrs',\n '72hrs']\n\ntimes = [1, 2, 4, 7, 25, 50, 70]\n\n#Plotting each profile\nfor i in range(0, 7):\n #CUVA\n ax[0,0].plot('pH.{}'.format(times[i]), 'Depth (cm)', data = A_set[i], \n c = colors[i], label=labels[ i], alpha=0.9, lw=1.5, zorder=10)\n ax[1,0].plot('Omega_C.{}'.format(times[i]), 'Depth (cm)', data = A_set[i], \n c = colors[i], label=labels[i], alpha=0.9, lw=1.5, zorder=10) \n ax[2,0].plot('Calcite_prod.{}'.format(times[i]), 'Depth (cm)', data = 
A_set[i],\n c=colors[i], label=labels[i], alpha=0.9, lw=1.5, zorder=10)\n ax[2,0].plot('Ar_prod.{}'.format(times[i]), 'Depth (cm)', data = A_set[i],\n c=colors[i], label=labels[i], alpha=0.9, lw=1.5, linestyle=':', zorder=10)\n #CUVB\n ax[0,1].plot('pH.{}'.format(times[i]), 'Depth (cm)', data = B_set[i], \n c = colors[i], label=labels[i], alpha=0.9, lw=1.5, zorder=10)\n ax[1,1].plot('Omega_C.{}'.format(times[i]), 'Depth (cm)', data = B_set[i], \n c = colors[i], label=labels[i], alpha=0.9, lw=1.5, zorder=10)\n ax[2,1].plot('Calcite_prod.{}'.format(times[i]), 'Depth (cm)', data = B_set[i],\n c=colors[i], label=labels[i], alpha=0.9, lw=1.5, zorder=10)\n ax[2,1].plot('Ar_prod.{}'.format(times[i]), 'Depth (cm)', data = B_set[i],\n c=colors[i], label=labels[i], alpha=0.9, lw=1.5, linestyle=':', zorder=10) \n#Add baselines\n ax[0,0].vlines(x=7.4, ymin=3.25, ymax=-1.2, color='grey', linestyle='--', alpha=0.8)\n ax[0,1].vlines(x=7.4, ymin=3.25, ymax=-1.2, color='grey', linestyle='--', alpha=0.8)\n ax[1,0].vlines(x=0.31, ymin=3.25, ymax=-1.2, color='grey', linestyle='--', alpha=0.8)\n ax[1,1].vlines(x=0.31, ymin=3.25, ymax=-1.2, color='grey', linestyle='--', alpha=0.8)\n\n#%%\n#Adding sediment and pteropod shading \nax[0,0].axhspan(0, 2.9, color='xkcd:sand', alpha=0.15, lw=0)\nax[0,0].axhline(y=0, color='black', linewidth=0.5, linestyle='--', alpha=0.4)\nax[0,0].axhline(y=-0.15, color='black', linewidth=0.5, linestyle=':', alpha=0.4)\nax[0,0].axhspan(-0.15, 0, color='grey', alpha=0.3, lw=0)\n\nax[1,0].axhspan(0, 2.9, color='xkcd:sand', alpha=0.15, lw=0)\nax[1,0].axhline(y=0, color='black', linewidth=0.5, linestyle='--', alpha=0.4)\nax[1,0].axhline(y=-0.15, color='black', linewidth=0.5, linestyle=':', alpha=0.4)\nax[1,0].axhspan(-0.15, 0, color='grey', alpha=0.3, lw=0)\n\nax[2,0].axhspan(0, 2.9, color='xkcd:sand', alpha=0.15, lw=0)\nax[2,0].axhline(y=0, color='black', linewidth=0.5, linestyle='--', alpha=0.4)\nax[2,0].axhline(y=-0.15, color='black', linewidth=0.5, linestyle=':', alpha=0.4)\nax[2,0].axhspan(-0.15, 0, color='grey', alpha=0.3, lw=0)\n\nax[0,1].axhspan(0, 2.9, color='xkcd:sand', alpha=0.15, lw=0)\nax[0,1].axhline(y=0, color='black', linewidth=0.5, linestyle='--', alpha=0.4)\n\nax[1,1].axhspan(0, 2.9, color='xkcd:sand', alpha=0.15, lw=0)\nax[1,1].axhline(y=0, color='black', linewidth=0.5, linestyle='--', alpha=0.4)\n\nax[2,1].axhspan(0, 2.9, color='xkcd:sand', alpha=0.15, lw=0)\nax[2,1].axhline(y=0, color='black', linewidth=0.5, linestyle='--', alpha=0.4)\n#%%\n#Obtain handles and labels from ax1\nhandles, labels = ax[0,0].get_legend_handles_labels()\n\n#Handles is a list, so append pteropod, SWI and calcite sand patches\npatch0 = mpatches.Patch(color='white', alpha=0.3, edgecolor='white')\npatch1 = mpatches.Patch(color='grey', label='Pteropods', alpha=0.3)\npatch2 = mpatches.Patch(color='xkcd:sand', label='Calcite sand', alpha=0.15)\nline1 = Line2D([0], [0], color='black', alpha=0.4, linewidth=0.5, linestyle='--', label='SWI')\nline2 = Line2D([0], [0], color='grey', alpha=0.8, linewidth=1.5, linestyle='--', label='Baseline')\n\nline3 = Line2D([0], [0], color='xkcd:watermelon', linewidth=1.5, linestyle='-', label='Calcite')\nline4 = Line2D([0], [0], color='xkcd:watermelon', linewidth=1.5, linestyle=':', label='Aragonite')\n\nfor i in [line2, patch0, patch1, patch2, line1]:\n handles.append(i) \n\n#Plot legend\n# leg = fig.legend(handles=handles, \n# bbox_to_anchor=(1.18, 0.6), \n# fontsize='x-small', \n# ncol=1,\n# title='Hours elapsed:',\n# title_fontsize='x-small')\n\n# leg2 = 
fig.legend(handles=[line3, line4],\n# bbox_to_anchor=(1.18, 0.22), \n# fontsize='x-small', \n# ncol=1,\n# title='CaCO$_{3}$ polymorph:',\n# title_fontsize='x-small')\n\n# leg._legend_box.align = \"left\"\n# leg2._legend_box.align = \"left\"\n\n#%%\n#Axes CUVA\nax[0,0].xaxis.set_major_locator(ticker.MultipleLocator(0.25))\nax[0,0].xaxis.set_minor_locator(AutoMinorLocator(5))\nax[0,0].yaxis.set_minor_locator(AutoMinorLocator(5))\nax[0,0].grid(alpha=0.3, which='both')\nax[0,0].set_xlim(7.2, 8)\nax[0,0].set_ylim(2.9, -1.15)\n\nax[1,0].xaxis.set_major_locator(ticker.MultipleLocator(0.25))\nax[1,0].xaxis.set_minor_locator(AutoMinorLocator(5))\nax[1,0].yaxis.set_minor_locator(AutoMinorLocator(5))\nax[1,0].grid(alpha=0.3, which='both')\nax[1,0].set_xlim(0.15, 1.3)\nax[1,0].set_ylim(2.9, -1.15)\n\nax[2,0].xaxis.set_major_locator(ticker.MultipleLocator(5))\nax[2,0].xaxis.set_minor_locator(AutoMinorLocator(5))\nax[2,0].yaxis.set_minor_locator(AutoMinorLocator(5))\nax[2,0].grid(alpha=0.3, which='both')\nax[2,0].set_xlim(-12, 8)\nax[2,0].set_ylim(2.9, -1.15)\n\n#Axes CUVB\nax[0,1].xaxis.set_major_locator(ticker.MultipleLocator(0.25))\nax[0,1].xaxis.set_minor_locator(AutoMinorLocator(5))\nax[0,1].yaxis.set_minor_locator(ticker.MultipleLocator(0.1))\nax[0,1].grid(alpha=0.3, which='both')\nax[0,1].set_xlim(7.2, 8)\nax[0,1].set_ylim(2.9, -1.15)\n\nax[1,1].xaxis.set_major_locator(ticker.MultipleLocator(0.25))\nax[1,1].xaxis.set_minor_locator(AutoMinorLocator(5))\nax[1,1].yaxis.set_minor_locator(ticker.MultipleLocator(0.1))\nax[1,1].grid(alpha=0.3, which='both')\nax[1,1].set_xlim(0.15, 1.3)\nax[1,1].set_ylim(2.9, -1.15)\n\nax[2,1].xaxis.set_major_locator(ticker.MultipleLocator(2.5))\nax[2,1].xaxis.set_minor_locator(AutoMinorLocator(5))\nax[2,1].yaxis.set_minor_locator(ticker.MultipleLocator(0.1))\nax[2,1].grid(alpha=0.3, which='both')\nax[2,1].set_xlim(-5, 1)\nax[2,1].set_ylim(2.9, -1.15)\n\n#Labels\n#fig.suptitle(\"pH microprofiles\", fontsize=16)\nax[0,0].title.set_text('CUV P')\nax[0,1].title.set_text('CUV CTRL')\n\nax[0,0].set_xlabel(r\"pH$_{T}$\")\nax[0,1].set_xlabel(r\"pH$_{T}$\")\nax[0,0].set_ylabel('Depth (cm)')\nax[0,1].axes.yaxis.set_ticklabels([])\n\nax[1,0].set_xlabel(r\"Ω$_{ca}$\")\nax[1,1].set_xlabel(r\"Ω$_{ca}$\")\nax[1,0].set_ylabel('Depth (cm)')\nax[1,1].axes.yaxis.set_ticklabels([])\n\nax[2,0].set_xlabel(\"CaCO$_3$ production \\n(mol m$^{-3}$ solid yr$^{-1}$)\")\nax[2,1].set_xlabel(\"CaCO$_3$ production \\n(mol m$^{-3}$ solid yr$^{-1}$)\")\nax[2,0].set_ylabel('Depth (cm)')\nax[2,1].axes.yaxis.set_ticklabels([])\n\n#%%\nfig.suptitle(\"RADI model\", fontsize=16)\n\nplt.tight_layout()\nplt.subplots_adjust(wspace=None, hspace=None)\nplt.savefig(\"figures/RADI_profiles.png\", bbox_inches='tight')\nplt.show()\n\n","repo_name":"hannavdmortel/Ar_dissolution_HvdM","sub_path":"Experiment_2/RADI/RADI_profiles.py","file_name":"RADI_profiles.py","file_ext":"py","file_size_in_byte":8204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24094133120","text":"import psycopg2\nimport psycopg2.extras\nimport psycopg2.extensions\n\n\"\"\"\nDon't do:\n\nsql = \"INSERT INTO TABLE_A (COL_A,COL_B) VALUES (%s, %s)\" % (val1, val2)\ncursor.execute(sql)\n\nDo:\n\nsql = \"INSERT INTO TABLE_A (COL_A,COL_B) VALUES (%s, %s)\"\ncursor.execute(sql, (val1, val2))\ncursor.execute('SELECT * FROM stocks WHERE symbol=?', t)\n\"\"\"\n\nparams = {\n 'database': 'motherless',\n 'cursor_factory': 
psycopg2.extras.DictCursor,\n}\n\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODE)\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)\n\nclass DB(object):\n def __init__(self, commitOnClose=True, **xargs):\n super(DB, self).__init__()\n self._commitOnClose = commitOnClose\n if len(xargs)==0: xargs=params\n self.dbconnection = psycopg2.connect(**xargs)\n self.cursor = self.dbconnection.cursor()\n #self.execute = self.cursor.execute\n #self.cursor = self.dbconnection.cursor()\n \n \n def __del__(self):\n if self._commitOnClose:\n self.dbconnection.commit()\n self.dbconnection.close()\n \n def commit(self): self.dbconnection.commit()\n \n def rollback(self): self.dbconnection.rollback() \n \n def execute(self, *args, **kwargs):\n \"\"\" \n this function is being replaced by cursor.execute \n \"\"\"\n self.cursor.execute(*args,**kwargs)\n\n query=execute\n\nclass ExtendedDB(DB):\n \n def update(self, table, where, returnkey=None, **x):\n \"\"\"\n table String\n where Dict\n returnkey String or None\n x args key=val\n \"\"\"\n keys=[] # keep order of keys and vals\n values=[]\n for k,v in x.items():\n keys.append(k)\n values.append(v)\n \n setq = \",\".join([\"%s=%s\"%(k,\"%s\") for k in keys])\n wherekeys=[]\n for key, val in where.items():\n wherekeys.append(key)\n values.append(val)\n whereq = \",\".join([\"%s=\"%key+\"%s\" for key in wherekeys])\n q = \"UPDATE %s SET %s WHERE %s\"%(table,setq,whereq)\n args = tuple(values)\n if returnkey: q = \"%s RETURNING %s\"%(q, returnkey)\n self.query(q,args)\n if returnkey: \n r = self.cursor.fetchone()\n return r[0]\n else: return None\n\n def save(self, table, returnkey=None, **x):\n \"\"\" \n Put data into database according schema of @table \n @table String table name\n @x key=value, [key2=value2, [...]]\n \"\"\"\n keys=[] # keep order of keys and vals\n values=[]\n for k,v in x.items():\n keys.append(k)\n values.append(v)\n \n q=\"INSERT INTO \"+table+\" (\"+\",\".join(keys)+\") VALUES (%s\"+\\\n \",%s\"*(len(x)-1)+\")\"\n args = tuple(values)\n if returnkey: q = \"%s RETURNING %s\"%(q, returnkey)\n self.query(q,args)\n if returnkey: \n r = self.cursor.fetchone()\n return r[0]\n else: return None\n \n","repo_name":"juix/scripts","sub_path":"motherless-ai/myDb.py","file_name":"myDb.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34657424266","text":"import unittest\nfrom stack import Stack\n\nclass SortedStack():\n def __init__(self, array):\n self.stack1 = Stack()\n for num in array:\n self.stack1.push(num)\n self.stack2 = Stack()\n\n def sort(self):\n if self.stack1.isEmpty():\n return\n else:\n elem = self.stack1.pop()\n if self.stack2.isEmpty():\n self.stack2.push(elem)\n else:\n while not self.stack2.isEmpty() and self.comparator(elem, self.stack2.peek()):\n elem2 = self.stack2.pop()\n self.stack1.push(elem2)\n self.stack2.push(elem)\n self.sort()\n\n def rebuildStack(self):\n while not self.stack2.isEmpty():\n elem = self.stack2.pop()\n self.stack1.push(elem)\n\n def comparator(self, elem, stack2Elem):\n if elem < stack2Elem:\n return True\n else:\n return False\n\nclass TestSortedStack(unittest.TestCase):\n def setUp(self):\n array = [7,5,9,11,2,4,3]\n self.sortedStack = SortedStack(array)\n self.sortedStack.sort()\n\n def test_sort(self):\n result = self.sortedStack.stack2.items\n self.assertEqual(result, [2,3,4,5,7,9,11])\n\n def test_rebuildStack(self):\n self.sortedStack.rebuildStack()\n result = 
self.sortedStack.stack1.items\n self.assertEqual(result, [11, 9, 7, 5, 4, 3, 2])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"redixhumayun/ctci","sub_path":"Stacks/SortStack.py","file_name":"SortStack.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41321802253","text":"\"\"\"\nModels for a discussion forum.\n\"\"\"\nimport datetime\nfrom itertools import izip\n\nfrom django.contrib.auth.models import User\nfrom django.db import connection, models, transaction\nfrom django.utils.encoding import smart_unicode\nfrom django.utils.text import truncate_words\n\nfrom forum import app_settings\nfrom forum.formatters import post_formatter\nfrom forum.utils import models as model_utils\nfrom pytz import common_timezones\n\nif app_settings.USE_REDIS:\n from forum import redis_connection as redis\n\n__all__ = ['ForumProfile', 'Section', 'Forum', 'Topic', 'Post', 'Search']\n\nqn = connection.ops.quote_name\n\nclass ForumProfileManager(models.Manager):\n def get_for_user(self, user):\n \"\"\"\n Returns the Forum Profile for the given User, creating it first\n if necessary and caching it in the User the first time it is\n looked up.\n \"\"\"\n if not hasattr(user, '_forum_profile_cache'):\n profile, created = self.get_or_create(user=user)\n user._forum_profile_cache = profile\n return user._forum_profile_cache\n\n def update_post_counts_in_bulk(self, user_ids):\n \"\"\"\n Updates ``post_count`` for Users with the given ids.\n \"\"\"\n opts = self.model._meta\n post_opts = Post._meta\n query = \"\"\"\n UPDATE %(forum_profile)s\n SET %(post_count)s = (\n SELECT COUNT(*)\n FROM %(post)s\n WHERE %(post)s.%(post_user_fk)s=%(forum_profile)s.%(user_fk)s\n )\n WHERE %(user_fk)s IN (%(user_pks)s)\"\"\" % {\n 'forum_profile': qn(opts.db_table),\n 'post_count': qn(opts.get_field('post_count').column),\n 'post': qn(post_opts.db_table),\n 'post_user_fk': qn(post_opts.get_field('user').column),\n 'user_fk': qn(opts.get_field('user').column),\n 'user_pks': ','.join(['%s'] * len(user_ids)),\n }\n cursor = connection.cursor()\n cursor.execute(query, user_ids)\n\nTIMEZONE_CHOICES = tuple([(tz, tz) for tz in common_timezones])\n\nTOPICS_PER_PAGE_CHOICES = (\n (10, '10'),\n (20, '20'),\n (30, '30'),\n (40, '40'),\n)\n\nPOSTS_PER_PAGE_CHOICES = (\n (10, '10'),\n (20, '20'),\n (30, '30'),\n (40, '40'),\n)\n\nclass ForumProfile(models.Model):\n \"\"\"\n Forum-specific information and configuration details for a User.\n \"\"\"\n USER_GROUP = 'U'\n MODERATOR_GROUP = 'M'\n ADMIN_GROUP = 'A'\n\n GROUP_CHOICES = (\n (USER_GROUP, 'Users'),\n (MODERATOR_GROUP, 'Moderators'),\n (ADMIN_GROUP, 'Admins'),\n )\n\n user = models.ForeignKey(User, unique=True, related_name='forum_profile')\n group = models.CharField(max_length=1, choices=GROUP_CHOICES, default=USER_GROUP)\n title = models.CharField(max_length=100, blank=True)\n location = models.CharField(max_length=100, blank=True)\n avatar = models.URLField(max_length=200, verify_exists=False, blank=True)\n website = models.URLField(max_length=200, verify_exists=False, blank=True)\n\n # Board settings\n timezone = models.CharField(max_length=25, choices=TIMEZONE_CHOICES, blank=True)\n topics_per_page = models.PositiveIntegerField(choices=TOPICS_PER_PAGE_CHOICES, null=True, blank=True)\n posts_per_page = models.PositiveIntegerField(choices=POSTS_PER_PAGE_CHOICES, null=True, blank=True)\n auto_fast_reply = models.BooleanField(default=False)\n\n # Denormalised 
data\n    post_count = models.PositiveIntegerField(default=0)\n\n    objects = ForumProfileManager()\n\n    def __unicode__(self):\n        return 'Forum profile for %s' % self.user\n\n    class Meta:\n        ordering = ('user',)\n\n    @models.permalink\n    def get_absolute_url(self):\n        return ('forum_user_profile', (smart_unicode(self.user_id),))\n\n    def is_moderator(self):\n        \"\"\"\n        Returns ``True`` if the User represented by this ForumProfile has\n        moderation privileges, ``False`` otherwise.\n        \"\"\"\n        return self.group in (self.MODERATOR_GROUP, self.ADMIN_GROUP)\n\n    def is_admin(self):\n        \"\"\"\n        Returns ``True`` if the User represented by this ForumProfile has\n        administrative privileges, ``False`` otherwise.\n        \"\"\"\n        return self.group == self.ADMIN_GROUP\n\n    def update_post_count(self):\n        \"\"\"\n        Updates this ForumProfile's ``post_count`` with the number of\n        Posts currently associated with its User.\n        \"\"\"\n        self.post_count = self.user.posts.count()\n        model_utils.update(self, 'post_count')\n    update_post_count.alters_data = True\n\nclass SectionManager(models.Manager):\n    def get_forums_by_section(self):\n        \"\"\"\n        Yields ordered two-tuples of (section, forums).\n        \"\"\"\n        section_forums = {}\n        for forum in Forum.objects.all():\n            section_forums.setdefault(forum.section_id, []).append(forum)\n        for section in super(SectionManager, self).get_query_set():\n            yield section, section_forums.get(section.pk, [])\n\n    def increment_orders(self, start_at):\n        \"\"\"\n        Increments ``order`` for all Sections which have an ``order``\n        greater than or equal to ``start_at``.\n        \"\"\"\n        self._change_orders(start_at, '+1')\n\n    def decrement_orders(self, start_at):\n        \"\"\"\n        Decrements ``order`` for all Sections which have an ``order``\n        greater than or equal to ``start_at``.\n        \"\"\"\n        self._change_orders(start_at, '-1')\n\n    def _change_orders(self, start_at, change):\n        opts = self.model._meta\n        cursor = connection.cursor()\n        cursor.execute(\"\"\"\n        UPDATE %(section_table)s\n        SET %(order)s=%(order)s%(change)s\n        WHERE %(order)s>=%%s\"\"\" % {\n            'section_table': qn(opts.db_table),\n            'order': qn(opts.get_field('order').column),\n            'change': change,\n        }, [start_at])\n\nclass Section(models.Model):\n    \"\"\"\n    Provides categorisation for forums.\n    \"\"\"\n    name = models.CharField(max_length=100, unique=True)\n    order = models.PositiveIntegerField()\n\n    objects = SectionManager()\n\n    def __unicode__(self):\n        return self.name\n\n    def delete(self):\n        \"\"\"\n        This method is overridden to maintain consecutive ordering and\n        to update the Post counts of any Users who had Posts in this\n        Section.\n        \"\"\"\n        affected_user_ids = [user['id'] for user in \\\n            User.objects.filter(posts__topic__forum__section=self) \\\n                         .distinct() \\\n                          .values('id')]\n        super(Section, self).delete()\n        Section.objects.decrement_orders(self.order)\n        if len(affected_user_ids):\n            ForumProfile.objects.update_post_counts_in_bulk(affected_user_ids)\n            transaction.commit_unless_managed()\n\n    class Meta:\n        ordering = ('order',)\n\n    @models.permalink\n    def get_absolute_url(self):\n        return ('forum_section_detail', (smart_unicode(self.pk),))\n\nclass ForumManager(models.Manager):\n    def increment_orders(self, section_id, start_at):\n        \"\"\"\n        Increments ``order`` for all Forums in the given Section which\n        have an ``order`` greater than or equal to ``start_at``.\n        \"\"\"\n        self._change_orders(section_id, start_at, '+1')\n\n    def decrement_orders(self, section_id, start_at):\n        \"\"\"\n        Decrements ``order`` for all Forums in the given Section which\n        have an ``order`` greater than 
or equal to ``start_at``.\n \"\"\"\n self._change_orders(section_id, start_at, '-1')\n\n def _change_orders(self, section_id, start_at, change):\n opts = self.model._meta\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n UPDATE %(forum_table)s\n SET %(order)s=%(order)s%(change)s\n WHERE %(section_fk)s=%%s\n AND %(order)s>=%%s\"\"\" % {\n 'forum_table': qn(opts.db_table),\n 'order': qn(opts.get_field('order').column),\n 'change': change,\n 'section_fk': qn(opts.get_field('section').column),\n }, [section_id, start_at])\n\nclass Forum(models.Model):\n \"\"\"\n Provides categorisation for discussion topics.\n \"\"\"\n name = models.CharField(max_length=100)\n section = models.ForeignKey(Section, related_name='forums')\n description = models.TextField(blank=True)\n order = models.PositiveIntegerField()\n\n # Administration\n locked = models.BooleanField(default=False)\n hidden = models.BooleanField(default=False)\n\n # Denormalised data\n topic_count = models.PositiveIntegerField(default=0)\n last_post_at = models.DateTimeField(null=True, blank=True)\n last_topic_id = models.PositiveIntegerField(null=True, blank=True)\n last_topic_title = models.CharField(max_length=100, blank=True)\n last_user_id = models.PositiveIntegerField(null=True, blank=True)\n last_username = models.CharField(max_length=30, blank=True)\n\n objects = ForumManager()\n\n def __unicode__(self):\n return self.name\n\n def delete(self):\n \"\"\"\n This method is overridden to maintain consecutive ordering and\n to update the Post counts of any Users who had posts in this\n Forum.\n \"\"\"\n affected_user_ids = [user['id'] for user in \\\n User.objects.filter(posts__topic__forum=self) \\\n .distinct() \\\n .values('id')]\n super(Forum, self).delete()\n Forum.objects.decrement_orders(self.section_id, self.order)\n if len(affected_user_ids) > 0:\n ForumProfile.objects.update_post_counts_in_bulk(affected_user_ids)\n transaction.commit_unless_managed()\n\n class Meta:\n ordering = ('order',)\n\n @models.permalink\n def get_absolute_url(self):\n return ('forum_forum_detail', (smart_unicode(self.pk),))\n\n def update_topic_count(self):\n \"\"\"\n Updates this Forum's ``topic_count``.\n \"\"\"\n self.topic_count = self.topics.count()\n model_utils.update(self, 'topic_count')\n update_topic_count.alters_data = True\n\n def set_last_post(self, post=None):\n \"\"\"\n Updates denormalised details about this Forum's last Post.\n\n It is assumed that any Post given is not a metapost and is not in\n a hidden Topic.\n\n If the last Post is not given, the last non-meta, non-hidden Post\n will be looked up. 
This method should never set the details of a\n Post in a hidden Topic as the last Post, as this would result in\n the display of latest Post links which do not work for regular and\n anonymous users.\n \"\"\"\n try:\n if post is None:\n post = Post.objects.filter(meta=False,\n topic__forum=self,\n topic__hidden=False) \\\n .order_by('-posted_at', '-id')[0]\n self.last_post_at = post.posted_at\n self.last_topic_id = post.topic.pk\n self.last_topic_title = post.topic.title\n self.last_user_id = post.user.pk\n self.last_username = post.user.username\n except IndexError:\n # No Post was given and there was no latest, non-hidden\n # Post, so there must not be any eligible Topics in the\n # Forum at the moment.\n self.last_post_at, self.last_topic_id, self.last_user_id = (None, None, None)\n self.last_topic_title, self.last_username = ('', '')\n model_utils.update(self, 'last_post_at', 'last_topic_id',\n 'last_topic_title', 'last_user_id', 'last_username')\n set_last_post.alters_data = True\n\nclass TopicManager(models.Manager):\n def _user_details(self, queryset):\n \"\"\"\n Uses ``extra`` to add User details to a Topic ``QuerySet``.\n \"\"\"\n opts = self.model._meta\n user_opts = User._meta\n user_table = qn(user_opts.db_table)\n return queryset.extra(\n select={\n 'user_username': '%s.%s' % (user_table, qn(user_opts.get_field('username').column)),\n },\n tables=[user_table],\n where=[\n '%s.%s=%s.%s' % (\n qn(opts.db_table),\n qn(opts.get_field('user').column),\n user_table,\n qn(user_opts.pk.column),\n ),\n ]\n )\n\n def _forum_details(self, queryset):\n \"\"\"\n Uses ``extra`` to add Forum details to a Topic ``QuerySet``.\n \"\"\"\n opts = self.model._meta\n forum_opts = Forum._meta\n forum_table = qn(forum_opts.db_table)\n return queryset.extra(\n select={\n 'forum_name': '%s.%s' % (forum_table, qn(forum_opts.get_field('name').column)),\n },\n tables=[forum_table],\n where=[\n '%s.%s=%s.%s' % (\n qn(opts.db_table),\n qn(opts.get_field('forum').column),\n forum_table,\n qn(forum_opts.pk.column),\n ),\n ]\n )\n\n def with_user_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Topics which have\n additional information about the User who created them.\n \"\"\"\n return self._user_details(super(TopicManager, self).get_query_set())\n\n def with_forum_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Topics which have\n additional information about the Forum they belong to.\n \"\"\"\n return self._forum_details(super(TopicManager, self).get_query_set())\n\n def with_forum_and_user_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Topics which have\n additional information about the User who created them and the\n Forum they belong to.\n \"\"\"\n return self._forum_details(self._user_details(\n super(TopicManager, self).get_query_set()))\n\n def with_display_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Topics which have additional Forum and\n Section information required to display a Topic's detail page without\n having to perform extra queries.\n \"\"\"\n opts = self.model._meta\n forum_opts = Forum._meta\n forum_table = qn(forum_opts.db_table)\n section_opts = Section._meta\n section_table = qn(section_opts.db_table)\n return super(TopicManager, self).get_query_set().extra(\n select={\n 'forum_name': '%s.%s' % (forum_table, qn(forum_opts.get_field('name').column)),\n 'section_id': '%s.%s' % (forum_table, qn(forum_opts.get_field('section').column)),\n 'section_name': '%s.%s' % (section_table, qn(section_opts.get_field('name').column)),\n },\n 
tables=[forum_table, section_table],\n where=[\n '%s.%s=%s.%s' % (\n qn(opts.db_table),\n qn(opts.get_field('forum').column),\n forum_table,\n qn(forum_opts.pk.column),\n ),\n '%s.%s=%s.%s' % (\n qn(forum_table),\n qn(forum_opts.get_field('section').column),\n section_table,\n qn(section_opts.pk.column),\n ),\n ]\n )\n\n def with_standalone_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Topics which have additional\n User, Forum and Section information required to display a Topic's\n complete details.\n \"\"\"\n opts = self.model._meta\n user_opts = User._meta\n user_table = qn(user_opts.db_table)\n return self.with_display_details().extra(\n select={\n 'user_username': '%s.%s' % (user_table, qn(user_opts.get_field('username').column)),\n },\n tables=[user_table],\n where=[\n '%s.%s=%s.%s' % (\n qn(opts.db_table),\n qn(opts.get_field('user').column),\n user_table,\n qn(user_opts.pk.column),\n ),\n ]\n )\n\n def add_last_read_times(self, topics, user):\n \"\"\"\n If the given User is authenticated, adds a ``last_read`` attribute\n to the given Topics.\n \"\"\"\n if user.is_authenticated():\n for topic, last_read in izip(topics,\n redis.get_last_read_times(user, topics)):\n topic.last_read = last_read\n return topics\n\n def add_view_counts(self, topics):\n \"\"\"\n Adds view counts to the given Topics.\n \"\"\"\n for topic, view_count in izip(topics,\n redis.get_view_counts([t.pk for t in topics])):\n topic.view_count = view_count\n return topics\n\nclass Topic(models.Model):\n \"\"\"\n A discussion topic.\n \"\"\"\n title = models.CharField(max_length=100)\n forum = models.ForeignKey(Forum, related_name='topics')\n user = models.ForeignKey(User, related_name='topics')\n description = models.CharField(max_length=100, blank=True)\n started_at = models.DateTimeField(editable=False)\n\n # Administration\n pinned = models.BooleanField(default=False)\n locked = models.BooleanField(default=False)\n hidden = models.BooleanField(default=False)\n\n # Denormalised data\n post_count = models.PositiveIntegerField(default=0)\n metapost_count = models.PositiveIntegerField(default=0)\n last_post_at = models.DateTimeField(null=True, blank=True)\n last_user_id = models.PositiveIntegerField(null=True, blank=True)\n last_username = models.CharField(max_length=30, blank=True)\n\n objects = TopicManager()\n\n def __unicode__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n \"\"\"\n This method is overridden to implement the following:\n\n - Populating the non-editable ``started_at`` field.\n - Updating denormalised data in the related Forum when this is a\n new Topic.\n - If ``title`` has been updated and this Topic was set in its\n Forum's last Post details, it needs to be updated in the\n Forum as well.\n \"\"\"\n is_new = False\n if not self.pk:\n self.started_at = datetime.datetime.now()\n is_new = True\n super(Topic, self).save(*args, **kwargs)\n if is_new:\n self.forum.update_topic_count()\n transaction.commit_unless_managed()\n elif self.pk == self.forum.last_topic_id and \\\n self.title != self.forum.last_topic_title and \\\n not self.hidden:\n self.forum.set_last_post()\n transaction.commit_unless_managed()\n\n def delete(self):\n \"\"\"\n This method is overridden to update denormalised data in related\n Forum and ForumProfile objects after this Topic has been deleted:\n\n - The Forum's Topic count always has to be updated.\n - The Post counts of ForumProfiles of any Users who posted in the\n Topic always have to be updated.\n - If it was set as the Topic in the Forum's 
last Post details,\n these need to be updated.\n \"\"\"\n forum = self.forum\n was_last_topic = self.pk == forum.last_topic_id\n affected_user_ids = [user['id'] for user in \\\n User.objects.filter(posts__topic=self).distinct().values('id')]\n super(Topic, self).delete()\n forum.update_topic_count()\n if was_last_topic:\n forum.set_last_post()\n if len(affected_user_ids) > 0:\n ForumProfile.objects.update_post_counts_in_bulk(affected_user_ids)\n transaction.commit_unless_managed()\n\n class Meta:\n ordering = ('-last_post_at', '-started_at')\n\n @models.permalink\n def get_absolute_url(self):\n return ('forum_topic_detail', (smart_unicode(self.pk),))\n\n @models.permalink\n def get_meta_url(self):\n return ('forum_topic_meta_detail', (smart_unicode(self.pk),))\n\n def get_first_post(self):\n \"\"\"\n Gets the first Post in this Topic.\n \"\"\"\n return self.posts.filter(meta=False).order_by('num_in_topic')[0]\n\n def update_post_count(self, meta=False):\n \"\"\"\n Updates one of this Topic's denormalised Post counts, based on\n ``meta``.\n \"\"\"\n field_name = '%spost_count' % (meta and 'meta' or '',)\n setattr(self, field_name, self.posts.filter(meta=meta).count())\n model_utils.update(self, field_name)\n update_post_count.alters_data = True\n\n def set_last_post(self, post=None):\n \"\"\"\n Updates details about this Topic's last Post and its\n denormalised ``post_count``.\n\n It is assumed that any Post given is not a metapost.\n\n If the last Post is not given, it will be looked up.\n \"\"\"\n if post is None:\n post = self.posts.filter(meta=False).order_by('-posted_at', '-id')[0]\n self.post_count = self.posts.filter(meta=False).count()\n self.last_post_at = post.posted_at\n self.last_user_id = post.user.pk\n self.last_username = post.user.username\n model_utils.update(self, 'post_count', 'last_post_at', 'last_user_id',\n 'last_username')\n set_last_post.alters_data = True\n\nclass PostManager(models.Manager):\n def with_user_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Posts which have additional\n information about the User who created them, as required to display\n Post details on Topic detail pages.\n \"\"\"\n opts = self.model._meta\n user_opts = User._meta\n forum_profile_opts = ForumProfile._meta\n user_table = qn(user_opts.db_table)\n forum_profile_table = qn(forum_profile_opts.db_table)\n return super(PostManager, self).get_query_set().extra(\n select={\n 'user_username': '%s.%s' % (user_table, qn(user_opts.get_field('username').column)),\n 'user_date_joined': '%s.%s' % (user_table, qn(user_opts.get_field('date_joined').column)),\n 'user_title': '%s.%s' % (forum_profile_table, qn(forum_profile_opts.get_field('title').column)),\n 'user_avatar': '%s.%s' % (forum_profile_table, qn(forum_profile_opts.get_field('avatar').column)),\n 'user_post_count': '%s.%s' % (forum_profile_table, qn(forum_profile_opts.get_field('post_count').column)),\n 'user_location': '%s.%s' % (forum_profile_table, qn(forum_profile_opts.get_field('location').column)),\n 'user_website': '%s.%s' % (forum_profile_table, qn(forum_profile_opts.get_field('website').column)),\n },\n tables=[user_table, forum_profile_table],\n where=[\n '%s.%s=%s.%s' % (\n qn(opts.db_table),\n qn(opts.get_field('user').column),\n user_table,\n qn(user_opts.pk.column),\n ),\n '%s.%s=%s.%s' % (\n forum_profile_table,\n qn(forum_profile_opts.get_field('user').column),\n user_table,\n qn(user_opts.pk.column),\n ),\n ]\n )\n\n def with_standalone_details(self):\n \"\"\"\n Creates a ``QuerySet`` containing Posts which have 
additional\n information about the User who created them and their Topic, Forum and\n Section, as required to display a Post's complete details.\n \"\"\"\n opts = self.model._meta\n topic_opts = Topic._meta\n forum_opts = Forum._meta\n section_opts = Section._meta\n topic_table = qn(topic_opts.db_table)\n forum_table = qn(forum_opts.db_table)\n section_table = qn(section_opts.db_table)\n return self.with_user_details().extra(\n select={\n 'topic_title': '%s.%s' % (topic_table, qn(topic_opts.get_field('title').column)),\n 'topic_post_count': '%s.%s' % (topic_table, qn(topic_opts.get_field('post_count').column)),\n 'forum_id': '%s.%s' % (topic_table, qn(topic_opts.get_field('forum').column)),\n 'forum_name': '%s.%s' % (forum_table, qn(forum_opts.get_field('name').column)),\n 'section_id': '%s.%s' % (forum_table, qn(forum_opts.get_field('section').column)),\n 'section_name': '%s.%s' % (section_table, qn(section_opts.get_field('name').column)),\n },\n tables=[topic_table, forum_table, section_table],\n where=[\n '%s.%s=%s.%s' % (\n qn(opts.db_table),\n qn(opts.get_field('topic').column),\n topic_table,\n qn(topic_opts.pk.column),\n ),\n '%s.%s=%s.%s' % (\n qn(topic_opts.db_table),\n qn(topic_opts.get_field('forum').column),\n forum_table,\n qn(forum_opts.pk.column),\n ),\n '%s.%s=%s.%s' % (\n forum_table,\n qn(forum_opts.get_field('section').column),\n section_table,\n qn(section_opts.pk.column),\n ),\n ]\n )\n\n def update_num_in_topic(self, topic, start_at, increment=False, meta=False):\n \"\"\"\n Updates ``num_in_topic`` for all Posts in the given Topic\n which have a ``num_in_topic`` greater than ``start_at``.\n\n Values will be incremented or decremented based on ``increment``.\n \"\"\"\n opts = self.model._meta\n cursor = connection.cursor()\n operator = {True: '+', False: '-'}[increment]\n cursor.execute(\"\"\"\n UPDATE %(post_table)s\n SET %(num_in_topic)s=%(num_in_topic)s%(operator)s1\n WHERE %(topic_fk)s=%%s\n AND %(meta)s=%%s\n AND %(num_in_topic)s>%%s\"\"\" % {\n 'post_table': qn(opts.db_table),\n 'meta': qn(opts.get_field('meta').column),\n 'num_in_topic': qn(opts.get_field('num_in_topic').column),\n 'operator': operator,\n 'topic_fk': qn(opts.get_field('topic').column),\n }, [topic.pk, meta, start_at])\n\n def add_topic_view_counts(self, posts):\n \"\"\"\n Adds view counts for the Topics of the given Posts.\n \"\"\"\n for post, view_count in izip(posts,\n redis.get_view_counts([p.topic_id for p in posts])):\n post.topic_view_count = view_count\n return posts\n\nclass Post(models.Model):\n \"\"\"\n A post which forms part of a discussion.\n \"\"\"\n user = models.ForeignKey(User, related_name='posts')\n topic = models.ForeignKey(Topic, related_name='posts')\n body = models.TextField()\n body_html = models.TextField(editable=False)\n posted_at = models.DateTimeField(editable=False)\n edited_at = models.DateTimeField(editable=False, null=True, blank=True)\n user_ip = models.IPAddressField(editable=False, null=True, blank=True)\n meta = models.BooleanField(default=False)\n emoticons = models.BooleanField(default=True)\n\n # Denormalised data\n num_in_topic = models.PositiveIntegerField(default=0)\n\n objects = PostManager()\n\n def __unicode__(self):\n return truncate_words(self.body, 25)\n\n def save(self, *args, **kwargs):\n \"\"\"\n This method is overridden to implement the following:\n\n - Formatting and escaping the raw Post body as HTML at save time.\n - Populating or updating non-editable time fields.\n - Populating denormalised data in related Topic, Forum and\n ForumProfile 
objects when this is a new Post.\n        \"\"\"\n        self.body = self.body.strip()\n        self.body_html = post_formatter.format_post(self.body, self.emoticons)\n        is_new = False\n        if not self.pk:\n            self.posted_at = datetime.datetime.now()\n            self.num_in_topic = getattr(self.topic, '%spost_count' % \\\n                (self.meta and 'meta' or '',)) + 1\n            is_new = True\n        else:\n            self.edited_at = datetime.datetime.now()\n        super(Post, self).save(*args, **kwargs)\n        if is_new:\n            if not self.meta:\n                # Includes a non-metapost post count update\n                self.topic.set_last_post(self)\n            else:\n                self.topic.update_post_count(meta=True)\n\n            # Don't update the forum's last post if the topic is hidden\n            # - this allows moderators to add posts to hidden topics\n            # without them becoming visible on forum listing pages.\n            if not self.meta and not self.topic.hidden:\n                self.topic.forum.set_last_post(self)\n            ForumProfile.objects.get_for_user(self.user).update_post_count()\n            transaction.commit_unless_managed()\n\n    def delete(self):\n        \"\"\"\n        This method is overridden to update denormalised data in related\n        Topic, Forum, ForumProfile and other Post objects after the post has\n        been deleted:\n\n        - The ``post_count`` of the ForumProfile for the User who made\n          the post always needs to be updated.\n        - The ``post_count`` or ``metapost_count`` of the Post's Topic\n          always needs to be updated.\n        - If this is not a metapost and was the last Post in its Topic,\n          the Topic's last Post details need to be updated.\n        - If this is not a metapost and was the last Post in its Topic's\n          Forum, the Forum's last Post details need to be updated to the\n          new last Post.\n        - If this was not the last Post in its Topic, the\n          ``num_in_topic`` of all later Posts need to be decremented.\n        \"\"\"\n        topic = self.topic\n        forum = topic.forum\n        forum_profile = ForumProfile.objects.get_for_user(self.user)\n        super(Post, self).delete()\n        forum_profile.update_post_count()\n        if not self.meta and self.posted_at == topic.last_post_at:\n            # Includes a non-metapost post count update\n            topic.set_last_post()\n        else:\n            topic.update_post_count(meta=self.meta)\n        if not self.meta and self.posted_at == forum.last_post_at:\n            forum.set_last_post()\n        Post.objects.update_num_in_topic(topic, self.num_in_topic,\n                                         increment=False, meta=self.meta)\n        transaction.commit_unless_managed()\n\n    class Meta:\n        ordering = ('-posted_at', '-id')\n\n    @models.permalink\n    def get_absolute_url(self):\n        return ('forum_redirect_to_post', (smart_unicode(self.pk),))\n\nclass Search(models.Model):\n    \"\"\"\n    Caches search criteria and a limited number of results to avoid\n    repetition of expensive searches when paginating results.\n    \"\"\"\n    POST_SEARCH = 'P'\n    TOPIC_SEARCH = 'T'\n    TYPE_CHOICES = (\n        (POST_SEARCH, 'Posts'),\n        (TOPIC_SEARCH, 'Topics'),\n    )\n\n    type = models.CharField(max_length=1, choices=TYPE_CHOICES)\n    user = models.ForeignKey(User, related_name='searches')\n    searched_at = models.DateTimeField(editable=False)\n    criteria_json = models.TextField()\n    result_ids = models.TextField()\n\n    def __unicode__(self):\n        return '%s searched for %s at %s' % (\n            self.user, self.get_type_display(), self.searched_at)\n\n    def save(self, *args, **kwargs):\n        if not self.pk:\n            self.searched_at = datetime.datetime.now()\n        super(Search, self).save(*args, **kwargs)\n\n    class Meta:\n        ordering = ('-searched_at',)\n        verbose_name_plural = 'searches'\n\n    @models.permalink\n    def get_absolute_url(self):\n        return ('forum_search_results', (smart_unicode(self.pk),))\n\n    def get_result_model(self):\n        \"\"\"\n        Returns the model class 
corresponding to this Search's ``type``.\n \"\"\"\n return {self.POST_SEARCH: Post, self.TOPIC_SEARCH: Topic}[self.type]\n\n def is_post_search(self):\n \"\"\"\n Returns ``True`` if this is a Post Search, ``False`` otherwise.\n \"\"\"\n return self.type == self.POST_SEARCH\n\n def is_topic_search(self):\n \"\"\"\n Returns ``True`` if this is a Topic Search, ``False`` otherwise.\n \"\"\"\n return self.type == self.TOPIC_SEARCH\n","repo_name":"insin/forum","sub_path":"forum/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":32692,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"81"} +{"seq_id":"11638376034","text":"#Developed by Henrique Treza\n\n\n#Exercicio Que lê uma frase e diz se ela tem \"Uma palavra que voce escolheu\".\n#A palavra secreta é Desenvolvedor.\n\nvar = str(input(\"Insira uma frase e tente acertar a palavra secreta: \")).strip()\nr = ('DESENVOLVEDOR' in var.upper())\n\nif r:\n print('Parabens!!! Você acaba de ganhar um mega premio')\nelse:\n print('Você errou!! Continua tentando!!')","repo_name":"htreza/Python","sub_path":"Exercicios/Exercicio020.py","file_name":"Exercicio020.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7263537579","text":"import os\nimport pathlib\nimport pprint\nimport sys\nfrom argparse import Namespace\nfrom datetime import datetime\nfrom functools import partial\nfrom typing import Type\n\nimport numpy as np\nimport torch\nfrom gym.wrappers import NormalizeObservation, FrameStack\nfrom tensorboardX import SummaryWriter\n\nfrom tianshou.utils import TensorboardLogger\n\nsys.path.append(os.path.abspath(pathlib.Path(__file__).parent.parent))\n\nfrom levdoom.config import parse_args\nfrom levdoom.env.base.scenario import DoomEnv\nfrom levdoom.utils.enums import DoomScenarioImpl, Algorithm\nfrom levdoom.utils.wrappers import ResizeWrapper, RescaleWrapper\n\nfrom tianshou.data.collector import Collector\nfrom tianshou.env import ShmemVectorEnv\nfrom tianshou.utils.logger.wandb import WandbLogger\n\n\ndef create_single_env(scenario: Type[DoomEnv], args: Namespace, task: str):\n env = scenario(args, task)\n env = ResizeWrapper(env, args.frame_height, args.frame_width)\n env = RescaleWrapper(env)\n env = NormalizeObservation(env)\n env = FrameStack(env, args.frame_stack)\n return env\n\n\ndef train(args: Namespace):\n args.tasks_joined = '_'.join(task for task in args.tasks)\n args.timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n args.experiment_dir = pathlib.Path(__file__).parent.resolve()\n print('Experiment directory', args.experiment_dir)\n\n # Determine the log path\n log_path = f'{args.logdir}/{args.algorithm}/{args.scenario}/{args.seed}_{args.timestamp}'\n\n # Determine scenario and algorithm classes\n scenario_class = DoomScenarioImpl[args.scenario.upper()].value\n algorithm_class = Algorithm[args.algorithm.upper()].value\n\n args.cfg_path = f\"{args.experiment_dir}/maps/{args.scenario}/{args.scenario}.cfg\"\n args.res = (args.frame_skip, args.frame_height, args.frame_width)\n env = scenario_class(args, args.tasks[0])\n args.state_shape = args.res\n args.action_shape = env.action_space.shape or env.action_space.n\n print(\"Observations shape:\", args.state_shape) # should be N_FRAMES x H x W\n print(\"Actions shape:\", args.action_shape)\n\n # Create training and testing environments\n train_envs = ShmemVectorEnv(\n [\n partial(create_single_env, scenario_class, args, 
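# Illustrative sketch (not from the original repo): vectorized environments such
# as the ShmemVectorEnv used here expect zero-argument constructors, so
# functools.partial freezes (scenario, args, task) per worker. The names below
# are stand-ins for the pipeline's real classes.
from functools import partial

def make_env(scenario, args, task):
    # in the real pipeline this would build and wrap a DoomEnv instance
    return {"scenario": scenario, "task": task, "frame_stack": args["frame_stack"]}

thunks = [partial(make_env, "DefendTheCenter", {"frame_stack": 4}, t)
          for t in ["default", "fast_enemies"]]
envs = [thunk() for thunk in thunks]  # each call builds one independent env
print(envs)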
task) for task in args.tasks\n ],\n norm_obs=args.normalize\n )\n test_envs = ShmemVectorEnv(\n [\n partial(create_single_env, scenario_class, args, task) for task in args.test_tasks\n ],\n norm_obs=args.normalize\n )\n\n # Apply the seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n\n # Initialize the algorithm\n algorithm = algorithm_class(args, env, log_path)\n\n policy = algorithm.get_policy()\n # Load a previous policy\n if args.resume_path:\n resume_path = f'{args.logdir}/{args.algorithm}/{args.scenario}/{args.resume_path}'\n policy.load_state_dict(torch.load(resume_path, map_location=args.device))\n print(\"Loaded agent from: \", resume_path)\n\n # Create replay buffer\n buffer = algorithm.create_buffer(len(train_envs))\n\n # Create collectors\n train_collector = Collector(policy, train_envs, buffer, exploration_noise=True,\n extra_statistics=env.extra_statistics)\n test_collector = Collector(policy, test_envs, exploration_noise=True, extra_statistics=env.extra_statistics)\n\n # Initialize logging\n writer = SummaryWriter(log_path)\n writer.add_text(\"args\", str(args))\n\n wandb_id = f'{args.algorithm}_seed_{args.seed}_{args.timestamp}'\n\n logger = TensorboardLogger(writer) if not args.with_wandb else WandbLogger(project=args.wandb_project,\n name=wandb_id,\n entity=args.wandb_user,\n run_id=wandb_id,\n config=args,\n extra_statistics=env.extra_statistics)\n\n if args.with_wandb:\n logger.load(writer)\n\n # Watch the agent's performance\n def watch():\n print(\"Setup test envs ...\")\n policy.eval()\n test_envs.seed(args.seed)\n if args.save_buffer_name:\n print(f\"Generate buffer with size {args.buffer_size}\")\n buffer = algorithm.create_buffer(len(test_envs))\n extra_statistics = ['kills', 'health', 'ammo', 'movement', 'kits_obtained', 'hits_taken']\n collector = Collector(policy, test_envs, buffer, exploration_noise=True, extra_statistics=extra_statistics)\n result = collector.collect(n_step=args.buffer_size, frame_skip=args.frame_skip)\n print(f\"Save buffer into {args.save_buffer_name}\")\n # Unfortunately, pickle will cause oom with 1M buffer size\n buffer.save_hdf5(args.save_buffer_name)\n else:\n print(\"Testing agent ...\")\n test_collector.reset()\n result = test_collector.collect(n_episode=args.test_num, render=args.render_sleep,\n frame_skip=args.frame_skip)\n reward = result[\"reward\"].mean()\n lengths = result[\"length\"].mean() * args.frame_skip\n print(f'Mean reward (over {result[\"n/ep\"]} episodes): {reward}')\n print(f'Mean length (over {result[\"n/ep\"]} episodes): {lengths}')\n\n if args.watch:\n watch()\n exit(0)\n\n # test train_collector and start filling replay buffer\n train_collector.collect(n_step=args.batch_size * args.training_num, frame_skip=args.frame_skip)\n\n # Initialize trainer\n result = algorithm.create_trainer(train_collector, test_collector, logger)\n\n # Display the final results\n pprint.pprint(result)\n\n\nif __name__ == '__main__':\n train(parse_args())\n","repo_name":"TTomilin/LevDoom","sub_path":"levdoom/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"40100308307","text":"from __future__ import print_function\nimport timeit\nimport sys\nimport Utils.DataHelper as DataHelper\nimport Utils.CostFHelper as CostFHelper\nfrom Layers.HiddenLayer import *\nfrom Layers.SoftmaxLayer import *\nfrom Layers.ConvPoolLayer import *\n\n# Hyper 
parameters\nDATASET_NAME = '../Dataset/mnist.pkl.gz'\nLEARNING_RATE = 0.005\nNUM_EPOCH = 1000\nBATCH_SIZE = 20\nPATIENCE = 1000\nPATIENCE_INCREASE = 2\nIMPROVEMENT_THRESHOLD = 0.995\nVALIDATION_FREQUENCY = 500\n\ndef padData(sharedData):\n data = sharedData.get_value()\n numSamples = data.shape[0]\n data = data.reshape((numSamples, 28, 28))\n\n newData = numpy.zeros((numSamples, 32, 32))\n newData[:, 2 : 30, 2 : 30] = data\n newData = newData.reshape((numSamples, 32 * 32))\n return theano.shared(\n numpy.asarray(\n newData,\n dtype = theano.config.floatX\n ),\n borrow = True\n )\n\ndef evaluateLenet5():\n # Load datasets from local disk or download from the internet\n datasets = DataHelper.LoadData(DATASET_NAME)\n trainSetX, trainSetY = datasets[0]\n validSetX, validSetY = datasets[1]\n testSetX, testSetY = datasets[2]\n\n trainSetX = padData(trainSetX)\n validSetX = padData(validSetX)\n testSetX = padData(testSetX)\n\n nTrainBatchs = trainSetX.get_value(borrow=True).shape[0]\n nValidBatchs = validSetX.get_value(borrow=True).shape[0]\n nTestBatchs = testSetX.get_value(borrow=True).shape[0]\n nTrainBatchs //= BATCH_SIZE\n nValidBatchs //= BATCH_SIZE\n nTestBatchs //= BATCH_SIZE\n\n # Create model\n '''\n MODEL ARCHITECTURE\n INPUT -> Convolution -> Dropout\n (32x32) (6, 1, 5, 5) (6, 14, 14)\n -> Convolution -> Dropout\n (16, 6, 5, 5) (16, 5, 5)\n -> Hidden layer\n (120 neurons)\n -> Hidden layer\n (84 neurons)\n -> Output layer (Softmax)\n (10 neurons)\n '''\n # Create random state\n rng = numpy.random.RandomState(12345)\n\n # Create shared variable for input\n Index = T.lscalar('Index')\n X = T.matrix('X')\n Y = T.ivector('Y')\n\n X4D = X.reshape((BATCH_SIZE, 1, 32, 32))\n # Convolution & pooling layer 0\n convPoolLayer0 = ConvPoolLayer(\n rng = rng,\n input = X4D,\n inputShape = (BATCH_SIZE, 1, 32, 32),\n filterShape = (6, 1, 5, 5),\n\n )\n convPoolLayer0Output = convPoolLayer0.Output()\n convPoolLayer0Params = convPoolLayer0.Params()\n\n # Convolution & pooling layer 1\n convPoolLayer1 = ConvPoolLayer(\n rng = rng,\n input = convPoolLayer0Output,\n inputShape = (BATCH_SIZE, 6, 14, 14),\n filterShape = (16, 6, 5, 5)\n )\n convPoolLayer1Output = convPoolLayer1.Output()\n convPoolLayer1Params = convPoolLayer1.Params()\n convPoolLayer1OutputRes = convPoolLayer1Output.reshape((BATCH_SIZE, 16 * 5 * 5))\n\n # Hidden layer 0\n hidLayer0 = HiddenLayer(\n rng = rng,\n input = convPoolLayer1OutputRes,\n numIn = 16 * 5 * 5,\n numOut = 120,\n activation = T.tanh\n )\n hidLayer0Output = hidLayer0.Output()\n hidLayer0Params = hidLayer0.Params()\n\n # Hidden layer 1\n hidLayer1 = HiddenLayer(\n rng=rng,\n input=hidLayer0Output,\n numIn=120,\n numOut=84,\n activation=T.tanh\n )\n hidLayer1Output = hidLayer1.Output()\n hidLayer1Params = hidLayer1.Params()\n\n # Hidden layer 2\n hidLayer2 = HiddenLayer(\n rng = rng,\n input = hidLayer1Output,\n numIn = 84,\n numOut = 10,\n activation = None\n )\n hidLayer2Output = hidLayer2.Output()\n hidLayer2Params = hidLayer2.Params()\n\n # Softmax layer\n softmaxLayer0 = SoftmaxLayer(\n input=hidLayer2Output\n )\n softmaxLayer0Output = softmaxLayer0.Output()\n\n # List of params from model\n params = hidLayer2Params + \\\n hidLayer1Params + \\\n hidLayer0Params + \\\n convPoolLayer1Params + \\\n convPoolLayer0Params\n\n # Evaluate model - using early stopping\n # Define cost function = Regularization + Cross entropy of softmax\n costTrain = CostFHelper.CrossEntropy(softmaxLayer0Output, Y)\n\n # Define gradient\n grads = T.grad(costTrain, params)\n\n # Updates 
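# The `updates` list assembled just below implements plain stochastic gradient
# descent, param <- param - LEARNING_RATE * grad. A tiny NumPy check of the same
# rule on a 1-D quadratic loss (illustrative only, not part of the original file):
import numpy as np

w = np.array([4.0])          # parameter
lr = 0.1
for _ in range(5):
    grad = 2 * w             # d/dw of w**2
    w = w - lr * grad        # one SGD step
print(w)                     # moves toward the minimum at 0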
function\n updates = [\n (param, param - LEARNING_RATE * grad)\n for (param, grad) in zip(params, grads)\n ]\n\n # Train model\n trainModel = theano.function(\n inputs=[Index],\n outputs=costTrain,\n updates=updates,\n givens={\n X: trainSetX[Index * BATCH_SIZE: (Index + 1) * BATCH_SIZE],\n Y: trainSetY[Index * BATCH_SIZE: (Index + 1) * BATCH_SIZE]\n }\n )\n\n error = CostFHelper.Error(softmaxLayer0Output, Y)\n # Valid model\n validModel = theano.function(\n inputs=[Index],\n outputs=error,\n givens={\n X: validSetX[Index * BATCH_SIZE: (Index + 1) * BATCH_SIZE],\n Y: validSetY[Index * BATCH_SIZE: (Index + 1) * BATCH_SIZE]\n }\n )\n\n # Test model\n testModel = theano.function(\n inputs=[Index],\n outputs=error,\n givens={\n X: testSetX[Index * BATCH_SIZE: (Index + 1) * BATCH_SIZE],\n Y: testSetY[Index * BATCH_SIZE: (Index + 1) * BATCH_SIZE]\n }\n )\n\n doneLooping = False\n iter = 0\n patience = PATIENCE\n best_error = 1\n best_iter = 0\n start_time = timeit.default_timer()\n epoch = 0\n while (epoch < NUM_EPOCH) and (not doneLooping):\n epoch = epoch + 1\n for indexBatch in range(nTrainBatchs):\n iter = (epoch - 1) * nTrainBatchs + indexBatch\n cost = trainModel(indexBatch)\n\n if iter % VALIDATION_FREQUENCY == 0:\n print ('Validate model....')\n err = 0;\n for indexValidBatch in range(nValidBatchs):\n err += validModel(indexValidBatch)\n err /= nValidBatchs\n print ('Error = ', err)\n\n if (err < best_error):\n if (err < best_error * IMPROVEMENT_THRESHOLD):\n patience = max(patience, iter * PATIENCE_INCREASE)\n\n best_iter = iter\n best_error = err\n\n # Test on test set\n test_losses = [testModel(i) for i in range(nTestBatchs)]\n test_score = numpy.mean(test_losses)\n\n if (patience < iter):\n doneLooping = True\n break\n\n end_time = timeit.default_timer()\n print(('Optimization complete. 
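# Condensed sketch of the patience-based early stopping used in the training
# loop above (same constants, synthetic validation errors; illustrative only):
PATIENCE, PATIENCE_INCREASE, IMPROVEMENT_THRESHOLD = 1000, 2, 0.995
patience, best_error = PATIENCE, 1.0
for it, err in enumerate([0.5, 0.4, 0.39, 0.391, 0.392] * 300):
    if err < best_error:
        if err < best_error * IMPROVEMENT_THRESHOLD:  # "significant" improvement
            patience = max(patience, it * PATIENCE_INCREASE)
        best_error = err
    if patience < it:
        print(f"stopping at iteration {it}, best error {best_error}")
        break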
Best validation score of %f %% '\n 'obtained at iteration %i, with test performance %f %%') %\n (best_error * 100., best_iter + 1, test_score * 100.))\n print(('The code for file ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)\n\n\n\nif __name__ == \"__main__\":\n evaluateLenet5()","repo_name":"CaoDuyThanh/CNN_Lenet5","sub_path":"Networks/Lenet5.py","file_name":"Lenet5.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7665751671","text":"import json\n\nfrom tkinter import *\n\nfrom .utils import THUMB_CONF_FILE_PATH\n\n# THUMB_CONF_FILE_PATH -> from root, we need to path to it manuallu\nfile = open(THUMB_CONF_FILE_PATH, \"r\")\njson_stuff = json.load(file) # will read from file (and convert to dictionary)\nfile.close()\nprint(json_stuff)\n\nroot = Tk()\nroot.title(\"Thumbnail Editor\")\n# root.geometry(\"400x400\")\n\nfoodOptions = [\"Pizza\", \"Salad\", \"Pasta\"]\noptionSelected = StringVar()\noptionSelected.set(foodOptions[0])\n\n\ndef buyStuff(x):\n Label(root, text=x).pack()\n\n\n# the * breaks the list into multiple items\nomFood = OptionMenu(root, optionSelected, *foodOptions)\nomFood.pack()\n\nbtnBuy = Button(root, text=\"Buy Item\", command=lambda: buyStuff(optionSelected.get()))\nbtnBuy.pack()\n\n# root.mainloop()\n","repo_name":"WeebNetsu/thumbnail-editor","sub_path":"src/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5266298569","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport pandas as pd\nimport logging\nfrom main_db import MyDatabase\n\n\ndef task_3_put_data():\n try:\n engine = create_engine(\"postgresql://postgres:9431@localhost:5432/employees\")\n Session = sessionmaker(bind=engine)\n\n with Session() as session:\n xlsx_path = \"/Users/shivamraj/Documents/Learning/python-sql/data/task_2.xlsx\"\n df = pd.read_excel(xlsx_path)\n # Uploading from excel to database\n df.to_sql('task_2_duplicate', engine, if_exists=\"replace\", index=False)\n except Exception as e:\n logging.error(e)\n\n\ndef read_task_3_data():\n try:\n db = MyDatabase()\n # For reading the newly created table\n select_pipeline = \"select * from task_2_duplicate\"\n result = db.query(select_pipeline)\n logging.debug(f\"Query Executed- {select_pipeline}\")\n return result\n except:\n logging.error(\"Failed to fetch cursor from Database\")\n finally:\n db.close()\n\n\ndef task_3():\n\n # Duplicate the data from xlxs and upload on PostgreSQL\n task_3_put_data()\n # Read the Data\n result = read_task_3_data()\n print(result)\n\n","repo_name":"shivam-sigmoid/Python-SQL-Assignment","sub_path":"app/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33370921004","text":"from views.tournament import tournament\nfrom models import player, tournament as tournament_model\nfrom models.match import Match\nfrom utils import to_boolean, ask_data\nimport datetime\n\n\nclass TournamentController:\n\n _view: tournament.TournamentView\n _tournament: tournament_model.Tournament\n\n @property\n def players(self):\n \"\"\"Return list of players in current tournament\"\"\"\n return self._tournament.players\n\n def __init__(self):\n self._view = tournament.TournamentView()\n\n def 
start_new_tournament(self):\n \"\"\"Initialize a new tournament in controller\"\"\"\n self._tournament = tournament_model.Tournament()\n\n def add_player(self, ply: player.Player, is_new: bool = False):\n \"\"\"Add a player in current tournament\"\"\"\n self._tournament.add_player(ply)\n\n if is_new:\n ply.add_player_in_json()\n\n self._view.print_player_added(f\"{ply.lastname} {ply.firstname}\")\n\n def add_players_in_tournament(self):\n \"\"\"Start the process of adding all players in new tournament\"\"\"\n while len(self.players) != 8:\n is_new_player: bool = to_boolean(input(\"Is this a new player ? (Y/N) : \"))\n lastname: str\n firstname: str\n sex: str\n birthdate: str\n rank: int = 0\n if is_new_player:\n lastname = input(\"Please enter player lastname : \")\n firstname = input(\"Please enter player firstname : \")\n sex = ask_data(\"Please enter player sex (M/F) : \", [\"m\", \"f\", \"M\", \"F\"])\n birthdate = input(\"Please enter player birthdate (DD/MM/YYYY) : \")\n rank = int(input(\"Please enter the rank of the player : \"))\n\n self.add_player(\n player.Player([lastname, firstname, birthdate, sex, rank]),\n is_new_player,\n )\n else:\n search = input(\"Please enter player's name to find : \")\n possible_players = player.Player.get_player_from_name(search)\n if len(possible_players) < 1:\n self._view.print_error(\"No player founded\")\n continue\n self._view.print_players_list(possible_players)\n selected = int(input(\"Please enter the id of the player : \"))\n final = next(x for x in possible_players if x.doc_id == selected)\n self.add_player(\n player.Player(\n [\n final[\"lastname\"],\n final[\"firstname\"],\n final[\"birthdate\"],\n final[\"sex\"],\n final[\"rank\"],\n ],\n final.doc_id,\n ),\n is_new_player,\n )\n\n def create_new_tournament(self):\n \"\"\"Ask user to enter data for a new tournament\"\"\"\n self._tournament = tournament_model.Tournament()\n self._tournament.name = input(\"Please enter tournament name : \")\n self._tournament.place = input(\"Please enter the place of the tournament : \")\n self._tournament.date = input(\"Please enter the date of the tournament : \")\n self._tournament.round_amount = int(\n input(\"Please enter the amount of rounds for this tournament: \")\n )\n\n self.add_players_in_tournament()\n\n self._tournament.save_tournament()\n\n self.generate_matches(True)\n\n def __get_next_opponent_for_player(self, ply, players):\n \"\"\"Return the next opponent for a player\"\"\"\n opponent: player.Player = None\n\n for user in players:\n m = Match(ply, user)\n hasPlayed = False\n for rnd in self._tournament.matches:\n for match in rnd:\n if match == m:\n hasPlayed = True\n break\n if not hasPlayed:\n opponent = user\n break\n\n return opponent\n\n def generate_matches(self, first_round: bool = False):\n \"\"\"Generate the list of all matchs for a new round\"\"\"\n self._tournament.current_round += 1\n self._tournament.matches.append([])\n matches = []\n if first_round:\n _temp_players = self._tournament.players.copy()\n _temp_players.sort(key=lambda x: x.rank, reverse=True)\n upper = []\n lower = []\n\n for i in range(len(_temp_players)):\n if i < (len(_temp_players) - 1) / 2:\n upper.insert(len(upper), _temp_players[i])\n else:\n lower.insert(len(lower), _temp_players[i])\n\n for i in range(len(upper)):\n match = Match()\n match.upPlayer = upper[i]\n match.downPlayer = lower[i]\n matches.insert(len(matches), match)\n\n else:\n _temp_players = self._tournament.players.copy()\n _temp_players.sort(key=lambda x: (x.tournament_rank, x.rank), 
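# Worked example (not from the original file) of the first-round seeding in
# generate_matches below: players sorted by rank are split into halves and
# paired index-by-index. Plain integers stand in for Player objects.
players = sorted([1500, 1200, 1800, 1000, 1700, 1100, 1600, 1300], reverse=True)
half = len(players) // 2
upper, lower = players[:half], players[half:]
pairs = list(zip(upper, lower))
print(pairs)  # [(1800, 1300), (1700, 1200), (1600, 1100), (1500, 1000)]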
reverse=True)\n\n while len(_temp_players) > 0:\n current = _temp_players[0]\n _temp_players.remove(current)\n opponent = self.__get_next_opponent_for_player(current, _temp_players)\n\n if opponent == None:\n self._view.print_error('No opponent found for a player')\n return\n\n matches.insert(len(matches), Match(current, opponent))\n\n _temp_players.remove(opponent)\n\n self._tournament.matches[self._tournament.current_round] = matches\n self._tournament.save_tournament()\n self._view.print_matches_list(matches, self._tournament.current_round)\n\n self.enter_match_result()\n\n def __load_tournament(self, data, start: bool = False):\n \"\"\"Load a tournament from database\"\"\"\n self._tournament = tournament_model.Tournament()\n self._tournament.load_from_database(data)\n\n if start and not self._tournament.ended:\n\n if (\n len(self._tournament.matches) >= self._tournament.round_amount\n and self._tournament.is_all_matches_of_round_ended()\n ):\n self._tournament.ended = True\n self._tournament.save_tournament()\n self.tournament_overview()\n return\n\n if self._tournament.is_all_matches_of_round_ended():\n if self._tournament.current_round == -1:\n self.generate_matches(True)\n else:\n self.generate_matches()\n else:\n self._view.print_matches_list(\n self._tournament.matches[self._tournament.current_round],\n self._tournament.current_round,\n )\n self.enter_match_result()\n\n def tournament_overview(self):\n \"\"\"Show tournament overview\"\"\"\n self._view.print_tournament_overview(self._tournament)\n\n selected = int(\n input(\"Enter 0 to print tournament players, 1 to print matches: \")\n )\n\n if selected == 0:\n sort = int(\n input(\n \"Enter 0 to print players sorted by their name, or 1 to sort them from their score: \"\n )\n )\n self._view.print_tournament_overview(self._tournament, 0, sort)\n elif selected == 1:\n self._view.print_tournament_overview(self._tournament, 1)\n\n def list_tournaments(self):\n \"\"\"Send a list of ended tournaments\"\"\"\n tournaments = tournament_model.Tournament.get_tournaments()\n\n self._view.print_tournaments_list(tournaments)\n\n selected = int(input(\"Please enter the id of the tournament to load: \"))\n final = next(x for x in tournaments if x.doc_id == selected)\n self.__load_tournament(final, True)\n\n def list_all_tournaments(self):\n \"\"\"Send a list of all tournaments\"\"\"\n tournaments = tournament_model.Tournament.get_all_tournaments()\n self._view.print_tournaments_list(tournaments)\n\n selected = int(input(\"Please enter the id of the tournament to load: \"))\n final = next(x for x in tournaments if x.doc_id == selected)\n self.__load_tournament(final, False)\n\n self.tournament_overview()\n\n def enter_match_result(self):\n \"\"\"Take user inputs for match results\"\"\"\n user_input = input(\"Please enter the id of the match or 'quit' to go back in home menu: \")\n if user_input == \"quit\":\n return\n matchId = int(user_input)\n match = self._tournament.matches[self._tournament.current_round][matchId]\n if match.ended:\n return self.enter_match_result()\n winner = int(\n input(\n \"Please enter 0 or 1 to define the winner, enter 3 if the result is a draw : \"\n )\n )\n match.winnedBy = winner\n if winner == 0:\n match.upPlayer.tournament_rank += 1\n elif winner == 1:\n match.downPlayer.tournament_rank += 1\n else:\n match.upPlayer.tournament_rank += 0.5\n match.downPlayer.tournament_rank += 0.5\n match.ended = True\n match.endTime = datetime.datetime.now().timestamp()\n\n self._tournament.save_tournament()\n\n if not 
self._tournament.is_all_matches_of_round_ended():\n self._view.print_matches_list(\n self._tournament.matches[self._tournament.current_round],\n self._tournament.current_round,\n )\n self.enter_match_result()\n else:\n if self._tournament.round_amount - 1 <= self._tournament.current_round:\n self.tournament_overview()\n self._tournament.ended = True\n self._tournament.save_tournament()\n else:\n self.generate_matches()\n","repo_name":"Matspyder51/OCR_P4","sub_path":"controllers/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":10015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12272607923","text":"\"\"\"\nDesigner: 小田桐光佑, 東間日向\nDate: 2023/6/27\nPurpose:通知処理を行う関数\n\"\"\"\nfrom flask_apscheduler import APScheduler\nfrom datetime import datetime\nfrom linebot import LineBotApi\nfrom info import Plan, get_start_time\nfrom sqlalchemy import func, desc\nfrom secret import CHANNEL_ACCESS_TOKEN\nfrom datetime import timedelta, datetime\nfrom typing import cast\nfrom linebot.models import (\n ButtonsTemplate,\n TextSendMessage,\n MessageAction,\n TemplateSendMessage,\n)\n\nline_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n\nsched = APScheduler()\n\n\nclass NotifPlan:\n \"\"\"通知する予定を保持する\n Planをそのまま保持すると,send_notificationを呼び出したとき,\n SQLのセッション切れによってエラーがでる.それを回避するためのクラス\n \"\"\"\n\n def __init__(\n self, line_id: str, title: str, start_time: datetime, notif_time: datetime\n ):\n self.line_id = line_id\n self.title = title\n self.start_time = start_time\n self.notif_time = notif_time\n\n\ndef gen_id(line_id: str, title: str, date: datetime) -> str:\n \"\"\"ジョブに対して唯一のIDを生成する:M20\n\n Args:\n line_id (str): lineID\n title (str): タイトル\n date (datetime): 日付\n Returns:\n line_id + \"_\" + title + \"_\" + str(date)\n \"\"\"\n return line_id + \"_\" + title + \"_\" + str(date)\n\n\ndef from_plan(plan: Plan) -> NotifPlan:\n return NotifPlan(\n plan.line_id,\n plan.title,\n cast(datetime, plan.allday or plan.start_time),\n plan.notif_time,\n )\n\n\ndef add_notification(plan: NotifPlan):\n \"\"\"予定通知処理をジョブリストに追加:M21\n start_timeかalldayのどちらか必ず値が入っている\n\n Args:\n plan (NofifPlan): 予定\n \"\"\"\n sched.add_job(\n gen_id(plan.line_id, plan.title, plan.start_time),\n send_notification,\n trigger=\"date\",\n run_date=plan.notif_time,\n args=[NotifPlan(plan.line_id, plan.title, plan.start_time, plan.notif_time)],\n )\n\n\ndef cancel_notification(plan: Plan):\n \"\"\"ジョブリストから通知処理通知を削除:M22\n\n Args:\n plan: (Plan): 予定\n \"\"\"\n start_time = cast(datetime, plan.start_time or plan.allday)\n sched.remove_job(\n gen_id(plan.line_id, plan.title, start_time),\n )\n\n\ndef snooze(line_id: str, after: int) -> str:\n \"\"\"利用者がスヌーズを押した場合,latest_planから最新の予定を取得し,5/10/30/分後に通知する\n その予定の開始時刻から30分以上経過している場合は,予定が古すぎることを知らせる:M23\n\n Args:\n line_id (str): lineID\n after (int): after分後に通知\n \"\"\"\n now = datetime.utcnow() + timedelta(hours=9)\n plans: list[Plan] = Plan.query.filter(Plan.line_id == line_id).all()\n plans = list(\n filter(\n lambda plan: get_start_time(plan).date() == now.date(),\n plans,\n )\n )\n if len(plans) == 0:\n return \"該当する予定が見つかりません。\"\n else:\n plan: Plan = min(plans, key=lambda plan: abs(plan.notif_time - now))\n\n ids = list(map(lambda job: job.id, sched.get_jobs()))\n start_time = cast(datetime, plan.start_time or plan.allday)\n if gen_id(plan.line_id, plan.title, start_time) in ids:\n return f\"既に設定されています\"\n else:\n n_plan: NotifPlan = from_plan(plan)\n n_plan.notif_time = now + timedelta(minutes=after)\n 
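# Minimal standalone sketch of the one-shot "date" trigger pattern that
# add_notification uses above. It swaps Flask-APScheduler for the plain
# apscheduler BackgroundScheduler purely to stay self-contained (note the
# argument order differs: plain apscheduler takes the callable first and the
# job id as a keyword). All names below are illustrative.
import time
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler

def notify(title):
    print(f"time for {title}")

sched = BackgroundScheduler()
sched.start()
sched.add_job(notify, trigger="date",
              run_date=datetime.now() + timedelta(seconds=5),
              args=["meeting"], id="user1_meeting")
time.sleep(6)      # keep the process alive long enough for the job to fire
sched.shutdown()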
add_notification(n_plan)\n return f\"{after}分後にスヌーズします\"\n\n\ndef send_notification(plan: NotifPlan):\n \"\"\"利用者に予定の通知を行う:M24\n\n Args:\n plan (NotifPlan): プラン\n \"\"\"\n line_id = plan.line_id\n push_buttons_message(\n line_id,\n plan.title + \"の時間です\",\n \"何分後にスヌーズを設定するのか押してください\",\n [\"5分 スヌーズ\", \"10分 スヌーズ\", \"15分 スヌーズ\"],\n )\n\n\ndef push_text_message(line_id: str, message: str):\n \"\"\"利用者のLineにメッセージを送信する\n\n Args:\n line_id (str): Line ID\n message (str): メッセージ\n \"\"\"\n line_bot_api.push_message(line_id, TextSendMessage(message))\n\n\ndef push_buttons_message(line_id: str, title: str, message: str, buttons: list[str]):\n \"\"\"利用者にメッセージとボタンを送信する\n\n Args:\n line_id (str): Line ID\n title (str): タイトル\n message (str): メッセージ\n buttons (list[str]): ボタンに表示するメッセージ\n \"\"\"\n buttons_template_message = TemplateSendMessage(\n alt_text=title,\n template=ButtonsTemplate(\n title=\" \" if len(title) == 0 else title,\n text=\" \" if len(message) == 0 else message,\n actions=map(\n lambda button: MessageAction(label=button, text=button), buttons\n ),\n ),\n )\n line_bot_api.push_message(line_id, buttons_template_message)\n","repo_name":"whtsht/manager","sub_path":"app/src/plan/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36467971634","text":"#!/usr/bin/env python\r\nimport os\r\nimport os.path\r\nfrom os import path\r\nimport time\r\nimport argparse\r\nimport socket\r\nfrom zipfile import ZipFile\r\nimport subprocess\r\nimport sys\r\n\r\n################ - code to get hostname ##################\r\nhostname = socket.gethostname()\r\nprint(\"\\nTest will execute on : \"+hostname)\r\n\r\n\r\n################ - code to check if silkperformer is already running on server ##################\r\ndef process_exists(process_name):\r\n call = 'TASKLIST', '/FI', 'imagename eq %s' % process_name\r\n # use buildin check_output right away\r\n output = subprocess.check_output(call).decode()\r\n # check in last line for process name\r\n last_line = output.strip().split('\\r\\n')[-1]\r\n # because Fail message could be translated\r\n return last_line.lower().startswith(process_name.lower())\r\n\r\nif process_exists('performer.exe'):\r\n sys.exit(\"Silkperformer process is already running on \"+hostname+\"\\n\\n\\t\\t Exiting...\\n\")\r\nelse:\r\n print(\"\\nNo Silkperformer process is running on \"+hostname)\r\n\r\n\r\n\r\n################ - code to take project name and workload name as arugments to file ##############\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-p', '--projectname')\r\n parser.add_argument('-w', '--workloadname')\r\n parser.add_argument('-r', '--repo')\r\n args = parser.parse_args()\r\n print('\\nSelected project is '+args.projectname)\r\n print('\\nSelected workload is '+args.workloadname)\r\n print('\\nselected result repo is '+ args.repo)\r\n \r\n################ - code to define all directory and file paths ##############\r\n silk_path = 'E:\\Silk\\\\\"Silk Performer 20.5\\\"'\r\n project_path = '\"E:\\Silk\\Projects\\CSI2_Legacy2019\\\\'+args.projectname+'.ltp\"'\r\n result_dir2= \"E:\\Silk\\Projects\\CSI2_Legacy2019\\\\\"+args.repo\r\n workload = ' /WL:\"'+args.workloadname+'\"'\r\n\r\nsilk_exe = silk_path+\"\\\\Performer.exe\"\r\nprj_arg = \" \"+project_path+\" \"\r\nauto_rate = r' /Automation 30'\r\n\r\nresult_dir_arg=\" /Resultsdir:\\\"\"+result_dir2+\"\\\" \"\r\n\r\ntsd_result_file= 
\"m@\"+hostname+\"@\"+args.projectname+\".tsd\"\r\ncsv_result_file = \"m@\"+hostname+\"@\"+args.projectname+\".csv\"\r\nzip_result_file = \"m@\"+hostname+\"@\"+args.projectname+\".zip\"\r\nTsd2Csv_path = silk_path+'\\Tsd2Csv.exe'\r\njenkins_workspace_path = 'E:\\\\SCM\\\\workspace\\\\PRM\\\\Titanium\\\\Testing\\\\SilkPerf03VM_Test\\\\' \r\nzip_exe = 'C:\\\\\"Program Files\\\"\\\\7-Zip\\\\7z.exe'\r\n\r\narg_execute_test = silk_exe+prj_arg+auto_rate+workload+result_dir_arg\r\narg_tsd_file_path = result_dir2+'\\\\'+tsd_result_file\r\narg_csv_file_path = result_dir2+'\\\\'+csv_result_file\r\narg_convert_to_csv = Tsd2Csv_path+\" \"+arg_tsd_file_path+\" \"+arg_csv_file_path\r\nprint(\"\\n\")\r\nprint(\"\\nBelow is silkperformer test execution command\")\r\nprint(arg_execute_test)\r\nprint(\"\\n\")\r\ntime.sleep(10)\r\nprint(\"Starting Execution . . .\")\r\n#################### - below OS command executes silk performer test - #######################\r\nos.system(arg_execute_test)\r\nfile_status1 = str(path.exists(result_dir2+'\\\\projectSettings.xml'))\r\nif path.exists(result_dir2+'\\\\projectSettings.xml'):\r\n print('\\nTest started successfully')\r\nelse:\r\n sys.exit('\\nTest could not be started successfully for some reason')\r\n\r\nprint(\"\\nExecution Completed!\")\r\nprint(\"\")\r\n#################### - Below code is to get csv file , zip and send it to jenkins folder - ##########\r\n\r\nprint(\"This is result file path : \"+arg_tsd_file_path)\r\nprint(\"\\nConverting .tsd file to .csv\")\r\n\r\nprint(arg_convert_to_csv)\r\nprint(\"\")\r\nfile_status = str(path.exists(arg_tsd_file_path))\r\n\r\nwhile file_status == \"False\":\r\n print(\"Wait for result file to be generated...\")\r\n\r\n time.sleep(30)\r\n\r\n file_status = str(path.exists(arg_tsd_file_path))\r\nelse:\r\n if os.path.isfile(arg_csv_file_path):\r\n print(\"result csv already exists\")\r\n else:\r\n \r\n os.system(arg_convert_to_csv)\r\n print(\".csv result file created successfully\")\r\n print(\"zipping...\")\r\n os.system(zip_exe+\" a \"+result_dir2+\"\\\\\"+zip_result_file+\" \"+arg_csv_file_path)\r\n os.system(\"copy \"+result_dir2+\"\\\\\"+zip_result_file+\" \"+jenkins_workspace_path)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"singhvij/singhvij","sub_path":"silk_main.py","file_name":"silk_main.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42818386446","text":"# Add utility code here that will be shared across the application\n# We can add our functions here for fetching and transforming data\n# through that we can share those functions for both user types\nfrom PyQt5.QtWidgets import QMessageBox\nfrom datetime import datetime\n\n\ndef show_warning(self, title='Warning', message='something went wrong!'):\n print('working warning: ', message)\n QMessageBox().warning(self, title, message)\n\n\ndef str_to_date(str_date):\n return datetime.strptime(str_date, '%a %b %d %Y').date()\n\n\ndef get_age(birthdatestr):\n birthdate = str_to_date(birthdatestr)\n today = datetime.today()\n age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))\n print(\"age: \", age)\n return str(age)\n\n\ndef is_form_empty(self, form_values, message='Invalid, please fill out the form first!'):\n if type(form_values) is dict:\n if '' in form_values.values():\n show_warning(self, message=message)\n return True\n else:\n return False\n\n\ndef filter_report_submit(self):\n filter_report = {\n 
\"start_date\": self.dateEdit.text(),\n \"end_date\": self.dateEdit_2.text()\n }\n if is_form_empty(self, filter_report):\n return\n print(\"filter: \", filter_report)\n return filter_report\n\n\ndef compare_date(self, ex_range):\n if ex_range['start_date'] > ex_range['end_date']:\n show_warning(self, message=\"start date should be less than end date\")\n return None\n else:\n return ex_range\n\n\ndef get_exercise_history(self, histories, exercise_name):\n for history in histories:\n if exercise_name == history['ex_name']:\n if history['history'] is None:\n show_warning(self, message=f\"No history found for exercise {exercise_name}.\\n\"\n f\"Select another to continue\")\n return None\n return history['history']\n\n\ndef get_exercise_history_index(self, histories, exercise_name):\n index = 0\n for history in histories:\n if exercise_name == history['ex_name']:\n if history['history'] is None:\n show_warning(self, message=f\"No history found for exercise {exercise_name}.\\n\"\n f\"Select another to continue\")\n return None\n return index\n index = index + 1\n\n\ndef get_history_range(history, ex_range):\n history_info = {'score': [], 'days': []}\n if not history:\n return None\n for history_data in history:\n his_date = str_to_date(history_data['date'])\n if ex_range['start_date'] <= his_date <= ex_range['end_date']:\n history_info['score'].append(history_data['score'])\n print(\"his_date: \", his_date, type(his_date))\n ex_date_no = int(his_date.strftime('%Y%m%d'))\n print(\"=== ex date: \", ex_date_no)\n history_info['days'].append(ex_date_no) # 2022 01 02 - 2023 01 02\n return history_info\n","repo_name":"subhan97ahmed/PoseEstimation","sub_path":"src/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28922508358","text":"## Filtering anndata based on n_genes, percent mito and min cells/\n## originally written by Tom Thomas (https://github.com/tomthomas3000/TAURUS)\n## adapted and augmented for this pipeline by Charlotte Rich-Griffin 2020-09-30\n\nimport scanpy as sc\nimport argparse\nimport logging\nimport sys\nimport pandas as pd\nlogging.basicConfig(format=\"[ %(asctime)s: %(levelname)s: %(message)s ]\", level=logging.INFO, stream=sys.stdout)\nL = logging.getLogger(__name__)\n\n\nsc.settings.verbosity = 2 # verbosity: errors (0), warnings (1), info (2), hints (3)\n# comment this because of numba issues\n# sc.logging.print_versions()\n\n# parse arguments\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--input_anndata',\n default='data/anndata-n5000-unfilt.h5ad',\n help='')\nparser.add_argument('--output_anndata',\n default='data/anndata-n5000-filt.h5ad',\n help='')\nparser.add_argument('--min_genes', default=0,\n help='')\nparser.add_argument('--max_genes', default=\"inf\",\n help='')\n\nparser.add_argument('--min_cells', default=0,\n help='')\n\nparser.add_argument('--max_counts', default=\"inf\",\n help='')\n\nparser.add_argument('--percent_mito', default=100,\n help='exclude any cells with >n% mitochondrial content (pct_counts_mt column) default=100 (no filtering)')\nparser.add_argument('--percent_ribo', default=100,\n help='exclude any cells with >n% ribosomal content (percent.ribo column), default=100 (no filtering)')\nparser.add_argument('--percent_hb', default=100,\n help='exclude any cells with >n% haemoglobin content (percent.ribo column), default=100 (no filtering)')\n\nparser.add_argument('--drop_nas_col', default=None,\n 
help='')\nL.info(\"Running filter_anndata\")\n\nparser.set_defaults(verbose=True)\nargs = parser.parse_args()\n\nadata = sc.read(args.input_anndata)\n\n###filter cells, genes, and mitochondria - NOTE: might merit further screening if need to finetune\nn_obs = adata.n_obs\nL.info(\"Pre-filter number of cells %d\" % adata.n_obs)\n\nsc.pp.filter_cells(adata, min_genes=int(args.min_genes))\nsc.pp.filter_genes(adata, min_cells=int(args.min_cells))\n\n# exclude cells with higher total counts or genes than specified (default is inf)\n\nadata = adata[adata.obs['total_counts'] < float(args.max_counts), :]\nadata = adata[adata.obs['n_genes'] < float(args.max_genes), :]\n\nif 'pct_counts_mt' in adata.obs.columns:\n if float(args.percent_mito) < 1:\n msg = \"\"\"percent mito argument is below 1%, \n this is inadvisable as you will lose the majority of your data!, \n suggested values are in the range of 5-50%\"\"\"\n L.error(msg)\n raise ValueError(msg)\n adata = adata[adata.obs['pct_counts_mt'] < float(args.percent_mito), :]\n\nif 'pct_counts_ribo' in adata.obs.columns:\n if float(args.percent_ribo) < 1:\n msg=\"\"\"percent ribo argument is below 1%, \n this is inadvisable as you will lose the majority of your data!, \n suggested values are in the range of 5-50%\"\"\"\n L.error(msg)\n raise ValueError(msg)\n adata = adata[adata.obs['pct_counts_ribo'] < float(args.percent_ribo), :]\n\nif 'pct_counts_hb' in adata.obs.columns:\n if float(args.percent_hb) < 1:\n msg=\"\"\"percent hb argument is below 1%, \n this is inadvisable as you will lose the majority of your data!, \n suggested values are in the range of 5-50%\"\"\"\n L.error(msg)\n raise ValueError(msg)\n adata = adata[adata.obs['pct_counts_hb'] < float(args.percent_hb), :]\n\n# drop demultiplexing data without annotation\ncol_arg=args.drop_nas_col\n# print(adata)\nif col_arg is not None:\n # check if it refers to multiple cols\n col_choices=col_arg.split(',') # returns list\n # sequentially remove NAs, (dropna not an option because slicing the whole anndata)\n n_obs = adata.n_obs\n for col in col_choices:\n L.debug(\"Filtering nans from %s column\" % col)\n if col in adata.obs.columns:\n L.debug(col)\n adata = adata[adata.obs[col].notna(),:]\n else:\n msg=\"\"\"demultiplexing column not found in data, check inputs\"\"\"\n L.error(msg)\n raise ValueError(msg)\nelse:\n pass\n\nn_obs = n_obs - adata.n_obs\nL.info(\"No. cells removed for being NA %d\" % n_obs)\n# print(adata)\n\nL.info(\"Remaining cells %d\" % adata.n_obs)\n\nadata.write(args.output_anndata)\n\n#This stage is the point (i.e. pre-normalisation) where the adata file can be outputted so that we can extract raw matrix for the cellphonedb.\nL.info(\"Completed\")\n","repo_name":"DendrouLab/sc_pipelines_PSC","sub_path":"python/run_filter.py","file_name":"run_filter.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21879534216","text":"import cv2\nimport numpy as np\n\ncv2.namedWindow(\"Citra Keabuan\", cv2.WINDOW_AUTOSIZE)\ncv2.namedWindow(\"Citra Biner\", cv2.WINDOW_AUTOSIZE)\ncitra = cv2.imread('messi.jpg')\ncitrakeabuan = cv2.cvtColor(citra, cv2.COLOR_BGR2GRAY)\nambang = 127\nkeabuanmaks = 255\nretval, citrabiner = cv2.threshold(citrakeabuan, ambang, keabuanmaks, cv2.THRESH_BINARY)\ncv2.imshow(\"Citra Keabuan\", citrakeabuan)\ncv2.imshow(\"Citra Biner\", citrabiner)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"MuhammadSYahyaS/ISV2018","sub_path":"5. 
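# Condensed sketch of the QC slicing pattern from the run_filter script above,
# applied to a tiny synthetic AnnData (requires scanpy/anndata; the thresholds
# are examples, not the pipeline's defaults):
import numpy as np
import scanpy as sc
import anndata as ad

adata = ad.AnnData(np.random.poisson(1.0, size=(50, 20)).astype(float))
adata.obs['pct_counts_mt'] = np.random.uniform(0, 30, size=adata.n_obs)
sc.pp.filter_cells(adata, min_genes=3)              # drop near-empty cells
sc.pp.filter_genes(adata, min_cells=2)              # drop near-absent genes
adata = adata[adata.obs['pct_counts_mt'] < 10, :]   # mitochondrial-content cut
print(adata)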
konversi ke biner dan keabuan.py","file_name":"5. konversi ke biner dan keabuan.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33390353398","text":"import socket\nimport struct\n\n\n###########################################################################################################\n# JSON FORMAT\n###########################################################################################################\nclass PacketProtocolJoin:\n PIM_TYPE = \"JOIN\"\n\n def __init__(self, source, group, sequence_number, upstream=0):\n self.source = source\n self.group = group\n self.sequence_number = sequence_number\n self.upstream = upstream\n\n def bytes(self) -> bytes:\n \"\"\"\n Obtain Protocol Join Packet in a format to be transmitted (JSON)\n \"\"\"\n msg = {\"SOURCE\": self.source,\n \"UPSTREAM\": self.upstream,\n \"GROUP\": self.group,\n \"SN\": self.sequence_number\n }\n\n return msg\n\n def __len__(self):\n return len(self.bytes())\n\n @classmethod\n def parse_bytes(cls, data: bytes):\n \"\"\"\n Parse received Protocol Join Packet from JSON format and convert it into ProtocolJoin object\n \"\"\"\n source = data[\"SOURCE\"]\n upstream = data[\"UPSTREAM\"]\n group = data[\"GROUP\"]\n sn = data[\"SN\"]\n # positional order must match __init__(source, group, sequence_number, upstream)\n return cls(source, group, sn, upstream)\n\n\nclass PacketProtocolPrune(PacketProtocolJoin):\n PIM_TYPE = \"PRUNE\"\n\n def __init__(self, source, group, sn):\n super().__init__(source, group, sn)\n\n###########################################################################################################\n# BINARY FORMAT\n###########################################################################################################\n'''\n 0 1 2 3\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n| Tree Source IP |U|\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n| Tree Group IP |\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n| Sequence Number |\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n'''\n\n\nclass PacketNewProtocolJoin:\n PIM_TYPE = 4\n\n PIM_HDR_INTEREST = \"! 4s ? 
4s L\"\n PIM_HDR_INTEREST_LEN = struct.calcsize(PIM_HDR_INTEREST)\n\n def __init__(self, source_ip, group_ip, sequence_number, upstream=0):\n if type(source_ip) not in (str, bytes) or type(group_ip) not in (str, bytes):\n raise Exception\n if type(source_ip) is bytes:\n source_ip = socket.inet_ntoa(source_ip)\n if type(group_ip) is bytes:\n group_ip = socket.inet_ntoa(group_ip)\n\n self.source = source_ip\n self.upstream = upstream\n self.group = group_ip\n self.sequence_number = sequence_number\n\n def bytes(self) -> bytes:\n \"\"\"\n Obtain Protocol Join Packet in a format to be transmitted (binary)\n \"\"\"\n msg = struct.pack(PacketNewProtocolJoin.PIM_HDR_INTEREST, socket.inet_aton(self.source), self.upstream,\n socket.inet_aton(self.group), self.sequence_number)\n\n return msg\n\n def __len__(self):\n return len(self.bytes())\n\n @classmethod\n def parse_bytes(cls, data: bytes):\n \"\"\"\n Parse received Protocol Join Packet from binary format and convert it into ProtocolJoin object\n \"\"\"\n (tree_source, upstream, tree_group, sn) = struct.unpack(\n PacketNewProtocolJoin.PIM_HDR_INTEREST,\n data[:PacketNewProtocolJoin.PIM_HDR_INTEREST_LEN])\n return cls(tree_source, tree_group, sn, upstream)\n\n\nclass PacketNewProtocolPrune(PacketNewProtocolJoin):\n PIM_TYPE = 5\n\n def __init__(self, source_ip, group_ip, sequence_number, upstream=0):\n super().__init__(source_ip, group_ip, sequence_number, upstream)","repo_name":"CatarinaGrilo/HPIM-SSM","sub_path":"hpim_ssm/Packet/PacketProtocolInterest.py","file_name":"PacketProtocolInterest.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5008029066","text":"\n# Leitura do pequeno-gabarito\nwith open('Inputs/pequeno-gabarito.vcf', 'r') as reader:\n\tgaba = reader.readlines()\nreader.close()\n\n# Leitura das variantes\nwith open('temp/variants_filt.vcf', 'r') as reader:\n\tmine = reader.readlines()\nreader.close()\n\n# Definição da flag que vai indicar se foi encontrado algum problema\nflag = 0\n\n# Para cada linha no arquivo pequeno-gabarito, será feita a conferência\nfor i in gaba[5:]:\n\n\t# Separação das colunas do arquivo vcf\n\tk = i.split(\"\\t\")\n\t\n\t# Seleção dos atributos importantes para conferência\n\tpos = k[1] # posição\n\tref = k[3] # alelo referência\n\talt = k[4] # alelo alternativo\n\tpl = k[9][:3] # ploidia\n\n\t# Busca no vcf obtido por mim\n\tmini_flag = 0\n\tfor j in mine:\n\t\tif pos in j:\n\t\t\t# Separação das colunas do arquivo vcf\n\t\t\tk_meu = j.split(\"\\t\")\n\t\t\t\n\t\t\t# Seleção dos atributos importantes para conferência\n\t\t\tref2 = k_meu[3] # alelo referência\n\t\t\talt2 = k_meu[4] # alelo alternativo\n\t\t\tpl2 = k_meu[9][:3] # ploidia\t\t\t\n\t\t\t\n\t\t\t# Checagem dos valores esperados\n\t\t\tif (ref==ref2) & (alt==alt2) & (pl==pl2):\n\t\t\t\tmini_flag = 1\n\t\t\telse:\n\t\t\t\tflag = 1\n\t\t\t\tmini_flag = 1\n\t\t\t\tprint(\"Problema na posição \" + pos)\n\t\t\t\tprint(\"Ref: \" + ref + \" \" + ref2)\n\t\t\t\tprint(\"Alt: \" + alt + \" \" + alt2)\n\t\t\t\tprint(\"Pl: \" + pl + \" \" + pl2)\n\n\t\t\tbreak\n\tif mini_flag == 0:\n\t\tflag = 1\n\t\tprint(\"Variante não encontrada na posição \" + pos)\n\nif flag==0:\n\tprint(\"O vcf obtido está condizente com o 
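# Standalone round-trip of the binary layout used by PacketNewProtocolJoin
# above. With "!" (network byte order) struct uses standard sizes and no
# padding, so the header is 4 + 1 + 4 + 4 = 13 bytes. Addresses are examples.
import socket
import struct

FMT = "! 4s ? 4s L"
print(struct.calcsize(FMT))  # 13
raw = struct.pack(FMT, socket.inet_aton("10.0.0.1"), True,
                  socket.inet_aton("232.1.1.1"), 42)
src, upstream, grp, sn = struct.unpack(FMT, raw)
print(socket.inet_ntoa(src), upstream, socket.inet_ntoa(grp), sn)
# -> 10.0.0.1 True 232.1.1.1 42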
pequeno-gabarito\")\n","repo_name":"anaorsi/L8hf4Bi2a9B","sub_path":"Scripts/checar_gabarito.py","file_name":"checar_gabarito.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5937617573","text":"\r\nfrom tensorflow import keras\r\nfrom Feature_Extractor import extract_features\r\n\r\n\r\n# ------------------------------------------------------------------------\r\n\r\n# This function takes the url and returns probability value\r\n\r\ndef get_prediction(url, model_path):\r\n print(\"Loading the model...\")\r\n model = keras.models.load_model(model_path)\r\n\r\n print(\"Extracting features from url...\")\r\n url_features = extract_features(url)\r\n print(url_features)\r\n\r\n print(\"Making prediction...\")\r\n prediction = model.predict([url_features])\r\n\r\n i = prediction[0][0] * 100\r\n i = round(i,3)\r\n print(\"There is \",i,\"% chance,the url is malicious !\")\r\n\r\n return i\r\n","repo_name":"deepeshdm/Phishing-Attack-Domain-Detection","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"81"} +{"seq_id":"24869595648","text":"'''Dada\ta\tlista\t=\t[12,\t-2,\t4,\t8,\t29,\t45,\t78,\t36,\t-17,\t2,\t12,\t8,\t3,\t3,\t-52]\tfaça\tum\tprograma\tque:\na)\timprima\to\tmaior\telemento\nb)\timprima\to\tmenor\telemento\nc)\timprima\tos\tnúmeros\tpares\nd)\timprima\to\tnúmero\tde\tocorrências\tdo\tprimeiro\telemento\tda\tlista\ne)\timprima\ta\tmédia\tdos\telementos\nf)\timprima\ta\tsoma\tdos\telementos\tde\tvalor\tnegativo'''\n\nprint(\"A LISTA É: [12,\t-2,\t4,\t8,\t29,\t45,\t78,\t36,\t-17,\t2,\t12,\t8,\t3,\t3,\t-52]\")\nlista = [12,\t-2,\t4,\t8,\t29,\t45,\t78,\t36,\t-17,\t2,\t12,\t8,\t3,\t3,\t-52]\nmaior = max( lista)\nprint(\"o\tmaior\telemento É: {}\".format(maior))\nprint(\"o\tmenor\telemento É: {}\".format(min(lista)))\npares=[]\nfor num in lista:\n if num % 2 == 0:\n pares.append(num)\nprint(\"Elemento pares: {}\".format(pares))\n\nquant = len(lista)\nsoma = 0\nfor ele in lista:\n soma += ele\nmedia = soma/quant\nprint(\"A média\tdos\telementos: {:.2f}\".format(media))\n\nsoma = 0\nfor elem in lista:\n if elem < 0:\n soma += elem\nprint(\"A soma\tdos\telementos\tde\tvalor\tnegativo: {}\".format(soma))\n","repo_name":"vicssb/Python-Projects","sub_path":"LISTAS/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5909633501","text":"# author: Yanan Qin\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport json\nimport os\nimport shutil\nimport random\nfrom os import path\nimport nibabel as nib\nimport glob\nimport sys\nimport cv2\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.models import load_model\nfrom tensorflow import Graph\nfrom tensorflow import Session\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.7\nset_session(tf.Session(config=config))\nK.set_image_data_format('channels_last') # TF dimension ordering in this code\nsize=1280\nos.system('rm -rf result')\nos.system('mkdir result')\n\ndef f2(element, thresh):\n '''\n input: pixel element; thresold \n output: 1 if element value >=threshold; 0 otherwise\n '''\n return 1 if element >= thresh else 0\nf2 = np.vectorize(f2)\n\n\navg_sample = 
pd.read_csv('/media/ssd/yananq/mace/avg_neg_sample.csv')\navg_sample.index = avg_sample['Unnamed: 0']\navg_sample = avg_sample.drop(columns=['Unnamed: 0'])['0']\n\n\n\njson = pd.read_json(\"/media/ssd2/yananq/mace/code/baseline/probe_correspondant.json\")\nmap_dict = json[0:54675]\nprobe2gene = map_dict.to_dict()['probe2gene']\n#building a matrix with probe set ids's at their corresponding location\nprobe_mat = np.empty((1164, 1164), dtype=np.dtype('U100'))\nfor index, row in map_dict.iterrows():\n probe_set = row.name\n x_loc = row['dict'][0]\n y_loc = row['dict'][1]\n for i in range(len(x_loc)):\n probe_mat[x_loc[i],y_loc[i]] = probe_set\n\n\n\n# map the cel file name to the location of corresponding rma file\ncel_rma_path = dict()\ndirectory = r'/media/ssd/yananq/mace/bg'\nfor filename in os.listdir(directory):\n filename = filename.strip()\n for cel in os.listdir(directory +'/'+ filename):\n pathh = '/media/ssd/yananq/mace/cel_expression/' +filename +'.txt'\n cel_rma_path[cel] = pathh\n \n \n \nNEW=open('var_contam_vs_noncontam.txt','w') \ncnt = 0 \nfor filename in os.listdir('/media/ssd2/yananq/mace/code/pos_pred/'):# for each sample predicted as positive\n pred_new = np.load('/media/ssd2/yananq/mace/code/pos_pred/'+filename)\n pred_new = f2(pred_new, 0.5) \n if True: \n cel = '.'.join(filename.strip().split('.')[:-2])\n print(cel) \n try:\n # get the probe expression of the sample and do correction\n probe_expression = pd.read_table(cel_rma_path[cel])\n probe_expression.index = probe_expression['Unnamed: 0']\n try:\n probe_expression = probe_expression.drop(columns=['Unnamed: 0'])[cel[:-4]]/avg_sample\n except:\n probe_expression = probe_expression.drop(columns=['Unnamed: 0'])[cel]/avg_sample\n\n # get the name set probes with contamination\n probe_contam = set(probe_mat[pred_new==1])\n probe_contam = [i for i in list(probe_contam) if i != ''] #remove ''\n\n #get the name set of probes with no contam\n probe_noncontam = probe2gene.keys()-probe_contam\n\n #get the name set of contmainated gene and non-contaminated genes\n gene_contam = set(probe2gene[i] for i in probe_contam if str(probe2gene[i])!= 'nan')\n gene_noncontam = set(probe2gene[i] for i in probe_noncontam if str(probe2gene[i])!= 'nan')\n gene_noncontam = gene_noncontam - gene_noncontam.intersection(gene_contam)\n\n probe_expression = pd.DataFrame(probe_expression)\n\n probe_expression['probe'] = probe_expression.index\n probe_expression['gene'] = probe_expression['probe'].map(probe2gene)\n probe_expression = probe_expression.dropna().set_index(['gene', 'probe'])\n\n # calculate variance intra-gene expression\n gene_var = probe_expression.groupby(['gene'], as_index=True).agg(np.var).dropna()\n\n gene_contam_var = gene_var[gene_var.index.isin(gene_contam)].mean()[0]\n gene_noncontam_var = gene_var[gene_var.index.isin(gene_noncontam)].mean()[0]\n\n\n print((gene_contam_var, gene_noncontam_var))\n NEW.write(('%s\\t%s\\t%.9f\\n') % (cel, gene_contam_var, gene_noncontam_var))\n cnt +=1\n except:\n print('pass')\n pass\n \nprint (cnt) #used as the denominater when calculating % of the images, {s.d.contaminated} showed higher standard error overall than the {s.d.uncontaminated} group\n# result = 1163","repo_name":"GuanLab/Microarray","sub_path":"compare_var/Var_compare.py","file_name":"Var_compare.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"33486252066","text":"import time\n\ndef main():\n print(\"\\n\\n\\nVamos fazer uma 
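# The per-gene variance step used above (groupby over the 'gene' index level,
# aggregated with np.var), shown on toy data; gene/probe names are made up.
import numpy as np
import pandas as pd

df = pd.DataFrame({
    'gene':  ['g1', 'g1', 'g1', 'g2', 'g2'],
    'probe': ['p1', 'p2', 'p3', 'p4', 'p5'],
    'expr':  [1.0, 1.2, 0.9, 5.0, 9.0],
}).set_index(['gene', 'probe'])

gene_var = df.groupby('gene').agg(np.var)  # sample variance across probes per gene
print(gene_var)  # g1's probes agree (low variance); g2's are spread out (high)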
progressão aritmética!\\n\\n\")\n\n primeiroNum = int(input(\"Insira o número da progressão: \"))\n razao = int(input(\"Insira a razão: \"))\n elementos = int(input(\"Quantos elementos? \"))\n\n ultimoNum = primeiroNum + (elementos - 1) * razao\n ultimoNum += 1\n\n print(\"\\n\\nFoi informado o número {}, a razão {} e {} elementos.\".format(primeiroNum, razao, elementos))\n time.sleep(2)\n print(\"\\n\\nAguardando cálculo de sua progressão aritmética...\\n\\n\")\n time.sleep(2)\n \n for i in range(primeiroNum, ultimoNum, razao):\n print(i)\n \ndef loop():\n print(\"\\n\\nVocê deseja realizar uma nova consulta?\"\n \"\\n\\n1 - SIM\"\n \"\\n2 - NÃO\\n\")\n choice = int(input(\" \"))\n \n if choice == 1:\n main()\n loop()\n elif choice == 2:\n print(\"\\nCerto! Estamos encerrando sua solicitação!\\n\\n\")\n else:\n print(\"\\nInsira uma opção válida.\")\n loop()\n\nmain()\nloop()\n\n","repo_name":"Joaopdebem/Std_PythonBasicWorldTwo-Python","sub_path":"Mundo 02/046 a 056 - Repeticoes(for)/051 - Progressão Aritmética.py","file_name":"051 - Progressão Aritmética.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30430568086","text":"import numpy as np\nimport mdtraj as md\nimport itertools\nimport scipy\nimport os\nimport sys\n\nserial_in = \"2b\" #which version of the pocket set to load\nserial_out = \"3b-nr\" #which version of the pocket set to save\ninput_directory = \"/project/bowmanlab/borowsky.jonathan/FAST-cs/protein-sets/new_pockets_2\" #also used for some outputs\n\ndistance_threshold = 0.5\n\nall_pockets = np.load(f\"{input_directory}/filter_output/all_lowrmsd_ligand_pairs_v{serial_in}.npy\")\n\n#notes\n#6rsk has messed up numbering that skips 0 and starts with a negative numbered residues\n\n#select residue backbones and sidechains which coordinate the ligand using heavy atoms\ndef select_resi_parts(ligand_resn, holo_xtal, apo_xtal, distance_threshold, ind_debug):\n\n #--------------------ligand selection string formatting---------------------\n\n individual_resns_all = [\"'\"+i[0].split(\" \")[0]+\"'\" for i in ligand_resn]\n\n #note that one ligand goes by 986 in rcsb, 098 in moad, and 98 in mdtraj\n #L9K and L9W are actually two different tautomers of the same compound, but rcsb only includes L9W, presumably because the model has no hydrogens so the tautomer is not resolved\n #see http://polymorph.sgc.utoronto.ca/drugged_human_proteome/pages/col_12_IPR001680.html for more information\n #MOAD's TNR is broken into two residues in the modern pdb; the TNR residue is listed as obsolete in the european PDB\n\n # [MOAD residue name]:[mdtraj residue name]\n mdtraj_moad_ligand_mismatches = {\"ADE\":\"A\",\"URA\":\"U\",\"BAM\":\"BEN\",\"DCY\":\"DC\",\"098\":\"98\",\"L9K\":\"L9W\",\"TNR\":\"A2G' or (resname SER and resSeq 906) or resname 'Null\"}\n #note that \"or resname 'Null\" exists to make the single quotes match up properly when the ligand select string is assembled without having to modify the code below\n\n for x, i in enumerate(individual_resns_all):\n if i[1:-1] in mdtraj_moad_ligand_mismatches.keys():\n individual_resns_all[x] = mdtraj_moad_ligand_mismatches[i[1:-1]]\n\n #print(individual_resns_all)\n ligand_select_str = \"resname \"+\" or resname \".join(individual_resns_all)\n #print(ligand_select_str)\n\n #---------------------------------------------------------------------------\n\n #note that heavy atom selection does not account for deuterated proteins or ligands\n 
#select ligand heavy atoms\n heavy_lig = holo_xtal.top.select(f\"({ligand_select_str}) and not element H\")\n\n #select protein heavy atoms\n heavy_prot = holo_xtal.top.select(\"protein and not element H\")\n\n #compute protein-ligand distances\n lig_prot_pair_iis = np.array(list(itertools.product(heavy_lig, heavy_prot)))\n prot_lig_dists = md.compute_distances(holo_xtal,lig_prot_pair_iis, periodic=False).flatten()\n\n #select protein atoms within distance threshold\n lig_coord_iis = np.where(prot_lig_dists < distance_threshold)[0]\n prot_iis = np.unique([lig_prot_pair_iis[i][-1] for i in lig_coord_iis])\n\n #obtain all backbone and sidechain atom indices\n sidechain_iis = holo_xtal.top.select(\"sidechain\")\n bb_iis = holo_xtal.top.select(\"backbone\")\n\n #select backbones and sidechains containing the ligand-coordinating atoms\n sele = []\n for i in prot_iis:\n\n resi = holo_xtal.top.atom(i).residue.resSeq #get pdb residue number\n if resi < 0:\n i_odd.append(ind_debug)\n print(\"negative residue number encountered; skipping because mdtraj can't handle it\")\n continue\n\n #separate sidechains and backbones\n if i in sidechain_iis:\n sele.append(\"sidechain and resSeq %s\" % str(resi))\n elif i in bb_iis:\n sele.append(\"backbone and resSeq %s\" % str(resi))\n else:\n print(f'error: atom {i} is in neither sidechain nor backbone')\n break\n\n sele = np.unique(sele)\n\n prot_iis_holo = []\n prot_iis_apo = []\n\n prot_iis_holo_matching = []\n prot_iis_apo_matching = []\n\n for sel in sele:\n #include only indices of atoms in residues present in apo and holo structures\n if len(holo_xtal.top.select(f\"{sel} and not element H\")) > 0 and len(apo_xtal.top.select(f\"{sel} and not element H\")) > 0:\n prot_iis_holo.append(holo_xtal.top.select(f\"{sel} and not element H\"))\n prot_iis_apo.append(apo_xtal.top.select(f\"{sel} and not element H\"))\n\n #filter out incomplete residues with different numbers of atoms resolved for RMSD calculations\n #this is not robust to different atom numberings but this should be standardized\n if len(holo_xtal.top.select(f\"{sel} and not element H\")) == len(apo_xtal.top.select(f\"{sel} and not element H\")):\n prot_iis_holo_matching.append(holo_xtal.top.select(f\"{sel} and not element H\"))\n prot_iis_apo_matching.append(apo_xtal.top.select(f\"{sel} and not element H\"))\n\n #catch cases where apo is highly truncated and there are no pocket residues resolved\n #or where there are no well-resolved pocket residues to calculate RMSD from\n try:\n prot_iis_holo = np.concatenate(prot_iis_holo).ravel()\n prot_iis_apo = np.concatenate(prot_iis_apo).ravel()\n prot_iis_holo_matching = np.concatenate(prot_iis_holo_matching).ravel()\n prot_iis_apo_matching = np.concatenate(prot_iis_apo_matching).ravel()\n except ValueError:\n return False\n\n holo_lining = holo_xtal.atom_slice(prot_iis_holo_matching.astype(int)) #atoms lining the cryptic pocket\n apo_lining = apo_xtal.atom_slice(prot_iis_apo_matching.astype(int)) #atoms lining the cryptic pocket\n\n cs_rmsd = md.rmsd(apo_lining, holo_lining) #calculate the cryptic site rmsd\n\n return [sele, prot_iis_holo, prot_iis_apo, cs_rmsd]\n\n#get average apo and holo active site atom distances\ndef get_lining_distances(holo_xtal, apo_xtal, holo_iis, apo_iis):\n\n holo_lining_coords = holo_xtal.atom_slice(holo_iis).xyz[0]\n\n holo_size = np.mean(scipy.spatial.distance.cdist(holo_lining_coords, holo_lining_coords))\n #print(holo_lining_coords.xyz[0])\n apo_lining_coords = apo_xtal.atom_slice(apo_iis).xyz[0]\n 
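# cdist of the apo coordinates against themselves gives the full pairwise distance matrix\n # (zero diagonal included); its mean serves as a rough pocket-size measure, mirroring holo_size above\n 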
#print(apo_lining_coords.xyz[0])\n apo_size = np.mean(scipy.spatial.distance.cdist(apo_lining_coords, apo_lining_coords))\n\n if apo_size > holo_size: #reverse pockets\n return 0\n elif holo_size > apo_size: #forward pockets\n return 1\n else:\n print(\"pockets are the same; probable filtering error\")\n return 2\n\n#---------------------------main filtering--------------------------------------\n\n#get a list or previously filtered pockets\nalready_filtered = np.load(\"/project/bowmanlab/borowsky.jonathan/FAST-cs/protein-sets/new_pockets/iofiles/output_indices/some_lowrmsd_ligand_pairs_v6.npy\")\nalready_filtered_holos = [i[0] for i in already_filtered]\n\n\nx_forward = 0\nx_reverse = 0\ni_odd = []\n\nforward_pockets = []\nreverse_pockets = []\n\n#assume that apo and holo structures are both in the same iofiles_directory\niofiles1 = \"iofiles\"\niofiles2 = \"iofiles_na\"\n\nholo_list1 = os.listdir(f\"{input_directory}/{iofiles1}/monomer_holo\")\nholo_list2 = os.listdir(f\"{input_directory}/{iofiles2}/monomer_holo\")\n\nfor testind in range(len(all_pockets)):\n\n print(f\"{testind}: {all_pockets[testind]}\")\n\n #print(all_pockets[testind][0])\n #print(already_filtered_holos)\n if all_pockets[testind][0] in already_filtered_holos:\n print(\"skipped\")\n continue\n\n if f\"{all_pockets[testind][0]}_chain{all_pockets[testind][1]}.pdb\" in holo_list1:\n iofiles_switch = iofiles1\n else:\n iofiles_switch = iofiles2\n\n holo_xtal = md.load(f\"{input_directory}/{iofiles_switch}/monomer_holo/{all_pockets[testind][0]}_chain{all_pockets[testind][1]}.pdb\")\n apo_xtal = md.load(f\"{input_directory}/{iofiles_switch}/monomer_apo/{all_pockets[testind][2]}_chain{all_pockets[testind][3]}.pdb\")\n\n sele_iis = select_resi_parts(all_pockets[testind][5], holo_xtal, apo_xtal, distance_threshold, testind)\n if sele_iis == False:\n print(\"no suitable pocket residues were resolved, probably due to a truncated apo structure or very poorly resolved cryptic site\")\n i_odd.append(testind)\n continue\n\n lining_result = get_lining_distances(holo_xtal, apo_xtal, sele_iis[1], sele_iis[2])\n\n if lining_result == 1:\n forward_pockets.append(np.append(all_pockets[testind], sele_iis[3]))\n x_forward += 1\n elif lining_result == 0:\n reverse_pockets.append(np.append(all_pockets[testind], sele_iis[3]))\n x_reverse += 1\n elif lining_result == 2:\n i_odd.append(testind)\n else:\n print(\"error; invalid lining comparison result\")\n\nprint(reverse_pockets)\n\nnp.save(f\"{input_directory}/filter_output/forward_lowrmsd_ligand_pairs_v{serial_out}.npy\", forward_pockets)\nnp.save(f\"{input_directory}/filter_output/reverse_lowrmsd_ligand_pairs_v{serial_out}.npy\", reverse_pockets)\n\nprint(\"---------------------------------------------------------------------------------------\")\nprint(f\"{len(all_pockets)} pockets\")\nprint(f\"{x_forward} forward pockets\")\nprint(f\"{x_reverse} reverse pockets\")\n\nprint(f\"indices of odd pockets: {np.unique(i_odd)}\")\n\n\n#indices of odd pockets: [ 12 79 80 99 147 193 218 276 401 422 499 540 722 777 1008 1071 1193 1277 1332 1499 1555]\n\n#1672 pockets\n#347 forward pockets\n#513 reverse pockets\n#indices of odd pockets: [ 12 79 99 147 193 218 276 401 499 540 722 777 1008 1071 1277 1332 1499]\n\n\n#---------------------------------------------------------------------------TRIMMINGS------------------------------------------------------------------------\n\n #sanity check:\n # a = np.array([-0.9117, 0.5899, -1.5034])\n # b = np.array([-0.8598, 0.5826, -1.6403])\n # 
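sanity check: explicit Euclidean distance, equivalent to np.linalg.norm(a-b)\n    #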
print(np.sqrt(np.dot(a-b,a-b)))\n #works\n\n #note that this code does not introduce any duplicate indices and hence no application of np.unique is needed\n\n\n\n #more concise but does not address check that residues are present in both apo and holo\n #prot_iis_holo = np.concatenate([holo_xtal.top.select(f\"{sel} and not element H\") for sel in sele]).ravel()\n #prot_iis_apo = np.concatenate([apo_xtal.top.select(f\"{sel} and not element H\") for sel in sele]).ravel()\n\n #return [sele, prot_iis_holo, prot_iis_apo]\n","repo_name":"JonathanHB/cryptic-pocket-filtering","sub_path":"forward_reverse_pocket_separate.py","file_name":"forward_reverse_pocket_separate.py","file_ext":"py","file_size_in_byte":10189,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"74561428106","text":"#encoding:utf-8\n# database initialization\nimport datetime\nfrom .orm_session import create_session,_create_db_table\nfrom .orm import SystemPar,SystemCode,ProxyWebsite,ProxyCheck\nimport platform\nfrom python_common.selenium_common import init_database_system_par\nfrom python_common.database_common import base_system_code\n\n\ndef init_db(db_session):\n _create_db_table()\n db_session.commit()\n SystemPar.delete_all(db_session)\n SystemCode.delete_all(db_session)\n db_session.commit()\n system_type=''\n if platform.platform().find('Windows')>=0:\n system_type='Windows'\n elif platform.platform().find('Darwin')>=0:\n system_type='Mac'\n elif platform.platform().find('Linux')>=0:\n system_type='Linux'\n else:\n system_type=None\n base_system_code(db_session,SystemCode)\n init_database_system_par(system_type,db_session,SystemPar)\n # base data\n systemPar = SystemPar(par_code='version',\n par_desc='版本信息', par_value='1.0', par_type=2)\n db_session.add(systemPar)\n systemPar = SystemPar(par_code='polling_second',\n par_desc='Queue轮询间隔秒数', par_value='5', par_type=1)\n db_session.add(systemPar)\n proxyWebsite=ProxyWebsite(p_name='快代理',p_url='https://www.kuaidaili.com/free/inha/%d/',p_min=1,p_max=40,p_lastcheck_time=datetime.datetime.now(),p_inuse=True)\n db_session.add(proxyWebsite)\n\n\n db_session.commit()\n print('init db ok!')\n\n\ndef main():\n db_session=create_session()\n init_db(db_session)\n db_session.close()\n \n\nif __name__ == '__main__':\n main()","repo_name":"cqqyd2014/vote2019","sub_path":"database/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73314335944","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn,s = map(int,input().split())\r\narr = list(map(int,input().split()))\r\n\r\nres = 100001\r\nstart = 0\r\nend = 0\r\ntmp = arr[0]\r\nwhile start <= end and end < n:\r\n    if tmp >= s:\r\n        if res > end-start+1:\r\n            res = end-start+1\r\n        tmp -= arr[start]\r\n        start += 1\r\n    else:\r\n        end += 1\r\n        if end < n:\r\n            tmp += arr[end]\r\n\r\nif res > n:\r\n print(0)\r\nelse:\r\n print(res)","repo_name":"SuGyoungIn/backjoon","sub_path":"백준/Gold/1806. 
부분합/부분합.py","file_name":"부분합.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6463138744","text":"from web3 import Web3\nimport time\n\n\ninfura_url_arb_testnet = \"\"\n\nreceiverAddress = \"\"\n\ntargetAddress = \"\"\n_private = \"\"\n\nweb3 = Web3(Web3.HTTPProvider(infura_url_arb_testnet))\n\nclass Balance:\n def __init__(self):\n self.balance = web3.eth.get_balance(targetAddress)\n _gas = ((web3.eth.gas_price / 10**9) * 670000 * 10 ** 9)\n\n if self.balance > _gas:\n\n print(\"____________________________________________________________________________\")\n print(f\"target address has initial balance of: {str(self.balance / 10 **18)}\")\n print(\"withdrawing...\")\n createTransaction()\n self.balance = web3.eth.get_balance(targetAddress)\n # print(\"target's remaining balance: \" + str(web3.eth.get_balance(targetAddress) / 10**18)) \n \n\n\n def checkChange(self):\n newBalance = web3.eth.get_balance(targetAddress)\n if newBalance > self.balance:\n print(\" **new activity detected**\")\n print(\"____________________________________________________________________________\")\n print(\"\")\n print(f\"target address funded with: {str(newBalance/ 10 **18)} Eth\")\n print(\"withdrawing...\")\n createTransaction()\n self.balance = web3.eth.get_balance(targetAddress)\n\n\n\n\ndef createTransaction():\n _mainBalance = web3.eth.get_balance(targetAddress)\n _gas = ((web3.eth.gas_price / 10**9) * 670000 * 10 ** 9)\n signed_txn = web3.eth.account.signTransaction({\n 'from': targetAddress,\n 'to': receiverAddress,\n 'gas': 670000,\n 'gasPrice': web3.eth.gas_price,\n 'value': _mainBalance - int(_gas),\n 'nonce': web3.eth.getTransactionCount(targetAddress),\n },\n _private)\n \n txn_hash = web3.eth.send_raw_transaction(signed_txn.rawTransaction)\n web3.eth.wait_for_transaction_receipt(txn_hash, timeout=12, poll_latency=0.1)\n print(f\"sent {(_mainBalance - int(_gas)) / 10**18} Eth to {receiverAddress}\")\n print(f\"target remaining balance: {str(web3.eth.get_balance(targetAddress) / 10**18)}\")\n print(\"____________________________________________________________________________\")\n\n\n\ndef event_loop(poll_interval):\n while True:\n balance.checkChange()\n time.sleep(poll_interval)\n\ndef main():\n event_loop(5)\n\n\nprint(f\"target address: {targetAddress}\")\nprint(f\"receiver address {receiverAddress}\")\nprint(\"waiting for funding...\")\n\n\nbalance = Balance()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"nolanjannotta/python-web3-scripts","sub_path":"AddressTracker.py","file_name":"AddressTracker.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4621653437","text":"#Write a program that receives two numbers and an operator, and performs the mathematical operation defined by the operator.\nnum1 = input(\"Digite o primeiro número \")\nnum1 = int(num1)\noperador = input(\"Digite o operador \")\nnum2 = input(\"Digite o segundo número \")\nnum2 = int(num2)\n\nif operador == \"+\":\n\toperacao = num1 + num2\nelif operador == \"-\":\n\toperacao = num1 - num2\nelif operador == \"*\":\n\toperacao = num1 * num2\nelif operador == \"/\":\n\toperacao = num1 / num2\nelse:\n\toperacao = \"operador inválido\"\nprint(\"Resultado \")\nprint(operacao)","repo_name":"cami-codes/ExerciciosPython","sub_path":"sinal.py","file_name":"sinal.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13037463777","text":"import bpy\nfrom collections import defaultdict\nfrom .. 
tree_info import getNetworkByIdentifier, getNodesByType\n\nclass MoveViewToSubprogram(bpy.types.Operator):\n bl_idname = \"an.network_navigation\"\n bl_label = \"Move View to Subprogram\"\n bl_description = \"\"\n\n @classmethod\n def poll(cls, context):\n activeNode = getattr(context, \"active_node\", None)\n if activeNode is None: return False\n if not activeNode.select: return False\n if context.area.type != \"NODE_EDITOR\": return False\n return activeNode.isAnimationNode\n\n def execute(self, context):\n activeNode = context.active_node\n activeNetwork = activeNode.network\n\n if activeNode.bl_idname == \"an_InvokeSubprogramNode\":\n subnetwork = getNetworkByIdentifier(activeNode.subprogramIdentifier)\n self.jumpToNetwork(subnetwork, activeNode = subnetwork.getOwnerNode())\n elif getattr(activeNode, \"isSubprogramNode\", False):\n invokers = activeNode.getInvokeNodes()\n if len(invokers) == 1:\n self.jumpToNode(invokers[0])\n elif len(invokers) > 1:\n invokersByTree = defaultdict(list)\n for invokerNode in invokers:\n invokersByTree[invokerNode.nodeTree].append(invokerNode)\n\n invokersInActiveTree = invokersByTree[activeNode.nodeTree]\n if len(invokersInActiveTree) > 0:\n self.jumpToNodes(invokersInActiveTree, activeNode = invokersInActiveTree[0])\n elif len(invokersByTree.keys()) == 2:\n # 2 because the active tree is also in the dict\n self.jumpToNodes(invokers)\n else:\n self.report({\"INFO\"}, \"Cannot decide which node to jump to (yet).\")\n elif activeNetwork.isSubnetwork:\n self.jumpToNetwork(activeNetwork, activeNode = activeNetwork.getOwnerNode())\n else:\n self.jumpToNetwork(activeNetwork)\n\n return {\"FINISHED\"}\n\n def jumpToNetwork(self, network, activeNode = None):\n nodes = network.getNodes()\n self.jumpToNodes(nodes, activeNode)\n\n def jumpToNode(self, node):\n self.jumpToNodes([node], activeNode = node)\n\n def jumpToNodes(self, nodes, activeNode = None):\n if len(nodes) == 0: return\n activeTree = nodes[0].id_data\n bpy.context.space_data.node_tree = activeTree\n bpy.ops.node.select_all(action = \"DESELECT\")\n for node in nodes:\n node.select = True\n bpy.ops.node.view_selected()\n activeTree.nodes.active = activeNode\n","repo_name":"JacquesLucke/animation_nodes","sub_path":"animation_nodes/operators/network_navigation.py","file_name":"network_navigation.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":2231,"dataset":"github-code","pt":"81"} +{"seq_id":"75136287625","text":"from collections import defaultdict\n\n\nclass Solution:\n def fourSumCount(self, nums1, nums2, nums3, nums4) -> int:\n dp = defaultdict(lambda: 0)\n ans = 0\n for c in nums3:\n for d in nums4:\n e = c+d\n dp[e] += 1\n for b in nums2:\n for a in nums1:\n f = a+b\n ans += dp[-f]\n return ans\n","repo_name":"bamblebam/competitive-programming","sub_path":"2021/5_May_21/26-5-21/4sum2.py","file_name":"4sum2.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15684464133","text":"\n\nfrom collections import Counter\n\nN = Counter()\n\n# count sentences for ner (blank lines separate sentences)\nfor dataset in ['bc5cdr', 'JNLPBA', 'NCBI-disease', 'sciie']:\n for split in ['train', 'dev', 'test']:\n with open(f'data/ner/{dataset}/{split}.txt') as f_in:\n for line in f_in:\n if line.strip() == '':\n N[dataset] += 1\n\n# count sentences for PICO\nfor split in ['train', 'dev', 'test']:\n with open(f'data/pico/ebmnlp/{split}.txt') as f_in:\n for line in f_in:\n if 
line.strip() == '':\n N['ebmnlp'] += 1\n\n# count num sentences for parsing\nfor split in ['train', 'dev', 'test']:\n with open(f'data/parsing/genia/{split}.txt') as f_in:\n for line in f_in:\n if line.strip() == '':\n N['genia'] += 1\n\n# count num JSONs for CLS\nfor dataset in ['chemprot', 'citation_intent', 'pico', 'rct-20k', 'sci-cite', 'sciie-relation-extraction']:\n for split in ['train', 'dev', 'test']:\n with open(f'data/text_classification/{dataset}/{split}.txt') as f_in:\n for line in f_in:\n N[dataset] += 1\n\nimport json\nprint(json.dumps(N, indent=4))","repo_name":"robby927/scibert","sub_path":"scripts/compute_dataset_sizes.py","file_name":"compute_dataset_sizes.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"3160593472","text":"def main ():\r\n n1 = int(input('Digite o primeiro ângulo do triangulo: '))\r\n n2 = int(input('Digite o segundo ângulo do triangulo: '))\r\n n3 = int(input('Digite o terceiro ângulo do triangulo: '))\r\n\r\n qual_triangulo(n1, n2, n3)\r\n\r\ndef qual_triangulo(n1, n2, n3):\r\n if n1 + n2 + n3 == 180:\r\n if n1 < 90 and n2 < 90 and n3 < 90:\r\n print('Triângulo acutângulo')\r\n elif n1 == 90 or n2 == 90 or n3 == 90:\r\n print('Triângulo retângulo')\r\n elif n1 > 90 or n2 > 90 or n3 > 90:\r\n print('Triângulo obtusângulo')\r\n if n1 == 0 or n2 == 0 or n3 == 0:\r\n print('Não existe triângulo com grau zero')\r\n\r\n\r\nmain()\r\n\r\n","repo_name":"tpessoa10/-ifpi-ads-algoritmos2020","sub_path":"Fabio02a_Thiago_Pessoa/Q6.py","file_name":"Q6.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3339160969","text":"import tensorflow as tf\n\n\nclass PairNorm(tf.keras.layers.Layer):\n \"\"\"\n PairNorm: Tackling Oversmoothing in GNNs https://arxiv.org/abs/1909.12223\n \"\"\"\n\n def __init__(self, epsilon=1e-6, subtract_mean=False, **kwargs):\n self.epsilon = epsilon\n self.bias = None\n self.subtract_mean = subtract_mean\n super(PairNorm, self).__init__(**kwargs)\n\n def call(self, inputs, graph: tf.SparseTensor = None, **kwargs):\n \"\"\"\n :param graph: graph level adjacency matrix\n :param count_in_graph: element count in each graph\n :param inputs: input tensor variables or clauses state\n \"\"\"\n mask = graph.indices[:, 0] if graph is not None else None\n\n # input size: cells x feature_maps\n if self.subtract_mean: # subtracting mean may not be necessary: https://arxiv.org/abs/1910.07467\n if graph is not None:\n mean = tf.sparse.sparse_dense_matmul(graph, inputs)\n inputs -= tf.gather(mean, mask)\n else: # assume one graph per batch\n mean = tf.reduce_mean(inputs, axis=0, keepdims=True)\n inputs -= mean\n\n variance = tf.reduce_mean(tf.square(inputs), axis=1, keepdims=True)\n\n return inputs * tf.math.rsqrt(variance + self.epsilon)\n","repo_name":"LUMII-Syslab/QuerySAT","sub_path":"layers/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"81"} +{"seq_id":"21939510456","text":"import chess\nimport chess.pgn\nimport io\nimport threading\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nfrom boardPane import boardPane\nfrom dbHelper import DBHelper\nimport strings as sql\n\nclass dbResults(ttk.Treeview):\n\tdef __init__(self, parent, dbPane):\n\t\tttk.Treeview.__init__(self, parent)\n\t\tself.dbPane = dbPane\n\t\tself.games 
= None\n\t\tself.offsets = None\n\t\tself.setStyle()\n\t\tself.bind('<<TreeviewSelect>>', self.printHeadings)\n\t\tself.setup()\n\n\tdef printHeadings(self, e):\n\t\t'''\n\t\tWhen user selects a row in the treeview, display the game headers\n\t\tin the messages widget.\n\n\t\tThis is handled differently depending on whether the source of the games\n\t\tis a database (self.games is populated) or a \n\t\tpgn file (self.offsets is populated)\n\t\t'''\n\t\tm = self.dbPane.messages\n\t\tm.delete(\"1.0\", \"end\")\n\t\t# headers from database\n\t\tif self.games:\n\t\t\theaders = self.games[int(self.selection()[0])].headers\n\t\t# headers from pgn file\n\t\telif self.offsets:\n\t\t\toffset = self.offsets[int(self.selection()[0])]\n\t\t\tself.file.seek(offset)\n\t\t\tgame = chess.pgn.read_game(self.file)\n\t\t\theaders = game.headers\n\t\tfor h in headers:\n\t\t\tm.insert('end', f'{h}:\\t\\t{headers[h]}\\n')\n\n\tdef setStyle(self):\n\t\t'''\n\t\tstyle the treeview\n\t\t''' \n\t\tself.style = ttk.Style()\n\t\tself.style.theme_use('clam')\n\t\tself.style.configure(\"Treeview\",\n\t\t\t# background='silver',\n\t\t\t# foreground='black',\n\t\t\trowheight=50,\n\t\t\tfieldbackground='silver',\n\t\t\tfont = ('Helvetica', 12)\n\t\t)\n\t\tself.style.map(\"Treeview\", \n\t\t\tbackground=[('selected', 'green')]\n\t\t)\n\n\t\tself.style.configure(\"Treeview.Heading\",\n\t\t\trelief = 'raised',\n\t\t\tfont = ('Helvetica', 16)\n\t\t)\n\n\t\tself.style.configure(\"Treeview.Cell\",\n\t\t\tpadding = 10\n\t\t)\n\n\tdef setup(self):\n\t\tcolumns = (\"Date\", \"White\", \"Black\", \"Event\", \"Round\", \"Result\")\n\t\tself.config(show='headings', columns=columns)\n\t\t# set heading text\n\t\tfor col in columns:\n\t\t\tsort_method = lambda _col=col: self.treeview_sort_column(self, _col, False)\n\t\t\tself.heading(col, text=col, anchor='w', command=sort_method)\n\t\t# bind double click to open game\n\t\tself.bind('<Double-1>', self.loadGame)\n\n\tdef loadGame(self, e):\n\t\t'''\n\t\tLoad the double clicked game from the tree view. This is handled\n\t\tdifferently depending on whether the game is from a pgn file (self.offsets\n\t\tis populated) or a database (self.games is populated)\n\t\t'''\n\t\titem = int(self.selection()[0])\n\t\tif self.offsets:\n\t\t\toffset = self.offsets[item]\n\t\t\tself.file.seek(offset)\n\t\t\tgame = chess.pgn.read_game(self.file)\n\t\telif self.games:\n\t\t\tgame = self.games[item]\n\t\tself.dbPane.gui.addBoardPane(game)\n\n\tdef resetTree(self):\n\t\tself.games = []\n\t\tfor row in self.get_children():\n\t\t\tself.delete(row)\n\n\tdef getResults(self, db, sql, data):\n\t\tself.dbPane.messages.delete('1.0', 'end')\n\t\tself.dbPane.messages.insert('end', 'Starting Search...')\n\t\tself.resetTree()\n\t\tthreading.Thread(\n\t\t\ttarget=self.spawnDBTask,\n\t\t\targs = (db, sql, data),\n\t\t\tdaemon= True).start()\n\n\tdef spawnDBTask(self, db, sql, data):\n\t\tdbh = DBHelper(db)\n\t\treturnedData = dbh.query(sql, data)\n\t\tself.dbPane.messages.insert('end', f'Done! 
Found {len(returnedData)} games.')\n\t\tself.after(0, self.populateTree, returnedData)\n\n\tdef insertTreeRow(self, headers, iid):\n\t\tself.insert(\n\t\t\t\tparent='',\n\t\t\t\tindex = 'end', \n\t\t\t\tiid=iid,\n\t\t\t\ttext = '',\n\t\t\t\tvalues=(\n\t\t\t\t\theaders['Date'],\n\t\t\t\t\theaders['White'],\n\t\t\t\t\theaders['Black'],\n\t\t\t\t\theaders['Event'],\n\t\t\t\t\theaders['Round'],\n\t\t\t\t\theaders['Result']\n\t\t\t\t)\n\t\t\t)\n\n\tdef populateTree(self, data):\n\t\tiid = 0\n\t\tfor row in data:\n\t\t\tgame = self.getGameFromStr(row['pgnString'])\n\t\t\theaders = dict(game.headers)\n\t\t\tself.insertTreeRow(headers, iid)\n\t\t\tiid+=1\n\t\t\tself.games.append(game)\n\n\tdef pgn2Tree(self, filename):\n\t\t'''\n\t\tInsert games from pgn file into treeview\n\t\t'''\n\t\tself.offsets = [0]\n\t\tmessages = self.dbPane.messages\n\t\tif filename:\n\t\t\tmessages.insert('end', 'Importing Now...')\n\t\t\t# regading update_idletasks, see \n\t\t\t# https://www.tcl.tk/man/tcl8.7/TclCmd/update.htm\n\t\t\tself.update_idletasks()\n\t\telse:\n\t\t\tmessages.insert('end', \"Error: no .pgn file selected.\")\n\t\t\treturn\n\t\tiid = 0\n\t\tself.file = open(filename, encoding=\"Latin-1\")\n\t\twhile True:\n\t\t\theaders = chess.pgn.read_headers(self.file)\n\t\t\tif headers == None: break\n\t\t\tself.insertTreeRow(dict(headers), iid)\n\t\t\tself.offsets.append(self.file.tell())\n\t\t\tiid+=1\n\t\t\tif iid%100 == 0:\n\t\t\t\tmessages.insert('end', f\"\\nImported {iid} games.\")\n\t\t\t\tmessages.see('end')\n\t\t\t\tself.update_idletasks()\n\t\tmessages.insert('end', f\"\\n{iid} games displayed.\")\n\t\tmessages.see('end')\n\n\tdef getGameFromStr(self, pgnString):\n\t\tpgn = io.StringIO(pgnString)\n\t\tgame = chess.pgn.read_game(pgn)\n\t\treturn game\n\n\tdef treeview_sort_column(self, tv, col, reverse):\n\t\t'''\n\t\tSorting tree view table by click on heading from:\n\t\thttps://tekrecipes.com/2019/04/20/tkinter-treeview-enable-sorting-upon-clicking-column-headings/\n\t\t'''\n\t\tl = [(tv.set(k, col), k) for k in tv.get_children('')]\n\t\tl.sort(reverse=reverse)\n\n\t\t# rearrange items in sorted positions\n\t\tfor index, (val, k) in enumerate(l):\n\t\t\ttv.move(k, '', index)\n\n\t\t# reverse sort next time\n\t\ttv.heading(col, command=lambda _col=col: self.treeview_sort_column(tv, _col, not reverse))\n","repo_name":"glenray/chess","sub_path":"dbResults.py","file_name":"dbResults.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23225004798","text":"import tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.preprocessing import image\nimport matplotlib.pyplot as plt\nimport os\n\n\"\"\"\n## File path\n\"\"\"\n\n# dataset path\ndataset_dir_path = os.path.join('images')\n# encoder path\nencoder_path = os.path.join('models', 'encoder.h5')\n\n\"\"\"\n## Prepare the data\n\"\"\"\n\n# number of classes\nnum_classes = 50\n# target size\ntarget_size = [96, 96]\n# input shape\ninput_shape = (96, 96, 3)\n\n# hyper-parameters\nlearning_rate = 0.001\nbatch_size = 265\nhidden_units = 512\nprojection_units = 128\nnum_epochs = 50\ndropout_rate = 0.5\ntemperature = 0.05\n\n\ntrain_data_generator = image.ImageDataGenerator(\n rescale=1. 
/ 255,\n validation_split=0.2,\n horizontal_flip=True,\n zoom_range=[0.9, 1.4],\n brightness_range=[0.75, 1.25],\n rotation_range=10\n)\n\ntraining_generator = train_data_generator.flow_from_directory(\n directory=dataset_dir_path,\n target_size=tuple(target_size),\n batch_size=batch_size,\n class_mode=\"categorical\"\n)\n\nvalidation_generator = train_data_generator.flow_from_directory(\n directory=dataset_dir_path,\n target_size=tuple(target_size),\n batch_size=batch_size,\n class_mode=\"categorical\",\n subset=\"validation\"\n)\n\n\n\"\"\"\n## Plot the history\n\"\"\"\n\ndef plot_history(history):\n plt.figure(1)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Training and validation loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n\n plt.figure(2)\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('Training and validation accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n\n plt.show()\n\n\n\"\"\"\n## Build the encoder model\nThe encoder model takes the image as input and turns it into a 2048-dimensional\nfeature vector.\n\"\"\"\n\ndef create_encoder():\n resnet = keras.applications.ResNet50V2(\n include_top=False, weights=None, input_shape=input_shape, pooling=\"avg\"\n )\n\n inputs = keras.Input(shape=input_shape)\n outputs = resnet(inputs)\n model = keras.Model(inputs=inputs, outputs=outputs, name=\"painter-encoder\")\n return model\n\n\nencoder = create_encoder()\nencoder.summary()\n\n\n\"\"\"\n## Build the classification model\nThe classification model adds a fully-connected layer on top of the encoder,\nplus a softmax layer with the target classes.\n\"\"\"\n\ndef create_classifier(encoder, trainable=True):\n\n for layer in encoder.layers:\n layer.trainable = trainable\n\n inputs = keras.Input(shape=input_shape)\n features = encoder(inputs)\n features = layers.Dropout(dropout_rate)(features)\n features = layers.Dense(hidden_units, activation=\"relu\")(features)\n features = layers.Dropout(dropout_rate)(features)\n outputs = layers.Dense(num_classes, activation=\"softmax\")(features)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name=\"cifar10-classifier\")\n model.compile(\n optimizer=keras.optimizers.Adam(learning_rate),\n loss=keras.losses.SparseCategoricalCrossentropy(),\n metrics=[keras.metrics.SparseCategoricalAccuracy()],\n )\n return model\n\n\n\"\"\"\n## Experiment 1: Train the baseline classification model\nIn this experiment, a baseline classifier is trained as usual, i.e., the\nencoder and the classifier parts are trained together as a single model\nto minimize the crossentropy loss.\n\"\"\"\n\nencoder = create_encoder()\nclassifier = create_classifier(encoder)\nclassifier.summary()\n\nhistory = classifier.fit_generator(\n generator=training_generator,\n steps_per_epoch=training_generator.n / batch_size,\n epochs=num_epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.n / batch_size,\n verbose=1\n)\n\nplot_history(history)\n\n\n\"\"\"\n## Experiment 2: Use supervised contrastive learning\nIn this experiment, the model is trained in two phases. 
In the first phase,\nthe encoder is pretrained to optimize the supervised contrastive loss,\ndescribed in [Prannay Khosla et al.](https://arxiv.org/abs/2004.11362).\nIn the second phase, the classifier is trained using the trained encoder with\nits weights frozen; only the weights of fully-connected layers with the\nsoftmax are optimized.\n\n### 1. Supervised contrastive learning loss function\n\"\"\"\n\nclass SupervisedContrastiveLoss(keras.losses.Loss):\n def __init__(self, temperature=1, name=None):\n super(SupervisedContrastiveLoss, self).__init__(name=name)\n self.temperature = temperature\n\n def __call__(self, labels, feature_vectors, sample_weight=None):\n # Normalize feature vectors\n feature_vectors_normalized = tf.math.l2_normalize(feature_vectors, axis=1)\n # Compute logits\n logits = tf.divide(\n tf.matmul(\n feature_vectors_normalized, tf.transpose(feature_vectors_normalized)\n ),\n self.temperature,\n )\n return tfa.losses.npairs_loss(tf.squeeze(labels), logits)\n\n\ndef add_projection_head(encoder):\n inputs = keras.Input(shape=input_shape)\n features = encoder(inputs)\n outputs = layers.Dense(projection_units, activation=\"relu\")(features)\n model = keras.Model(\n inputs=inputs, outputs=outputs, name=\"cifar-encoder_with_projection-head\"\n )\n return model\n\n\n\"\"\"\n### 2. Pretrain the encoder\n\"\"\"\n\nencoder = create_encoder()\n\nencoder_with_projection_head = add_projection_head(encoder)\nencoder_with_projection_head.compile(\n optimizer=keras.optimizers.Adam(learning_rate),\n loss=SupervisedContrastiveLoss(temperature),\n)\n\nencoder_with_projection_head.summary()\n\nhistory = encoder_with_projection_head.fit_generator(\n generator=training_generator,\n steps_per_epoch=training_generator.n / batch_size,\n epochs=num_epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.n / batch_size,\n verbose=1\n)\n\n# save the encoder\nencoder.save(encoder_path)\n\n\n\"\"\"\n### 3. 
Train the classifier with the frozen encoder\n\"\"\"\n\nclassifier = create_classifier(encoder, trainable=False)\n\nhistory = classifier.fit_generator(\n generator=training_generator,\n steps_per_epoch=training_generator.n / batch_size,\n epochs=num_epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.n / batch_size,\n verbose=1\n)\n\nplot_history(history)\n","repo_name":"tintinrevient/period-recreation","sub_path":"contrastive_learning/supervised_contrastive_learning_of_painters.py","file_name":"supervised_contrastive_learning_of_painters.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21388415452","text":"from __future__ import absolute_import\nimport os\nimport numpy\nimport pykgraph\nfrom ann_benchmarks.algorithms.base import BaseANN\n\n# benchmark k-nn graph construction\nclass KGraph(BaseANN):\n def __init__(self, metric, params):\n if type(metric) == unicode:\n metric = str(metric)\n self.name = 'KGraph(%s)' % (metric)\n self._metric = metric\n self._L = params['L']\n self._recall = params['recall']\n\n\n def fit(self, X):\n if X.dtype != numpy.float32:\n X = X.astype(numpy.float32)\n self._kgraph = pykgraph.KGraph(X, self._metric)\n self._kgraph.build(\n reverse = 0,\n K = self._count,\n # L must always be > count\n L = self._count+self._L,\n recall = self._recall)\n\n def query(self, idx, n):\n # The graph contains more than k neighbors per node, but they seem to be in sorted order.\n return self._kgraph.get_nn(idx)[0][:self._count]\n\n def builds_graph(self):\n return True\n\n def set_count(self, count):\n self._count = count\n\n def __str__(self):\n return 'KGraph(l=%d, recall=%.2f)' % (self._L, self._recall)\n\n","repo_name":"mvesterli/thesis2019","sub_path":"ann-benchmarks/ann_benchmarks/algorithms/graph/kgraph.py","file_name":"kgraph.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7542339761","text":"import speech_recognition as sr\nfrom gtts import gTTS\nimport os\nimport random\nimport re\nimport requests\nfrom pygame import mixer\n\nr = sr.Recognizer()\nm = sr.Microphone()\n\ntry:\n myobj = gTTS(text=\"Hello my name is Andromeda, I am open for your requests!\", lang=\"en\", slow=False)\n rnd = random.choice(\"abcd352435324kjdfsighjiosfhoi3434590uf0909098cxv90cvxbkljn!@#e\")\n myobj.save(rnd + \"welcome.mp3\")\n os.system(\"cd /home/pi/final/hue-cli/ && /home/pi/final/hue-cli/bin/hue.sh hue 53356\")\n os.system(\"ffplay \" + rnd + \"welcome.mp3 -nodisp -autoexit >/dev/null 2>&1\")\n print(\"A moment of silence, please...\")\n with m as source: r.adjust_for_ambient_noise(source)\n print(\"Set minimum energy threshold to {}\".format(r.energy_threshold))\n while True:\n print(\"Say something!\")\n with m as source: audio = r.listen(source)\n print(\"Got it! 
Now to recognize it...\")\n requests.get(\"http://10.0.0.149:9080/api.php?action=brightness&value=240\")\n try:\n value = r.recognize_google(audio)\n\n if str is bytes: # this version of Python uses bytes for strings (Python 2)\n print(u\"You said {}\".format(value).encode(\"utf-8\"))\n else: # this version of Python uses unicode for strings (Python 3+)\n print(\"You said {}\".format(value))\n\n requests.get(\"http://10.0.0.149:9080/api.php?action=brightness&value=30\")\n rndt = random.choice(\"abcd4iouth34gh8re9shguigerbhgiufdsbhguib34uithdafioundjzifngiudfse\")\n\n if re.search(\".*(rocket|Rocket).*\", value):\n myobj = gTTS(text=\"Yes Sir!\", lang=\"en\", slow=False)\n myobj.save(rndt + \"welcomexx.mp3\")\n\n mixer.init()\n mixer.music.load(rndt + \"welcomexx.mp3\")\n mixer.music.play()\n\n requests.get(\"http://10.0.0.149:9080/api.php?action=brightness&value=120\")\n requests.get(\"http://10.0.0.149:9080/api.php?action=brightness&value=200\")\n os.system(\"cd /home/pi/final/hue-cli/ && /home/pi/final/hue-cli/bin/hue.sh hue 12\")\n os.system(\"cd /home/pi/final/hue-cli/ && /home/pi/final/hue-cli/bin/hue.sh sat 255\")\n requests.get(\"http://10.0.0.149:9080/phase.php?phase=1\")\n elif re.search(\".*(satellite|Satellite).*\", value):\n sat = requests.get(\"http://spacebar.hurma.tv/satellites\")\n jsonSat = sat.json()\n\n myobj = gTTS(text=\"Captain, you are near \" + jsonSat['name'] + \". You will see that for \" + jsonSat['duration'] + \" seconds\" , lang=\"en\", slow=False)\n myobj.save(rndt + \"welcomexx.mp3\")\n\n mixer.init()\n mixer.music.load(rndt + \"welcomexx.mp3\")\n mixer.music.play()\n\n requests.get(\"http://10.0.0.149:9080/phase.php?phase=2\")\n\n elif re.search(\".*(fireball|Fireball).*\", value):\n sat = requests.get(\"https://ssd-api.jpl.nasa.gov/fireball.api?www=1&vel-comp=true\")\n jsonSat = sat.json()\n\n myobj = gTTS(text=\"Last fireball that it was reported happend \" + jsonSat['data'][0][0] + \". Calculated total impacted energy of this fireball was about 4.2 kilotons.\" , lang=\"en\", slow=False)\n myobj.save(rndt + \"welcomexx.mp3\")\n\n mixer.init()\n mixer.music.load(rndt + \"welcomexx.mp3\")\n mixer.music.play()\n elif re.search(\".*(thank|Thank).*\", value):\n myobj = gTTS(text=\"Thank you for your attention captain! I hope that you liked our space journey!\", lang=\"en\", slow=False)\n myobj.save(rndt + \"welcomexx.mp3\")\n\n mixer.init()\n mixer.music.load(rndt + \"welcomexx.mp3\")\n mixer.music.play()\n\n requests.get(\"http://10.0.0.149:9080/phase.php?phase=3\")\n else:\n rndt = random.choice(\"abcd4iouth34gh8re9shguigerbhgiufdsbhguib34uithdafioundjzifngiudfse\")\n myobj = gTTS(text=\"I dont understand your query mr. captain!\", lang=\"en\", slow=False)\n myobj.save(rndt + \"welcomexx.mp3\")\n\n mixer.init()\n mixer.music.load(rndt + \"welcomexx.mp3\")\n mixer.music.play()\n\n #os.system(\"ffplay \" + rndt + \"welcomexx.mp3 -nodisp -autoexit >/dev/null 2>&1\")\n\n except sr.UnknownValueError:\n print(\"Oops! Didn't catch that\")\n except sr.RequestError as e:\n print(\"Uh oh! 
Couldn't request results from Google Speech Recognition service; {0}\".format(e))\nexcept KeyboardInterrupt:\n pass","repo_name":"spcbar/andromeda","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21176448830","text":"'''\nCrie um programa que tenha uma dupla totalmente preenchida com uma contagem por extenso, de zero até vinte.\nSeu programa deverá ler um número pelo teclado (entre 0 e 20) e mostrá-lo por extenso.'''\n\nnomes = ('zero', 'um', 'dois', 'tres', 'quatro', 'cinco',\n 'seis', 'sete', 'oito', 'nove', 'dez', 'onze',\n 'doze', 'trese', 'quatorze', 'quinze', 'dezeseis',\n 'dezesete', 'dezoito', 'dezenove','vinte')\n\nwhile True:\n numero = int(input('Digite um numero: '))\n if 0 <= numero <= 20:\n print(f'Voce digitou o numero {nomes[numero]}')\n else:\n numero = int(input('de 0 a 20 burro: '))\n resp = str(input('Sair S/N > ')).strip().upper()\n if resp not in 'SN':\n resp = str(input('Sair S/N > ')).strip().upper()\n if resp == 'S':\n break\nprint(f'{\" FIM \":&^30}')","repo_name":"Alexandre1961/Python","sub_path":"curso_em_video/ex072a.py","file_name":"ex072a.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71898009864","text":"from turtle import Screen, Turtle\nfrom math import *\n\n\n#win = Screen()\nWIDTH , HEIGHT = 20, 15\n### __ \n\ndef axis(turtle, distance, tick):\n position = turtle.position()\n turtle.pendown()\n\n for _ in range(0, distance // 2, tick):\n turtle.forward(tick)\n turtle.dot()\n\n turtle.setposition(position)\n\n for _ in range(0, distance // 2, tick):\n turtle.backward(tick)\n turtle.dot()\n turtle.penup()\n turtle.home()\n\ndef generate(f,interval):\n num = []\n start,end,inc = interval\n while start < end:\n num.append((start,f(start)))\n start += inc\n return num\n\ndef plot(turtle,f,interval):\n turtle.penup()\n turtle.pencolor('blue')\n\n num = generate(f,interval)\n\n for p in num:\n turtle.goto(p)\n turtle.pendown()\n\n turtle.penup()\n \n\n\nif __name__ == '__main__':\n win = Screen()\n win.setworldcoordinates(-WIDTH/2 , -HEIGHT/2 , WIDTH//2 , HEIGHT//2)\n \n T = Turtle(visible = False)\n T.speed('fastest')\n\n axis(T,WIDTH,1)\n T.setheading(90)\n axis(T,HEIGHT,1)\n \n\n f = lambda x : sin(x)\n\n interval = (0,9,0.1)\n\n plot(T,f,interval)\n\n win.exitonclick()\n\n \n\n \n","repo_name":"Deamerrong123/GraphingCalculator","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71099411465","text":"__author__ = \"Jesse Hermon\"\n\n\n#########################################################################\n# Random import #\n#########################################################################\n# What did you see on line 1?\n# What was the smallest number you could have seen, what was the largest?\n#########################################################################\n# The output was a random number between and including 5 and 20\n#########################################################################\n# What did you see on line 2?\n# What was the smallest number you could have seen, what was the largest?\n# Could line 2 have produced a 4?\n#########################################################################\n# The output displayed a random number 
between 3 and 10 including 3\n# and random numbers incrementing from 3 by 2. 3, 5, 7 and 9 are the possible\n# outputs\n#########################################################################\n# What did you see on line 3?\n# What was the smallest number you could have seen, what was the largest?\n#########################################################################\n# From the visible output it displays a floating point between the float numbers inputed\n# After the looking at the help, it shows that it can also be equal to the inputs\n#########################################################################\n# Exceptions #\n#########################################################################\n# 1. When will a ValueError occur?\n# 2. When will a ZeroDivisionError occur\n# 3. Could you change the code to avoid the possiblitiy of a ZeroDivisionError\n#########################################################################\n# The valueError will occur when the wrong type of input is inputted\n# The ZeroDivisionError will occur when the denominator entered is zero\n# To make the code safer you can add in a if-statement and a while loop to\n# keep requesting the denimonator if the inputted value is zero\n#########################################################################","repo_name":"hermonator/CP1404","sub_path":"Workshop 4/Question Answers.py","file_name":"Question Answers.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29817769949","text":"import json\n\nfrom wsme import types as wtypes\n\n\nclass Base(wtypes.DynamicBase):\n \"\"\"REST API Resource.\"\"\"\n _wsme_attributes = []\n\n def to_dict(self):\n d = {}\n\n for attr in self._wsme_attributes:\n attr_val = getattr(self, attr.name)\n if not isinstance(attr_val, wtypes.UnsetType):\n d[attr.name] = attr_val\n\n return d\n\n @classmethod\n def from_dict(cls, d):\n obj = cls()\n for key, val in d.items():\n if hasattr(obj, key):\n setattr(obj, key, val)\n return obj\n\n def to_string(self):\n return json.dumps(self.to_dict())\n\n def __str__(self):\n \"\"\"WSME based implementation of __str__.\"\"\"\n\n res = \"%s [\" % type(self).__name__\n\n first = True\n for attr in self._wsme_attributes:\n if not first:\n res += ', '\n else:\n first = False\n\n res += \"%s='%s'\" % (attr.name, getattr(self, attr.name))\n\n return res + \"]\"\n\n @classmethod\n def get_fields(cls):\n obj = cls()\n\n return [attr.name for attr in obj._wsme_attributes]\n\n\nclass Link(Base):\n \"\"\"Web link.\"\"\"\n\n href = wtypes.text\n target = wtypes.text\n\n @classmethod\n def sample(cls):\n return cls(href='http://example.com/here',\n target='here')\n\n\n","repo_name":"Aaron-DH/openstack_sample_project","sub_path":"report/api/controller/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9645761141","text":"# -*-coding:utf-8-*-\nimport re\n\nfp_in1 = open('test03_1.info','r')\nfp_in2 = open('test03_2.info','r')\nfp_in3 = open('test03_3.info','r')\nfp_out = open('test03_out.info','w')\n\nfp_out.write('Chr' + '\\t' + 'Position' + '\\t' + 'Ref' + '\\t' + 'test03_1' + '\\t' + 'test03_2' + '\\t' + 'test03_3' + '\\n')\n\nall_line1 = []\nall_line2 = []\nall_line3 = []\nlast1 = []\nlast2 = []\nlast3 = []\nchr_list = []\nfor line in fp_in1:\n line_list = line.rstrip().split('\\t')\n line2_list = line_list[:-1]\n 
line2 = '\\t'.join(line2_list)\n all_line1.append(line2)\n last1.append(line_list[-1])\n chr_list.append(line_list[0])\n\nfor line in fp_in2:\n line_list = line.rstrip().split('\\t')\n line2_list = line_list[:-1]\n line2 = '\\t'.join(line2_list)\n all_line2.append(line2)\n last2.append(line_list[-1])\n chr_list.append(line_list[0])\n\nfor line in fp_in3:\n line_list = line.rstrip().split('\\t')\n line2_list = line_list[:-1]\n line2 = '\\t'.join(line2_list)\n all_line3.append(line2)\n last3.append(line_list[-1])\n chr_list.append(line_list[0])\n\n#print all_line1\n#print last1\n\nall_lines = all_line1 + all_line2 + all_line3\nall_lines = list(set(all_lines))\n#print all_line\n\nchr_list = list(set(chr_list))\n#print chr_list\n\nchr_pos_ref = {}\nfor chr in chr_list:\n chr_tab = chr + '\\t'\n chr_pos_ref[chr] = []\n for item in all_lines:\n match1 = re.match(chr_tab, item)\n if match1:\n item_list = [int(item.split('\\t')[1]),item]\n chr_pos_ref[chr].append(item_list)\n\n#print chr_pos_ref\nfor chr in chr_list:\n chr_pos_ref[chr] = sorted(chr_pos_ref[chr])\n\nall_lines_sorted=[]\nfor chr in chr_list:\n\tfor i in range(len(chr_pos_ref[chr])):\n\t\tall_lines_sorted.append(chr_pos_ref[chr][i][1])\n\n#print all_lines_sorted\n\nfor sorted_line in all_lines_sorted:\n last_three = ['-', '-', '-']\n if sorted_line in all_line1:\n index = all_line1.index(sorted_line)\n last_three[0] = last1[index]\n if sorted_line in all_line2:\n index = all_line2.index(sorted_line)\n last_three[1] = last2[index]\n if sorted_line in all_line3:\n index = all_line3.index(sorted_line)\n last_three[2] = last3[index]\n\n fp_out.write(sorted_line + '\\t' + last_three[0] + '\\t' + last_three[1] + '\\t' + last_three[2] + '\\n')\n\n\n\nfp_in1.close()\nfp_in2.close()\nfp_in3.close()\nfp_out.close()\n\n'''\nchr_123 = {}\npos_ref = {}\n\nwhile True:\n line = fp_in1.readline()\n if len(line.rstrip()) == 0:\n break\n line = line.rstrip().split('\\t')\n key = line[0]\n value = line[3]\n chr_123[key] = []\n chr_123[key].append(value)\n\n#print chr_123\n\nwhile True:\n line = fp_in2.readline()\n if len(line.rstrip()) == 0:\n break\n line = line.rstrip().split('\\t')\n key = line[0]\n if key in chr_123.keys():\n chr_123[key].append(line[3])\n else:\n chr_123[key] = []\n chr_123[key].append(line[3])\n\nwhile True:\n line = fp_in3.readline()\n if len(line.rstrip()) == 0:\n break\n line = line.rstrip().split('\\t')\n key = line[0]\n if key in chr_123.keys():\n chr_123[key].append(line[3])\n else:\n chr_123[key] = []\n chr_123[key].append(line[3])\nfor j in chr_123.keys():\n for i in chr_123.values():\n if len(i) == 1:\n chr_123[j].append('-','-')\n if len(i) == 2:\n chr_123[j].append('-')\nprint chr_123\n'''\n\n","repo_name":"g-lyc/PRACTICE","sub_path":"Novogene/第一次编程考核/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4387534746","text":"import importlib\n\nfrom telegram import ParseMode\nfrom telegram.ext import Filters, MessageHandler\n\nfrom mensabot.bot.ext import dispatcher\nfrom mensabot.bot.util import ComHandlerFunc, get_args\nfrom mensabot.format import get_abbr\n\n__all__ = [\"cafete\", \"config\", \"mensa\", \"debug\"]\n\n\ndef init_commands():\n # commands are automatically added when their decorator is parsed,\n # so just make sure the modules are loaded\n for mod in __all__:\n importlib.import_module(\".\" + mod, __name__)\n\n def unknown(bot, update):\n 
bot.send_message(chat_id=update.message.chat_id, text=\"Sorry, I didn't understand that command.\")\n\n unknown_handler = MessageHandler(Filters.command, unknown)\n dispatcher.add_handler(unknown_handler)\n\n\n@ComHandlerFunc(\"start\")\ndef start(bot, update):\n bot.sendMessage(chat_id=update.message.chat_id, text= \\\n \"MensaBot Passau to your service. \"\n \"Try /mensa or /cafete and add a time or location if you want.\")\n\n\n@ComHandlerFunc(\"help\")\ndef help(bot, update):\n bot.sendMessage(chat_id=update.message.chat_id, text= \\\n \"MensaBot Passau to your service. \"\n \"Try /mensa or /cafete and add a time or location if you want. \"\n \"Examples:\\n\"\n \"/mensa tomorrow\\n\"\n \"/cafete audimax\\n\"\n \"/cafete nk 26.09.\\n\"\n \"/abbr MV\\n\\n\"\n \"For configuration use /get and /set.\")\n\n\n@ComHandlerFunc(\"status\")\ndef status(bot, update):\n from mensabot.bot.api import health\n\n bot.sendMessage(chat_id=update.message.chat_id, text= \\\n (\"Everything is fine! 😊\\n\" if health.check(request=False)[1] == health.success_status else\n \"Uhoh. There seem to be some problems! 😕\\n\") +\n \"You can also check my uptime status online:\\n\"\n \"http://status.mensabot.niko.voidptr.de\")\n\n\n@ComHandlerFunc(\"abbr\")\ndef abbr(bot, update):\n args = get_args(update)\n abbrs = get_abbr()\n if not args or not args[0]:\n bot.sendMessage(chat_id=update.message.chat_id, text=abbrs, parse_mode=ParseMode.MARKDOWN)\n return\n found = [abbr for abbr in abbrs.split(\"\\n\") if abbr.startswith(\"`\" + args[0])]\n if not found:\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Abbreviation '{}' not found.\".format(args[0]))\n else:\n bot.sendMessage(chat_id=update.message.chat_id, text=\"\\n\".join(found), parse_mode=ParseMode.MARKDOWN)\n","repo_name":"N-Coder/mensabot","sub_path":"mensabot/bot/command/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"25364114401","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n# 拉普拉斯检测\ndef laplace_test():\n img = cv2.imread('data/edge.jpg')\n lap = cv2.Laplacian(img, cv2.CV_16S) # 拉普拉斯边缘检测\n lap = np.uint8(np.absolute(lap)) # 对lap去绝对值\n\n plt.figure(figsize=(18, 10))\n plt.subplot(1, 2, 1), plt.imshow(img), plt.title('origin')\n plt.subplot(1, 2, 2), plt.imshow(lap), plt.title('laplace')\n plt.xticks([]), plt.yticks([])\n\n plt.show()\n\n\n# Soble边缘检测\ndef soble_test():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # x方向的梯度\n sy = cv2.Sobel(gray, cv2.CV_64F, 0, 1) # y方向的梯度\n sx = np.uint8(np.absolute(sx)) # x方向梯度的绝对值\n sy = np.uint8(np.absolute(sy)) # y方向梯度的绝对值\n combine = cv2.bitwise_or(sx, sy)\n\n plt.figure(figsize=(18, 10))\n plt.subplot(2, 3, 1), plt.imshow(img), plt.title('origin')\n plt.subplot(2, 3, 2), plt.imshow(gray), plt.title('gray')\n plt.subplot(2, 3, 3), plt.imshow(sx), plt.title('sx')\n plt.subplot(2, 3, 4), plt.imshow(sy), plt.title('sy')\n plt.subplot(2, 3, 5), plt.imshow(combine), plt.title('combine')\n plt.xticks([]), plt.yticks([])\n\n plt.show()\n\n\n# Canny边缘检测\ndef canny_test():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n canny_img = cv2.Canny(gray, 30, 150)\n\n plt.figure(figsize=(18, 10))\n plt.subplot(1, 2, 1), plt.imshow(img), plt.title('origin')\n plt.subplot(1, 2, 2), plt.imshow(canny_img), plt.title('canny')\n plt.xticks([]), 
plt.yticks([])\n\n plt.show()\n\n\n# Laplace 算法\ndef laplace():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Laplace on origin\n lap_origin = cv2.Laplacian(img, cv2.CV_16S) # 拉普拉斯边缘检测\n lap_origin = np.uint8(np.absolute(lap_origin)) # 对lap去绝对值\n # Laplace on a gray scale picture\n lap_gray = cv2.Laplacian(gray, cv2.CV_16S) # 拉普拉斯边缘检测\n lap_gray = np.uint8(np.absolute(lap_gray)) # 对lap去绝对值\n # Laplace on color\n r, g, b = cv2.split(img)\n lap_r = cv2.Laplacian(r, cv2.CV_16S)\n lap_r = np.uint8(np.absolute(lap_r))\n lap_g = cv2.Laplacian(g, cv2.CV_16S)\n lap_g = np.uint8(np.absolute(lap_g))\n lap_b = cv2.Laplacian(b, cv2.CV_16S)\n lap_b = np.uint8(np.absolute(lap_b))\n lap_color = cv2.merge([lap_r, lap_g, lap_b])\n # 灰度轮廓二值化\n gray_threshold = cv2.threshold(lap_gray, 50, 255, cv2.THRESH_BINARY_INV)[1]\n cv2.imshow('laplace_origin', lap_origin)\n cv2.imshow('laplace_gray', lap_gray)\n cv2.imshow('laplace_color', lap_color)\n cv2.imshow('gray_threshold', gray_threshold)\n cv2.waitKey(0)\n\n\n# Sobel 算法\ndef sobel():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # x方向的梯度\n sy = cv2.Sobel(gray, cv2.CV_64F, 0, 1) # y方向的梯度\n sx = np.uint8(np.absolute(sx)) # x方向梯度的绝对值\n sy = np.uint8(np.absolute(sy)) # y方向梯度的绝对值\n combine = cv2.bitwise_or(sx, sy) # 合并\n threshold = cv2.threshold(combine, 100, 255, cv2.THRESH_BINARY_INV)[1] # 轮廓二值化\n cv2.imshow('origin', img)\n cv2.imshow('gray', gray)\n cv2.imshow('sobel', combine)\n cv2.imshow('threshold', threshold)\n cv2.waitKey(0)\n\n\n# 形态学\ndef morphology():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # 定义结构元素,图像膨胀、侵蚀、开闭运算用\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))\n # 腐蚀图像\n eroded = cv2.erode(gray, kernel)\n # 膨胀图像\n dilated = cv2.dilate(gray, kernel)\n # 获取轮廓\n open_close = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)\n # 轮廓二值化\n threshold = cv2.threshold(open_close, 30, 255, cv2.THRESH_BINARY_INV)[1]\n cv2.imshow('origin', img)\n cv2.imshow('gray', gray)\n cv2.imshow('eroded', eroded)\n cv2.imshow('dilated', dilated)\n cv2.imshow('open_close', open_close)\n cv2.imshow('threshold', threshold)\n cv2.waitKey(0)\n\n\n# Canny 算法\ndef canny():\n img = cv2.imread('data/canny.jpg', cv2.IMREAD_GRAYSCALE)\n # 获取轮廓\n canny_img = cv2.Canny(img, 200, 200)\n # 复制多维数组,用作画线\n canny_dst = np.copy(canny_img)\n # hough变换检测直线\n lines = cv2.HoughLines(canny_img, 1, np.pi / 180, 100)\n lines1 = lines[:, 0, :] # 提取为为二维\n for rho, theta in lines1[:]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * a)\n cv2.line(canny_dst, (x1, y1), (x2, y2), (255, 255, 0), 1)\n\n cv2.imshow('origin', img)\n cv2.imshow('canny_img', canny_img)\n cv2.imshow('canny_dst', canny_dst)\n cv2.waitKey(0)\n\n\n# FindContours 轮廓检测\ndef find_contours():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # 灰度图像二值化\n gray = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)[1]\n # 定义结构元素,图像膨胀、侵蚀、开闭运算用\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n # 获取轮廓\n opened = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)\n closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)\n # 轮廓二值化\n closed = cv2.threshold(closed, 128, 255, cv2.THRESH_BINARY_INV)[1]\n # findContours计算轮廓\n contours = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n 
cv2.drawContours(img, contours[1], -1, (0, 0, 255), 3)\n\n cv2.imshow('img', img)\n cv2.waitKey(0)\n\n\n# Harris 角点检测\ndef harris():\n img = cv2.imread('data/edge.jpg')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray = np.float32(gray)\n # 输入图像必须是float32,最后一个参数在0.04 到0.05 之间\n dst = cv2.cornerHarris(gray, 2, 3, 0.04)\n # result is dilated for marking the corners, not important\n dst = cv2.dilate(dst, None)\n # Threshold for an optimal value, it may vary depending on the image.\n img[dst > 0.01 * dst.max()] = [0, 0, 255]\n\n cv2.imshow('img', img)\n cv2.waitKey(0)\n\n\nharris()\n","repo_name":"llfwer/python","sub_path":"图像处理/边缘与轮廓.py","file_name":"边缘与轮廓.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70416765066","text":"\"\"\"UI automation tests for the Request Demo page\"\"\"\nimport pytest\nfrom pages.codegen_page import CodeGenPage\nfrom pages.home_page import HomePage\n\n\n@pytest.mark.regression\nclass TestWalkNodes:\n \"\"\"UI automation tests for the Request Demo page\"\"\"\n\n @pytest.mark.set1\n @pytest.mark.parametrize(\n \"index\",\n [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n 15,\n 16,\n 17,\n 18,\n 19,\n 20,\n 21,\n 22,\n 23,\n ],\n )\n def test_walk_set1(self, index):\n self._execute(index)\n\n @pytest.mark.set2\n @pytest.mark.parametrize(\n \"index\",\n [\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n 15,\n 16,\n 17,\n 18,\n 19,\n ],\n )\n def test_walk_set_2(self, index):\n self._execute(index)\n\n def _execute(self, index):\n home_page = HomePage(self.driver)\n assert home_page.is_in_home_page()\n home_page.perform_tree_walk(index)\n\n","repo_name":"ShatlinDenistan/pyselenium","sub_path":"tests/test_walk.py","file_name":"test_walk.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30782597623","text":"from odoo.addons.shopinvader.tests.common import CommonCase\n\n\nclass TestMembershipProductService(CommonCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.membership_product_obj = cls.env[\"product.product\"]\n\n def setUp(self, *args, **kwargs):\n super().setUp(*args, **kwargs)\n with self.work_on_services(partner=self.backend.anonymous_partner_id) as work:\n self.service_guest = work.component(usage=\"membership_product\")\n\n def _check_data_content(self, data):\n \"\"\"\n Check data based on given membership products\n :param data: list\n :param membership_products: product.product recordset\n :return: bool\n \"\"\"\n membership_products = self.membership_product_obj.search(\n [(\"membership\", \"=\", True)]\n )\n self.assertEqual(len(data), len(membership_products))\n for current_data, membership_product in zip(data, membership_products):\n self.assertEqual(current_data.get(\"id\"), membership_product.id)\n self.assertEqual(current_data.get(\"name\"), membership_product.name)\n self.assertEqual(\n current_data.get(\"default_code\") or False,\n membership_product.default_code,\n )\n self.assertEqual(\n current_data.get(\"description_sale\") or False,\n membership_product.description_sale,\n )\n self.assertEqual(\n current_data.get(\"list_price\"), membership_product.list_price\n )\n\n def test_get_membership_product(self):\n result = self.service_guest.dispatch(\"search\")\n data = result.get(\"data\", [])\n 
self._check_data_content(data)\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_membership/tests/test_membership_product_service.py","file_name":"test_membership_product_service.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"13036748237","text":"import bpy\nfrom ... sockets.info import isBase, toBaseDataType\nfrom ... base_types import AnimationNode, ListTypeSelectorSocket, VectorizedSocket\n\nclass InsertListElementNode(AnimationNode, bpy.types.Node):\n bl_idname = \"an_InsertListElementNode\"\n bl_label = \"Insert List Element\"\n\n assignedType: ListTypeSelectorSocket.newProperty(default = \"Float\")\n useList: VectorizedSocket.newProperty()\n\n def create(self):\n prop = (\"assignedType\", \"BASE\")\n self.newInput(ListTypeSelectorSocket(\n \"List\", \"list\", \"LIST\", prop, dataIsModified = True))\n self.newInput(VectorizedSocket(self.assignedType, \"useList\",\n (\"Element\", \"element\"), (\"Elements\", \"elements\")), dataIsModified = True)\n self.newInput(\"Integer\", \"Index\", \"index\")\n self.newOutput(ListTypeSelectorSocket(\n \"List\", \"list\", \"LIST\", prop))\n\n def drawAdvanced(self, layout):\n self.invokeSelector(layout, \"DATA_TYPE\", \"assignListDataType\",\n dataTypes = \"LIST\", text = \"Change Type\", icon = \"TRIA_RIGHT\")\n\n def getExecutionCode(self, required):\n if not self.useList:\n yield \"list.insert(index, element)\"\n else:\n yield \"list = list[:index] + elements + list[index:]\"\n\n def assignListDataType(self, listDataType):\n self.assignType(toBaseDataType(listDataType))\n\n def assignType(self, baseDataType):\n if not isBase(baseDataType): return\n if baseDataType == self.assignedType: return\n self.assignedType = baseDataType\n self.refresh()\n","repo_name":"JacquesLucke/animation_nodes","sub_path":"animation_nodes/nodes/list/insert_list_element.py","file_name":"insert_list_element.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":2231,"dataset":"github-code","pt":"81"} +{"seq_id":"45890511786","text":"import math\nfrom hashlib import sha256\nfrom typing import NamedTuple, List, Union, Tuple\n\n\nclass Sibling(NamedTuple):\n direction: str\n idx: int # mypy doesn't like attributes called `index`\n\n\nclass ProofChainElement(NamedTuple):\n direction: str\n node_hash: bytes\n\n\nclass MerkleProof(NamedTuple):\n element_hash: bytes\n chain: List[ProofChainElement]\n root: bytes\n\n\ndef hash(something: bytes) -> bytes:\n return sha256(something).digest()\n\n\ndef hash_from_path(file_path) -> bytes:\n file_contents = open(file_path, 'rb').read()\n return hash(file_contents)\n\n\ndef _build_simple_merkle_tree_list(items: List[bytes]) -> List[bytes]:\n # shamelessly inspired by https://github.com/petertodd/python-bitcoinlib/\n data_structure = list(items)\n\n generation_size = len(items)\n generation_start_index = 0\n while generation_size > 1:\n for left_offset in range(0, generation_size, 2):\n right_offset = min(left_offset + 1, generation_size - 1)\n left_element = data_structure[generation_start_index + left_offset]\n right_element = data_structure[generation_start_index + right_offset]\n data_structure.append(hash(left_element + right_element))\n\n generation_start_index += generation_size\n generation_size = (generation_size + 1) // 2\n\n return data_structure\n\n\nclass MerkleTree:\n\n def __init__(self, items: List[bytes]) -> None:\n self.data = 
_build_simple_merkle_tree_list(items)\n self.size = len(items)\n self.height = len(self.data).bit_length()\n self.root = self.data[-1]\n\n\n @classmethod\n def from_filestream(cls, input_filestream):\n file_contents = input_filestream.readlines()\n items = [pic_hash.rstrip() for pic_hash in file_contents]\n return cls(items)\n\n\n def test_existence(self, file_path) -> bool:\n return self._is_hashed_element_present(hash_from_path(file_path))\n\n\n def _is_hashed_element_present(self, hashed_element: bytes):\n return hashed_element in self.data[:self.size]\n\n\n def _generation_bounds_for_index(self, index) -> Tuple[int, int]:\n gen_start, gen_end = (0, self.size - 1)\n for _ in range(self.height - 2):\n if (gen_start <= index <= gen_end):\n break\n current_gen_size = gen_end - gen_start + 1\n gen_start = gen_end + 1\n gen_end = math.ceil(current_gen_size / 2) + gen_start - 1\n return gen_start, gen_end\n\n\n def _sibling(self, index: int) -> Union[None, Sibling]:\n if index == len(self.data) - 1:\n # the root element has no siblings\n return None\n gen_start, gen_end = self._generation_bounds_for_index(index)\n\n if (index - gen_start) % 2 == 0:\n if index == gen_end:\n # item is its own sibling\n return Sibling('right', index)\n else:\n return Sibling('right', index + 1)\n else:\n return Sibling('left', index - 1)\n\n\n def _parent(self, index) -> Union[None, int]:\n if index == len(self.data) - 1:\n # the root element has no parent\n return None\n gen_start, gen_end = self._generation_bounds_for_index(index)\n return (index - gen_start) // 2 + gen_end + 1\n\n\n def proof(self, file_path) -> Union[MerkleProof, bool]:\n element_hash = hash_from_path(file_path)\n try:\n element_index = self.data.index(element_hash)\n except ValueError:\n # element not in tree\n return False\n\n # the proof chain is each element's parent's sibling\n # so walk that up to the root\n chain = []\n next_element = self._sibling(element_index)\n while next_element is not None:\n proof_chain_element = ProofChainElement(next_element.direction, self.data[next_element.idx])\n chain.append(proof_chain_element)\n next_element = self._sibling(self._parent(next_element.idx))\n\n return MerkleProof(element_hash, chain, self.root)\n","repo_name":"xgess/timestamp_all_photos","sub_path":"app/merkle_tree.py","file_name":"merkle_tree.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40821793122","text":"import sys\r\n\r\n\r\n# 1. 
the form \" Please convert *** \"\r\ndef form1(converse_words):\r\n A_to_R_single = {\"0\": \"\", \"1\": \"I\", \"2\": \"II\", \"3\": \"III\", \"4\": \"IV\", \"5\": \"V\", \"6\": \"VI\", \"7\": \"VII\", \"8\": \"VIII\",\r\n \"9\": \"IX\"}\r\n A_to_R_tens = {\"0\": \"\", \"1\": \"X\", \"2\": \"XX\", \"3\": \"XXX\", \"4\": \"XL\", \"5\": \"L\", \"6\": \"LX\", \"7\": \"LXX\", \"8\": \"LXXX\",\r\n \"9\": \"XC\"}\r\n A_to_R_hundred = {\"0\": \"\", \"1\": \"C\", \"2\": \"CC\", \"3\": \"CCC\", \"4\": \"CD\", \"5\": \"D\", \"6\": \"DC\", \"7\": \"DCC\", \"8\": \"DCCC\",\r\n \"9\": \"CM\"}\r\n A_to_R_thousand = {\"1\": \"M\", \"2\": \"MM\", \"3\": \"MMM\"}\r\n arabic = converse_words\r\n if arabic.isdigit():\r\n charge = int(arabic)\r\n if charge > 3999 or arabic[0] == \"0\":\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n result_A_to_R = \"\"\r\n if len(arabic) == 4:\r\n result_A_to_R = result_A_to_R + A_to_R_thousand[arabic[0]] + A_to_R_hundred[arabic[1]] \\\r\n + A_to_R_tens[arabic[2]] + A_to_R_single[arabic[3]]\r\n elif len(arabic) == 3:\r\n result_A_to_R = result_A_to_R + A_to_R_hundred[arabic[0]] + A_to_R_tens[arabic[1]] \\\r\n + A_to_R_single[arabic[2]]\r\n elif len(arabic) == 2:\r\n result_A_to_R = result_A_to_R + A_to_R_tens[arabic[0]] + A_to_R_single[arabic[1]]\r\n elif len(arabic) == 1:\r\n result_A_to_R = result_A_to_R + A_to_R_single[arabic[0]]\r\n\r\n return result_A_to_R\r\n\r\n else:\r\n roman = converse_words\r\n\r\n R_to_A = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\r\n check = [\"I\", \"V\", \"X\", \"L\", \"C\", \"D\", \"M\"]\r\n\r\n for a in roman:\r\n if a not in check:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n if roman.count(\"I\") > 3 or roman.count(\"X\") > 3 or roman.count(\"C\") > 3 or roman.count(\"M\") > 3 \\\r\n or roman.count(\"V\") > 1 or roman.count(\"L\") > 1 or roman.count(\"D\") > 1:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n i = 0\r\n while i < len(roman) - 1:\r\n if roman[i] == \"I\":\r\n if \"IXI\" in roman or \"IVI\" in roman:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif R_to_A[roman[i]] < R_to_A[roman[i + 1]] and roman[i + 1] != \"V\" and roman[i + 1] != \"X\":\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif R_to_A[roman[i]] < R_to_A[roman[i + 1]] and R_to_A[roman[i]] == R_to_A[roman[i - 1]]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n elif roman[i] == \"X\":\r\n if \"XLX\" in roman or \"XCX\" in roman:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif R_to_A[roman[i]] < R_to_A[roman[i + 1]] and roman[i + 1] != \"L\" and roman[i + 1] != \"C\":\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif R_to_A[roman[i]] < R_to_A[roman[i + 1]] and R_to_A[roman[i]] == R_to_A[roman[i - 1]]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n elif roman[i] == \"C\":\r\n if \"CMC\" in roman or \"CDC\" in roman:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n if R_to_A[roman[i]] < R_to_A[roman[i + 1]] and roman[i + 1] != \"M\" and roman[i + 1] != \"D\":\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif R_to_A[roman[i]] < R_to_A[roman[i + 1]] and R_to_A[roman[i]] == R_to_A[roman[i - 
1]]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n elif roman[i] == \"V\" or roman[i] == \"L\" or roman[i] == \"D\":\r\n if R_to_A[roman[i]] < R_to_A[roman[i + 1]]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n i = i + 1\r\n\r\n if roman[i] not in check:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n result_R_to_A = 0\r\n i = 0\r\n while i < len(roman) - 1:\r\n if R_to_A[roman[i]] >= R_to_A[roman[i + 1]]:\r\n result_R_to_A = result_R_to_A + R_to_A[roman[i]]\r\n else:\r\n result_R_to_A = result_R_to_A + R_to_A[roman[i + 1]] - R_to_A[roman[i]]\r\n i = i + 1\r\n i = i + 1\r\n result_R_to_A = result_R_to_A + R_to_A[roman[i]]\r\n\r\n return result_R_to_A\r\n\r\n\r\n# 2. the form \" Please convert *** using *** \"\r\ndef form2(converse_words, rule):\r\n for i in rule:\r\n if rule.count(i) > 1:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n if converse_words.isdigit():\r\n arabic = converse_words\r\n if arabic[0] == \"0\":\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif arabic[0] == \"9\" and len(arabic) * 2 + 1 > len(rule):\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif int(arabic[0]) >= 4 and len(arabic) * 2 > len(rule):\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif len(arabic) * 2 - 1 > len(rule):\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n i = 0\r\n j = 0\r\n result_word = \"\"\r\n arabic = arabic[::-1]\r\n rule = rule[::-1]\r\n\r\n while i < len(arabic):\r\n if arabic[i] == \"0\":\r\n added = \"\"\r\n if arabic[i] == \"1\":\r\n added = \"\"\r\n added = added + rule[j]\r\n if arabic[i] == \"2\":\r\n added = \"\"\r\n added = added + rule[j] * 2\r\n if arabic[i] == \"3\":\r\n added = \"\"\r\n added = added + rule[j] * 3\r\n if arabic[i] == \"4\":\r\n added = \"\"\r\n added = added + rule[j] + rule[j + 1]\r\n if arabic[i] == \"5\":\r\n added = \"\"\r\n added = added + rule[j + 1]\r\n if arabic[i] == \"6\":\r\n added = \"\"\r\n added = added + rule[j + 1] + rule[j]\r\n if arabic[i] == \"7\":\r\n added = \"\"\r\n added = added + rule[j + 1] + rule[j] * 2\r\n if arabic[i] == \"8\":\r\n added = \"\"\r\n added = added + rule[j + 1] + rule[j] * 3\r\n if arabic[i] == \"9\":\r\n added = \"\"\r\n added = added + rule[j] + rule[j + 2]\r\n result_word = added + result_word\r\n i = i + 1\r\n j = j + 2\r\n\r\n return result_word\r\n\r\n elif converse_words.isalpha():\r\n roman = converse_words\r\n for i in roman:\r\n if i not in rule:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n rule = rule[::-1]\r\n count = 0\r\n value = 1\r\n dictionary = {}\r\n for i in rule:\r\n x = i\r\n y = value\r\n added = {x: y}\r\n dictionary.update(added)\r\n if count % 2 == 0:\r\n value = value * 5\r\n else:\r\n value = value * 2\r\n count = count + 1\r\n\r\n count = 0\r\n for i in dictionary:\r\n if count % 2 == 0 and roman.count(i) > 3:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif count % 2 == 1 and roman.count(i) > 1:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n count = count + 1\r\n\r\n check = []\r\n for i in rule:\r\n check.append(i)\r\n check.append(\"!\")\r\n check.append(\"!\")\r\n count = 0\r\n while count < len(check) - 2:\r\n if count % 2 == 0:\r\n if 
check[count + 1] != \"!\":\r\n invalid = \"\"\r\n invalid = check[count] + check[count + 1] + check[count]\r\n if invalid in roman:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n if check[count + 2] != \"!\":\r\n invalid = \"\"\r\n invalid = check[count] + check[count + 2] + check[count]\r\n if invalid in roman:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n j = 0\r\n while j < len(roman) - 1:\r\n if roman[j] == check[count] and dictionary[roman[j]] < dictionary[roman[j + 1]]:\r\n if roman[j + 1] != check[count + 1] and roman[j + 1] != check[count + 2]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif roman[j - 1] == roman[j]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n j = j + 1\r\n\r\n elif count % 2 == 1:\r\n j = 0\r\n while j < len(roman) - 1:\r\n if roman[j] == check[count] and dictionary[roman[j]] < dictionary[roman[j + 1]]:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n j = j + 1\r\n\r\n count = count + 1\r\n\r\n i = 0\r\n result_R_to_A = 0\r\n while i < len(roman) - 1:\r\n if dictionary[roman[i]] >= dictionary[roman[i + 1]]:\r\n result_R_to_A = result_R_to_A + dictionary[roman[i]]\r\n else:\r\n result_R_to_A = result_R_to_A + dictionary[roman[i + 1]] - dictionary[roman[i]]\r\n i = i + 1\r\n i = i + 1\r\n result_R_to_A = result_R_to_A + dictionary[roman[i]]\r\n return result_R_to_A\r\n\r\n else:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n\r\ndef form3(converse_words):\r\n if not converse_words.isalpha():\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n the_word = []\r\n for i in converse_words:\r\n the_word.append(str(i))\r\n\r\n A_to_R_single = {0: \"\", 1: \"0\", 2: \"00\", 3: \"000\", 4: \"01\", 5: \"1\", 6: \"10\", 7: \"100\", 8: \"1000\", 9: \"02\"}\r\n A_to_R_tens = {0: \"\", 1: \"2\", 2: \"22\", 3: \"222\", 4: \"23\", 5: \"3\", 6: \"32\", 7: \"322\", 8: \"3222\", 9: \"24\"}\r\n A_to_R_dict = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100}\r\n result_dict = {}\r\n\r\n num_dict = []\r\n i = 0\r\n times = 1\r\n while i < 10:\r\n j = 0\r\n while j < 10:\r\n abc = A_to_R_tens[i] + A_to_R_single[j]\r\n x = [abc.index(a) for a in abc]\r\n y = \"\"\r\n for p in x:\r\n y = y + str(p)\r\n j = j + 1\r\n if y == \"\":\r\n continue\r\n num_dict.append(y)\r\n i = i + 1\r\n\r\n result = \"\"\r\n while 1:\r\n if len(the_word) == 0:\r\n break\r\n save_word = \"\"\r\n save_the_word = the_word[:]\r\n value = 0\r\n\r\n while 1:\r\n if len(the_word) == 0:\r\n break\r\n temp_word = save_word\r\n i = the_word.pop(-1)\r\n count = the_word.count(i)\r\n temp_word = i + temp_word\r\n\r\n while 1:\r\n if count == 0:\r\n break\r\n j = the_word.pop(-1)\r\n if j == i:\r\n temp_word = j + temp_word\r\n count -= 1\r\n else:\r\n temp_word = j + temp_word\r\n\r\n temp_index = [temp_word.index(a) for a in temp_word]\r\n temp_index_str = \"\"\r\n for p in temp_index:\r\n temp_index_str = temp_index_str + str(p)\r\n\r\n if temp_index_str not in num_dict and value == 0:\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n elif temp_index_str in num_dict:\r\n q = 1\r\n for p in num_dict:\r\n if p == temp_index_str:\r\n value = q\r\n break\r\n q += 1\r\n save_word = temp_word\r\n save_the_word = the_word[:]\r\n\r\n elif temp_index_str not in num_dict and value != 0:\r\n the_word = 
save_the_word[:]\r\n break\r\n result = str(value) + result\r\n abc = form1(str(value))\r\n already_added = []\r\n k = 0\r\n for i in save_word:\r\n if i in already_added:\r\n k += 1\r\n continue\r\n already_added.append(i)\r\n x = i\r\n y = A_to_R_dict[abc[k]] * times\r\n add_element = {x: y}\r\n result_dict.update(add_element)\r\n k += 1\r\n times = times * 100\r\n\r\n result_dict_str = \"\"\r\n compare = 1\r\n element_count = len(result_dict)\r\n k = 0\r\n last_char = \"\"\r\n while element_count != 0:\r\n charge = 0\r\n for i in result_dict:\r\n if result_dict[i] == compare:\r\n result_dict_str = i + result_dict_str\r\n last_char = i\r\n charge = 1\r\n element_count -= 1\r\n if charge == 0 and last_char != \"_\":\r\n result_dict_str = \"_\" + result_dict_str\r\n last_char = \"_\"\r\n if k % 2 == 0:\r\n compare = compare * 5\r\n if k % 2 == 1:\r\n compare = compare * 2\r\n k = k + 1\r\n if result_dict_str[-1] == \"_\":\r\n print(\"Hey, ask me something that's not impossible to do!\")\r\n sys.exit()\r\n\r\n print(\"Sure! It is %s using %s\" % (result, result_dict_str))\r\n\r\n\r\n# main function\r\ntry:\r\n keyboard_typing = input(\"How can I help you? \")\r\n words = keyboard_typing.split()\r\n if len(words) < 3 or len(words) > 5:\r\n raise ValueError\r\n elif words[0] != \"Please\" or words[1] != \"convert\":\r\n raise ValueError\r\n elif len(words) == 5 and words[3] != \"using\":\r\n raise ValueError\r\n elif len(words) == 4 and words[3] != \"minimally\":\r\n raise ValueError\r\nexcept ValueError:\r\n print(\"I don't get what you want, sorry mate!\")\r\n sys.exit()\r\n\r\n# 1. when the form is 'Please convert ***'\r\nif len(words) == 3:\r\n result = form1(words[2])\r\n print(\"Sure! It is \" + str(result))\r\n\r\n# 2. when the form is 'Please convert *** using ***'\r\nif len(words) == 5:\r\n result = form2(words[2], words[4])\r\n print(\"Sure! It is \" + str(result))\r\n\r\n# 3. 
when the form is ' Please convert *** minimally'\r\nif len(words) == 4:\r\n form3(words[2])\r\n","repo_name":"chachaqia/python-practice","sub_path":"roman_arabic/roman_arabic.py","file_name":"roman_arabic.py","file_ext":"py","file_size_in_byte":15467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17131070727","text":"#!/usr/bin/env python\n\nimport roslib\nimport rospy\n\nroslib.load_manifest('collect_data')\nroslib.load_manifest('gazebo_msgs')\nroslib.load_manifest('geometry_msgs')\nroslib.load_manifest('tf')\nimport gazebo_msgs.srv as gazebo_srv\nimport gazebo_msgs.msg as gazebo_msg\nimport geometry_msgs.msg as geometry_msg\nimport collect_data.srv as collect_srv\n\nimport tf\nimport numpy as np\nimport utils\nimport dyn_mult_view\n\nKINECT_SDF_PATH = '/'.join(str.split(dyn_mult_view.__file__, '/')[:-1]) + '/collect_data/models/kinect/model.sdf'\n\nclass ObjectManager(object):\n def __init__(self):\n rospy.init_node('object_manager', anonymous=True)\n self._service_proxies = {}\n self._call_spawn_model = utils.persistent_service_proxy(\n 'gazebo/spawn_sdf_model', gazebo_srv.SpawnModel, self._service_proxies)\n self._call_delete_model = utils.persistent_service_proxy(\n 'gazebo/delete_model', gazebo_srv.DeleteModel, self._service_proxies)\n self._call_set_model_state = utils.persistent_service_proxy(\n 'gazebo/set_model_state', gazebo_srv.SetModelState, self._service_proxies)\n self._call_get_model_state = utils.persistent_service_proxy(\n 'gazebo/get_model_state', gazebo_srv.GetModelState, self._service_proxies)\n self.active_models = []\n\n # Spawn the camera and any other initial models\n init_models = {\n 'distorted_camera': {\n 'model_sdf_file': KINECT_SDF_PATH,\n 'position': [-1.7, 0, 5.0],\n 'orientation': {'w': 1, 'x': 0, 'y': 0, 'z': 0},\n },\n # 'pointlight_above': {\n # 'model_sdf_file': '/home/owen/.gazebo/models/pointlight_above/model.sdf',\n # 'position': [0, 0, 6.5],\n # 'orientation': {'w': 1, 'x': 0, 'y': 0, 'z': 0},\n # },\n # 'pointlight_below': {\n # 'model_sdf_file': '/home/owen/.gazebo/models/pointlight_below/model.sdf',\n # 'position': [0, 0, 2.0],\n # 'orientation': {'w': 1, 'x': 0, 'y': 0, 'z': 0},\n # },\n }\n for model_name, model_info in init_models.items():\n self.spawn_object(model_name, model_info['model_sdf_file'],\n model_info['position'], model_info['orientation'])\n\n # Define services\n def parse_spawn_object_req(req):\n return {\n 'model_name': req.model_name,\n 'model_sdf_file': req.model_sdf_file,\n 'pos_x': req.pos_x,\n 'pos_y': req.pos_y,\n 'pos_z': req.pos_z,\n 'rot_w': req.rot_w,\n 'rot_x': req.rot_x,\n 'rot_y': req.rot_y,\n 'rot_z': req.rot_z,\n }\n rospy.Service('/manage_objects/spawn_object', collect_srv.SpawnObject,\n utils.service_handler(self.spawn_object_wrapper, parse_spawn_object_req))\n\n def parse_rotate_object_req(req):\n return {\n 'model_name': req.model_name,\n 'r': req.r,\n 'p': req.p,\n 'y': req.y,\n }\n rospy.Service('/manage_objects/rotate_object', collect_srv.RotateObject,\n utils.service_handler(self.rotate_object_wrapper, parse_rotate_object_req))\n\n def parse_set_orientation_req(req):\n return {\n 'model_name': req.model_name,\n 'w': req.w,\n 'x': req.x,\n 'y': req.y,\n 'z': req.z,\n }\n rospy.Service('/manage_objects/set_orientation', collect_srv.SetOrientation,\n utils.service_handler(self.set_orientation, parse_set_orientation_req))\n\n def spawn_object_wrapper(self, model_name, model_sdf_file,\n pos_x, pos_y, pos_z, rot_w, rot_x, rot_y, rot_z):\n 
self.spawn_object(model_name, model_sdf_file, [pos_x, pos_y, pos_z],\n {'w': rot_w, 'x': rot_x, 'y': rot_y, 'z': rot_z})\n return True\n\n def spawn_object(self, model_name, model_sdf_file, position, orientation):\n \"\"\"\n MODEL_NAME - a string representing the name of the object to rotate\n MODEL_SDF_FILE - a string representing the path to the model's SDF file\n POSITION - a 3-tuple containing the XYZ coordinates of the spawn point\n ORIENTATION - a dict containing the desired orientation as a quaternion\n \"\"\"\n with open(model_sdf_file) as f:\n model_sdf = f.read()\n self._call_spawn_model(\n model_name=model_name, model_xml=model_sdf,\n initial_pose=geometry_msg.Pose(\n position=geometry_msg.Point(*position),\n orientation=geometry_msg.Quaternion(**orientation)\n )\n )\n self.active_models.append(model_name)\n\n def rotate_object_wrapper(self, model_name, r, p, y):\n self.rotate_object(model_name, (r, p, y,))\n return True\n\n def rotate_object(self, model_name, rotation):\n \"\"\"\n MODEL_NAME - a string representing the name of the object to rotate\n ROTATION - a 3-tuple containing RPY angles\n \"\"\"\n model_state_raw = self._call_get_model_state(model_name=model_name)\n model_state = gazebo_msg.ModelState()\n model_state.model_name = model_name\n model_state.pose = model_state_raw.pose\n model_state.twist = model_state_raw.twist\n\n Rq = tf.transformations.quaternion_from_euler(*rotation)\n rotated = tf.transformations.quaternion_multiply(Rq, np.array([\n model_state.pose.orientation.x,\n model_state.pose.orientation.y,\n model_state.pose.orientation.z,\n model_state.pose.orientation.w,\n ]))\n\n model_state.pose.orientation = geometry_msg.Quaternion(*rotated)\n self._call_set_model_state(model_state=model_state)\n\n def set_orientation(self, model_name, w, x, y, z):\n model_state_raw = self._call_get_model_state(model_name=model_name)\n model_state = gazebo_msg.ModelState(\n model_name=model_name, pose=model_state_raw.pose, twist=model_state_raw.twist)\n model_state.pose.orientation = geometry_msg.Quaternion(x, y, z, w)\n self._call_set_model_state(model_state=model_state)\n return True\n\n def listen(self):\n try:\n while not rospy.is_shutdown():\n rospy.sleep(0.1)\n finally:\n for model_name in self.active_models:\n self._call_delete_model(model_name=model_name)\n\nif __name__ == '__main__':\n object_manager = ObjectManager()\n try:\n object_manager.listen()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"febert/dynamic_multiview_3d","sub_path":"dyn_mult_view/collect_data/scripts/manage_objects.py","file_name":"manage_objects.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27011591445","text":"from scrapy.utils.misc import load_object\nfrom scrapy.utils.serialize import ScrapyJSONEncoder\nfrom twisted.internet.threads import deferToThread\n\nfrom . 
import connection\n\ndefault_serialize = ScrapyJSONEncoder().encode\n\n\nclass RabbitmqPipeline(object):\n def __init__(self, item_key, connection_url):\n self.server = connection.connect(connection_url)\n self.item_key = item_key\n self.serialize = default_serialize\n self.channel = connection.get_channel(self.server, self.item_key)\n\n @classmethod\n def from_crawler(cls, crawler):\n print(hasattr(crawler.spider, 'items_key'))\n if hasattr(crawler.spider, 'items_key'):\n item_key = crawler.spider.items_key\n else:\n item_key = 'items_{spider_name}'.format(\n spider_name=crawler.spider.name)\n return cls(item_key=item_key,\n connection_url=crawler.settings.get(\n 'RABBITMQ_CONNECTION_PARAMETERS'))\n\n def process_item(self, item, spider):\n data = self.serialize(item)\n\n self.channel.basic_publish(exchange='',\n routing_key=self.item_key,\n body=data)\n return item\n\n def close(self):\n \"\"\"Close channel\"\"\"\n self.channel.close()\n","repo_name":"Rockyzsu/CodePool","sub_path":"scrapy_rabbitmq/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"12067178086","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.utils.encoding import smart_str\nfrom django.shortcuts import render, HttpResponseRedirect, HttpResponse, get_object_or_404, redirect, reverse\n\nimport zipfile\nimport datetime\nimport os\nimport csv\n\nfrom gamescoring.models import GameNumber, Participant\nfrom gamescoring.backend import ranking\nfrom .models import zipcsvfile\nfrom .maketables import WriteTables, WriteScoreTables, Write_csv, CSV_to_db\n\n\ndef csvweb(request, category=None):\n if category in ['501','BB']:\n rtable, stable, headerrank,headersummary,maxplist = WriteTables(category)\n elif category == 'BBScores':\n rtable, stable, headerrank,headersummary,maxp = WriteScoreTables()\n else:\n messages.warning(request, \"Nothing to do\")\n return HttpResponseRedirect('/')\n # Create the HttpResponse object with the appropriate CSV header.\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = f'attachment; filename=\"{category}.csv\"'\n writer = csv.writer(response)\n writer.writerow(headerrank)\n writer.writerows(rtable.values())\n writer.writerows([''])\n writer.writerow(headersummary)\n writer.writerows(stable)\n writer.writerows([''])\n return response\n\n\ndef webtables(request, category=None):\n if category in ['501', 'BB']:\n rtable, stable, headerrank, headersummary, maxplist = WriteTables(category)\n stable_title = 'Standings'\n elif category == 'BBScores':\n rtable, stable, headerrank, headersummary, maxplist = WriteScoreTables()\n stable_title = 'Average scores'\n else:\n messages.warning(request, \"Nothing to do\")\n return HttpResponseRedirect('/')\n context = {'title': f'Tables {category}',\n 'maxplist': maxplist,\n 'headerrank': headerrank,\n 'headersummary': headersummary,\n 'maintable': rtable,\n 'summarytable': stable,\n 'stable_title': stable_title}\n return render(request, 'scoretable/table.html', context)\n\n\ndef csvzip(request):\n path = os.path.join(settings.MEDIA_ROOT, 'files')\n datesuffix = datetime.datetime.now().strftime('_%Y_%m_%d')\n zipfilename = 'DartsTablesCSV' + datesuffix + '.zip'\n longzipfilename = os.path.join(path, zipfilename)\n # try:\n if datesuffix:\n zip_archive = zipfile.ZipFile(longzipfilename, 'w')\n\n for tabletype in ['501', 'BB']:\n rtable, stable, 
headerrank, headersummary, maxplist = WriteTables(tabletype)\n if len(rtable) > 0:\n outfile = f'DartsTables{tabletype}' + datesuffix + '.csv'\n mf = Write_csv(headerrank, rtable, headersummary, stable)\n zip_archive.writestr(outfile, mf.read())\n if len(rtable) > 0:\n sbb, ssbb, headerscore, headersumscore, maxp = WriteScoreTables()\n outfile = 'DartsTablesBBScores' + datesuffix + '.csv'\n mf = Write_csv(headerscore, sbb, headersumscore, ssbb)\n zip_archive.writestr(outfile, mf.read())\n zip_archive.close()\n z, created = zipcsvfile.objects.get_or_create(filename=zipfilename)\n z.path = smart_str(longzipfilename)\n z.filename = zipfilename\n z.timesdownloaded = 0\n z.save()\n allfiles = zipcsvfile.objects.all()\n context = {'title': 'Download',\n 'allfiles': allfiles\n }\n return render(request, 'scoretable/download.html', context)\n # except:\n # messages.warning(request, \"Error making the csv files\")\n # return HttpResponseRedirect('/')\n\n\ndef downloadzip(request, slug=None):\n f = get_object_or_404(zipcsvfile, slug=slug)\n link = f.path\n response = HttpResponse()\n response['Content-Type'] = 'application/zip'\n response['Content-Disposition'] = f\"attachment; filename = '{f.filename}'\"\n response['X-Sendfile'] = smart_str(link)\n f.timesdownloaded += 1\n f.save()\n return response\n\n\ndef editgame(request, id):\n if request.method == \"GET\":\n q = get_object_or_404(GameNumber, id=id )\n # category = q.category\n # players = q.get_all_players()\n\n context = {'title': 'Edit',\n 'game': q}\n return render(request, 'scoretable/edit.html', context)\n if request.method == \"POST\":\n g = get_object_or_404(GameNumber, id=id)\n try:\n requestdict = dict(request.POST)\n selectedplayers = requestdict.get('selectp')\n todb = dict.fromkeys(requestdict.get('selectp'))\n if g.category == 'BB':\n pscores = requestdict.get('pscore')\n pranks = ranking(pscores)\n else:\n pranks = requestdict.get('prank')\n pscores = [None for i in range(len(pranks))]\n z = zip(selectedplayers,pranks,pscores)\n todb = {p:{'rank':int(r),'score':int(s)} for p,r,s in z}\n for p in g.participant_set.all():\n if p.rank != todb[p.player.name]['rank']:\n q = Participant.objects.get(game=g, player=p.player)\n q.rank = todb[p.player.name]['rank']\n q.save()\n if p.score != todb[p.player.name]['score']:\n q = Participant.objects.get(game=g, player=p.player)\n q.score = todb[p.player.name]['score']\n q.save()\n return redirect('scoretable:editgame', id=g.id)\n except:\n messages.warning(request, \"Sorry, something wrong happened entering data in the database\")\n return HttpResponseRedirect('/')\n\n\ndef deletegame(request,id):\n q = get_object_or_404(GameNumber, id=id )\n gcat = q.category\n #todel.delete()\n return HttpResponseRedirect(reverse(\"scoretable:webtables\", kwargs={'category':gcat}))\n\n\ndef deletezip(request,id):\n q = get_object_or_404(zipcsvfile, id=id )\n q.path.delete()\n q.delete()\n qs = zipcsvfile.objects.all().order_by(\"-timestamp\")\n context = {'title': 'Download',\n 'allfiles': qs\n }\n return render(request, 'scoretable/download.html', context)\n\n\ndef upload_csv(request):\n if request.method == \"POST\":\n csvfile = request.FILES['csvfile']\n if not csvfile.name.endswith('.csv'):\n messages.error(request, 'File is not CSV')\n return HttpResponseRedirect(reverse(\"scoretable:upload_csv\"))\n if CSV_to_db(csvfile):\n messages.success(request, \"It worked!\")\n return HttpResponseRedirect('/')\n else:\n messages.error(request, \"Something wrong happened. 
Better check the database...\")\n return render(request, 'scoretable/uploadcsv.html', {'title': 'Upload'})\n","repo_name":"lgalarno/DjangoDartsDB","sub_path":"scoretable/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73840447304","text":"# -*- coding:utf-8 -*-\n\nimport collections\nfrom ..utils import consts\n\n\n# class DeepMeta:\n# def __init__(self):\n#\n# self.categorical_columns\n# self.continuous_columns\n\n# class Column:\n# def __init__(self, name, dtype, has_nans):\n# self.name = name\n# self.dtype = dtype\n# self.has_nans = has_nans\n#\n#\n# class CategoricalColumn(Column):\n# def __init__(self, name, dtype, has_nans, num_uniques):\n# super(CategoricalColumn).__init__(name, dtype, has_nans)\n# self.num_uniques = num_uniques\n#\n#\n# class ContinuousColumn(Column):\n# def __init__(self, name, dtype, has_nans, min, max):\n# super(ContinuousColumn).__init__(name, dtype, has_nans)\n# self.min = min\n# self.max = max\n\n\nclass CategoricalColumn(collections.namedtuple('CategoricalColumn',\n ['name',\n 'vocabulary_size',\n 'embeddings_output_dim',\n 'dtype',\n 'input_name',\n ])):\n def __hash__(self):\n return self.name.__hash__()\n\n def __new__(cls, name, vocabulary_size, embeddings_output_dim=10, dtype='int32', input_name=None, ):\n if input_name is None:\n input_name = consts.INPUT_PREFIX_CAT + name\n if embeddings_output_dim == 0:\n embeddings_output_dim = int(round(vocabulary_size ** 0.25))\n return super(CategoricalColumn, cls).__new__(cls, name, vocabulary_size, embeddings_output_dim, dtype,\n input_name)\n\n\nclass VarLenCategoricalColumn(collections.namedtuple('VarLenCategoricalColumn',\n ['name',\n 'vocabulary_size',\n 'embeddings_output_dim',\n 'dtype',\n 'input_name',\n 'sep',\n 'pooling_strategy',\n ])):\n\n def __hash__(self):\n return self.name.__hash__()\n\n def __new__(cls, name, vocabulary_size, embeddings_output_dim=10, dtype='int32', input_name=None, sep=\"|\", pooling_strategy='max'):\n if input_name is None:\n input_name = consts.INPUT_PREFIX_CAT + name\n if embeddings_output_dim == 0:\n embeddings_output_dim = int(round(vocabulary_size ** 0.25))\n # max_elements_length need a variable not const\n return super(VarLenCategoricalColumn, cls).__new__(cls, name, vocabulary_size, embeddings_output_dim, dtype,\n input_name, sep, pooling_strategy)\n\n\nclass ContinuousColumn(collections.namedtuple('ContinuousColumn',\n ['name',\n 'column_names',\n 'input_dim',\n 'dtype',\n 'input_name',\n ])):\n def __hash__(self):\n return self.name.__hash__()\n\n def __new__(cls, name, column_names, input_dim=0, dtype='float32', input_name=None, ):\n input_dim = len(column_names)\n return super(ContinuousColumn, cls).__new__(cls, name, column_names, input_dim, dtype, input_name)\n","repo_name":"DataCanvasIO/DeepTables","sub_path":"deeptables/models/metainfo.py","file_name":"metainfo.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":607,"dataset":"github-code","pt":"81"} +{"seq_id":"41926987782","text":"import os\nimport h5py\nimport numpy as np\nfrom torch.utils import data\nimport scipy.misc\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\nVGG_MEAN = [103.939, 116.779, 123.68]\n\n\ndef normalizeImage(img):\n img = img.astype('float')\n # Do not touch the alpha channel\n for i in range(3):\n minval = img.min()\n maxval = img.max()\n if minval != maxval:\n img -= 
minval\n img /= (maxval-minval)\n return img*255\ndata.DataLoader\n\nclass img_loader_2labels(data.Dataset):\n def __init__(self, sub_list):\n self.sub_list = sub_list\n\n osize = [256, 256]\n\n transform_list = []\n transform_list.append(transforms.Scale(osize, Image.BICUBIC))\n self.transforms_scale = transforms.Compose(transform_list)\n\n transform_list = []\n transform_list.append(transforms.Scale(osize, Image.NEAREST))\n self.transforms_seg_scale = transforms.Compose(transform_list)\n\n transform_list = []\n transform_list.append(transforms.ToTensor())\n self.transforms_toTensor = transforms.Compose(transform_list)\n\n transform_list = []\n transform_list.append(transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)))\n self.transforms_normalize = transforms.Compose(transform_list)\n\n def __getitem__(self, index):\n # load image\n subinfo = self.sub_list[index]\n\n sub_name = subinfo[0]\n view_name = subinfo[1]\n img_name = subinfo[2]\n img_dir = subinfo[3]\n\n img_file = os.path.join(img_dir, img_name)\n\n A_img = Image.open(img_file).convert('L')\n\n A_img = self.transforms_scale(A_img)\n\n A_img = self.transforms_toTensor(A_img)\n\n data = self.transforms_normalize(A_img)\n\n return data, sub_name, view_name, img_name\n\n\n def __len__(self):\n self.total_count = len(self.sub_list)\n return self.total_count\n\n\n def untransform(self, img, lbl):\n img = img.numpy()\n img = img.transpose(1, 2, 0)\n img += np.array(VGG_MEAN)\n img = img.astype(np.uint8)\n img = img[:, :, ::-1]\n lbl = lbl.numpy()\n return img, lbl\n","repo_name":"mhyeonsoo/DeepSpleen","sub_path":"extra/python/img_loader_2labels.py","file_name":"img_loader_2labels.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34491636703","text":"# The goal of this program is to reverse the order of a list\n\ndef main():\n ascendingList = [1, 1, 1, 2, 2, 5, 18, 23, 36, 48, 48, 52, 54, 68, 78, 79]\n descendingList = listReverser(ascendingList)\n print(\"This is the list ascending: \" + str(ascendingList))\n print(\"This is the list descending: \" + str(descendingList))\n\ndef listReverser(inputList):\n reversedList = []\n for i in range(len(inputList)-1,-1,-1):\n reversedList.append(inputList[i])\n return reversedList\n\nmain()","repo_name":"Jaq432/InterviewPrep","sub_path":"Originals/listReverser.py","file_name":"listReverser.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25780227808","text":"import random\ntotal = int(0)\nwhile True:\n numeropc = int(random.randrange(1, 10))\n maoj = int(input('PAR ou ÍMPAR: \\n [1]Par \\n [2]Ímpar \\n '))\n numeropl = int(input('Insira um número: '))\n nfinal = numeropl + numeropc\n if (maoj == 1):\n if (nfinal %2 == 0):\n total += 1\n print('Jogador venceu {} vez(es)'.format(total))\n elif (nfinal %2 != 0):\n print('Jogador perdeu!')\n print('Total de vitórias consecutivas {}'.format(total))\n break\n elif (maoj == 2):\n if (nfinal %2 != 0):\n total += 1\n print('Jogador venceu {} vez(es)'.format(total))\n if (nfinal %2 == 0):\n print('Jogador perdeu!')\n print('Total de vitórias consecutivas {}'.format(total))\n break\n","repo_name":"Rietol/Aprendendo-Python-e-Java","sub_path":"ExercíciosPython/ex068.py","file_name":"ex068.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"36610964046","text":"\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n# class Solution:\n# def longestUnivaluePath(self, root: TreeNode) -> int:\n\n\nroot = TreeNode(5)\nleft = TreeNode(4)\nright = TreeNode(5)\nroot.right = right\nroot.left = left\n\nleft1 = TreeNode(1)\nright1 = TreeNode(1)\nleft.left = left1\nleft.right = right1\n\nleft2 = TreeNode(None)\nright2 = TreeNode(5)\nright.left = left2\nright.right = right2\n\n\nresult = 0\n\ndef dfs(node):\n global result\n if node is None:\n return 0\n # 없는거까지 내려가면 리턴 0\n\n left = dfs(node.left)\n right = dfs(node.right)\n # 제일 말단(리프)까지 재귀로 타고 들어감\n\n # 리프노드 도착했을때부터 시작\n if node.left and node.left.val == node.val:\n # node 의 왼쪽 자식 노드가 존재하고, 그 자식 노드의 값과 현대 노드의 값이 같을 때\n left += 1\n else :\n left = 0\n\n if node.right and node.right.val == node.val:\n # node 의 오른쪽 자식 노드가 존재하고, 그 자식 노드의 값과 현대 노드의 값이 같을 때\n right += 1\n else :\n right = 0\n\n #왼쪽과 오른쪽 자식 노드 간 거리의 합 최댓값이 결과\n result = max(result, left + right)\n\n #자식 노드 상태값 중 큰 값 리턴\n return max(left, right)\n\ndfs(root)\nprint(result)","repo_name":"jwseo4074/Leetcode_Python","sub_path":"leetcode_687.py","file_name":"leetcode_687.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74267300104","text":"import os\nimport shutil\n\nimport pandas as pd\n\ndef find_action_name_by_log_content(logs: pd.DataFrame, content: str, n: int = 100000) -> str:\n \"\"\"\n Function to find the name of an action given a sub string contained in its log (must be unique)\n \n For example `find_action_name_by_log_content(\"crochet du droit\")` returns \"FIST_WOUNDED\"\n\n Parameters :\n ------------\n content : str\n Sub string contained in the log\n n : int\n Number of logs to search in the dataset\n \"\"\"\n action_name_found = False\n \n while not action_name_found:\n sample = logs.sample(n)\n for i in sample.index.to_list():\n if content in sample.loc[i, \"Log\"]:\n return sample.loc[i, \"Action\"]\n\ndef find_all_actions_by_name(logs: pd.DataFrame, action_name: str) -> pd.DataFrame:\n \"\"\"\n Function to find all the logs of a given action name\n\n Parameters :\n ------------\n action_name : str\n Name of the action to find\n \"\"\"\n return logs[logs[\"Action\"] == action_name]\n\ndef get_player_logs() -> pd.DataFrame:\n \"\"\"\n Function to get all the player logs in a DataFrame\n\n Returns :\n ---------\n logs : pd.DataFrame\n Dataframe containing all the player logs\n \"\"\"\n if not os.path.exists(\"../data/clean_player_logs.csv\"):\n shutil.unpack_archive(\"../data/clean_player_logs.zip\", \"../data/\")\n\n return pd.read_csv(\"../data/clean_player_logs.csv\", sep=\";\")","repo_name":"cmnemoi/mush_logs_analysis","sub_path":"notebooks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7284963252","text":"import csv\nfrom collections import OrderedDict\nfrom dateutil.parser import parse\nclass DataFrame(object):\n\n @classmethod\n def from_csv(cls, file_path, delimiting_character=',', quote_character='\"'):\n with open(file_path, 'rU') as infile:\n reader = csv.reader(infile, delimiter=delimiting_character, quotechar=quote_character)\n data = []\n\n for row in reader:\n data.append(row)\n\n return cls(list_of_lists=data)\n\n\n def __init__(self, list_of_lists, header=True):\n if 
header:\n            self.header = list_of_lists[0]\n            self.data = list_of_lists[1:]\n        else:\n            self.header = ['column' + str(index + 1) for index, column in enumerate(list_of_lists[0])]\n            self.data = list_of_lists\n\n        self.data = [[value.strip() for value in row] for row in self.data]\n\n\n        self.data = [OrderedDict(zip(self.header, row)) for row in self.data]\n\n        if len(self.header) != len(set(self.header)):\n            raise Exception('There are duplicate column names in the header')\n\n\n\n\n    def add_rows(self, list_of_lists):\n        # append new rows; each row must have one value per column\n        for row in list_of_lists:\n            if len(row) != len(self.header):\n                raise Exception('Number of values in a row does not match number of columns')\n            self.data.append(OrderedDict(zip(self.header, [value.strip() for value in row])))\n\n\n    def add_column(self, list_of_values, column_name):\n        # append a named column; it must have one value per existing row\n        if len(list_of_values) != len(self.data):\n            raise Exception('List of values does not equal number of rows in data frame')\n        self.header.append(column_name)\n        for row, value in zip(self.data, list_of_values):\n            row[column_name] = value\n\n\n    def mean(self, column_name):\n        listOfConvertedValues = [convertToFloatOrDatetime(value) for value in self[column_name]]\n        realMean = sum(listOfConvertedValues)/len(self[column_name])\n        return realMean\n\n    def min(self, column_name):\n        listOfConvertedValues = [convertToFloatOrDatetime(value) for value in self[column_name]]\n        realMin = min(listOfConvertedValues)\n        return realMin\n\n    def max(self, column_name):\n        listOfConvertedValues = [convertToFloatOrDatetime(value) for value in self[column_name]]\n        realMax = max(listOfConvertedValues)\n        return realMax\n\n    def median(self, column_name):\n        listOfConvertedValues = sorted(convertToFloatOrDatetime(value) for value in self[column_name])\n        middle = len(listOfConvertedValues) // 2\n        # true median: average the two middle values when the count is even\n        if len(listOfConvertedValues) % 2 == 1:\n            return listOfConvertedValues[middle]\n        return (listOfConvertedValues[middle - 1] + listOfConvertedValues[middle]) / 2.0\n\n    def stDev(self, column_name):\n        listOfConvertedValues = [convertToFloatOrDatetime(value) for value in self[column_name]]\n        realMean = sum(listOfConvertedValues) / len(self[column_name])\n        # population standard deviation: sqrt(mean of squares minus square of mean)\n        xSquared = sum(value * value for value in listOfConvertedValues)\n        stDev = (xSquared/len(listOfConvertedValues) - realMean*realMean)**.5\n        return stDev\n\n\n\n\n    def __getitem__(self, item):\n        # this is for rows only\n        if isinstance(item, (int, slice)):\n            return self.data[item]\n\n        # this is for columns only\n        elif isinstance(item, (str, unicode)):\n            return [row[item] for row in self.data]\n\n        # this is for rows and columns\n        elif isinstance(item, tuple):\n            if isinstance(item[0], list) or isinstance(item[1], list):\n\n                if isinstance(item[0], list):\n                    rowz = [row for index, row in enumerate(self.data) if index in item[0]]\n                else:\n                    rowz = self.data[item[0]]\n\n                if isinstance(item[1], list):\n                    if all([isinstance(thing, int) for thing in item[1]]):\n                        return [[column_value for index, column_value in enumerate([value for value in row.itervalues()]) if index in item[1]] for row in rowz]\n                    elif all([isinstance(thing, (str, unicode)) for thing in item[1]]):\n                        return [[row[column_name] for column_name in item[1]] for row in rowz]\n                    else:\n                        raise TypeError('What the hell is this?')\n\n                else:\n                    return [[value for value in row.itervalues()][item[1]] for row in rowz]\n            else:\n                if isinstance(item[1], (int, slice)):\n                    return [[value for value 
in row.itervalues()][item[1]] for row in self.data[item[0]]]\n                elif isinstance(item[1], (str, unicode)):\n                    return [row[item[1]] for row in self.data[item[0]]]\n                else:\n                    raise TypeError('I don\\'t know how to handle this...')\n\n        # only for lists of column names\n        elif isinstance(item, list):\n            return [[row[column_name] for column_name in item] for row in self.data]\n\n    def get_rows_where_column_has_value(self, column_name, value, index_only=False):\n        if index_only:\n            return [index for index, row_value in enumerate(self[column_name]) if row_value==value]\n        else:\n            return [row for row in self.data if row[column_name]==value]\n\n\ndef convertToFloatOrDatetime(string):\n    try:\n        return float(string)\n    except:\n        return parse(string)\n\ninfile = open('SalesJan2009.csv')\nlines = infile.readlines()\nlines = lines[0].split('\\r')\ndata = [l.split(',') for l in lines]\nthings = lines[559].split('\"')\ndata[559] = things[0].split(',')[:-1] + [things[1]] + things[-1].split(',')[1:]\n\n\ndf = DataFrame(list_of_lists=data)\n# get the 5th row\nfifth = df[4]\nsliced = df[4:10]\n\n# get item definition for df [row, column]\n\n# get the third column\ntupled = df[:, 2]\ntupled_slices = df[0:5, :3]\n\ntupled_bits = df[[1, 4], [1, 4]]\n\n\n# adding header for data with no header\ndf = DataFrame(list_of_lists=data[1:], header=False)\n\n# fetch columns by name\nnamed = df['column1']\nnamed_multi = df[['column1', 'column7']]\n\n#fetch rows and (columns by name)\nnamed_rows_and_columns = df[:5, 'column7']\nnamed_rows_and_multi_columns = df[:5, ['column4', 'column7']]\n\n\n#testing from_csv class method\ndf = DataFrame.from_csv('SalesJan2009.csv')\ndf['Price']\n# median sanity check, moved out of the class body so it runs after df exists\nimport numpy\nassert df.median('Price') == numpy.median([convertToFloatOrDatetime(value) for value in df['Price']])\n# testing dups in header exception\n# dfDup = DataFrame.from_csv('SalesJan2009withdupheader.csv')\nrows = df.get_rows_where_column_has_value('Payment_Type', 'Visa')\nindices = df.get_rows_where_column_has_value('Payment_Type', 'Visa', index_only=True)\n\nrows_way2 = df[indices, ['Product', 'Country']]\n\n2+2","repo_name":"zmulhaul/bia660-c","sub_path":"assignment 2/mypandasZachM.py","file_name":"mypandasZachM.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37289979316","text":"\"\"\"\nBackport of configurable stacklevel for logging added in Python 3.8.\n\nSee https://github.com/python/cpython/pull/7424\n\"\"\"\nimport io\nimport logging\nimport os\nimport sys\nimport traceback\nfrom contextlib import contextmanager\n\n\n__all__ = ['PatchedLoggerMixin', 'patch_logger']\n\n\nclass PatchedLoggerMixin(object):\n    \"\"\"Mixin adding `temp_monkey_patched_logger` that allows stacklevel kwarg.\n\n    Classes that include this mixin have a `temp_monkey_patched_logger`\n    context manager that allows the use of the `stacklevel` keyword argument\n    from Python 3.8.\n\n    Classes using this mixin must have a `logging.Logger` instance as an\n    attribute of the class. 
By default, this is assumed to be named `logger`,\n but you can override the `logger_attribute` class attribute with the\n name of a different attribute.\n \"\"\"\n\n #: Name of logger instance on the class inheriting this mixin.\n logger_attribute = 'logger'\n\n def __init__(self, *args, **kwargs):\n super(PatchedLoggerMixin, self).__init__(*args, **kwargs)\n self._patched_logger_class = None\n\n def _get_logger(self):\n if not hasattr(self, self.logger_attribute):\n msg = (\n \"Subclass of PatchedLoggerMixin must define `{}` attribute \"\n \"or override `logger_attribute`\".format(self.logger_attribute)\n )\n raise AttributeError(msg)\n return getattr(self, self.logger_attribute)\n\n @contextmanager\n def temp_monkey_patched_logger(self):\n \"\"\"Temporarily monkey patch logger to allow overriding log records.\n\n The monkey patching is reset so that the behavior change is limited\n to the scope of this logger.\n \"\"\"\n logger = self._get_logger()\n original_logger_class = logger.__class__\n\n # Cache patched logger class if not already defined.\n if self._patched_logger_class is None:\n self._patched_logger_class = patch_logger(logger.__class__)\n\n logger.__class__ = self._patched_logger_class\n try:\n yield\n finally:\n logger.__class__ = original_logger_class\n\n\ndef patch_logger(logger_class):\n \"\"\"Return logger class patched with stacklevel keyword argument.\"\"\"\n if sys.version_info.major >= 3 and sys.version_info.minor >= 8:\n return logger_class\n return type('ConfigurableStacklevelLogger',\n (ConfigurableStacklevelLoggerMixin, logger_class), {})\n\n\nclass ConfigurableStacklevelLoggerMixin(object):\n \"\"\"Mixin for adding `stacklevel` keyword argument for logging methods.\n\n This mixin can be used to monkey patch `logging.Logger` to include the\n `stacklevel` keyword argument that will be available in Python 3.8.\n\n See https://github.com/python/cpython/pull/7424\n \"\"\"\n\n def findCaller(self, stack_info=False, stacklevel=1): # pragma: no cover\n \"\"\"\n Find the stack frame of the caller so that we can note the source\n file name, line number and function name.\n \"\"\"\n f = logging.currentframe()\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == logging._srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv\n\n def _log(self, level, msg, args, exc_info=None, extra=None,\n stack_info=False, stacklevel=1): # pragma: no cover\n \"\"\"\n Low-level logging routine which creates a LogRecord and then calls\n all the handlers of this logger to handle the record.\n \"\"\"\n sinfo = None\n if logging._srcfile:\n # IronPython doesn't track Python frames, so findCaller raises an\n # exception on some versions of IronPython. 
We trap it here so that\n            # IronPython can use logging.\n            try:\n                fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel)\n            except ValueError:\n                fn, lno, func = \"(unknown file)\", 0, \"(unknown function)\"\n        else:\n            fn, lno, func = \"(unknown file)\", 0, \"(unknown function)\"\n        if exc_info:\n            if isinstance(exc_info, BaseException):\n                exc_info = (type(exc_info), exc_info, exc_info.__traceback__)\n            elif not isinstance(exc_info, tuple):\n                exc_info = sys.exc_info()\n\n        if sys.version_info.major >= 3:\n            record = self.makeRecord(self.name, level, fn, lno, msg, args,\n                                     exc_info, func, extra, sinfo)\n        else:\n            record = self.makeRecord(self.name, level, fn, lno, msg, args,\n                                     exc_info, func, extra)\n        self.handle(record)\n","repo_name":"tonysyu/logquacious","sub_path":"logquacious/backport_configurable_stacklevel.py","file_name":"backport_configurable_stacklevel.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"33342382628","text":"import hub\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\n\n\naugment = A.Compose(\n    [\n        A.SmallestMaxSize(max_size=160),\n        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=15, p=0.5),\n        A.RandomCrop(height=128, width=128),\n        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=0.5),\n        A.RandomBrightnessContrast(p=0.5),\n        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n        ToTensorV2(),\n    ]\n)\n\n\ndef transform(sample):\n    \"\"\"Sample is an ordered dictionary of dataset elements\"\"\"\n    image, label = sample[\"images\"], sample[\"labels\"]\n    image = augment(image=image)[\"image\"]\n    return image, label\n\n\ndef loop():\n    # Load the dataset\n    ds = hub.load(\"hub://activeloop/cifar100-train\")\n\n    # Define the dataloader with the transform\n    dataloader = ds.pytorch(\n        transform=transform,\n        num_workers=2,\n        batch_size=8,\n    )\n\n    # Iterate\n    for images, labels in dataloader:\n        print(images.shape, labels.shape)\n        break\n\n\nif __name__ == \"__main__\":\n    loop()\n","repo_name":"activeloopai/examples","sub_path":"albumentations/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"70673243785","text":"import json\r\ndef getData() : \r\n    with open('example.json') as file :\r\n        # Load the JSON data\r\n        data = json.load(file)\r\n    return data\r\n\r\n\r\n\r\nuser = {\r\n    'user': getData(),\r\n    'setUser': lambda data: user.update({'user': data})\r\n}\r\n\r\nuser_list = user.get('user')\r\nset_user_func = user.get('setUser')\r\nprint(user_list)\r\n\r\nset_user_func([5])\r\n\r\nprint(user['user'])","repo_name":"salaharb/python-project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6035926317","text":"# SEO environment: schedule\nimport warnings\nwarnings.filterwarnings('ignore')\nimport requests\nimport pandas as pd\nimport numpy as np\nimport json\nimport time\nimport datetime\nimport xlwings as xw\nimport telebot\nimport hmac, base64, struct, hashlib\nimport math\nimport threading\nimport queue\nimport concurrent.futures\nimport multiprocessing\n\n\nstart = int(time.time())\n\npd.set_option('display.max_colwidth', None) # show full cell contents\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\nday = 
-1\npages_user = 150\npages_fircharge = 60\npages_fircharge_two = 100\npages_trade = 150\n\nurl_trade ='http://fundmng.bsportsadmin.com/api/manage/data/balance/record/list'\nurl_fircharge = 'http://fundmng.bsportsadmin.com/api/manage/data/detail/firstRecharge'\nurl_user = 'http://fundmng.bsportsadmin.com/api/manage/user/maintain/user/list'\nurl_huiyuan = 'http://fundmng.bsportsadmin.com/api/manage/data/loss/user/manage/list' # member churn\n\ndaili = pd.read_excel(r'C:\\Users\\User\\Desktop\\SEO\\SEO提单数据\\1011\\代理线.xlsx')\n# first request: obtain a login token\nsubmit_url = 'http://fundmng.bsportsadmin.com/api/manage/user/admin/login/submit'\nheader0 = {\n    'Accept':'application/json, text/plain, */*',\n    # 'Accept-Encoding':'gzip, deflate',\n    'Accept-Language':'zh-CN,zh;q=0.9',\n    'Connection':'keep-alive',\n    'Content-Length':'48',\n    'Content-Type':'application/x-www-form-urlencoded',\n    'Cookie':'admin-uid=690; admin-token=db76bebda5274c80adaadd40bd794f24',\n    'Device_id':'1.0',\n    'Gl_version':'2.0',\n    'Host':'fundmng.bsportsadmin.com',\n    'Language':'zh_CN',\n    'Origin':'http://fundmng.bsportsadmin.com',\n    'Os_type':'0',\n    'Referer':'http://fundmng.bsportsadmin.com/login',\n    'Sign':'2bc4c378817f47731f0adf450a627d19',\n    'Some':'header',\n    'Systemid':\"\",\n    'Timestamp':'1692415901000',\n    'Token':'-1',\n    'Uid':'-1',\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',\n    'Version':'1.0'\n}\ndef get_google_code(secret):\n    key = base64.b32decode(secret, True)\n    msg = struct.pack(\">Q\", int(time.time()) // 30)\n    google_code = hmac.new(key, msg, hashlib.sha1).digest()\n    # many versions found online fail right here: they omit the chr() conversion\n    o = ord(chr(google_code[19])) & 15\n    # google_code = (struct.unpack(\">I\", google_code[o:o + 4])[0] & 0x7fffffff) % 1000000\n    google_code = (struct.unpack(\">I\", google_code[o:o + 4])[0] & 0x7fffffff) % 1000000\n    return '%06d' % google_code\n\n\n# scrape the member-churn statistics table\n# token\ndata0 = {\n    'username': 'Marquis',\n    'password': 'qwer123456',\n    'code': get_google_code('64ehnxj6yily5bhv23kgb62ozuh6yuu2')\n}\nsession0 = requests.Session()\nresponse0 =session0.post(url=submit_url,data=data0,headers=header0)\nresponse0.encoding = 'utf-8'\nobj0 = json.loads(response0.text)\ntoken = obj0['data']['token']\n\nheader = {\n    'Device_id':'1.0',\n    'Os_type':'0',\n    'Referer':'http://fundmng.bsportsadmin.com/system/statistics/member-loss',\n    'Sign':'6f518a02e3479ecaaf4ec58b3e5b3878',\n    'Timestamp':'1697073050000',\n    'Token':token,\n    'Uid':'690',\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',\n    'Version':'1.0'\n}\n# total record count\ndata_init = {\n    'page': 1,\n    'size': 20,\n    'vipLevel': 0,\n    'regStartTime': 1601481600000,\n    'regEndTime': int(time.time())*1000,\n}\n# get the member-churn page count\nsession = requests.session()\nresponse = session.post(url=url_huiyuan,data=data_init,headers=header)\nobj_init = json.loads(response.text)\nn_data = obj_init['data']['total']\nprint('总条数:',n_data)\npages = math.ceil(n_data/500)\nprint('总页码:',pages)\n\npage_list = []\nfor i in range(0,pages,pages//10):\n    page_list.append(i)\npage_list[10]=pages\n\ndef huiyuan_q_fun(start_page,end_page,q):\n    dic_huiyuan = {'会员账号':[],'代理':[],'vip等级':[],'首存时间':[]}\n    for page in range(start_page,end_page+1):\n        # get the page count\n        print(f'第{page}页。。。')\n        data = {\n            'page': page,\n            'size': 500,\n            'vipLevel': 0,\n            'regStartTime': 1601481600000,\n            'regEndTime': int(time.time())*1000,\n        }\n        response = session.post(url=url_huiyuan,data=data,headers=header)\n        response.encoding='utf8'\r\n        
obj = json.loads(response.text)\n\n for i in obj['data']['dataList']:\n dic_huiyuan['会员账号'].append(i['userName'])\n dic_huiyuan['代理'].append(i['parentName'])\n dic_huiyuan['vip等级'].append(i['vipLevel'])\n dic_huiyuan['首存时间'].append(i['firstTime'])\n print(pd.DataFrame(dic_huiyuan).shape)\n q.put(dic_huiyuan)\n return dic_huiyuan\n\n\n# multithreaded execution\nif __name__ =='__main__':\n\n q = queue.Queue()\n huiyuan = pd.DataFrame(columns=['会员账号','代理','vip等级','首存时间'])\n\n with concurrent.futures.ThreadPoolExecutor(10) as executor:\n # submit one worker per page range so each call matches huiyuan_q_fun(start_page, end_page, q)\n futures = [executor.submit(huiyuan_q_fun, page_list[i]+1, page_list[i+1], q) for i in range(10)]\n # job = []\n # for i in range(10):\n # t = threading.Thread(huiyuan_q_fun(page_list[0]+1,page_list[i+1],q))\n # t.start()\n # job.append(t)\n # print(f'Starting thread: {i}')\n # for j in job:\n # j.join()\n\n while not q.empty():\n huiyuan=huiyuan.append(pd.DataFrame(q.get()))\n\n print(huiyuan.shape)\n print('Main thread finished.')\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Marquis0065/project202308285","sub_path":"Bsport/SEO日报/提单/1013/多线程测试.py","file_name":"多线程测试.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15882609524","text":"import argparse\nimport os\nimport os.path as osp\nimport shutil\nimport tempfile\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import load_checkpoint, get_dist_info\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\n\nfrom mmdet.apis import init_dist\nfrom mmdet.core import results2json, coco_eval\nfrom mmdet.datasets import build_dataloader, get_dataset\nfrom mmdet.models import build_detector\n\n\nCLASSES = (\n'/m/0242l', '/m/03120', '/m/0h8l4fh', '/m/0167gd', '/m/01j51', '/m/029b3', '/m/02zt3', '/m/0kmg4',\n'/m/0174k2', '/m/01k6s3', '/m/029bxz', '/m/02pjr4', '/m/02wv84t', '/m/02x984l', '/m/03s_tn', '/m/040b_t',\n'/m/04169hn', '/m/063rgb', '/m/07xyvk', '/m/0fx9l', '/m/0llzx', '/m/03ldnb', '/m/0130jx', '/m/01vbnl', \n'/m/02f9f_', '/m/02jz0l', '/m/03dnzn', '/m/09g1w', '/m/01lsmm', '/m/01n5jq', '/m/025dyy', '/m/02d9qx',\n'/m/03m3vtv', '/m/04zwwv', '/m/05gqfk', '/m/09gtd', '/m/0frqm', '/m/0k1tl', '/m/02w3r3', '/m/034c16',\n'/m/02d1br', '/m/02pdsw', '/m/03v5tg', '/m/07v9_z', '/m/01_5g', '/m/01fh4r', '/m/02jvh9', '/m/02p5f1q',\n'/m/02x8cch', '/m/03q5c7', '/m/04dr76w', '/m/04kkgm', '/m/050gv4', '/m/054fyh', '/m/058qzx', '/m/08hvt4',\n'/m/099ssp', '/m/09tvcd', '/m/0cmx8', '/m/0dt3t', '/m/0h8n27j', '/m/0h8n6ft', '/m/04v6l4', '/m/084rd',\n'/m/02tsc9', '/m/03y6mg', '/m/0h8ntjv', '/m/03tw93', '/m/0b3fp9', '/m/0bt_c3', '/m/01mzpv', '/m/01s105',\n'/m/01y9k5', '/m/03m3pdh', '/m/0703r8', '/m/02z51p', '/m/03__z0', '/m/061hd_', '/m/026qbn5', '/m/047j0r',\n'/m/078n6m', '/m/0h8n5zk', '/m/05kyg_', '/m/0642b4', '/m/0cvnqh', '/m/0fqfqc', '/m/0fqt361', '/m/0gjbg72',\n'/m/0h8mzrc', '/m/04y4h8h', '/m/0h8n6f9', '/m/01jfsr', '/m/046dlr', '/m/06_72j', '/m/02s195', '/m/031b6r',\n'/m/03rszm', '/m/054_l', '/m/0152hh', '/m/04yqq2', '/m/01yx86', '/m/06z37_', '/m/0c06p', '/m/0dtln', '/m/0fm3zh',\n'/m/0162_1', '/m/01gllr', '/m/01j61q', '/m/015qff', '/m/01knjb', '/m/02pv19', '/m/01pns0', '/m/0220r2',\n'/m/033rq4', '/m/04h7h', '/m/079cl', '/m/0d5gx', '/m/01fdzj', '/m/03jm5', '/m/021sj1', '/m/0crjs', '/m/0b_rs',\n'/m/04yx4', '/m/03bt1vf', '/m/01bl7v', '/m/05r655', '/m/01b9xk', '/m/02y6n', '/m/01dwsz', '/m/01dwwc', '/m/01j3zr',\n'/m/01f91_', '/m/01hrv5', '/m/01tcjp', '/m/021mn', '/m/0cxn2', '/m/0fszt', '/m/0gm28', 
'/m/02g30s', '/m/014j1m',\n'/m/0388q', '/m/043nyj', '/m/061_f', '/m/07fbm7', '/m/07j87', '/m/09k_b', '/m/09qck', '/m/0cyhj_', '/m/0dj6p',\n'/m/0fldg', '/m/0fp6w', '/m/0hqkz', '/m/0jwn_', '/m/0kpqd', '/m/033cnk', '/m/01fb_0', '/m/09728', '/m/0jy4k',\n'/m/015wgc', '/m/02zvsm', '/m/052sf', '/m/05z55', '/m/0663v', '/m/0_cp5', '/m/0cjq5', '/m/0ll1f78', '/m/0n28_',\n'/m/07crc', '/m/015x4r', '/m/015x5n', '/m/047v4b', '/m/05vtc', '/m/0cjs7', '/m/05zsy', '/m/027pcv', '/m/0fbw6',\n'/m/0fj52s', '/m/0grw1', '/m/0hkxq', '/m/0jg57', '/m/02cvgx', '/m/0fz0h', '/m/0cdn1', '/m/06pcq', '/m/07030',\n'/m/03fp41', '/m/025nd', '/m/0cdl1', '/m/0cffdh', '/m/0mw_6', '/m/04gth', '/m/06m11', '/m/0ftb8', '/m/0jqgx',\n'/m/012n7d', '/m/018p4k', '/m/0199g', '/m/01bjv', '/m/01x3jk', '/m/0323sq', '/m/04_sv', '/m/076bq', '/m/07cmd',\n'/m/07jdr', '/m/07r04', '/m/01lcw4', '/m/0h2r6', '/m/0pg52', '/m/0qmmr', '/m/01btn', '/m/02068x', '/m/0ph39',\n'/m/01xs3r', '/m/09ct_', '/m/0cmf2', '/m/09rvcxw', '/m/01bfm9', '/m/01d40f', '/m/01gkx_', '/m/01gmv2', '/m/01krhy',\n'/m/01n4qj', '/m/01xygc', '/m/01xyhv', '/m/025rp__', '/m/02fq_6', '/m/02jfl0', '/m/02wbtzl', '/m/02h19r', '/m/01cmb2',\n'/m/032b3c', '/m/03grzl', '/m/0176mf', '/m/01llwg', '/m/01nq26', '/m/01r546', '/m/01rkbr', '/m/0gjkl', '/m/0hnnb',\n'/m/0nl46', '/m/04tn4x', '/m/0fly7', '/m/02p3w7d', '/m/01b638', '/m/06k2mb', '/m/03nfch', '/m/0h8mhzd', '/m/01940j',\n'/m/01s55n', '/m/0584n8', '/m/080hkjn', '/m/03p3bw', '/m/07qxg_', '/m/01dy8n', '/m/01f8m5', '/m/05n4y', '/m/05z6w',\n'/m/06j2d', '/m/09b5t', '/m/09csl', '/m/09d5_', '/m/09ddx', '/m/0ccs93', '/m/0dbvp', '/m/0dftk', '/m/0f6wt', '/m/0gv1x',\n'/m/0h23m', '/m/0jly1', '/m/0175cv', '/m/019h78', '/m/01h8tj', '/m/0d8zb', '/m/01h3n', '/m/0gj37', '/m/0_k2', '/m/0cydv',\n'/m/0cyf8', '/m/0ft9s', '/m/09kmb', '/m/0f9_l', '/m/01h44', '/m/01dxs', '/m/0633h', '/m/01yrx', '/m/0306r', '/m/0449p',\n'/m/04g2r', '/m/07dm6', '/m/096mb', '/m/0bt9lr', '/m/0c29q', '/m/0cd4d', '/m/0cn6p', '/m/0dq75', '/m/01x_v', '/m/01xq0k1',\n'/m/03bk1', '/m/03d443', '/m/03fwl', '/m/03k3r', '/m/03qrc', '/m/04c0y', '/m/04rmv', '/m/068zj', '/m/06mf6', '/m/071qp',\n'/m/07bgp', '/m/0898b', '/m/08pbxl', '/m/09kx5', '/m/0bwd_0j', '/m/0c568', '/m/0cnyhnx', '/m/0czz2', '/m/0dbzx',\n'/m/02hj4', '/m/084zz', '/m/0gd36', '/m/02l8p9', '/m/0pcr', '/m/029tx', '/m/04m9y', '/m/078jl', '/m/011k07', '/m/0120dh',\n'/m/09f_2', '/m/09ld4', '/m/03fj2', '/m/0by6g', '/m/0nybt', '/m/017ftj', '/m/02_n6y', '/m/05441v', '/m/0jyfg',\n'/m/02lbcq', '/m/013y1f', '/m/01xqw', '/m/026t6', '/m/0319l', '/m/0342h', '/m/03m5k', '/m/03q5t', '/m/057cc', '/m/05kms',\n'/m/05r5c', '/m/06ncr', '/m/07c6l', '/m/07gql', '/m/07y_7', '/m/0l14j_', '/m/0mkg', '/m/014y4n', '/m/01226z', '/m/02ctlc',\n'/m/02rgn06', '/m/05ctyq', '/m/0wdt60w', '/m/019w40', '/m/03g8mr', '/m/0420v5', '/m/044r5d', '/m/054xkw', '/m/057p5t',\n'/m/06__v', '/m/06_fw', '/m/071p9', '/m/04h8sr', '/m/03kt2w', '/m/030610', '/m/076lb9', '/m/0cyfs', '/m/0h8my_4',\n'/m/05_5p_0', '/m/04p0qw', '/m/02jnhm', '/m/02zn6n', '/m/07kng9', '/m/0bjyj5', '/m/0d20w4', '/m/012w5l', '/m/01bms0',\n'/m/01j5ks', '/m/01kb5b', '/m/04vv5k', '/m/05bm6', '/m/073bxn', '/m/07dd4', '/m/0dv5r', '/m/0hdln', '/m/0lt4_',\n'/m/01g3x7', '/m/020kz', '/m/02gzp', '/m/04ctx', '/m/06c54', '/m/06nrc', '/m/0gxl3', '/m/06y5r', '/m/04ylt', '/m/01b7fy',\n'/m/01c648', '/m/01m2v', '/m/01m4t', '/m/020lf', '/m/02522', '/m/03bbps', '/m/03jbxj', '/m/07c52', '/m/050k8', '/m/0h8lkj8',\n'/m/0bh9flk', '/m/0hg7b', '/m/01599', '/m/024g6', '/m/02vqfm', '/m/01z1kdw', '/m/07clx', '/m/081qc', 
'/m/01bqk0',\n'/m/03c7gz', '/m/02dgv', '/m/0d4v4', '/m/01lynh', '/m/04m6gz', '/m/014sv8', '/m/016m2d', '/m/04hgtk', '/m/0dzct',\n'/m/0283dt1', '/m/039xj_', '/m/0k0pj', '/m/03q69', '/m/0k65p', '/m/031n1', '/m/0dzf4', '/m/035r7c', '/m/015h_t',\n'/m/01jfm_', '/m/083wq', '/m/0dkzw', '/m/0h9mv', '/m/0djtd'\n)\n\n\ndef single_gpu_test(model, data_loader, show=False):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=not show, **data)\n results.append(result)\n\n if show:\n model.module.show_result(data, result, dataset.img_norm_cfg)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None):\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n results = collect_results(results, len(dataset), tmpdir)\n\n return results\n\n\ndef collect_results(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN, ),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n tmpdir = tempfile.mkdtemp()\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return ordered_results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--out', help='output result file')\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],\n help='eval types')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument('--tmpdir', help='tmp dir for writing some results')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n args = parse_args()\n\n 
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = get_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n # old versions did not save class info in checkpoints, this workaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show)\n else:\n model = MMDistributedDataParallel(model.cuda())\n outputs = multi_gpu_test(model, data_loader, args.tmpdir)\n\n\n\n rank, _ = get_dist_info()\n if args.out and rank == 0:\n print('\\nwriting results to {}'.format(args.out))\n mmcv.dump(outputs, args.out)\n\n dataset.CLASSES = CLASSES\n\n data_list = []\n for i in tqdm(range(len(dataset))): \n img_id = dataset.img_infos[i]['filename'].split('.')[0]\n w = dataset.img_infos[i]['width']\n h = dataset.img_infos[i]['height']\n for l in range(len(dataset.CLASSES)):\n l_name = dataset.CLASSES[l] \n for b in outputs[i][l]: \n if len(b)>0: \n data_list.append((img_id, l_name, b[-1], b[0]/w, b[1]/h, b[2]/w, b[3]/h))\n data_df = pd.DataFrame(data_list, columns =['ImageID', 'LabelName', 'Score', 'XMin', 'YMin', 'XMax', 'YMax'])\n data_df.to_csv(args.out.split('.pkl')[0]+'.csv', index=False)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"boliu61/open-images-2019-instance-segmentation","sub_path":"mmdetection/tools/test_new.py","file_name":"test_new.py","file_ext":"py","file_size_in_byte":12411,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"11595099321","text":"import os\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport torch\nfrom model import RNN\nimport json\n\nobject_id = \"17002\"\nvideo_name = \"Video_37_2_3\"\nmodel_path = \"models/model-epoch-50.pth\"\n\nfeat_map_path = \"../tensors/train/tensor_\" + video_name + \".npy\"\ngt_path = \"gt/\" + video_name + \"_\" + object_id + \".npy\"\n\nfeat_map = np.load(feat_map_path)\ngt = np.load(gt_path)\ngt = torch.from_numpy(gt).type(torch.FloatTensor).squeeze(0)\n\n\ngt_samples_path = \"gt/gt.json\"\nwith open(gt_samples_path) as f:\n training_samples = json.load(f)\n\nsample = None\nfor item in training_samples:\n if item[\"video_name\"] == video_name and item[\"object_id\"] == object_id:\n sample = item\n break\n\nseq_start = sample[\"seq_start\"]\nseq_end = sample[\"seq_end\"]\n\nfeat_map = feat_map[seq_start:seq_end]\nfeat_map = torch.from_numpy(feat_map).type(torch.FloatTensor)\nfeat_map = feat_map.squeeze(0).permute(0, 3, 1, 
2)\npadding = torch.zeros((100 - feat_map.shape[0], *feat_map.shape[1:]))\nfeat_map = torch.cat((feat_map, padding), dim=0)\n\nfeat_map = feat_map.unsqueeze(0).cuda()\ngt = gt.unsqueeze(0).cuda()\n\nmodel = RNN()\nmodel.load_state_dict(torch.load(model_path))\nmodel.cuda()\nmodel.eval()\n\nout = model(feat_map, gt)\nout = out.cpu().detach().numpy()\nprint(out.shape)\n\ncap = cv2.VideoCapture(\"../datasets/rrc-text-videos/ch3_train/\" + video_name + \".mp4\")\nvideo_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nvideo_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\ncap.set(cv2.CAP_PROP_POS_FRAMES, float(seq_start))\n\nfig, ax = plt.subplots(1)\nret, inp = cap.read()\ncurrent_frame = 0\nfor i in range(seq_end - seq_start):\n im = cv2.cvtColor(inp, cv2.COLOR_BGR2RGB)\n plt.cla()\n ax.imshow(im)\n\n heat = cv2.resize(out[0, current_frame] / 1, (int(video_width), int(video_height)), interpolation = cv2.INTER_AREA)\n ax.imshow(heat, cmap='jet', alpha=0.5)\n\n plt.axis('off')\n #plt.show(block=False)\n #plt.pause(0.00001)\n\n plt.savefig('images/file%05d.jpeg' % current_frame, bbox_inches = 'tight', pad_inches = 0)\n\n ret, inp = cap.read()\n current_frame += 1\n\nplt.close()\ncap.release()\n\nos.system(\"ffmpeg -framerate 10 -pattern_type glob -i 'images/*.jpeg' -c:v mpeg4 -vb 1M -qscale:v 2 \" + video_name + \".mp4\")\n\n\n\n","repo_name":"Sergigb/yolo-phoc-lstm","sub_path":"ConvLSTM/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1158569042","text":"from calattr import Calattr\nimport random\nimport csv\nfrom config import get_config\n\n\ndef Get_numservice(f):\n num_service = []\n f.readline()\n line = f.readline()\n candidates_c = line.split(' ')\n candidates = []\n for index in range(len(candidates_c)):\n candidates.append(candidates_c[index])\n # print('Candidates: ',candidates)\n for candidate in candidates:\n num = 0\n # rows = 0 # limit the services to 2\n f1 = open('服务名聚类最终结果/' + candidate + '.txt')\n line1 = f1.readline()\n while line1:\n num = num + 1\n line1 = f1.readline()\n num_service.append(num)\n return num_service\n\n#if __name__ =='__main__':\ndef generate_data(calattr,config):\n path = config.train_from+\"/\"+config.ist_nodeset+\"/nodeSet.txt\"\n #print('path: ',path)\n f = open(path)\n num_service = Get_numservice(f)\n result=[]\n for i in range(config.init_gen_num):\n #print(i)\n pointer =[]\n for j in range(config.node_num):\n point = random.randint(0,num_service[j]-1)\n pointer.append(point)\n f = calattr.receive(pointer)\n pointer.append(f)\n result.append(pointer)\n\n def takeF(elem):\n return elem[-1]\n result.sort(key=takeF,reverse=True)\n result = result[:config.best_num]\n csvfile = open(config.result_dir, \"w\",newline=\"\")\n writer = csv.writer(csvfile)\n writer.writerows(result)\n csvfile.close()\n\n\n","repo_name":"xdbdilab/ppdrl","sub_path":"PPDRL/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"15593289268","text":"#?description=Set and reset custom bookmarks\n#?shortcut=\n#?deprecated\nimport datetime\nimport json\nimport time\nfrom com.pnfsoftware.jeb.client.api import IScript\nfrom com.pnfsoftware.jeb.core.units import UnitUtil\n\"\"\"\nSample script for JEB Decompiler.\n\nFor demo purposes only, this script should not be used to list bookmarks.\nJEB uses its own bookmarks facility for 
projects.\n\"\"\"\nclass BookmarkSet(IScript):\n BMKEY = 'BOOKMARKS'\n\n def run(self, ctx):\n f = ctx.getFocusedFragment()\n if not f:\n print('Set the focus on a UI fragment, and position the caret at the location you would like to bookmark.')\n return\n\n label = ctx.getFocusedView().getFragmentLabel(f)\n addr = f.getActiveAddress()\n unit = f.getUnit()\n uid = unit.getUid()\n unitname = unit.getName()\n unitpath = UnitUtil.buildFullyQualifiedUnitPath(unit)\n\n log('Unit: %d (%s)' % (uid, unitpath))\n log('Address: %s' % addr)\n log('Fragment: %s' % label)\n\n prj = ctx.getMainProject()\n bmstr = prj.getData(BookmarkSet.BMKEY)\n if bmstr != None:\n bm = json.loads(bmstr)\n else:\n bm = {}\n\n #log('Current bookmarks (%d): %s' % (len(bm), bm))\n log('Current bookmarks: %d' % len(bm))\n\n labelmap = bm.get(str(uid))\n if labelmap == None:\n labelmap = {}\n bm[uid] = labelmap\n\n addrmap = labelmap.get(label)\n if addrmap == None:\n addrmap = {}\n labelmap[label] = addrmap\n\n e = addrmap.get(addr)\n if e:\n log('Found existing entry')\n comment = e[2]\n savedts = e[3]\n title = 'Update a bookmark'\n caption = 'Current comment. (Clear to delete the bookmark.)\\nSet on ' + datetime.datetime.fromtimestamp(savedts).ctime()\n else:\n comment = 'bookmarked'\n title = 'Add a bookmark'\n caption = 'Optional comment.'\n\n comment = ctx.displayQuestionBox(title, caption, comment)\n if comment == None:\n return\n\n ts = time.time()\n if comment == '':\n log('Removing entry')\n if addr in addrmap:\n del addrmap[addr]\n else:\n log('Adding/modifying entry')\n addrmap[addr] = [unitpath, unitname, comment, ts]\n\n prj.setData(BookmarkSet.BMKEY, json.dumps(bm), True)\n\ndef log(s):\n pass#print(s)\n","repo_name":"pnfsoftware/jeb-samplecode","sub_path":"scripts/BookmarkSet.py","file_name":"BookmarkSet.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"81"} +{"seq_id":"71964898185","text":"import os\r\nimport logging\r\nimport sqlite3\r\n\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\r\n level=logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\nDB_NAME = 'RegisteredUser.db'\r\nTABLE_NAME = 'users'\r\nTABLE_COLUMNS = ['chat_id', 'branch', 'username']\r\n\r\n\r\ndef create_database():\r\n if not os.path.isfile(DB_NAME):\r\n conn = sqlite3.connect(DB_NAME)\r\n c = conn.cursor()\r\n c.execute(f\"CREATE TABLE {TABLE_NAME} ({TABLE_COLUMNS[0]} INTEGER PRIMARY KEY, \"\r\n f\"{TABLE_COLUMNS[1]} TEXT, {TABLE_COLUMNS[2]} TEXT)\")\r\n conn.commit()\r\n conn.close()\r\n logger.info(f\"Database {DB_NAME} created successfully\")\r\n\r\n\r\ndef register_user(chat_id: int, branch: str, username: str) -> str:\r\n if not os.path.isfile(DB_NAME):\r\n create_database()\r\n try:\r\n conn = sqlite3.connect(DB_NAME)\r\n c = conn.cursor()\r\n c.execute(f\"INSERT INTO {TABLE_NAME} ({TABLE_COLUMNS[0]}, {TABLE_COLUMNS[1]}, {TABLE_COLUMNS[2]}) \"\r\n f\"VALUES (?, ?, ?)\", (chat_id, branch, username))\r\n conn.commit()\r\n conn.close()\r\n logger.info(f\"User {username} registered successfully\")\r\n return \"You are now registered.\"\r\n except Exception as e:\r\n logger.error(f\"Error registering user {username}: {e}\")\r\n return \"An error occurred while registering. 
Please try again later.\"\r\n\r\n\r\ndef is_user_registered(chat_id: int) -> bool:\r\n if not os.path.isfile(DB_NAME):\r\n create_database()\r\n try:\r\n conn = sqlite3.connect(DB_NAME)\r\n c = conn.cursor()\r\n c.execute(f\"SELECT * FROM {TABLE_NAME} WHERE {TABLE_COLUMNS[0]}=?\", (chat_id,))\r\n result = c.fetchone()\r\n conn.close()\r\n return bool(result)\r\n except Exception as e:\r\n logger.error(f\"Error checking if user is registered: {e}\")\r\n return False\r\n\r\ndef get_user_branch(chat_id):\r\n conn = sqlite3.connect(\"RegisteredUser.db\")\r\n cursor = conn.cursor()\r\n query = \"SELECT branch FROM users WHERE chat_id = ?\"\r\n cursor.execute(query, (chat_id,))\r\n result = cursor.fetchone()\r\n conn.close()\r\n return result[0] if result else None","repo_name":"Vasuishere/telgram","sub_path":"UserDatabase_Register.py","file_name":"UserDatabase_Register.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9477499035","text":"\"\"\"\nScript for loading in all all the functions. Testing that loading is working.\n\npython3 -i py/loading_script.py -d -n 2000 -b 0.005 -w 100 -s 5\n\"\"\"\nimport argparse, sys, os, shutil, h5py\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport matplotlib.pyplot as plt\nfrom scipy.stats import binom, betabinom\nfrom scipy.optimize import minimize\nfrom multiprocessing import Pool\n\nparser = argparse.ArgumentParser(description='For loading in the functions and loading the cell info.')\nparser.add_argument('-n', '--num_cells', help='Number of cells to use.', default=100, type=int)\nparser.add_argument('-b', '--bin_width', help='Time bin with to use (in seconds).', default=0.001, type=float)\nparser.add_argument('-w', '--window_size', help='The number of bins to use for fitting.', default=100, type=int)\nparser.add_argument('-s', '--window_skip', help='The number of bins between fitting windows.', default=10, type=int)\nparser.add_argument('-d', '--debug', help='Enter debug mode.', default=False, action='store_true')\nargs = parser.parse_args()\n\npd.set_option('max_rows', 30)\nnp.set_printoptions(linewidth=shutil.get_terminal_size().columns)\n\nproj_dir = os.path.join(os.environ['PROJ'], 'Conway_Maxwell_Hierarchical_Model')\ncsv_dir = os.path.join(proj_dir, 'csv')\nmat_dir = os.path.join(proj_dir, 'mat')\npy_dir = os.path.join(proj_dir, 'py')\nh5_dir = os.path.join(proj_dir, 'h5')\nposterior_dir = os.path.join(proj_dir, 'posterior')\nfrontal_dir = os.path.join(proj_dir, 'frontal')\n\nsys.path.append(py_dir)\nsys.path.append(os.path.join(os.environ['PROJ'], 'Conway_Maxwell_Binomial_Distribution'))\nimport ConwayMaxwellHierarchicalModel as comh\nimport ConwayMaxwellBinomial as comb\n\ndef saveMeasurementsForAllTrials(bin_width, stim_info, region_to_spike_time_dict, h5_dir, window_size=100, window_skip=10):\n \"\"\"\n Get the measurements for each trial and save them down, one by one.\n Arguments: bin_width, float,\n stim_info, pandas DataFrame\n Returns: nothing\n \"\"\"\n region_to_num_cells = {r:len(d)for r,d in region_to_spike_time_dict.items()}\n for trial_index in stim_info.index.values:\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Processing trial number ' + str(trial_index) + '...')\n trial_bin_width_file_name = comh.getH5FileName(h5_dir, trial_index, bin_width, window_size)\n if os.path.isfile(trial_bin_width_file_name):\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Already have this file. 
Skipping...')\n continue\n trial_bin_width_file = h5py.File(trial_bin_width_file_name, 'w')\n trial_info = stim_info.loc[trial_index]\n bin_borders, region_to_active_cells, region_to_spike_counts = comh.getNumberOfActiveCellsByRegion(trial_info['read_starts'], trial_info['read_stops'], bin_width, region_to_spike_time_dict)\n is_stimulated = comh.isStimulatedBins(bin_borders, trial_info['stim_starts'], trial_info['stim_stops'])\n bin_centres = comh.getBinCentres(bin_borders)\n num_bins = bin_centres.size\n window_starts = np.arange(0, num_bins-window_size, window_skip)\n window_centre_times = bin_centres[window_starts+(window_size//2)]\n window_inds = np.vstack([ws + np.arange(window_size) for ws in window_starts])\n trial_bin_width_file.create_dataset('bin_width',data=bin_width)\n trial_bin_width_file.create_dataset('window_size',data=window_size)\n trial_bin_width_file.create_dataset('window_skip',data=window_skip)\n trial_bin_width_file.create_dataset('window_centre_times',data=window_centre_times)\n for region in region_to_active_cells.keys():\n regional_active_cells_binned = region_to_active_cells.get(region)\n regional_spike_count_array = region_to_spike_counts.get(region)\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Processing region ' + region + '...')\n moving_avg_activity_by_cell, moving_avg, moving_var, corr_avg, all_stimulated, any_stimulated, binom_params, binom_log_like, betabinom_ab, betabinom_log_like, comb_params, comb_log_like = comh.getTrialMeasurements(regional_active_cells_binned, regional_spike_count_array, is_stimulated, window_inds, region_to_num_cells.get(region), window_size=window_size, window_skip=window_skip)\n regional_group = trial_bin_width_file.create_group(region)\n regional_group.create_dataset('num_cells',data=region_to_num_cells.get(region))\n regional_group.create_dataset('cell_list',data=list(region_to_spike_time_dict.get(region).keys()))\n regional_group.create_dataset('num_active_cells_binned',data=regional_active_cells_binned)\n regional_group.create_dataset('region',data=region)\n regional_group.create_dataset('moving_avg_activity_by_cell',data=moving_avg_activity_by_cell)\n regional_group.create_dataset('moving_avg',data=moving_avg)\n regional_group.create_dataset('moving_var',data=moving_var)\n regional_group.create_dataset('corr_avg',data=corr_avg)\n regional_group.create_dataset('all_stimulated',data=all_stimulated)\n regional_group.create_dataset('any_stimulated',data=any_stimulated)\n regional_group.create_dataset('binom_params',data=binom_params)\n regional_group.create_dataset('binom_log_like',data=binom_log_like)\n regional_group.create_dataset('betabinom_ab',data=betabinom_ab)\n regional_group.create_dataset('betabinom_log_like',data=betabinom_log_like)\n regional_group.create_dataset('comb_params',data=comb_params)\n regional_group.create_dataset('comb_log_like',data=comb_log_like)\n trial_bin_width_file.close()\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Done saving.')\n return None\n\nif not args.debug:\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Starting main function...')\n cell_info = comh.loadCellInfo(csv_dir)\n stim_info, stim_ids = comh.loadStimulusInfo(mat_dir)\n adj_cell_ids = comh.getRandomSubsetOfCells(cell_info, args.num_cells)\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading spike time dictionary...')\n spike_time_dict = comh.loadSpikeTimeDict(adj_cell_ids, posterior_dir, frontal_dir, cell_info)\n region_to_spike_time_dict = comh.divideSpikeTimeDictByRegion(spike_time_dict,cell_info)\n 
print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loaded.')\n print(dt.datetime.now().isoformat() + ' INFO: ' + 'Measuring and saving...')\n saveMeasurementsForAllTrials(args.bin_width, stim_info, region_to_spike_time_dict, h5_dir, window_size=args.window_size, window_skip=args.window_skip)\n","repo_name":"thomasjdelaney/Conway_Maxwell_Hierarchical_Model","sub_path":"py/saving_hdf5_script.py","file_name":"saving_hdf5_script.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12303731229","text":"import os\n\nimport click\n\nimport db\nfrom frontend import create_app\nfrom config import config\n\n\napp = create_app()\n\n\n@app.cli.command(help=\"Clean database and remove uploaded files.\")\ndef clean():\n db.clean_db()\n print(\"Database cleaned and uploaded files removed\")\n\n\n@app.cli.group(help=\"Translation commands.\")\ndef translate():\n pass\n\n\n@translate.command(help=\"Initialize a new language.\")\n@click.argument('lang')\ndef init(lang):\n pot_location = os.path.join(config.BABEL_TRANSLATIONS_LOCATION, 'messages.pot')\n if os.system(f'pybabel extract -F {config.BABEL_CONFIG_LOCATION} -k _l -o {pot_location} .'):\n raise RuntimeError('extract command failed')\n if os.system(f'pybabel init -i {pot_location} -d {config.BABEL_TRANSLATIONS_LOCATION} -l ' + lang):\n raise RuntimeError('init command failed')\n os.remove(pot_location)\n print(\"Language init success\")\n\n\n@translate.command(help=\"Update all languages.\")\ndef update():\n pot_location = os.path.join(config.BABEL_TRANSLATIONS_LOCATION, 'messages.pot')\n if os.system(f'pybabel extract -F {config.BABEL_CONFIG_LOCATION} -k \"_l _t\" -o {pot_location} .'):\n raise RuntimeError('extract command failed')\n if os.system(f'pybabel update -i {pot_location} -d {config.BABEL_TRANSLATIONS_LOCATION}'):\n raise RuntimeError('update command failed')\n os.remove(pot_location)\n print(\"Translations update success\")\n\n\n@translate.command(help=\"Compile all languages.\")\ndef compile():\n if os.system(f'pybabel compile -d {config.BABEL_TRANSLATIONS_LOCATION}'):\n raise RuntimeError('compile command failed')\n print(\"Compilation success\")\n\n\n@app.cli.command(help=\"Update example data file.\")\n@click.argument('name')\ndef example(name):\n try:\n db.example_pack_update(name)\n except FileNotFoundError:\n print(\"Unable to find file \" + name)\n else:\n print(\"Updating example pack\")\n\n\n\n","repo_name":"klima7/Social-Insight","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73471750345","text":"from collections import Counter\nclass Solution:\n def hIndex(self, citations: List[int]) -> int:\n if len(citations) == 0:\n return 0\n maxcite = max(citations)\n c = Counter()\n c.update(citations)\n cumpapers = 0\n for i in range(1, maxcite+1)[::-1]:\n cumpapers += c[i]\n if cumpapers >= i:\n return i\n return 0\n","repo_name":"gauravaror/programming","sub_path":"h-Index.py","file_name":"h-Index.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31381832702","text":"import csv\n\n\n# read plants_links.csv, only two columns: plant_name, link\n# read each row, and add the link to the list\n\nwith open('plants_links.csv', 'r') as f:\n reader = csv.reader(f)\n plants_links = 
list(reader)\n # print(plants_links[0], plants_links[1])\n c = {}\n for row in plants_links:\n if row[0] in c:\n c[row[0]] += 1\n else:\n c[row[0]] = 1\n # sort the dictionary by value, descending\n sorted_c = sorted(c.items(), key=lambda x: x[1], reverse=True)\n # write the list of dictionaries to a csv file\n with open('plants_count2.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['name', 'count'])\n for plant in sorted_c:\n writer.writerow([plant[0], plant[1]])\n quit()\n\n\n # sort the list by the count of links\n # write the list of links to a csv file","repo_name":"ohad24/encyclo-flower","sub_path":"scripts/inataturalist_scraper/count_plants_links.py","file_name":"count_plants_links.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"7887496354","text":"from sys import stdin, stdout\n\n#\n# 3 2\n# 1 3 1\n# 1 1 2 3 1\n# 1 2 3 | 4 1 2 3 4\n# 1 2 3 4 1 | 2 3 4\ndef the_best_vacation(n, x, d):\n\n i = j = days = hugs = res = 0\n\n while j < 2*n:\n\n ci = i%n\n pi = (i-1+n)%n\n pj = j%n\n\n if d[pj] >= x:\n res = max(res, gethugs(d[pj] - x + 1, d[pj]))\n j += 1\n i = j\n continue\n\n r = x - days\n\n if days + d[pj] > x:\n\n if d[pi] > r:\n res = max(res, hugs + gethugs(d[pi] - r + 1, d[pi]))\n else:\n res = max(res, hugs + gethugs(1, r))\n\n days -= d[ci]\n hugs -= gethugs(1, d[ci])\n i += 1\n else:\n if days + d[pi] > x:\n res = max(res, hugs + gethugs(d[pi] - r + 1, d[pi]))\n\n days += d[pj]\n hugs += gethugs(1, d[pj])\n j += 1\n\n return res\n\n\n# 1 2 3 => 6\ndef gethugs(a, b):\n return (a+b)*(b-a+1)//2\n\n\nif __name__ == '__main__':\n nx = list(map(int, stdin.readline().split()))\n d = list(map(int, stdin.readline().split()))\n\n stdout.write(str(the_best_vacation(nx[0], nx[1], d)) + '\\n')\n","repo_name":"tycyd/codeforces","sub_path":"greedy/1358D The Best Vacation.py","file_name":"1358D The Best Vacation.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2005171221","text":"\"\"\"appLex URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom appLex import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index),\n path('login/', views.index),\n path('registro/', views.register),\n path('registra_cliente', views.registrar_cliente),\n path('ingresar', views.ingresar_app),\n\n # client\n path('inicio/', views.home_cliente),\n path('mis_solicitudes/', views.c_solicitudes),\n path('mis_contratos/', views.c_contratos),\n path('modificar_cliente/',views.modificar_cliente),\n path('modificacion_cliente/',views.modificacion_cliente),\n\n # lawyer\n path('inicio_abogado/', views.home_abogado),\n path('causas/', views.a_causas),\n path('presupuestos/', views.a_presupuestos),\n path('detalle_presupuesto/', views.a_det_presupuesto),\n\n # legal technician\n path('inicio_tec/', views.home_tecjuridico),\n path('solicitudes/', views.tj_solicitudes),\n path('registrar_pagos/', views.tj_registrar_pagos)\n]\n","repo_name":"NatyNatur/ET_PRY3111","sub_path":"appLex/appLex/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36165693728","text":"##1\nclass Node:\n def __init__(self, elem,next):\n self.elem = elem\n self.next = next\n\nclass SinglyLinkedList:\n def __init__(self,a):\n tail = None\n self.head = None\n for i in range(0,len(a)):\n new_node = Node(a[i],None)\n if i == 0:\n self.head = new_node #Don't use Node(a[i],None) instead of new_node because it creates a new node every time and your Linked List loses everything\n tail = new_node\n else:\n\n tail.next = new_node\n tail = new_node\n\n def printlist(self):\n temp=self.head\n STR_0=\"\"\n while temp!=None:\n STR_0+=str(temp.elem)+\"->\"\n temp=temp.next\n return print(STR_0+'None')\n\n\n def nodeat(self,idx):\n temp=self.head\n count=0\n while temp!=None:\n if count==idx:\n return temp\n temp=temp.next\n count+=1\n return None\n\n\n def remove(self,val):\n temp=self.head\n prev=None\n idx=0\n count=0\n #checking length of list\n while temp!=None:\n count+=1\n temp=temp.next\n temp=self.head\n while temp!=None:\n #when the node exists\n if temp.elem==val:\n #when the node is the head\n if idx==0:\n self.head=temp.next\n return temp.elem\n #when the node is the tail\n elif idx==count:\n prev=self.nodeat(idx-1) # Getting the previous Node of the desired node\n prev.next=None\n return temp.elem\n #when the node is in the middle\n else:\n prev=self.nodeat(idx-1) # Getting the previous Node of the desired node\n prev.next=temp.next\n return temp.elem\n\n temp=temp.next\n idx+=1\n return \"Value not in the LinkedList\"\n\n def insert(self,elem,pos):\n idx=pos-1\n new_node = Node(elem, None)\n temp=self.head\n count=0\n prev=None\n while temp!=None:\n count+=1\n temp=temp.next\n #for index=0\n if idx==0:\n new_node.next=self.head\n self.head=new_node\n #for last index\n elif idx+1==count:\n tail=self.nodeat(count-1) # temp is None after the counting loop, so fetch the tail explicitly\n tail.next=new_node\n new_node.next=None\n #for index in middle\n else:\n prev=self.nodeat(idx-1)\n new_node.next = prev.next\n prev.next=new_node\n def rotateRight(self,k):\n for i in range(k):\n temp=self.head\n #getting the last node\n while temp.next!=None:\n temp=temp.next\n\n #getting the node before last node\n n=self.head\n while n.next!=temp:\n n=n.next\n\n #Setting the last node as head\n temp.next=self.head\n self.head=temp\n n.next=None\n\n def rotateLeft(self,k):\n for i in range(k):\n #setting temp_1 as 
the next node of head\n temp_1=self.head.next\n\n #getting the last node as temp\n temp=self.head\n while temp.next!=None:\n temp=temp.next\n\n #main process of rotating\n temp.next=self.head#setting the self.head as the last node\n self.head.next=None #setting None as the next node of the last node\n self.head=temp_1 #setting the temp_1 as the head\n def reverse(self):\n temp=self.head\n new_head=None\n while temp!=None:\n new_node=Node(temp.elem,None) #creating a new node\n new_node.next=new_head #setting the next of the new node to the current new_head\n new_head = new_node #changing the head to the new node\n temp=temp.next # changing the temp to go to next node\n self.head=new_head # changing self.head to the last node (new head)\n\n\n\n\nh1=[92,56,88,55,19,67,66]\nh2=SinglyLinkedList(h1)\nprint(h2.remove(56))\nh2.printlist()\nid=547\nh2.insert(id%23,4)\nh2.printlist()\nbirthyear=2001\nh2.insert(birthyear%61,3)\nh2.printlist()\nh2.rotateRight(3)\nh2.printlist()\nh2.remove(92)\nh2.printlist()\nh2.remove(66)\nh2.printlist()\nh2.rotateLeft(4)\nh2.printlist()\nh2.reverse()\nh2.printlist()\n\n\n\n#2 no code\nclass Node:\n def __init__(self,elem,next):\n self.elem=elem\n self.next=next\nclass LinkeList:\n def __init__(self,a):\n self.head=None\n tail=None\n for i in range(0,len(a)):\n new_node=Node(a[i],None)\n if i==0:\n self.head=new_node\n tail=new_node\n else:\n tail.next=new_node\n tail=new_node\n\n def printlist(self):\n temp=self.head\n STR_0=\"\"\n while temp!=None:\n STR_0+=str(temp.elem)+\"->\"\n temp=temp.next\n return print(STR_0[:-2])\n\n def reverselist(self,head,k):\n temp=head\n prev=None\n next=None\n count=0\n while temp!=None and count<k:\n next=temp.next\n temp.next=prev\n prev=temp\n temp=next\n count+=1\n \"\"\"After the 1st iteration , we will have 4->5->6->............. where 4 is next\n 3->2->1->None where 3 is previous\"\"\"\n if next!=None: # recurse only if nodes remain\n head.next=self.reverselist(next,k)\n \"\"\"We have now set 3->2->1->4->5->6-...... and 4 is set as head of new list and the reverse will start from 4->5->6->..... \"\"\"\n \"\"\"After the 2nd iteration , we will have 3->2->1->6->5->4->7->8->9->None where 7 is head of new list and the reverse will start from 7->8->9->..... 
\"\"\"\n return prev\n\n\na=input(\"\")\nstr_0=a.split(\" and \")\ndemo=str_0[0].split(\"->\")\nl1=[]\nfor i in range(0,len(demo)):\n l1.append(int(demo[i]))\n\na1=LinkeList(l1)\na1.printlist()\na1.head=a1.reverselist(a1.head,int(str_0[1]))\na1.printlist()\n\n\n\n#3 no\nclass Node:\n def __init__(self,elem,next):\n self.elem=elem\n self.next=next\nclass LinkeList:\n def __init__(self,a):\n self.head=None\n tail=None\n for i in range(0,len(a)):\n new_node=Node(a[i],None)\n if i==0:\n self.head=new_node\n tail=new_node\n else:\n tail.next=new_node\n tail=new_node\n\n def printlist(self):\n temp=self.head\n STR_0=\"\"\n while temp!=None:\n STR_0+=str(temp.elem)+\"->\"\n temp=temp.next\n return print(STR_0[:-2])\n\n def reverse(self,a):\n temp = a\n new_head = None\n while temp != None:\n new_node = Node(temp.elem, None) # creating a new node\n new_node.next = new_head # setting of the next of the new node to self.head\n new_head = new_node # changing the head to the new node\n temp = temp.next # changing the temp to go to next node\n return new_head\n\n def even_odd_reverse(self):\n Even_start=None\n Even_end=None\n Odd_start=None\n Odd_end=None\n temp=self.head\n while temp!=None:\n if temp.elem%2==0:\n if Even_start==None:\n Even_start=temp\n Even_end=temp\n else:\n Even_end.next=temp\n Even_end=temp\n else:\n if Odd_start==None:\n Odd_start=temp\n Odd_end=temp\n else:\n Odd_end.next=temp\n Odd_end=temp\n temp=temp.next\n\n\n #Setting the next of the last node to None\n # 1->2->3->4->5->6->7->8 for this case we have 2->4->6->8->None and 1->3->5->7->None\n Even_end.next=None\n Odd_end.next=None\n\n #Setting the even head\n\n\n\n #If we need to reverse the Even list also\n # even_head=self.reverse(Even_start)\n # #getting last node of even list\n # even_tmp=even_head\n # while even_tmp.next!=None:\n # even_tmp=even_tmp.next\n # #Gets the even end\n # Even_end=even_tmp\n\n #Setting Odd_head\n odd_head=self.reverse(Odd_start)\n\n # getting last node of even list\n odd_tmp = odd_head\n while odd_tmp.next != None:\n odd_tmp = odd_tmp.next\n\n #Setting odd end\n Odd_end = odd_tmp\n\n\n\n #Now connecting this 2 linked list\n Even_end.next=odd_head\n Odd_end.next=None\n\n #Setting the head of the list as Even head\n self.head=Even_start\n\n\n\n\n\n\na=input(\"\")\n\ndemo=a.split(\"->\")\nl1=[]\nfor i in range(0,len(demo)):\n l1.append(int(demo[i]))\n\na1=LinkeList(l1)\n#a1.printlist()\na1.even_odd_reverse()\na1.printlist()\n\n\n","repo_name":"mitul3737/My-University-Life-Learning","sub_path":"CSE_220/Practice sheet/Sheet_3.py","file_name":"Sheet_3.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"27614057295","text":"import os\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom unidecode import unidecode\nimport pandas as pd\n\nPATH = r\"files\"\nresult_file = os.path.join(PATH, \"result.txt\")\n\ndef clean_list(string):\n \"\"\"\n Used for filter() to check any instance of \"\"\n \"\"\"\n if string == \"\":\n return False\n return True\n\ndef get_races(year):\n \"\"\"\n This will get the races held the year inputted and prompt the user to select their desired race\n Returns \"if the user wants to stop\", \"url for the race\", \"race name\"\n \"\"\"\n url = f\"https://www.formula1.com/en/results.html/{year}/races.html\"\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html, \"html.parser\")\n table = soup.find(\"table\")\n table = table.find_all(class_=\"dark bold 
ArchiveLink\")\n\n races = {}\n print(\"Please the type the race you want to get or type 'quit' to exit\\n\")\n for race in table:\n gp = race.get_text(strip=True)\n href = \"https://www.formula1.com\" + race.get(\"href\")\n races[unidecode(gp.lower())] = href\n print(gp)\n\n selection = unidecode(input().lower())\n while selection != \"quit\":\n if selection in races:\n return False, races[selection], selection\n\n print(\"Error: please enter a name in the list above\\n\")\n selection = unidecode(input().lower())\n return True, \"\", \"\"\n\ndef get_results(url):\n \"\"\"\n Gets the race results from the specific grand prix that is inputted\n \"\"\"\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html, \"html.parser\")\n temp_table = soup.find(\"table\")\n return temp_table.find_all(\"tr\")\n\ndef create_txt(table):\n \"\"\"\n Converts table into txt file, then formats the data for the csv,\n finally converts the txt file into a csv and deletes the txt file\n \"\"\"\n text_file = open(result_file, \"w\", encoding=\"utf-8\")\n for driver in table:\n temp_list = list(filter(clean_list, unidecode(driver.text).strip().split(\"\\n\")))\n temp_str = \"\\n\".join(temp_list) + \"\\n\"\n text_file.write(temp_str)\n text_file.close()\n\n with open(result_file, 'r', encoding=\"utf-8\") as fin:\n data = fin.read().split(\"\\n\")\n\n print(data)\n formatted_str = data[0] + \",\" + data[1] + \",\" + data[2] + \",\"\n formatted_str += data[3] + \",\" + data[4] + \",\" + data[5]\n formatted_str += \",\" + data[6] + \"\\n\"\n\n for index in range(7, len(data)):\n if(index+2)%9 == 2:\n formatted_str += data[index] + \" \"\n elif(index+2)%9 == 8:\n formatted_str += data[index] + \"\\n\"\n elif(index+2)%9 != 4:\n formatted_str += data[index] + \",\"\n\n text_file = open(result_file, \"w\", encoding=\"utf-8\")\n text_file.write(formatted_str)\n text_file.close()\n\ndef create_csv():\n \"\"\"\n Converts the all the data into a csv\n \"\"\"\n desired_year = -1\n while(desired_year < 1950 or desired_year > 2022 or desired_year == 0):\n desired_year = int(input(\"Please enter desired year between 1950-2022 or 0 to exit: \"))\n if desired_year != 0:\n end_search, race_url, race = get_races(desired_year)\n if not end_search:\n table = get_results(race_url)\n create_txt(table)\n file_name = f\"{desired_year}_{race}_results.csv\"\n dataframe1 = pd.read_csv(result_file)\n dataframe1.to_csv(os.path.join(PATH, file_name),\n index = None)\n os.remove(result_file)\n","repo_name":"fbaig873/F1_Data_To_CSV","sub_path":"commands/races.py","file_name":"races.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41662138536","text":"import os\nimport shutil\nimport sys\n\nfrom nova.compute import task_states\nfrom nova import exception\nfrom nova.image import glance\nfrom nova.openstack.common import cfg\nfrom nova.openstack.common import log as logging\nfrom nova.virt.hyperv import baseops\nfrom nova.virt.hyperv import constants\nfrom nova.virt.hyperv import ioutils\nfrom nova.virt.hyperv import vmutils\nfrom xml.etree import ElementTree\n\n# Check needed for unit testing on Unix\nif sys.platform == 'win32':\n import wmi\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\nclass SnapshotOps(baseops.BaseOps):\n def __init__(self):\n super(SnapshotOps, self).__init__()\n self._vmutils = vmutils.VMUtils()\n\n def snapshot(self, context, instance, name, update_task_state):\n \"\"\"Create snapshot from a 
running VM instance.\"\"\"\n instance_name = instance[\"name\"]\n vm = self._vmutils.lookup(self._conn, instance_name)\n if vm is None:\n raise exception.InstanceNotFound(instance=instance_name)\n vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]\n vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]\n\n LOG.debug(_(\"Creating snapshot for instance %s\"), instance_name)\n (job_path, ret_val, snap_setting_data) = \\\n vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())\n if ret_val == constants.WMI_JOB_STATUS_STARTED:\n success = self._vmutils.check_job_status(job_path)\n if success:\n job_wmi_path = job_path.replace('\\\\', '/')\n job = wmi.WMI(moniker=job_wmi_path)\n snap_setting_data = job.associators(\n wmi_result_class='Msvm_VirtualSystemSettingData')[0]\n else:\n success = (ret_val == 0)\n if not success:\n raise vmutils.HyperVException(\n _('Failed to create snapshot for VM %s') %\n instance_name)\n else:\n update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)\n\n export_folder = None\n f = None\n\n try:\n src_vhd_path = os.path.join(CONF.instances_path, instance_name,\n instance_name + \".vhd\")\n\n image_man_svc = self._conn.Msvm_ImageManagementService()[0]\n\n LOG.debug(_(\"Getting info for VHD %s\"), src_vhd_path)\n (src_vhd_info, job_path, ret_val) = \\\n image_man_svc.GetVirtualHardDiskInfo(src_vhd_path)\n if ret_val == constants.WMI_JOB_STATUS_STARTED:\n success = self._vmutils.check_job_status(job_path)\n else:\n success = (ret_val == 0)\n if not success:\n raise vmutils.HyperVException(\n _(\"Failed to get info for disk %s\") %\n (src_vhd_path))\n\n src_base_disk_path = None\n et = ElementTree.fromstring(src_vhd_info)\n for item in et.findall(\"PROPERTY\"):\n if item.attrib[\"NAME\"] == \"ParentPath\":\n src_base_disk_path = item.find(\"VALUE\").text\n break\n\n export_folder = self._vmutils.make_export_path(instance_name)\n\n dest_vhd_path = os.path.join(export_folder, os.path.basename(\n src_vhd_path))\n LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),\n locals())\n shutil.copyfile(src_vhd_path, dest_vhd_path)\n\n image_vhd_path = None\n if not src_base_disk_path:\n image_vhd_path = dest_vhd_path\n else:\n dest_base_disk_path = os.path.join(export_folder,\n os.path.basename(src_base_disk_path))\n LOG.debug(_('Copying base disk %(src_vhd_path)s to '\n '%(dest_base_disk_path)s'), locals())\n shutil.copyfile(src_base_disk_path, dest_base_disk_path)\n\n LOG.debug(_(\"Reconnecting copied base VHD \"\n \"%(dest_base_disk_path)s and diff VHD %(dest_vhd_path)s\"),\n locals())\n (job_path, ret_val) = \\\n image_man_svc.ReconnectParentVirtualHardDisk(\n ChildPath=dest_vhd_path,\n ParentPath=dest_base_disk_path,\n Force=True)\n if ret_val == constants.WMI_JOB_STATUS_STARTED:\n success = self._vmutils.check_job_status(job_path)\n else:\n success = (ret_val == 0)\n if not success:\n raise vmutils.HyperVException(\n _(\"Failed to reconnect base disk \"\n \"%(dest_base_disk_path)s and diff disk \"\n \"%(dest_vhd_path)s\") %\n locals())\n\n LOG.debug(_(\"Merging base disk %(dest_base_disk_path)s and \"\n \"diff disk %(dest_vhd_path)s\"),\n locals())\n (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(\n SourcePath=dest_vhd_path,\n DestinationPath=dest_base_disk_path)\n if ret_val == constants.WMI_JOB_STATUS_STARTED:\n success = self._vmutils.check_job_status(job_path)\n else:\n success = (ret_val == 0)\n if not success:\n raise vmutils.HyperVException(\n _(\"Failed to merge base disk %(dest_base_disk_path)s \"\n \"and diff disk 
%(dest_vhd_path)s\") %\n locals())\n image_vhd_path = dest_base_disk_path\n\n (glance_image_service, image_id) = \\\n glance.get_remote_image_service(context, name)\n image_metadata = {\"is_public\": False,\n \"disk_format\": \"vhd\",\n \"container_format\": \"bare\",\n \"properties\": {}}\n f = ioutils.open(image_vhd_path, 'rb')\n LOG.debug(\n _(\"Updating Glance image %(image_id)s with content from \"\n \"merged disk %(image_vhd_path)s\"),\n locals())\n update_task_state(task_state=task_states.IMAGE_UPLOADING,\n expected_state=task_states.IMAGE_PENDING_UPLOAD)\n glance_image_service.update(context, image_id, image_metadata, f)\n\n LOG.debug(_(\"Snapshot image %(image_id)s updated for VM \"\n \"%(instance_name)s\"), locals())\n finally:\n LOG.debug(_(\"Removing snapshot %s\"), name)\n (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(\n snap_setting_data.path_())\n if ret_val == constants.WMI_JOB_STATUS_STARTED:\n success = self._vmutils.check_job_status(job_path)\n else:\n success = (ret_val == 0)\n if not success:\n raise vmutils.HyperVException(\n _('Failed to remove snapshot for VM %s') %\n instance_name)\n if f:\n f.close()\n if export_folder:\n LOG.debug(_('Removing folder %s '), export_folder)\n shutil.rmtree(export_folder)\n","repo_name":"maoy/zknova","sub_path":"nova/virt/hyperv/snapshotops.py","file_name":"snapshotops.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"21819087138","text":"import math\n\nW, H, M, N = map(int, input().split())\ncams = []\nfor i in range(M):\n cams.append(list(map(int, input().split())))\nobjs = []\nfor i in range(N):\n objs.append(list(map(int, input().split())))\n\ndef euclid_distance(x, y, a, b):\n return math.sqrt((x-a)**2+(y-b)**2)\n \ndef reachable(x, y, a, b, r):\n return r - euclid_distance(x, y, a, b) > 0\n\ndef in_angle(x, y, a, b, t, d):\n t_deg = math.atan2(b-y, a-x) * 180 / math.pi \n if(t_deg < 0):\n t_deg += 360\n low_bound_deg = t - d/2\n upper_bound_deg = t + d/2\n return t_deg >= low_bound_deg and t_deg <= upper_bound_deg\n\ndef judge_oservable(a,b):\n for i in range(M):\n if(reachable(cams[i][0], cams[i][1], a, b, cams[i][4])):\n if(in_angle(cams[i][0], cams[i][1], a, b, cams[i][2], cams[i][3])):\n return \"yes\"\n return \"no\"\n \nfor i in range(N):\n print(judge_oservable(objs[i][0], objs[i][1]))\n\n","repo_name":"kmzn128/CCI_Python","sub_path":"CCI_1/Paiza/a018.py","file_name":"a018.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14420906486","text":"\ndef enterDate():\n\n while(True):\n\n date = int(\n input(\"Enter a one date with 8 Characters, in this format dia/mes/año = 24082022: \"))\n date = str(date)\n\n day = date[0:2]\n month = date[2:4]\n year = date[4:8]\n\n if int(day) > 31:\n print(\"-\"*90)\n print(\n \"The day is invalid, because it is bigger than 31, please try again with a valid date\")\n print(\"-\"*90)\n continue\n if int(month) > 12:\n print(\"-\"*90)\n print(\n \"The month is invalid, because it is bigger than 12, please try again with a valid date\")\n print(\"-\"*90)\n continue\n\n if(len(date) == 8):\n formatDate = day + \"/\" + month + \"/\" + year\n print(\"-\"*45)\n print(\"The entered date is: \", str(formatDate))\n print(\"-\"*45)\n break\n elif (len(date) < 8 or len(date) > 8):\n print(\"-\"*50)\n print(\"The date is not valid, please enter 8 characters\")\n print(\"-\"*50)\n 
continue\n\n\nenterDate()\n","repo_name":"CesarMartinez23/practica_semana_6_python","sub_path":"Ejercicio2Pract.py","file_name":"Ejercicio2Pract.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12389738595","text":"from utils import *\nfrom sys import exit\n\n\n# OPEN, CLOSE = \"([{<\", \")]}>\"\nDELIMS = {\n \"(\" : \")\",\n \"[\" : \"]\",\n \"{\" : \"}\",\n \"<\" : \">\",\n}\nCORRUPT_POINTS = {\n \")\": 3,\n \"]\": 57,\n \"}\": 1197,\n \">\": 25137,\n}\nCOMPLETION_POINTS = {\n \")\": 1,\n \"]\": 2,\n \"}\": 3,\n \">\": 4,\n}\n\n\nSAMPLE = False\ndata = read_lines(input_for(__file__, SAMPLE), str)\n\nsyntax_score = 0\ncompletion_scores = []\n\nfor line in data:\n stack = []\n\n # Part 1\n\n for c in line:\n if c in DELIMS:\n stack.append(c)\n else:\n expected = DELIMS[stack.pop()]\n if expected != c:\n syntax_score += CORRUPT_POINTS[c]\n break\n\n # Part 2\n\n else: # incomplete line\n compl_score = 0\n for c in reversed(stack):\n compl_score *= 5\n compl_score += COMPLETION_POINTS[DELIMS[c]]\n completion_scores.append(compl_score)\n \n\n# 1\nprint(syntax_score)\n\n# 2\ncompletion_scores.sort()\nprint(completion_scores[len(completion_scores) // 2])","repo_name":"jppellet/adventofcode","sub_path":"2021_10.py","file_name":"2021_10.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"22549804206","text":"from __future__ import print_function, division\n\nimport sys\n\nimport numpy as np\n\nfrom sklearn import preprocessing, metrics\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.datasets import load_breast_cancer, fetch_openml\nfrom sklearn.impute import SimpleImputer\nfrom dwave.system.samplers import DWaveSampler\nfrom dwave.system.composites import EmbeddingComposite\n\nfrom qboost import WeakClassifiers, QBoostClassifier, QboostPlus\n\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef metric(y, y_pred):\n\n return metrics.accuracy_score(y, y_pred)\n\n\ndef train_model(X_train, y_train, X_test, y_test, lmd):\n \"\"\"\n Train qboost model\n\n :param X_train: train input\n :param y_train: train label\n :param X_test: test input\n :param y_test: test label\n :param lmd: lmbda to control regularization term\n :return:\n \"\"\"\n NUM_READS = 3000\n NUM_WEAK_CLASSIFIERS = 35\n # lmd = 0.5\n TREE_DEPTH = 3\n\n # define sampler\n dwave_sampler = DWaveSampler(solver={'qpu': True})\n # sa_sampler = micro.dimod.SimulatedAnnealingSampler()\n emb_sampler = EmbeddingComposite(dwave_sampler)\n\n N_train = len(X_train)\n N_test = len(X_test)\n\n print(\"\\n======================================\")\n print(\"Train#: %d, Test: %d\" %(N_train, N_test))\n print('Num weak classifiers:', NUM_WEAK_CLASSIFIERS)\n print('Tree depth:', TREE_DEPTH)\n\n\n # input: dataset X and labels y (in {+1, -1}\n\n # Preprocessing data\n # imputer = SimpleImputer()\n scaler = preprocessing.StandardScaler() # standardize features\n normalizer = preprocessing.Normalizer() # normalize samples\n\n # X = imputer.fit_transform(X)\n X_train = scaler.fit_transform(X_train)\n X_train = normalizer.fit_transform(X_train)\n\n # X_test = imputer.fit_transform(X_test)\n X_test = scaler.fit_transform(X_test)\n X_test = normalizer.fit_transform(X_test)\n\n ## Adaboost\n print('\\nAdaboost')\n\n clf = AdaBoostClassifier(n_estimators=NUM_WEAK_CLASSIFIERS)\n\n # scores = cross_val_score(clf, X, y, cv=5, 
scoring='accuracy')\n print('fitting...')\n clf.fit(X_train, y_train)\n\n hypotheses_ada = clf.estimators_\n # clf.estimator_weights_ = np.random.uniform(0,1,size=NUM_WEAK_CLASSIFIERS)\n print('testing...')\n y_train_pred = clf.predict(X_train)\n y_test_pred = clf.predict(X_test)\n\n print('accu (train): %5.2f'%(metric(y_train, y_train_pred)))\n print('accu (test): %5.2f'%(metric(y_test, y_test_pred)))\n\n # Ensembles of Decision Tree\n print('\\nDecision tree')\n\n clf2 = WeakClassifiers(n_estimators=NUM_WEAK_CLASSIFIERS, max_depth=TREE_DEPTH)\n clf2.fit(X_train, y_train)\n\n y_train_pred2 = clf2.predict(X_train)\n y_test_pred2 = clf2.predict(X_test)\n print(clf2.estimator_weights)\n\n print('accu (train): %5.2f' % (metric(y_train, y_train_pred2)))\n print('accu (test): %5.2f' % (metric(y_test, y_test_pred2)))\n\n # Ensembles of Decision Tree\n print('\\nQBoost')\n\n DW_PARAMS = {'num_reads': NUM_READS,\n 'auto_scale': True,\n # \"answer_mode\": \"histogram\",\n 'num_spin_reversal_transforms': 10,\n # 'annealing_time': 10,\n 'postprocess': 'optimization',\n }\n\n clf3 = QBoostClassifier(n_estimators=NUM_WEAK_CLASSIFIERS, max_depth=TREE_DEPTH)\n clf3.fit(X_train, y_train, emb_sampler, lmd=lmd, **DW_PARAMS)\n\n y_train_dw = clf3.predict(X_train)\n y_test_dw = clf3.predict(X_test)\n\n print(clf3.estimator_weights)\n\n print('accu (train): %5.2f' % (metric(y_train, y_train_dw)))\n print('accu (test): %5.2f' % (metric(y_test, y_test_dw)))\n\n\n # Ensembles of Decision Tree\n print('\\nQBoostPlus')\n clf4 = QboostPlus([clf, clf2, clf3])\n clf4.fit(X_train, y_train, emb_sampler, lmd=lmd, **DW_PARAMS)\n y_train4 = clf4.predict(X_train)\n y_test4 = clf4.predict(X_test)\n print(clf4.estimator_weights)\n\n print('accu (train): %5.2f' % (metric(y_train, y_train4)))\n print('accu (test): %5.2f' % (metric(y_test, y_test4)))\n\n\n print(\"=============================================\")\n print(\"Method \\t Adaboost \\t DecisionTree \\t Qboost \\t QboostIt\")\n print(\"Train\\t %5.2f \\t\\t %5.2f \\t\\t\\t %5.2f \\t\\t %5.2f\"% (metric(y_train, y_train_pred),\n metric(y_train, y_train_pred2),\n metric(y_train, y_train_dw),\n metric(y_train, y_train4)))\n print(\"Test\\t %5.2f \\t\\t %5.2f \\t\\t\\t %5.2f \\t\\t %5.2f\"% (metric(y_test, y_test_pred),\n metric(y_test,y_test_pred2),\n metric(y_test, y_test_dw),\n metric(y_test, y_test4)))\n print(\"=============================================\")\n\n # plt.subplot(211)\n # plt.bar(range(len(y_test)), y_test)\n # plt.subplot(212)\n # plt.bar(range(len(y_test)), y_test_dw)\n # plt.show()\n\n return\n\n##########\n#This section of code was added by Dr. 
Dani Caputi and Leo Madrid @ PEACE Inc.\n    \n#This is a pilot test of a basic machine learning algorithm for non-deterministic BMI\n\nUseBytes = False # if True, uses integer bytes as training data instead of raw bits\nBitsToSample = 20000 # samples this many bits (or bytes if UseBytes = True) before binary input selector\n\n    \n#Reads the data file for testing and training and generates input matrices for the machine learning\nreadFile = open('C:/Users/Aslan/Documents/python-machine-learning/NED_Output/NED_1594413673058.txt', 'r')\nsepfile = readFile.read().split('\\n')\nRbyte=[]\nfeedback=[]\nRbits=[]\nfor a in range (0,len(sepfile)):\n    if sepfile[a].startswith('Question'):\n        di=0\n        for b in range (a-5,a):\n            if sepfile[b].startswith('Feedback'):\n                di=1\n        if di==0:\n            for b in range (a-5,a):\n                nodes = sepfile[b].split(',')\n                for c in range (0,1000):\n                    bt = int(nodes[c])\n                    Rbyte.append(bt)\n                    strbin = str(bin(256+bt)[3:])\n                    for d in range (0,len(strbin)):\n                        Rbits.append(int(strbin[d]))\n        else:\n            for b in range (a-6,a):\n                nodes = sepfile[b].split(',')\n                if len(nodes)>10:\n                    for c in range (0,1000):\n                        bt = int(nodes[c])\n                        Rbyte.append(bt)\n                        strbin = str(bin(256+bt)[3:])\n                        for d in range (0,len(strbin)):\n                            Rbits.append(int(strbin[d]))\n    if sepfile[a].startswith('Feedback'):\n        xandy = sepfile[a].split(',')\n        feedback.append(str(xandy[1]))\n    \n    \n    \n    data=[]\n    labels=[]\n    \n    for a in range (0,len(feedback)):\n        if feedback[a]=='h' or feedback[a]=='m':\n            if feedback[a]=='h':\n                kval = 1\n            else:\n                kval = 0\n            labels.append(kval)\n            \n            #ll=a*5000\n            #ul=ll+5000\n            nByte =[]\n            if UseBytes==True:\n                ll_offset = 5000-(BitsToSample+1)\n                ll=(a*5000)+ll_offset # exclude the last byte, which contains the answer. On the byte level this seems to work well, but it is inconsistent and sometimes runs into the one-way-function problem; try even fewer bytes/bits.\n                ul=ll+BitsToSample\n                for b in range (ll,ul):\n                    nByte.append((Rbyte[b]/255.0)-0.5)\n            else:\n                ll_offset = 40000-(BitsToSample+1)\n                ll=(a*40000)+ll_offset # exclude the last byte, which contains the answer. On the byte level this seems to work well, but it is inconsistent and sometimes runs into the one-way-function problem; try even fewer bytes/bits. 
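# Note added for clarity (not part of the original script): bin(256 + bt)[3:] is a
# zero-padding trick for 8-bit values. Adding 256 sets a ninth bit, so bin() always
# returns a string shaped like '0b1xxxxxxxx'; slicing off the first three characters
# leaves exactly eight binary digits. For example, bin(256 + 5)[3:] == '00000101',
# whereas the naive bin(5)[2:] would give only '101' and lose the padding.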
\n ul=ll+BitsToSample\n for b in range (ll,ul):\n nByte.append((Rbits[b])) \n \n data.append(nByte)\n \nX_train, X_test, y_train, y_test = train_test_split(np.array(data), np.array(labels), test_size=0.5)\n\n##########\n\nclfs = train_model(X_train, y_train, X_test, y_test, 1.0)\n\n","repo_name":"Agent-Aslan/Halo-AI","sub_path":"HALO-ML.py","file_name":"HALO-ML.py","file_ext":"py","file_size_in_byte":8342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"14725169693","text":"import json\nimport os\nimport re\nfrom os.path import basename\nfrom string import Template\n\nimport yaml\n\nARG_DELI = '_' # if key has this, means it has argument\nMAP_KEY = 'key' # if value is a hash, we use this variable to define the argument name\nDEF_DELI = '@' # if args has this deli, we assuming the first part is the variable type\n\nclass JavaTemplate(Template):\n delimiter = '%'\n\n\nclass ShareKeyTemplate(Template):\n delimiter = '$@'\n\n\ndef flatten_json(y):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a)\n else:\n out[name] = x\n\n flatten(y)\n return out\n\n\ndef get_args(args: list):\n ret = []\n def_type = \"String\"\n for arg in args:\n parts = arg.split(DEF_DELI)\n tp = def_type\n vn = arg\n pl = len(parts)\n if pl == 2: # formatted like int@name\n tp = parts[0]\n vn = parts[1]\n ret.append(\"%s %s\" % (tp, vn))\n return \",\".join(ret)\n\n\ndef generate_interface(name, args=None):\n if args:\n argname = get_args(args)\n return \"String %s(%s)=>'';\" % (name, argname)\n else:\n return \"String get %s => '';\" % name\n\n\ndef generate_override(name, value, args=None): # value can be a dictionary\n if not args:\n return '@override String get %s => \"%s\";' % (name, value)\n argname = \"\"\n extra = \"\" # extra content added to template\n isdict = isinstance(value, dict)\n if args:\n argname = get_args(args)\n if isdict:\n map_name = \"map\"\n extra = 'final %s = {%s};\\n' % (map_name, \",\\n\".join([''' '%s' : \"%s\" ''' % (k, v)\n for k, v in value.items()]))\n value = \"%s[%s] ?? 
''\" % (map_name, args[0])\n else:\n value = '\"%s\"' % (repr(value.replace('\"', r'\\\"'))[1:-1])\n\n return ('''\n @override\n String %s(%s){\n %sreturn %s;\n }\n''') % (name, argname, extra, value)\n\n\ndef shift_arg(name, key):\n global ARG_DELI\n deli = ARG_DELI\n return re.sub(r'^([^%s]+)(.*)$' % deli, r'\\1%s%s\\2' % (deli, key), name)\n\n\ndef main(\n YAMLFILE,\n OUTPUTDIR,\n HELPER_NAME,\n DEFAULT_CLS,\n DEFAULT_OBJ,\n INTERFACE_ONLY,\n ARGS=\"\",\n):\n obj = yaml.safe_load(open(YAMLFILE))\n T_SETTINGS = 'settings' # YAML has settings can override\n settings = obj.get(T_SETTINGS, {})\n\n l18n = settings.get(\"l18n\", \"l18n\") # give user to override in YAML file\n HELPER_NAME = settings.get(\"helper\", HELPER_NAME)\n DEFAULT_CLS = settings.get(\"default_class\", DEFAULT_CLS)\n DEFAULT_OBJ = settings.get(\"default_object\", DEFAULT_OBJ)\n delegate = settings.get(\"delegate\", \"TRLocalizationDelegate\")\n\n EXT = \".dart\"\n DEFAULT_PKG = HELPER_NAME + EXT\n\n script_dir, script = os.path.split(__file__)\n yaml_full = os.path.realpath(YAMLFILE) # get the relative path\n os.chdir(os.path.dirname(yaml_full))\n NOTES = \"/// generated content don't modify it manually, modify %s instead\\n///Via: %s %s\\n\" % (\n YAMLFILE, script, ARGS)\n\n DEFAULT_TEMPLATE = JavaTemplate(NOTES + '''\n part of '%package';\n class %interface {\n %code\n static %interface instance() => %interface();\n }\n ''')\n CLS_TEMPLATE = JavaTemplate(NOTES + '''\n part of '%package';\n \n class %cls extends %interface {\n %code\n static %cls instance() => %cls();\n }\n ''')\n\n SIMPLEHELPER = NOTES + '''\n import 'dart:io';\n import 'package:flutter/material.dart';\n %extra\n\n // where the auto generated language implementations\n %parts;\n '''\n\n HELPER = SIMPLEHELPER + '''\n \n class %cls {\n static const map = {\n // locale to instance map\n %code\n };\n \n // alias map\n static const aliases = %alias;\n // default locale \n static const defaultLocale = Locale(%defaultLocale);\n \n static Locale? currentLocale;\n static dynamic supportedLocale(Locale locale){\n final sLocal = locale.toString();\n var cls = map[sLocal];\n if (cls == null ){ \n if (aliases.containsKey(sLocal)){\n cls = map[aliases[sLocal]];\n }else {\n final short = locale.languageCode;\n cls = map[short];\n }\n }\n return cls;\n }\n static %interface get %default_obj {\n if (currentLocale == null) {\n final PL = Platform.localeName.replaceAll(r'\\.*$',\"\").split('_');\n currentLocale = Locale(PL[0],PL.length > 1 ? PL[1] : null) ;\n }\n final cls = supportedLocale(currentLocale!);\n final found = cls ?? map[defaultLocale.toString()];\n if (found == null){\n throw Exception('Unknown locale $currentLocale specified');\n }\n return found() as %interface;\n }\n }\n %interface %default_obj = %cls.%default_obj;\n \n class %delegate extends LocalizationsDelegate<%interface> {\n const %delegate();\n \n List get supportedLocales {\n return %cls.map.keys.map(\n (name) { \n List code = name.split(\"_\");\n String? cc = code.length > 1 ? 
code[1] : null;\n return Locale.fromSubtags(languageCode: code[0], countryCode: cc);\n }\n ).toList();\n }\n \n @override\n bool isSupported(Locale locale) => _isSupported(locale);\n @override\n Future<%interface> load(Locale locale) {\n %cls.currentLocale = locale;\n return Future.value(%cls.%default_obj);\n }\n @override \n bool shouldReload(TRLocalizationDelegate old) => false;\n \n bool _isSupported(Locale locale) {\n return %cls.supportedLocale(locale) != null;\n }\n } \n extension LExt on BuildContext {\n %interface get %l18n => %cls.%default_obj;\n }\n \n '''\n\n\n sharedPrefix = 'Shared'\n language = obj.get('Languages', None)\n strings = obj.get('Strings', None)\n shared = obj.get(sharedPrefix, None) # shared keywords\n if not language:\n print(\"Missing language definition\")\n sys.exit(-1)\n\n\n T_NAME = 'name'\n T_LOCALE = 'locale'\n T_ALIAS = 'alias'\n T_DEFAULT = 'default'\n\n result = []\n names = []\n locales = {} # locales to name map\n aliases = {}\n default_locale = ''\n for value in language:\n result.append({})\n name = value[T_NAME]\n alias = value.get(T_ALIAS, None)\n locale = value.get(T_LOCALE, None)\n default = value.get(T_DEFAULT, None)\n names.append(name)\n if locale:\n locales[name] = locale\n if not default_locale and default:\n default_locale = locale\n if alias:\n if alias is not list:\n alias = [alias]\n for a in alias:\n aliases[a] = locale\n if not default_locale: # default to the first one\n default_locale = locales.get(names[0], '')\n\n def generate(names, result, alias, locales, default_locale, extra=\"\", interface_only=False):\n code = {\n\n }\n keys = sorted(result[0].keys())\n for k in keys:\n first, *rest = k.split(ARG_DELI)\n rest = list(filter(lambda a: a, rest)) # filter out all empty string\n code.setdefault(DEFAULT_CLS, []).append(generate_interface(first, rest))\n j = 0\n for name in names:\n code.setdefault(name, []).append(generate_override(first, result[j][k], rest))\n j += 1\n template = {\n DEFAULT_CLS: DEFAULT_TEMPLATE\n }\n for k, v in code.items():\n temp = template.get(k, CLS_TEMPLATE)\n with open(os.path.join(OUTPUTDIR, \"%s%s\" % (k, EXT)), \"w\") as f:\n f.write(\n temp.safe_substitute(dict(\n package=DEFAULT_PKG,\n interface=DEFAULT_CLS,\n code=\"\\n\".join(v),\n cls=k\n ))\n )\n template_str = HELPER\n if interface_only:\n template_str = SIMPLEHELPER\n\n\n with open(os.path.join(OUTPUTDIR, \"%s%s\" % (HELPER_NAME, EXT)), \"w\") as f:\n f.write(\n JavaTemplate(template_str).safe_substitute(\n dict(\n extra=extra or \"\",\n package=DEFAULT_PKG,\n interface=DEFAULT_CLS,\n code=\",\\n\".join([\n '\"%s\" : %s.instance' % (locales.get(k, k), k) for k in names\n ]),\n cls=HELPER_NAME,\n default_obj=DEFAULT_OBJ,\n parts=\";\\n\".join(\n [\"part '%s%s'\" % (n, EXT) for n in names + [DEFAULT_CLS]]\n ),\n # if there are different locale string pointing to same translation. 
for instance: zh_HK and zh_TW\n alias=alias,\n # set the default locale\n defaultLocale=\",\".join([\"'%s'\" % locale for locale in default_locale]),\n l18n=l18n,\n delegate=delegate\n )\n )\n )\n\n sharedKeys = [{} for i in range(len(language))]\n\n def convertShared(value, i):\n return ShareKeyTemplate(value).safe_substitute(**sharedKeys[i])\n\n if shared:\n for sk, sv in shared.items():\n if not isinstance(sv, list):\n sv = [sv] * len(language)\n i = 0\n for v in sv:\n cv = convertShared(v, i)\n sharedKeys[i][sk] = cv\n i += 1\n\n if strings:\n\n nl = len(names)\n strings = flatten_json(strings)\n for key, value in strings.items():\n key = re.sub(r'\\s', '', key)\n i = 0\n if not isinstance(value, list): # same value cross different languages\n value = [value] * nl\n if len(value) < nl:\n for j in range(len(value),\n nl): # if not enough list, use the first one repetitively\n value.append(value[0])\n needConversion = False\n new_key = key\n for v in value:\n if isinstance(v, dict):\n needConversion = True\n v = json.dumps(v)\n sv = str(v)\n # sv = ShareKeyTemplate(sv).safe_substitute(**sharedKeys[i])\n try:\n sv = convertShared(sv, i)\n except IndexError as e:\n print(\"For your key: %s has too many values to pack, expected less than %s\"\n % (key, len(language)), file=sys.stderr)\n raise e\n if needConversion:\n sv = json.loads(sv)\n new_key = shift_arg(key, MAP_KEY)\n result[i][new_key] = sv\n i += 1\n if i != nl:\n raise \"%s has less value than %d\" % (key, nl)\n i = 0\n for keys in sharedKeys:\n result[i].update({\"%s%s\" % (sharedPrefix, k): v for k, v in keys.items()})\n i += 1\n generate(names, result, aliases, locales, default_locale.split(\"_\"),\n interface_only=INTERFACE_ONLY,\n extra=obj.get('extra', ''))\n\n\nSAMPLE_YAML = '''\nLanguages:\n - locale: en_US\n name: English\n default: true\n - locale: zh_CN\n name: Chinese\nShared:\n # we put all the shared keywords here, refer it by using $@ prefix, value can be one or an array by languages\n AppName: XSleep\n App: XSleep App\n CompanyName: XSleep Inc.\n CompanyLogo: \"https://api.secure.xsleep.com/html/img/cover.png\"\n CompanyLogoImgTag: \"\"\n DiaryNoTitle:\n - No title\n - 无题\n\nStrings:\n ### each string key will be generated as a method, name convention as string name, underscored with its format pattern like string_name_value and its value would look like \"string named as %1$s value as %2$s\n HourMeasure:\n - Hours\n - 小时\n MinuteMeasure:\n - Minutes\n - 分钟\n\n Sleep:\n Set:\n Hours:\n # It's for exception, need to omit the format parameters\n toMuch:\n - \"%s %s of sleep might be too much, did you want to make adjustment?\"\n - \"%s%s的睡眠时间是不是有点夸张了?\"\n toLess:\n - \"%s %s of sleep might be too less, did you want to make adjustment?\"\n - \"%s%s的睡眠时间是不是有点不够把?\"\n Analysis:\n Color:\n - NotEnough: \"#F08080\" #lightcoral\n JustRight: \"#5d8aa8\" # air force blue\n TooMuch: \"#FF8C00\" # DarkOrange\n Incomplete: \"#DC143C\" #Crimson\n Desc:\n - NotEnough: You don't seem to have enough sleep!\n JustRight: You must have got a very good dream, mind to share?\n TooMuch: \"You seem to have overslept!\"\n Incomplete: You might have forgot to end your sleep?\n - NotEnough: 你好象睡得太少了呀!\n JustRight: 你肯定作了个很好的梦,记得和大家分享哦?\n TooMuch: 你可能睡过头了?\n Incomplete: 你可能忘了打卡?\n'''\nif __name__ == '__main__':\n import sys\n import argparse\n\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--yaml', default=\"strings.yaml\",\n help='Specify a YAML file containing the all string variable info')\n 
parser.add_argument('--output', default=\"./\",\n                        help='Specify where to save the generated dart files; it will be a path relative to the YAML file.')\n    parser.add_argument('--helper', default=\"S\",\n                        help='Specify helper class name')\n    parser.add_argument('--interface', default=\"TI\",\n                        help='Specify interface class name')\n    parser.add_argument('--static', default=\"R\",\n                        help='Specify the name that can be referenced from the outside world')\n    parser.add_argument('-I', '--interface_only', action='store_true', default=False,\n                        help='Save to an individual interface class file without generating others')\n\n    parser.add_argument('--example', action='store_true',\n                        help='show an example YAML')\n    args = parser.parse_args()\n    if args.example:\n        print(SAMPLE_YAML)\n        sys.exit(1)\n    used = \" \".join(sys.argv[1:])\n    if not os.path.exists(args.output):\n        os.makedirs(args.output, exist_ok=True)\n\n    main(args.yaml, args.output, args.helper, args.interface, args.static, args.interface_only,\n         ARGS=used)\n","repo_name":"whowillcare/flutter_bloc_generator","sub_path":"i18n/l18n_gen.py","file_name":"l18n_gen.py","file_ext":"py","file_size_in_byte":14831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8375344168","text":"\"\"\"\nTopic: Intersection of two dictionaries\n\"\"\"\n\ndict_1 = {\"first_name\": \"Leo\", \"last_name\": \"Topno\", \"college\" : \"IIT Guwahati\", \"Degree\": \"B.Tech\"}\ndict_2 = {\"first_name\": \"John\", \"last_name\" : \"Paul\", \"designation\":\"Group Manager\", \"location\": \"Chennai\"}\n\n# intersection = [\"first_name\", \"last_name\"]\n\n# solution approach: 1\nintersection_1 = []\nfor key in dict_1.keys(): #dict_1 -> dict_1.keys()\n    if key in dict_2.keys():\n        intersection_1.append(key)\n\nprint(intersection_1)\n\n\n# solution approach: 2 -> iterating the dicts directly, same result as sol_1\nintersection_2 = []\nfor key in dict_1:\n    if key in dict_2:\n        intersection_2.append(key)\n\nprint(intersection_2)\n\n\n# solution approach: 3 -> using a list comprehension, same as sol_2\nintersection_3 = [key for key in dict_1 if key in dict_2]\nprint(intersection_3)\n\n\n# ============= Something Extra: How to access dict views\nprint(dict_1.items())\nprint(dict_1.keys())\nprint((dict_1.values()))","repo_name":"LeonardTopno/python-coding-practice","sub_path":"taking-interview/2_ques_set_dicts.py","file_name":"2_ques_set_dicts.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15866770810","text":"import os\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_historical(symbol: str):\n    url = 'https://www.nasdaq.com/symbol/%s/historical' % symbol\n    css = '#quotes_content_left_pnlAJAX > table'\n\n    r = requests.get(url)\n    soup = BeautifulSoup(r.content, 'lxml')\n    table = soup.select(css).pop()\n\n    df: pd.DataFrame = pd.read_html(str(table), header=0, index_col=0).pop()\n    df.columns = ['Open', 'High', 'Low', 'Last', 'Volume']\n    df = df.dropna(0)\n\n    return df\n\n\nif __name__ == '__main__':\n\n    citi: pd.DataFrame = pd.DataFrame()\n\n    datafile = os.path.join(os.getcwd(), 'data', 'citi.csv')\n    if not os.path.exists(datafile):\n        citi = get_historical('c')\n        citi.to_csv(datafile)\n    else:\n        citi = pd.read_csv(datafile, index_col=0, header=0)\n","repo_name":"crudalex/microsoft-r","sub_path":"python/citi.py","file_name":"citi.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
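A set-based variant of the dictionary-intersection snippet above; this is an illustrative sketch added here, not part of the original record. Python dict views support set operations, so the shared keys can be computed without an explicit loop:

dict_1 = {"first_name": "Leo", "last_name": "Topno", "college": "IIT Guwahati"}
dict_2 = {"first_name": "John", "last_name": "Paul", "location": "Chennai"}
intersection_4 = sorted(dict_1.keys() & dict_2.keys())  # dict views behave like sets
print(intersection_4)  # ['first_name', 'last_name']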
+{"seq_id":"3141211935","text":"class St:\n    \"\"\" Simulates a segment table backed by paged physical memory.\n\n    Attributes:\n        _pm (:obj:`list` of int): List of integers, which represents physical memory.\n        _disk (:obj:`list` of :obj:`list` of int): List of list of integers, which represents disk blocks.\n        _availableFrames (:obj:`list` of int): List of integers that represent available frames.\n\n    \"\"\"\n\n    def __init__(self, pm: list, disk: list, af: list):\n        self._pm = pm\n        self._disk = disk\n        self._availableFrames = af\n\n    def addSeg(self, segN: int, segL: int, fraN: int):\n        \"\"\" Function that adds a segment to physical memory.\n\n        Args:\n            segN (int): Segment number\n            segL (int): Segment length\n            fraN (int): Frame number\n\n        Returns:\n            None\n        \n        \"\"\"\n        self._pm[2*segN] = segL\n        self._pm[(2*segN)+1] = fraN\n        if (fraN > 0):\n            self._availableFrames.pop(self._availableFrames.index(fraN))\n        return None\n\n    def frameNum(self, s: int) -> int:\n        \"\"\" Function that returns the frame number of a given segment's page table.\n\n        Args:\n            s (int): Segment number\n\n        Returns:\n            int: Frame number of segment s's page table\n\n        Raises:\n            ValueError in the event that a segment does not have a page table.\n\n        \"\"\"\n        fn = self._pm[(2*s)+1]\n        if fn is None:\n            raise ValueError\n        elif (fn < 0):\n            next = self._availableFrames.pop(0)\n            self._pm[(2*s)+1] = next\n            for i in range(512):\n                self._pm[(next*512)+i] = self._disk[-1*fn][i]\n            fn = next\n        return fn\n","repo_name":"awang48/CS143B-Proj2","sub_path":"st.py","file_name":"st.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
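A minimal driver for the St segment table above (a sketch with made-up sizes: four 512-word frames and eight 512-word disk blocks; only the St class itself comes from the original record):

pm = [None] * (512 * 4)               # physical memory: 4 frames of 512 words
disk = [[0] * 512 for _ in range(8)]  # 8 disk blocks of 512 words each
st = St(pm, disk, [2, 3])             # frames 2 and 3 start out free
st.addSeg(0, 3000, 2)                 # segment 0: page table resident in frame 2
st.addSeg(1, 4000, -5)                # segment 1: page table still on disk block 5
print(st.frameNum(0))                 # -> 2 (already resident)
print(st.frameNum(1))                 # -> 3 (demand-loaded into the next free frame)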
+{"seq_id":"21634047510","text":"from __future__ import annotations\n\nimport nox\n\nfrom nox import Session\n\n\nPYTHONS_TESTS = [\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"]\nPYTHONS_BASE = [\"3.10\"]\nnox.options.reuse_existing_virtualenvs = False\n\n\n@nox.session(python=PYTHONS_TESTS)\ndef tests(session: Session) -> None:\n    # Create reports folder for pytest & coverage\n    session.run(\"mkdir\", \"-p\", f\"reports/pytest/{session.name}\", external=True)\n    session.run(\"mkdir\", \"-p\", f\"reports/coverage/{session.name}\", external=True)\n\n    # Remove pytest cache directory\n    session.run(\"rm\", \"-fr\", \".pytest_cache\", external=True)\n\n    # Install tests dependencies\n    session.install(\"pytest\", \"coverage\", \"pymongo\")\n\n    # Run pytest without dateparser (note: a comma was missing after the --junitxml\n    # argument, which silently concatenated it with --junit-prefix into one argument)\n    session.run(\n        \"coverage\",\n        \"run\",\n        f\"--data-file=./reports/coverage/{session.name}/.coverage\",\n        \"-m\",\n        \"pytest\",\n        f\"--junitxml=./reports/pytest/{session.name}/junit.xml\",\n        f\"--junit-prefix={session.name}\",\n        \"-vv\",\n    )\n\n    # Install dateparser extra dep\n    session.install(\"dateparser\")\n\n    # Run pytest with dateparser\n    session.run(\n        \"coverage\",\n        \"run\",\n        f\"--data-file=./reports/coverage/{session.name}/.coverage.extra\",\n        \"-m\",\n        \"pytest\",\n        f\"--junitxml=./reports/pytest/{session.name}/junit-extra.xml\",\n        f\"--junit-prefix={session.name}\",\n        \"-vv\",\n    )\n\n    # Combine coverage\n    session.run(\n        \"coverage\",\n        \"combine\",\n        \"--append\",\n        f\"--data-file=./reports/coverage/{session.name}/.coverage.full\",\n        f\"./reports/coverage/{session.name}/.coverage\",\n        f\"./reports/coverage/{session.name}/.coverage.extra\",\n    )\n\n    # Generate coverage HTML result\n    session.run(\n        \"coverage\",\n        \"html\",\n        f\"--data-file=./reports/coverage/{session.name}/.coverage.full\",\n        f\"--directory=./reports/coverage/{session.name}/html\",\n    )\n\n    # Generate coverage XML result (the -o flag and its value must be separate arguments)\n    session.run(\n        \"coverage\",\n        \"xml\",\n        f\"--data-file=./reports/coverage/{session.name}/.coverage.full\",\n        \"-o\",\n        f\"./reports/coverage/{session.name}/coverage-{session.name}-full.xml\",\n    )\n\n    # Display coverage report\n    session.run(\n        \"coverage\",\n        \"report\",\n        f\"--data-file=./reports/coverage/{session.name}/.coverage.full\",\n    )\n\n\n@nox.session(python=PYTHONS_BASE)\ndef lints(session: Session) -> None:\n    # Create reports folder for flake8\n    session.run(\"mkdir\", \"-p\", f\"reports/flake8/{session.name}\", external=True)\n\n    # Install lints dependencies\n    session.install(\"pylint\", \"flake8>=4.0.0,<5.0.0\", \"flake8-html==0.4.2\")\n\n    # Run flake8\n    session.run(\n        \"flake8\",\n        \"mongo_queries_manager\",\n        \"--format=html\",\n        f\"--htmldir=./reports/flake8/{session.name}\",\n    )\n\n    # Run pylint\n    session.run(\"pylint\", \"mongo_queries_manager\")\n\n\n@nox.session(python=PYTHONS_BASE)\ndef formats(session: Session) -> None:\n    # Install formats dependencies\n    session.install(\"black\", \"isort\")\n\n    # Run black\n    session.run(\"black\", \"mongo_queries_manager\", \"--check\")\n\n    # Run isort\n    session.run(\"isort\", \"mongo_queries_manager\", \"--check\")\n\n\n@nox.session(python=PYTHONS_BASE)\ndef types(session: Session) -> None:\n    # Install types dependencies\n    session.install(\"dateparser\", \"mypy\", \"types-dateparser\")\n\n    # Remove mypy cache directory\n    session.run(\"rm\", \"-fr\", \".mypy_cache\", external=True)\n\n    # Run mypy\n    session.run(\"mypy\", \"mongo_queries_manager\")\n","repo_name":"comic31/MongoDBQueriesManager","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"6434194438","text":"from django.urls import path\n\nfrom service.apps import ServiceConfig\nfrom service.views import MailingsView, MailingCreateView, MailingUpdateView, ClientsView, \\\n    ClientCreateView, ClientUpdateView, ClientDeleteView, MailingDeleteView, MessagesView, MessageDeleteView, \\\n    MessageUpdateView, MessageCreateView, BlogView, BlogPostCreateView, BlogPostDetailView, BlogPostUpdateView, \\\n    BlogPostDeleteView, MainView, ToggleAccount, ToggleMailing\n\napp_name = ServiceConfig.name\n\nurlpatterns = [\n    path('', MainView.as_view(template_name='service/main.html'), name='main'),\n\n\n    path('mailings/', MailingsView.as_view(), name='mailing_list'),\n    path('mailings/create/', MailingCreateView.as_view(), name='mailing_create'),\n    path('mailings/update//', MailingUpdateView.as_view(), name='mailing_update'),\n    path('mailings/delete//', MailingDeleteView.as_view(), name='mailing_delete'),\n    path('mailings/', ToggleMailing.as_view(), name='toggle_mailing'),\n\n    path('clients/', ClientsView.as_view(), name='clients'),\n    path('clients/create/', ClientCreateView.as_view(), name='clients_create'),\n    path('clients/update//', ClientUpdateView.as_view(), name='clients_update'),\n    path('clients/delete//', ClientDeleteView.as_view(), name='clients_delete'),\n    path('clients/', ToggleAccount.as_view(), name='toggle_account'),\n\n    path('messages/', MessagesView.as_view(), name='messages'),\n    path('messages/create/', MessageCreateView.as_view(), name='messages_create'),\n    path('messages/update//', MessageUpdateView.as_view(), name='messages_update'),\n    path('messages/delete//', MessageDeleteView.as_view(), name='messages_delete'),\n\n    path('blog/', BlogView.as_view(), name='blog'),\n    path('blog/create_post/', BlogPostCreateView.as_view(), 
name='create_post'),\n    path('blog/post/////', BlogPostDetailView.as_view(), name='post'),\n    path('blog/update_post/////', BlogPostUpdateView.as_view(), name='update_post', ),\n    path('blog/delete_post/////', BlogPostDeleteView.as_view(), name='delete_post'),\n]\n","repo_name":"HowletH1/django_MailingService","sub_path":"service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36708648621","text":"n = int(input())\n\narr = list(range(1, 10))\n\nfor i in range(2, n+1):\n    add = []\n    for j in range(0, 10):\n        for k in arr:\n            newk = k * 10 + j\n            if newk % i == 0:\n                add.append(newk)\n    arr = add\n\n\nprint(len(arr))\n\n","repo_name":"nxson2004/Kattis-Solutions","sub_path":"luckynumber.py","file_name":"luckynumber.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17066131609","text":"'''\nWrite a program that asks the user for an integer value and determines whether it is even or odd.\n'''\ndef esPar(num):\n    try:\n        return \"Even\" if num%2==0 else \"Odd\"\n    except Exception:\n        return \"Invalid input value.\"\n\nprint(esPar(int(input(\"Number: \"))))","repo_name":"JonaThanPabonP/problemarioProgramacion","sub_path":"estructurasSeleccion/par.py","file_name":"par.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41913379067","text":"# inp = [2,3,1,1,2,4,2,0,1,1]\r\ninp = [2,3,1,1,4]\r\n# inp = [3,2,1,0,4]\r\n\r\njumps= [0]*(len(inp)+1)\r\n\r\njumps[0]=0\r\n\r\nfor i in range(1,len(inp)):\r\n    jumps[i]=float('inf')\r\n    for j in range(i):\r\n        if i <=j+inp[j]:\r\n\r\n            jumps[i]=min(jumps[i],jumps[j]+1)\r\nprint(jumps)","repo_name":"parag1995/basic_code","sub_path":"minimum_jumps.py","file_name":"minimum_jumps.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74358399626","text":"#https://leetcode.com/problems/maximum-depth-of-binary-tree/\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass Solution:\n    def maxDepth(self, node: TreeNode) -> int:\n        if node is None:\n            return 0\n        else:\n            return 1 + max(self.maxDepth(node.left), self.maxDepth(node.right))\n\ns = Solution()\n\nnine = TreeNode(9, None, None)\nfifteen = TreeNode(15, None, None)\nseven = TreeNode(7, None, None)\ntwenty = TreeNode(20, fifteen, seven)\nthree = TreeNode(3, nine, twenty)\n\nresult = s.maxDepth(three)\nprint(result)\n","repo_name":"SergeySatunin/leetcode","sub_path":"trees/maximum_depth_of_binary_tree.py","file_name":"maximum_depth_of_binary_tree.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41879848222","text":"\"\"\"\nFrancisco Abimael Oro Estrada\nN.C. 
423115639\nPractice 11 - Activity 5\n\"\"\"\nimport time as tm\n\ndef fact(n):\n    if n == 0 or n==1:\n        return 1\n    else: \n        return n * fact(n - 1)\n    \ndef fact2(n): \n    fact = 1\n    while(n):\n        fact = fact * n\n        n-=1\n    return fact\n    \nif __name__ == '__main__':\n    num = int(input(\"Input a number : \"))\n    start = tm.time()\n    print(f\"The factorial of {num} is : {fact(num)}\")\n    end = tm.time()\n    print(f\"Elapsed time using recursive function : {end - start}\")\n    start = tm.time()\n    print(f\"The factorial of {num} is : {fact2(num)}\")\n    end = tm.time()\n    print(f\"Elapsed time using iterative function : {end - start}\")","repo_name":"francisco-oro/EDA-I","sub_path":"practica11/actividad05.py","file_name":"actividad05.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14993290241","text":"from db.models import Student\nimport cv2\nfrom pyzbar import pyzbar\n\ndef read_barcodes():\n    \n    cam = cv2.VideoCapture(0)\n    barcode_info = ''\n    while True:\n        result, frame = cam.read()\n\n        cv2.imshow('Barcode Capture', frame)\n\n        barcodes = pyzbar.decode(frame)\n        \n        key = cv2.waitKey(10)\n\n        if key & 0xFF == ord('q'):\n            break\n\n        if len(barcodes) == 1:\n            barcode_info = barcodes[0].data.decode('utf-8')\n            break\n        else:\n            continue\n\n    cam.release()\n    cv2.destroyAllWindows()\n    return barcode_info\n\n\ndef get_reg_number():\n    student_reg_number = input(\"Enter Student's registration number: \")\n\n    try:\n        Student.objects.get(reg_number=student_reg_number)\n    except Student.DoesNotExist:\n        print(\"Student Enrollment\")\n        first_name = input(\"Enter student's first name: \")\n        last_name = input(\"Enter student's last name: \")\n        level_of_study = input(\"Enter student's level of study: \")\n        new_student = Student(reg_number=student_reg_number, first_name=first_name,\n                              last_name=last_name, level_of_study=level_of_study)\n        new_student.save()\n    else:\n        print(\"Update Student Enrollment\")\n        student = Student.objects.get(reg_number=student_reg_number)\n        print(student)\n\n    return student_reg_number.replace(\"/\", \"_\")\n\ndef get_reg_number_verify():\n\n    \"\"\"Function for getting student registration number for verification\"\"\"\n\n    # student_reg_number = input(\"Enter Student's registration number: \")\n    student_reg_number = read_barcodes()\n\n    try:\n        Student.objects.get(reg_number=student_reg_number)\n    except Student.DoesNotExist:\n        print(\"Student not enrolled yet\")\n        return True\n    else:\n        print(\"Student verification\")\n        student = Student.objects.get(reg_number=student_reg_number)\n        print(student)\n\n    return student_reg_number.replace(\"/\", \"_\")\n\n","repo_name":"mebi36/TAMS","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"29297601960","text":"import json\nimport common\nfrom common import CocoPart\n\ndef produceSkeletonData(frameNumber, humans, image):\n    image_h, image_w = image.shape[:2]\n    skeleton = {\n        \"frameNumber\":frameNumber,\n        \"score\": 0,\n        \"keypoints\": []\n    }\n\n    for human in humans:\n        for i in range(common.CocoPart.Background.value):\n            if i not in human.body_parts.keys():\n                continue\n            else:\n                body_part = human.body_parts[i]\n                keypoint = {\n                    \"position\":{\n                        \"x\": body_part.x * image_w,\n                        \"y\": body_part.y * image_h  \n                    },\n                    \"partName\":CocoPart(body_part.part_idx).name,\n                    \"score\": body_part.score\n                }  \n                skeleton['keypoints'].append(keypoint)\n        skeleton['score'] = human.score\n\n    return 
skeleton\n","repo_name":"alvarozornoza/poses_comparator","sub_path":"tf-pose-estimation/src/exportData.py","file_name":"exportData.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17467452539","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n#----------------------------------------------------------------\n# Implementation of Early Stopping to be used in LSTM Autoencoder\n# source : https://github.com/Bjarten/early-stopping-pytorch\n\nclass EarlyStopping:\n \"\"\"\n Early stops the training if validation loss doesn't improve after a given patience.\n \"\"\"\n def __init__(self, patience=20, verbose=False, delta=-0.00001):\n \"\"\"\n Parameters\n ----------\n patience : int, default 7\n How long to wait after last time validation loss improved.\n verbose : bool, default False\n If True, prints a message for each validation loss improvement.\n delta : float, default 0\n Minimum change in the monitored quantity to qualify as an improvement.\n \"\"\"\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n\n def __call__(self, val_loss, model):\n score = -val_loss\n\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n elif score < self.best_score - self.delta:\n self.counter += 1\n if self.verbose:\n print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n print(f\"Early Stopping activated. Final validation loss : {self.val_loss_min:.7f}\")\n self.early_stop = True\n # if the current score does not exceed the best scoee, run the following code below\n else: \n self.best_score = score\n self.save_checkpoint(val_loss, model)\n self.counter = 0\n\n def save_checkpoint(self, val_loss, model):\n '''\n Saves model when validation loss decrease.\n '''\n if self.verbose:\n print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')\n torch.save(model.state_dict(), './checkpoint.pt')\n self.val_loss_min = val_loss\n\n#-----------------\n# LSTM Autoencoder\n# code inspired by https://github.com/hellojinwoo/TorchCoder/blob/master/autoencoders/rae.py that\n# was inspired by https://github.com/shobrook/sequitur/blob/master/sequitur/autoencoders/rae.py\n# annotation sourced by https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM\n\n# (1) Encoder\nclass Encoder(nn.Module):\n def __init__(self, seq_len, num_features, embedding_size):\n super().__init__()\n\n self.seq_len = seq_len\n self.num_features = num_features # The number of expected features(= dimension size) in the input x\n self.embedding_size = embedding_size # the size of the resulting embedding (LSTM hidden states)\n self.LSTM = nn.LSTM(\n input_size = num_features,\n hidden_size = embedding_size,\n num_layers = 1,\n batch_first = True\n )\n\n def forward(self, x):\n # Inputs: input, (h_0, c_0). 
-> If (h_0, c_0) is not provided, both h_0 and c_0 default to zero.\n        x, (hidden_state, cell_state) = self.LSTM(x)\n        last_lstm_layer_hidden_state = hidden_state[-1, :, :]\n        return last_lstm_layer_hidden_state\n\n# (2) Decoder\nclass Decoder(nn.Module):\n    def __init__(self, seq_len, num_features, output_size):\n        super().__init__()\n\n        self.seq_len = seq_len\n        self.num_features = num_features\n        self.output_size = output_size\n        self.LSTM = nn.LSTM(\n            input_size = num_features,\n            hidden_size = self.output_size,\n            num_layers = 1,\n            batch_first = True\n        )\n\n    def forward(self, x):\n        x = x.unsqueeze(1).repeat(1, self.seq_len, 1)\n        x, (hidden_state, cell_state) = self.LSTM(x)\n        x = x.reshape((-1, self.seq_len, self.output_size))\n        return x\n\n# (3) Autoencoder: putting the encoder and decoder together\nclass LSTM_AE(nn.Module):\n    def __init__(\n        self, seq_len, num_features, embedding_dim,\n        learning_rate=1e-4,\n        every_epoch_print=False,\n        epochs=100,\n        patience=10,\n        max_grad_norm=1):\n\n        super().__init__()\n        \n        self.seq_len = seq_len\n        self.num_features = num_features\n        self.embedding_dim = embedding_dim\n\n        self.encoder = Encoder(self.seq_len, self.num_features, self.embedding_dim)\n        self.decoder = Decoder(self.seq_len, self.embedding_dim, self.num_features)\n        \n        self.epochs = epochs\n        self.learning_rate = learning_rate\n        self.patience = patience\n        self.max_grad_norm = max_grad_norm\n        self.every_epoch_print = every_epoch_print\n        \n    def forward(self, x):\n        torch.manual_seed(0)\n        encoded = self.encoder(x)\n        decoded = self.decoder(encoded)\n        return encoded, decoded\n    \n    def fit(self, x):\n        \"\"\"\n        Trains the model's parameters over a fixed number of epochs, specified by `epochs`, as long as the loss keeps decreasing.\n        \"\"\"\n        optimizer = torch.optim.Adam(self.parameters(), lr = self.learning_rate)\n        criterion = nn.MSELoss(reduction='mean')\n        self.train()\n        # initialize the early_stopping object\n        early_stopping = EarlyStopping(patience=self.patience, verbose=False)\n\n        for epoch in range(1 , self.epochs + 1):\n            # updating early_stopping's epoch\n            early_stopping.epoch = epoch\n            optimizer.zero_grad()\n            encoded, decoded = self(x)\n            loss = criterion(decoded, x)\n\n            # early_stopping needs the validation loss to check if it has decreased, \n            # and, if it has, it will make a checkpoint of the current model\n            early_stopping(loss, self)\n            \n            if early_stopping.early_stop:\n                break\n\n            # Backward pass\n            loss.backward()\n            nn.utils.clip_grad_norm_(self.parameters(), max_norm = self.max_grad_norm)\n            optimizer.step()\n\n            # guard against the False default, which would otherwise make the modulo fail\n            if self.every_epoch_print and epoch % self.every_epoch_print == 0:\n                print(f\"epoch : {epoch}, loss_mean : {loss.item():.7f}\")\n\n        # load the last checkpoint with the best model\n        self.load_state_dict(torch.load('./checkpoint.pt'))\n\n        # to check the final_loss\n        encoded, decoded = self(x)\n        final_loss = criterion(decoded , x).item()\n        \n        return final_loss\n    \n    def encode(self, x):\n        self.eval()\n        encoded = self.encoder(x)\n        return encoded\n    \n    def decode(self, x):\n        self.eval()\n        decoded = self.decoder(x)\n        squeezed_decoded = decoded.squeeze()\n        return squeezed_decoded\n    \n    def load(self, PATH):\n        \"\"\"\n        Loads the model's parameters from path `PATH`.\n        \"\"\"\n        self.is_fitted = True\n        self.load_state_dict(torch.load(PATH))\n","repo_name":"ger6-illini/dl4h-sp23-team27-project","sub_path":"code/lstm_ae.py","file_name":"lstm_ae.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
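A minimal usage sketch for the LSTM_AE class above; shapes and hyperparameters are invented for illustration, and fit() writes ./checkpoint.pt exactly as the class does:

import torch

x = torch.randn(32, 50, 4)  # (batch, seq_len, num_features)
model = LSTM_AE(seq_len=50, num_features=4, embedding_dim=16,
                epochs=20, patience=5, every_epoch_print=10)
final_loss = model.fit(x)    # trains, early-stops, and reloads the best checkpoint
embedding = model.encode(x)  # returns a (32, 16) tensor of embeddings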
+{"seq_id":"30957269994","text":"from django.db import models\nimport cx_Oracle\nimport os\n\nLOCATION = r\"C:\\oraclexe\\instantclient_19_11\"\nos.environ[\"PATH\"]=LOCATION+\";\"+os.environ[\"PATH\"]\n\ndef getConnection():\n    try:\n        conn=cx_Oracle.connect(\"hr/happy@localhost:1521/xe\")\n    except Exception as e:\n        print(e)\n    return conn\n\ndef recipeListData(page):\n    rowSize=12\n    start=(rowSize*page)-(rowSize-1)\n    end=(rowSize*page)\n    # connect to the database\n    conn=getConnection()\n    cursor=conn.cursor()\n    sql=f\"\"\"\n    SELECT no,title,poster,chef,num \n    FROM (SELECT no,title,poster,chef,rownum as num \n        FROM (SELECT /*+ INDEX_ASC(recipe recipe_no_pk) */ no,title,poster,chef\n            FROM recipe))\n    WHERE num BETWEEN {start} AND {end}\n    \"\"\"\n    cursor.execute(sql)\n    list=cursor.fetchall()\n    print(list)\n    cursor.close()\n    conn.close()\n    return list\n\ndef recipeDetailData(rno):\n    conn=getConnection()\n    cursor=conn.cursor()\n    sql=f\"\"\"\n    SELECT no,poster,chef,chef_poster,title,content,info1,info2,info3,chef_info\n    FROM recipe_make\n    WHERE rno={rno}\n    \"\"\"\n    cursor.execute(sql)\n\n    detail=cursor.fetchone()\n\n    cursor.close()\n    conn.close()\n    return detail\n\ndef recipeTotalPage():\n    conn=getConnection()\n    cursor=conn.cursor()\n    sql=\"SELECT CEIL(COUNT(*)/12.0) FROM recipe\"\n\n    cursor.execute(sql)\n    total=cursor.fetchone()\n    print(\"totalPage:\"+str(total[0]))\n    # close the cursor and the connection\n    cursor.close()\n    conn.close()\n    return total[0]\n\n#recipeTotalPage()\n#recipeListData(1)","repo_name":"rachel5004/DjangoProject","sub_path":"django_recipeapp/recipeapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18513518303","text":"import json\nfrom datetime import datetime\nfrom functools import reduce\n\n\ndef get_klass_sectional_division(sectional_division_versions):\n    response_dict = sectional_division_versions\n    versions_list = response_dict[\"versions\"]\n    newest_version = reduce((\n        lambda a, b:\n        a if datetime.strptime(a[\"validFrom\"], '%Y-%m-%d').date() > datetime.strptime(b[\"validFrom\"], '%Y-%m-%d').date()\n        else b\n    ), versions_list)\n\n    return newest_version[\"_links\"][\"self\"][\"href\"]\n\n\ndef produce_org_info(newest_version_data):\n    response_dict = newest_version_data\n    org_info_list = response_dict[\"classificationItems\"]\n    sections = list(filter(lambda s: s[\"level\"] == '2', org_info_list))\n    simplified_sections = list(map(simplify_sections, sections))\n\n    return simplified_sections\n\n\ndef simplify_sections(section):\n    return {\n        \"code\": section[\"code\"],\n        \"parent_code\": section[\"parentCode\"],\n        \"name\": section[\"name\"]\n    }\n","repo_name":"statisticsnorway/dapla-start-api","sub_path":"server/org_info.py","file_name":"org_info.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19555904150","text":"\n#!/usr/bin/python\n#coding=utf-8\n\n# importing the basic library\nfrom __future__ import print_function\nimport sys\n\nfrom models.ConvNet import * \nfrom layers import *\nfrom data_utils import *\nfrom data_noise import *\nfrom utils import *\nctx = check_ctx()\n\nfrom optim import *\n\n# from fast_progress import master_bar, progress_bar\n\n# importing MxNet >= 1.0\nimport mxnet as mx\nfrom mxnet import ndarray as nd\nfrom mxnet import autograd, gluon\n\nimport random\n\nmx.random.seed(1)\nrandom.seed(1)\n\n\ndef evaluate_accuracy(data_iterator, num_examples, batch_size, params, net, 
pool_type,pool_size,pool_stride):\n    numerator = 0.\n    denominator = 0.\n    for batch_i, (data, label) in enumerate(data_iterator):\n        data = data.as_in_context(ctx).reshape((data.shape[0],1,1,-1))\n        label = label.as_in_context(ctx)\n        label_one_hot = nd.one_hot(label, 10)\n        output, _ = net(data, params,pool_type=pool_type,pool_size = pool_size,pool_stride=pool_stride)\n        predictions = nd.argmax(output, axis=1)\n        numerator += nd.sum(predictions == label)\n        denominator += data.shape[0]\n        print('Evaluating accuracy. (complete percent: %.2f/100' %(1.0 * batch_i / (num_examples//batch_size) * 100) +')' , end='')\n        sys.stdout.write(\"\\r\")\n    return (numerator / denominator).asscalar()\n\n\ndef Solver(train, test, Debug, batch_size, lr\n           , smoothing_constant, num_fc1, num_fc2, num_outputs, epochs, SNR\n           , sl, pool_type ,pool_size ,pool_stride, params_init=None, period=None):\n    \n    num_examples = train.shape[0]\n    # convert the training-set data to NDArrays\n    y = nd.array(~train.sigma.isnull() +0)\n    X = nd.array(Normolise(train.drop(['mass','positions','gaps','max_peak','sigma','SNR_mf','SNR_mf0'],axis=1)))\n    print('Label for training:', y.shape)\n    print('Dataset for training:', X.shape, end='\\n\\n')\n\n    dataset_train = gluon.data.ArrayDataset(X, y)\n    train_data = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True, last_batch='keep')\n\n    y = nd.array(~test.sigma.isnull() +0)\n    X = nd.array(Normolise(test.drop(['mass','positions','gaps','max_peak','sigma','SNR_mf','SNR_mf0'],axis=1)))\n    print('Label for testing:', y.shape)\n    print('Dataset for testing:', X.shape, end='\\n\\n')\n    \n    # Use the data module to load the data and build the test set. (shuffle)\n    dataset_test = gluon.data.ArrayDataset(X, y)\n    test_data = gluon.data.DataLoader(dataset_test, batch_size, shuffle=True, last_batch='keep')\n\n    \n    # Train\n    loss_history = []\n    loss_v_history = []\n    moving_loss_history = []\n    test_accuracy_history = []\n    train_accuracy_history = []\n    \n#     assert period >= batch_size and period % batch_size == 0\n    \n    # Initialize parameters\n    if params_init:\n        print('Loading params...')\n        params = params_init\n\n        [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6] = params\n\n        # random fc layers\n        weight_scale = .01\n        W7 = nd.random_normal(loc=0, scale=weight_scale, shape=(sl, num_fc1), ctx=ctx )\n        W8 = nd.random_normal(loc=0, scale=weight_scale, shape=(num_fc1, num_fc2), ctx=ctx )    \n        W9 = nd.random_normal(loc=0, scale=weight_scale, shape=(num_fc2, num_outputs), ctx=ctx )\n        b7 = nd.random_normal(shape=num_fc1, scale=weight_scale, ctx=ctx)\n        b8 = nd.random_normal(shape=num_fc2, scale=weight_scale, ctx=ctx)    \n        b9 = nd.random_normal(shape=num_outputs, scale=weight_scale, ctx=ctx)   \n\n        params = [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6]\n        print('Random the FC1&2-layers...')\n\n        vs = []\n        sqrs = []    \n        for param in params:\n            param.attach_grad()\n            vs.append(param.zeros_like())\n            sqrs.append(param.zeros_like())    \n    else:\n        params, vs, sqrs = init_params(num_fc1 = 64, num_fc2 = 64, num_outputs = 2, sl=sl)\n        print('Initializing weights from random...')\n\n    # Debug\n    if Debug:\n        print('Debugging...')\n        if params_init:\n            params = params_init\n        else:\n            params, vs, sqrs = init_params(num_fc1 = 64, num_fc2 = 64, num_outputs = 2, sl=sl)\n        for data, _ in train_data:\n            data = data.as_in_context(ctx).reshape((batch_size,1,1,-1))\n            break\n        print(pool_type, pool_size, pool_stride)\n        _, _ = ConvNet(data, params, debug=Debug, pool_type=pool_type,pool_size = pool_size,pool_stride=pool_stride)\n        print()\n    \n#     total_loss = [Total_loss(train_data_10, params, batch_size, num_outputs)]\n    \n    t = 0\n# Epoch 
starts from 1.\n    print('pool_type: ', pool_type)\n    print('pool_size: ', pool_size)\n    print('pool_stride: ', pool_stride)\n    print('sl: ', sl)\n    for epoch in range(1, epochs + 1):\n        Epoch_loss = []\n# learning-rate self-decay.\n        if epoch > 2:\n#             lr *= 0.1\n            lr /= (1+0.01*epoch)\n        for batch_i, ((data, label),(data_v, label_v)) in enumerate(zip(train_data, test_data)):\n            data = data.as_in_context(ctx).reshape((data.shape[0],1,1,-1))\n            label = label.as_in_context(ctx)\n            label_one_hot = nd.one_hot(label, num_outputs)\n            with autograd.record():\n                output, _ = ConvNet(data, params, pool_type=pool_type,pool_size = pool_size,pool_stride=pool_stride)\n                loss = softmax_cross_entropy(output, label_one_hot)\n            loss.backward()\n#             print(output)\n            # params = sgd(params, lr, batch_size)\n\n#             Increment t before invoking adam.\n            t += 1\n            params, vs, sqrs = adam(params, vs, sqrs, lr, batch_size, t)\n\n            data_v = data_v.as_in_context(ctx).reshape((data_v.shape[0],1,1,-1))\n            label_v = label_v.as_in_context(ctx)\n            label_v_one_hot = nd.one_hot(label_v, num_outputs)\n            output_v, _ = ConvNet(data_v, params, pool_type=pool_type,pool_size = pool_size,pool_stride=pool_stride)\n            loss_v = softmax_cross_entropy(output_v, label_v_one_hot)    \n            \n#             #########################\n#             Keep a moving average of the losses\n#             #########################\n            curr_loss = nd.mean(loss).asscalar()\n            curr_loss_v = nd.mean(loss_v).asscalar()\n            moving_loss = (curr_loss if ((batch_i == 0) and (epoch-1 == 0))\n                           else (1 - smoothing_constant) * moving_loss + (smoothing_constant) * curr_loss)\n\n            loss_history.append(curr_loss)\n            loss_v_history.append(curr_loss_v)\n            moving_loss_history.append(moving_loss)\n            Epoch_loss.append(curr_loss)\n#             if batch_i * batch_size % period == 0:\n#                 print('Curr_loss: ', curr_loss)\n            \n            print('Working on epoch %d. Curr_loss: %.5f (complete percent: %.2f/100' %(epoch, curr_loss*1.0, 1.0 * batch_i / (num_examples//batch_size) * 100) +')' , end='')\n            sys.stdout.write(\"\\r\")\n        # print('{\"metric\": \"Training Loss for ALL\", \"value\": %.5f}' %(curr_loss*1.0) )\n        # print('{\"metric\": \"Testing Loss for ALL\", \"value\": %.5f}' %(curr_loss_v*1.0) )\n#         print('{\"metric\": \"Training Loss for SNR=%s\", \"value\": %.5f}' %(str(SNR), curr_loss*1.0) )\n#         print('{\"metric\": \"Testing Loss for SNR=%s\", \"value\": %.5f}' %(str(SNR), curr_loss_v*1.0) )\n        train_accuracy = evaluate_accuracy(train_data, num_examples, batch_size, params, ConvNet,pool_type=pool_type,pool_size = pool_size,pool_stride=pool_stride)\n        test_accuracy = evaluate_accuracy(test_data, num_examples, batch_size, params, ConvNet,pool_type=pool_type,pool_size = pool_size,pool_stride=pool_stride)\n        test_accuracy_history.append(test_accuracy)\n        train_accuracy_history.append(train_accuracy)\n\n\n        print(\"Epoch %d, Moving_loss: %.6f, Epoch_loss(mean): %.6f, Train_acc %.4f, Test_acc %.4f\" %\n              (epoch, moving_loss, np.mean(Epoch_loss), train_accuracy, test_accuracy))\n#         print('{\"metric\": \"Train_acc. for SNR=%s in epoches\", \"value\": %.4f}' %(str(SNR), train_accuracy) )\n#         print('{\"metric\": \"Test_acc. 
for SNR=%s in epoches\", \"value\": %.4f}' %(str(SNR), test_accuracy) )\n        yield (params, loss_history, loss_v_history, moving_loss_history, test_accuracy_history, train_accuracy_history)\n    \n    \n    \ndef predict(data, net, params):\n\n    X = nd.array(Normolise(data.drop(['mass','positions','gaps','max_peak','sigma','SNR_mf','SNR_mf0'],axis=1)))\n    # num_examples = data.shape[0]\n    # batch_size = [2**i for i in range(10) if num_examples%(2**i) == 0][-1]\n    # print('Batch size = %s' %batch_size)\n    data = nd.array(X).as_in_context(ctx).reshape((-1,1,1,8192))\n    output, interlayer = net(data, params)\n    prob = transform_softmax(output)[:,1].asnumpy().tolist()[0]\n    return prob, interlayer\n\n\n\n    \ndef predict_(data, net, params):\n\n    X = nd.array(Normolise(data))\n    # num_examples = data.shape[0]\n    # batch_size = [2**i for i in range(10) if num_examples%(2**i) == 0][-1]\n    # print('Batch size = %s' %batch_size)\n    data = nd.array(X).as_in_context(ctx).reshape((-1,1,1,8192))\n    output, _ = net(data, params)\n    prob = transform_softmax(output)[:,1].asnumpy().tolist()[0]\n    return prob, output\n\n\n\nif __name__ == '__main__':\n    print('CPU or GPU? : ', ctx)\n    \n    \n    \nclass Solver_nd(object):\n    \n    def __init__(self, model, train, test, SNR, **kwargs):\n        self.model = model\n        self.num_channel = model.input_dim[0]\n        self.train_ori = train\n        self.test_ori = test\n        self.peak_samppoint, self.peak_time = cal_peak_nd(train)\n        self.train_shift_list = []\n        self.test_shift_list = []    \n\n        try:\n            assert self.train_ori.shape == self.test_ori.shape\n        except:\n            print('self.train_ori.shape != self.test_ori.shape')\n        \n        self.SNR = SNR\n\n        # self.update_rule = kwargs.pop('update_rule', 'sgd')\n        # self.optim_config = kwargs.pop('optim_config', {})\n\n        self.batch_size = kwargs.pop('batch_size', 256)\n        self.lr_rate = kwargs.pop('lr_rate', 0.01)\n        self.lr_decay = kwargs.pop('lr_decay', 0.01)\n        self.num_epoch = kwargs.pop('num_epoch', 10)\n        self.smoothing_constant = kwargs.pop('smoothing_constant', 0.01)\n        \n        self.save_checkpoints_address = kwargs.pop('save_checkpoints_address', './checkpoints/')\n        self.checkpoint_name = kwargs.pop('checkpoint_name', None)\n        self.verbose = kwargs.pop('floydhub_verbose', False)\n        self.oldversion = kwargs.pop('oldversion', False)\n        # self.print_every = kwargs.pop('print_every', 100)\n        \n        self.params = kwargs.pop('params', None) # Transfer learning\n        self.RandMLP = kwargs.pop('RandMLP', None)\n        if self.params: # if transfer learning is enabled\n            self.params = self.params.copy()\n            try: # check that the imported parameter variables match the model's own parameters\n                assert all(np.allclose(p1.asnumpy(), p2.asnumpy()) for (_,p1), (_,p2) in zip(self.params.items(), model.params.items()))\n\n            except:\n                print('Imported parameters do not match the model defaults')\n                raise\n            self._reset_params_Transfer()\n        else:\n            self._reset_params()\n\n        if len(kwargs) != 0:\n            extra = ', '.join('\"%s\"' %k for k in list(kwargs.keys()))\n            raise ValueError('Unrecognized arguments %s' % extra)\n        \n#         if not hasattr(optim, self.update_rule):\n#             raise ValueError('Unrecognized update rule: \"%s\"' % self.update_rule)\n#         self.update_rule = getattr(optim, self.update_rule)\n        \n        if self.oldversion:\n            self._reset_data_old()\n        else:\n            self._random_data()\n            self._reset_data()\n        print('SNR = %s' %self.SNR)\n        print('Label for training:', self.y_train.shape)\n        print('Label for testing:', self.y_test.shape)\n\n\n\n    def _reset_data_old(self):\n        try:\n            assert self.train_ori.shape[1] == self.test_ori.shape[1]\n        except:\n            print('self.train_ori.shape[1] != self.test_ori.shape[1],',self.train_ori.shape[1],self.test_ori.shape[1])\n        self.train_ori_size = 
self.train_ori.shape[0]\n self.test_ori_size = self.test_ori.shape[0]\n \n y = nd.array(~self.train_ori.sigma.isnull() +0)\n X = nd.array(Normolise(self.train_ori.drop(['mass','positions','gaps','max_peak','sigma','SNR_mf','SNR_mf0'],axis=1)))\n print('Label for training:', y.shape)\n print('Dataset for training:', X.shape, end='\\n\\n')\n\n dataset_train = gluon.data.ArrayDataset(X, y)\n self.train_data = gluon.data.DataLoader(dataset_train, self.batch_size, shuffle=True, last_batch='keep')\n\n y = nd.array(~self.test_ori.sigma.isnull() +0)\n X = nd.array(Normolise(self.test_ori.drop(['mass','positions','gaps','max_peak','sigma','SNR_mf','SNR_mf0'],axis=1)))\n print('Label for testing:', y.shape)\n print('Dataset for testing:', X.shape, end='\\n\\n')\n\n dataset_test = gluon.data.ArrayDataset(X, y)\n self.test_data = gluon.data.DataLoader(dataset_test, self.batch_size, shuffle=True, last_batch='keep') \n\n\n def _random_data(self):\n \n # print('Random data!!')\n self.train, train_shift_list = shuffle_data_nd(self.train_ori,self.peak_samppoint, self.peak_time, 2)\n self.test, test_shift_list = shuffle_data_nd(self.test_ori,self.peak_samppoint, self.peak_time, 2)\n\n self.train_shift_list.extend(train_shift_list.asnumpy().tolist())\n self.test_shift_list.extend(test_shift_list.asnumpy().tolist())\n self.train = self.train.reshape(self.train_ori.shape[0]*2,self.num_channel,-1).as_in_context(ctx)\n self.test = self.test.reshape(self.test_ori.shape[0]*2,self.num_channel,-1).as_in_context(ctx)\n\n\n def _reset_data(self):\n \n try:\n assert self.train.shape[1] == self.test.shape[1]\n except:\n print('self.train.shape[1] != self.test.shape[1]')\n \n self.train_size = self.train.shape[0]\n self.test_size = self.test.shape[0]\n self.noiseAll_size = self.train_size+self.test_size\n\n# self.param_noise = Pre_zero(size = (noiseAll_size,) + (self.train.shape[1:]))\n self.b = nd.array(pre_fir().reshape((-1,1)), ctx=ctx)\n self.pp = pre_fftfilt(self.b, shape = (self.noiseAll_size, self.train.shape[-1]), nfft=None)\n \n self.y_train = nd.concat(nd.ones(shape = (self.train_size,), ctx = ctx), nd.zeros(shape = (self.train_size,), ctx = ctx) , dim = 0)\n self.y_test = nd.concat(nd.ones(shape = (self.test_size,), ctx = ctx), nd.zeros(shape = (self.test_size,), ctx = ctx) , dim = 0)\n\n\n def _reset_params_Transfer(self):\n self.epoch = 0\n self.best_test_acc = 0\n self.best_params = {}\n self.moving_loss = 0\n\n self.train_acc_history = []\n self.test_acc_history = []\n\n self.loss_history = []\n self.loss_v_history = []\n self.moving_loss_history = []\n\n# self.optim_configs = {}\n# for p in self.model.params:\n# d = {k: v for k, v in self.optim_config.items()}\n# self.optim_configs[p] = d \n\n\n # Opt. 
for Adam ############\n self.vs = []\n self.sqrs = []\n \n # Transfer Learning ########\n self.model.init_params()\n for key, params in self.params.items():\n if (params.shape[0] == self.model.flatten_dim) and (self.RandMLP):\n break\n self.model.params[key] = params.copy()\n \n if self.RandMLP:\n print('(Transfer Learning) Random the MLP part!')\n else:\n print('(Transfer Learning) NOT random the MLP part!')\n print('------------')\n # And assign space for gradients\n for param in self.model.params.values():\n param.attach_grad()\n self.vs.append(param.zeros_like())\n self.sqrs.append(param.zeros_like()) \n\n def _reset_params(self):\n self.epoch = 0\n self.best_test_acc = 0\n self.best_params = {}\n self.moving_loss = 0\n\n self.train_acc_history = []\n self.test_acc_history = []\n\n self.loss_history = []\n self.loss_v_history = []\n self.moving_loss_history = []\n \n# self.optim_configs = {}\n# for p in self.model.params:\n# d = {k: v for k, v in self.optim_config.items()}\n# self.optim_configs[p] = d \n\n\n # Opt. for Adam ############\n self.vs = []\n self.sqrs = []\n \n # And assign space for gradients\n for param in self.model.params.values():\n param.attach_grad()\n self.vs.append(param.zeros_like())\n self.sqrs.append(param.zeros_like())\n print('------------')\n\n \n\n\n def Training(self, Iterator = False):\n \n t = 0 \n try:\n# self.mb = master_bar(range(1, self.num_epoch + 1))\n# self.mb.names = ['loss', 'loss_var']\n \n # if self.oldversion: pass\n # else: self._reset_noise()\n for epoch in range(1, self.num_epoch + 1):\n# for epoch in self.mb:\n self.epoch = epoch\n self.lr_rate = lr_decay(self.lr_rate, epoch, self.lr_decay)\n\n self._random_data()\n\n if self.oldversion: pass\n else: self._reset_noise()\n\n self._iteration(t, epoch)\n \n self.train_acc_history.append(self.check_acc(self.train_data))\n val_acc = self.check_acc(self.test_data)\n self.test_acc_history.append(val_acc)\n\n\n if val_acc >= self.best_test_acc:\n self.best_test_acc = val_acc\n self.best_params = {}\n self.best_params_epoch = 0\n self.findabest = 0\n for k, v in self.model.params.items():\n self.best_params[k] = v.copy()\n self.best_params_epoch = epoch\n self.findabest = 1\n\n if self.verbose:\n print('{\"metric\": \"Train_acc. for SNR=%s in epoches\", \"value\": %.4f}' %(str(self.SNR), self.train_acc_history[-1]) )\n print('{\"metric\": \"Test_acc. 
for SNR=%s in epochs\", \"value\": %.4f}' %(str(self.SNR), self.test_acc_history[-1]) )\n else:\n print(\"Epoch {:d}, Moving_loss: {:.6f}, Epoch_loss(mean): {:.6f}, Train_acc {:.4f}, Test_acc {:.4f}(Best:{:.4f})\".format(epoch, self.moving_loss_history[-1], np.mean(self.Epoch_loss), self.train_acc_history[-1], self.test_acc_history[-1], self.best_test_acc))\n \n self._save_checkpoint()\n\n# self.mb.first_bar.comment = f'first bar stat'\n# self.mb.write(f'Finished loop {epoch}')\n \n# if Iterator:\n# yield self.loss_history, self.loss_v_history, self.moving_loss_history, self.train_acc_history, self.test_acc_history\n\n except KeyboardInterrupt as e:\n print(e)\n print('Early stopping at epoch=%s' %str(epoch))\n\n self.model.params = self.best_params\n print('Finished!')\n \n def _iteration(self, t, epoch):\n \n self.Epoch_loss = []\n\n \n for batch_i, ((data, label),(data_v, label_v)) in enumerate(zip(self.train_data, self.test_data,)):\n\n loss = self.loss(data, label, train = True)\n loss_v, _ = self.loss(data_v, label_v, train = False)\n# print(loss)\n # Increment t before invoking adam.\n t += 1\n self.model.params, self.vs, self.sqrs = adam(self.model.params, self.vs, self.sqrs, self.lr_rate, self.batch_size, t)\n \n # Keep a moving average of the losses\n curr_loss = nd.mean(loss).asscalar()\n curr_loss_v = nd.mean(loss_v).asscalar()\n self.moving_loss = (curr_loss if ((batch_i == 0) and (epoch-1 == 0))\n else (1 - self.smoothing_constant) * self.moving_loss + (self.smoothing_constant) * curr_loss)\n\n self.loss_history.append(curr_loss)\n self.loss_v_history.append(curr_loss_v)\n self.moving_loss_history.append(self.moving_loss)\n self.Epoch_loss.append(curr_loss)\n\n if self.verbose:\n print('{\"metric\": \"Training Loss for ALL\", \"value\": %.5f}' %(curr_loss*1.0) )\n print('{\"metric\": \"Testing Loss for ALL\", \"value\": %.5f}' %(curr_loss_v*1.0) )\n print('{\"metric\": \"Training Loss for SNR=%s\", \"value\": %.5f}' %(str(self.SNR), curr_loss*1.0) )\n print('{\"metric\": \"Testing Loss for SNR=%s\", \"value\": %.5f}' %(str(self.SNR), curr_loss_v*1.0) ) \n else:\n print('Working on epoch {:d}. 
Curr_loss: {:.5f} (complete percent: {:.2f}/100)'.format(epoch, curr_loss*1.0, 1.0 * batch_i / (self.train_size/self.batch_size) * 100/ 2) , end='')\n sys.stdout.write(\"\\r\")\n \n# x = np.arange(1, len(self.loss_history)+1,1)\n# graphs = [[x, self.loss_history], [x, self.loss_v_history]]\n# x_bounds = [0, 12]\n# y_bounds = [0, 1]\n# self.mb.update_graph(graphs, x_bounds, y_bounds)\n# self.mb.child.comment = f'second bar stat'\n\n\n\n def loss(self, data, label, train=True):\n data = data.as_in_context(ctx).reshape((data.shape[0],self.num_channel,1,-1))\n label = label.as_in_context(ctx)\n label_one_hot = nd.one_hot(label, self.model.output_dim)\n \n if train:\n with autograd.record():\n output, _ = self.model.network(X=data)\n loss = softmax_cross_entropy(output, label_one_hot)\n loss.backward()\n return loss\n else:\n output, _ = self.model.network(X=data)\n loss = softmax_cross_entropy(output, label_one_hot)\n return loss, output\n \n \n\n def gen_noise(self):\n \n# if ctx == mx.gpu():\n# noise, _ = TimeseriesFromPSD_nd(self.param_noise)\n# elif ctx == mx.cpu():\n# noise, _ = TimeseriesFromPSD(self.param_noise)\n# noise = nd.array(noise)\n if ctx == mx.gpu():\n noise = GenNoise_matlab_nd(shape = (self.noiseAll_size, self.train.shape[-1]), params = self.pp)\n # print('Random noise!!')\n nd.save('./noise',noise[:10])\n else:\n raise\n \n \n return noise\n\n \n\n def _reset_noise(self):\n\n # noise for mixing\n noise = self.gen_noise().reshape(shape= (self.noiseAll_size,) + (self.train.shape[1:]))\n\n try: sigma = self.train.max(axis = 2) / float(self.SNR) / nd.array(noise[:self.train_size].asnumpy().std(axis = 2,dtype='float64'),ctx=ctx)\n except: sigma = self.train.max(axis = -1) / float(self.SNR) / nd.array(noise[:self.train_size].asnumpy().std(axis = -1,dtype='float64'),ctx=ctx) \n self.sigma = sigma\n signal_train = nd.divide(self.train, sigma.reshape((self.train_size,self.num_channel,-1)))\n data_train = signal_train + noise[:self.train_size]\n\n try: sigma = self.test.max(axis = 2) / float(self.SNR) / nd.array(noise[-self.test_size:].asnumpy().std(axis = 2,dtype='float64'),ctx=ctx)\n except: sigma = self.test.max(axis = -1) / float(self.SNR) / nd.array(noise[-self.test_size:].asnumpy().std(axis = -1,dtype='float64'),ctx=ctx)\n signal_test = nd.divide(self.test, sigma.reshape((self.test_size,self.num_channel,-1)))\n data_test = signal_test + noise[-self.test_size:]\n \n\n # noise for the pure counterpart\n noise = self.gen_noise().reshape(shape= (self.noiseAll_size,) + (self.train.shape[1:]))\n\n X_train = Normolise_nd(nd.concat(data_train, noise[:self.train_size], dim=0), self.num_channel)\n\n dataset_train = gluon.data.ArrayDataset(X_train, self.y_train)\n self.train_data = gluon.data.DataLoader(dataset_train, self.batch_size, shuffle=True, last_batch='keep')\n \n X_test = Normolise_nd(nd.concat(data_test, noise[-self.test_size:], dim=0), self.num_channel)\n dataset_test = gluon.data.ArrayDataset(X_test, self.y_test)\n self.test_data = gluon.data.DataLoader(dataset_test, self.batch_size, shuffle=True, last_batch='keep')\n \n\n def check_acc(self, data_iterator):\n numerator = 0.\n denominator = 0.\n for batch_i, (data, label) in enumerate(data_iterator):\n _, output = self.loss(data, label, train = False)\n predictions = nd.argmax(output, axis=1).as_in_context(ctx)\n numerator += nd.sum(predictions == label.as_in_context(ctx))\n 
denominator += data.shape[0]\n print('Evaluating accuracy. (complete percent: {:.2f}/100)'.format(1.0 * batch_i / (self.train_size/self.batch_size) * 100 /2)+' '*20, end='')\n sys.stdout.write(\"\\r\") \n\n return (numerator / denominator).asscalar()\n\n\n def _save_checkpoint(self):\n if self.checkpoint_name is None:\n return\n \n checkpoint = {\n# 'update_rule': self.update_rule,\n 'lr_decay': nd.array([self.lr_decay]),\n 'lr_rate': nd.array([self.lr_rate]),\n# 'optim_config': self.optim_config,\n 'batch_size': nd.array([self.batch_size]),\n# 'num_train_samples': self.num_train_samples,\n# 'num_val_samples': self.num_val_samples,\n 'train_shift_list': nd.array(self.train_shift_list),\n 'test_shift_list': nd.array(self.test_shift_list),\n 'num_epoch': nd.array([self.num_epoch]),\n 'epoch': nd.array([self.epoch]),\n 'loss_history': nd.array(self.loss_history),\n 'loss_v_history': nd.array(self.loss_v_history),\n 'moving_loss_history': nd.array(self.moving_loss_history),\n 'train_acc_history': nd.array(self.train_acc_history),\n 'test_acc_history': nd.array(self.test_acc_history),\n }\n \n file_address = self.save_checkpoints_address\n # save the model modification\n if self.epoch == 1: \n os.system('mkdir -p %s' %file_address)\n np.save(file_address+ '%s_structure_epoch.pkl' %(self.checkpoint_name) ,self.model.structure) \n # save the best params\n if self.findabest:\n os.system('rm -rf '+file_address+'%s_best_params_epoch@*' %self.checkpoint_name)\n nd.save(file_address+'%s_best_params_epoch@%s_%s.pkl' %(self.checkpoint_name, self.best_params_epoch, self.best_test_acc) , self.best_params)\n self.findabest = 0\n # save all the params during the training\n # nd.save(file_address+'%s_params_epoch@%s.pkl' %(self.checkpoint_name, self.epoch), self.model.params)\n # save the processing info. within the training\n nd.save(file_address+'%s_info.pkl' %(self.checkpoint_name), checkpoint)\n \n \n def predict_nd(self):\n\n self._random_data()\n \n if self.oldversion: pass\n else: self._reset_noise()\n \n prob_list = []\n label_list = []\n for batch_i, (data, label) in enumerate(self.test_data,):\n\n data = data.as_in_context(ctx).reshape((data.shape[0],self.num_channel,1,-1))\n label = label.as_in_context(ctx)\n label = nd.one_hot(label, self.model.output_dim).asnumpy()[:,1].tolist()\n output, _ = self.model.network(X=data)\n prob = transform_softmax(output)[:,1].asnumpy().tolist()\n prob_list.extend(prob)\n label_list.extend(label)\n return prob_list, label_list, output\n","repo_name":"iphysresearch/Python4GW","sub_path":"models/solver_cnn.py","file_name":"solver_cnn.py","file_ext":"py","file_size_in_byte":28187,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"28211447205","text":"import numpy as np\nimport torch\nimport time\nfrom sklearn.manifold import TSNE\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix, average_precision_score\n\nfrom scipy.spatial.distance import pdist, squareform\n\ndef compute_pr_auc(P,S):\n \"\"\"\n Compute the precision-recall AUC.\n\n Parameters\n ----------\n P : torch Tensor\n Ground truth.\n S : torch Tensor\n Predictions.\n\n Returns\n -------\n The precision-recall AUC.\n\n \"\"\"\n y_pred = S.clone().detach()\n\n y_pred[y_pred < 0.] = 0.\n y_pred[y_pred > 1.] 
= 1.\n \n y_pred = y_pred.cpu().detach().numpy().reshape((-1,))\n y_true = P.cpu().detach().numpy().reshape((-1,))\n\n return(average_precision_score(y_true, y_pred))\n\ndef compute_confusion_matrix(y_true, y_pred):\n \"\"\"\n Compute the confusion matrix.\n\n Parameters\n ----------\n y_true : torch Tensor\n Ground truth.\n y_pred : torch Tensor\n Predictions.\n\n Returns\n -------\n The confusion matrix.\n\n \"\"\"\n y_pred = y_pred.clone().detach()\n\n y_pred[y_pred >= 0.5] = 1.\n y_pred[y_pred < 0.5] = 0.\n\n y_pred = y_pred.cpu().detach().numpy().reshape((-1,))\n y_true = y_true.cpu().detach().numpy().reshape((-1,))\n\n return(confusion_matrix(y_true, y_pred))\n\ndef compute_auc(P,S):\n \"\"\"\n Compute the ROC AUC.\n\n Parameters\n ----------\n P : torch Tensor\n Ground truth.\n S : torch Tensor\n Predictions.\n\n Returns\n -------\n The ROC AUC.\n\n \"\"\"\n\n y_pred = S.clone().detach()\n\n y_pred[y_pred < 0.] = 0.\n y_pred[y_pred > 1.] = 1.\n\n y_pred = y_pred.cpu().detach().numpy().reshape((-1,))\n y_true = P.cpu().detach().numpy().reshape((-1,))\n\n return(roc_auc_score(y_true, y_pred))\n\ndef compute_accuracy(y_true, y_pred):\n \"\"\"\n Compute the accuracy.\n\n Parameters\n ----------\n y_true : torch Tensor\n Ground truth.\n y_pred : torch Tensor\n Predictions.\n\n Returns\n -------\n The accuracy.\n\n \"\"\"\n\n y_pred = y_pred.clone().detach()\n\n y_pred[y_pred >= 0.5] = 1.\n y_pred[y_pred < 0.5] = 0.\n\n y_pred = y_pred.cpu().detach().numpy().reshape((-1,))\n y_true = y_true.cpu().detach().numpy().reshape((-1,))\n\n return(accuracy_score(y_true, y_pred))\n\n\ndef timeit(method):\n \"\"\"\n Time decorator to print the execution time of a function.\n\n Use @timeit before your functions.\n \n Parameters\n ----------\n None.\n \n Returns\n -------\n None.\n \n \"\"\"\n\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n print('%r %2.2f ms' % \\\n (method.__name__, (te - ts) * 1000))\n return result\n return timed\n\ndef visualize_TSNE(embeddings, target, labels=None):\n \"\"\"\n Allows to visualize vectors (e.g. embeddings from a neural network model) \n in a 2d-space, using the t-SNE algorithm.\n Parameters\n ----------\n embeddings : a numpy ndarray of shape (n_samples, embd_size)\n The vectors to plot in the 2D graph.\n target : a numpy ndarray of shape (n_samples,)\n The targets associated with each embedding vector. Should be integers.\n\n Returns\n -------\n None.\n\n \"\"\"\n\n tsne = TSNE(n_components=2, init='pca',\n random_state=0, perplexity=30)\n data = tsne.fit_transform(embeddings)\n #plt.figure(figsize=(12, 6))\n plt.title(\"TSNE visualization of the embeddings\")\n scatter = plt.scatter(data[:,0],data[:,1],c=target)\n plt.legend(handles=scatter.legend_elements()[0], labels=labels)\n\n return\n\ndef compute_similarity(net):\n \"\"\"\n Compute Jaccard similarity matrix from a bipartite graph. It allows to\n get a new similarity network with homogeneous nodes.\n\n Parameters\n ----------\n net : numpy ndarray of shape (N1,N2)\n The \"adjacency matrix\" of the bipartite graph. 
The similarity network\n will be computed with the set of nodes of size N1 (corresponding to\n the lines in this case).\n\n Returns\n -------\n M : numpy ndarray of shape (N1,N1)\n The similarity network.\n \n \"\"\"\n M = 1 - pdist(net, metric='Jaccard')\n M = squareform(M)\n M = M + np.eye(*M.shape)\n M[np.isnan(M)] = 0.\n\n return(M)\n\ndef readnet(net_path):\n \"\"\"\n Read an adjacency matrix in a tsv file.\n\n \"\"\"\n return(np.genfromtxt(net_path,delimiter='\\t'))\n\ndef extract_samples(K, tensor):\n \"\"\"\n Extract K random samples from tensor.\n\n Parameters\n ----------\n K : int\n Wanted number of samples\n tensor : tensor of any size\n tensor from which to extract the samples \n (the first dimension will be considered as the sample dim)\n\n Returns\n -------\n samples : tensor of same type as the tensor parameter of shape (K,...)\n the K samples extracted from tensor\n\n \"\"\"\n perm = torch.randperm(tensor.size(0))\n idx = perm[:K]\n samples = tensor[idx]\n \n return (samples)","repo_name":"michavcr/Deep-Networks-for-Graph-Representation","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8753391385","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\n\nclass FolderInPurchase(models.TransientModel):\n _name = 'folder.in.purchase'\n _description = 'Link The Folder to Purchase & picking'\n\n folder_id = fields.Many2one(\n 'res.container.folder',\n string='Folder',\n required=True\n )\n\n def action_apply(self):\n purchases = self.env['purchase.order'].browse(self._context.get('active_ids', []))\n for purchase in purchases.filtered(lambda a: not a.folder_id):\n purchase.write(\n {\n 'folder_id': self.folder_id.id\n }\n )","repo_name":"TijmenHolt/acsoluions","sub_path":"container_tracking-15.0.0.1/container_tracking/wizard/folder_in_purchase.py","file_name":"folder_in_purchase.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30106605921","text":"from products import products\nfrom flask import jsonify, request\nfrom flask_api.models.Product import Product\nfrom app import app\n\n\"\"\"\nGET on the products route\n\"\"\"\n@app.route('/products')\ndef getProducts():\n return jsonify({\"products\": products}), 200\n\n\"\"\"\nPOST on the products route\n\"\"\"\n@app.route('/products', methods=['POST'])\ndef addProduct():\n newProduct = Product(\n request.json['name'],\n request.json['price'],\n request.json['quantity']\n )\n\n products.append({'name': newProduct.name, 'price': newProduct.price, 'quantity': newProduct.quantity})\n\n return jsonify({\n 'msg': 'Product added successfully',\n 'products': products\n }), 201\n\n\"\"\"\nPUT on the products route, passing the product name as a parameter\n\"\"\"\n@app.route('/products/<string:product_name>', methods=['PUT'])\ndef updateProduct(product_name):\n productFound = [\n product for product in products if product['name'] == product_name]\n\n if len(productFound) > 0:\n productFound[0]['name'] = request.json['name']\n productFound[0]['price'] = request.json['price']\n productFound[0]['quantity'] = request.json['quantity']\n\n return jsonify({\n 'msg': 'Product Updated',\n 'product': productFound[0]\n }), 200\n\n return jsonify({'msg': 'Product not found'}), 404\n\n\"\"\"\nDELETE on the products route, passing the product name as a parameter\n\"\"\"\n@app.route('/products/<string:product_name>', 
methods=['DELETE'])\ndef deleteProduct(product_name):\n productFound = [\n product for product in products if product['name'] == product_name]\n if len(productFound) > 0:\n products.remove(productFound[0])\n return jsonify({'msg': 'Product deleted', 'products': products}), 200\n return jsonify({'msg': 'Product not found'}), 404\n\n\"\"\"\nGET on the products route, passing the product name as a parameter\n\"\"\"\n@app.route('/products/<string:product_name>')\ndef method_name(product_name):\n products_found = [\n product for product in products if product['name'] == product_name]\n\n if not products_found:\n return jsonify({'msg': 'Product not found'}), 404\n return jsonify(products_found), 200\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return jsonify({\n 'code': e.code,\n 'name': e.name,\n 'description': e.description,\n 'msg': 'Please enter a valid URL'\n }), 404","repo_name":"DiegoSystemsDeveloper/rest-api-flask","sub_path":"flask_api/controllers/Products_controller.py","file_name":"Products_controller.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72352496586","text":"# Library containing methods for navigation of drones to given waypoints\n\nimport json\nfrom math import ceil\n\nclass Nav():\n DEBUG = True\n\n def __init__(self, waypoint_list_path: str, numb_waypoints: int = 50) -> None:\n \"\"\"\n Core navigation Class that generates a route given a set of\n waypoints. In general, the number of waypoints determines \n the overall trajectory of the agent. A linear path is \n currently supported, with work on curves, Dubins paths, etc. \n to follow on in the future.\n\n ## Args:\n - waypoint_list_path [string] - path to the json file that\n contains the waypoints\n - numb_waypoints [integer] - the number of intermediary\n waypoints to create. Default is\n 50 waypoints.\n \"\"\"\n try:\n with open(waypoint_list_path, \"r\") as file:\n self.waypoints = json.load(file)\n except Exception as error:\n print(\"There was an issue loading the waypoints\")\n exit()\n self.primary_route = None\n self.numb_waypoints = numb_waypoints\n self.default_velocity = 5.0 # m / s\n self.x_distance_increment = None\n self.y_distance_increment = None\n self.z_distance_increment = None\n\n def build_path(self):\n \"\"\"\n Given 2 waypoints, start and end, generate a linear path\n that is broken into segments of a specific distance\n increment. 
Currently, the only supported path generation\n is a linear path of points.\n\n ## Inputs:\n - None\n\n ## Outputs:\n - Primary Route attribute with list of \"numb_waypoints\"\n [X, Y, Z, Vel] waypoints in the NED coordinate frame\n \"\"\"\n print(\"Generating path with {} intermediary waypoints.\".format(\n self.numb_waypoints\n ))\n # array [x, y, z, vel]\n start_point = self.waypoints[\"points\"][0]\n # array [x, y, z, vel]\n end_point = self.waypoints[\"points\"][1]\n self.calculate_distances(start_point, end_point)\n self.primary_route = [start_point]\n if self.DEBUG:\n print(\"X Distance Increment: {}\".format(self.x_distance_increment))\n print(\"Y Distance Increment: {}\".format(self.y_distance_increment))\n print(\"Z Distance Increment: {}\".format(self.z_distance_increment))\n for i in range(1, self.numb_waypoints):\n previous_point = self.primary_route[i-1]\n self.primary_route.append(\n [previous_point[0] + self.x_distance_increment,\n previous_point[1] + self.y_distance_increment,\n previous_point[2] + self.z_distance_increment,\n self.default_velocity]\n )\n print(\"Route generation has been completed.\")\n\n def calculate_distances(self, start_point: list, end_point: list):\n x_distance = Nav.find_difference_in_distance(\n start_point[0],\n end_point[0]\n )\n self.x_distance_increment = Nav.find_distance_increment(\n x_distance,\n self.numb_waypoints\n )\n\n y_distance = Nav.find_difference_in_distance(\n start_point[1],\n end_point[1]\n )\n self.y_distance_increment = Nav.find_distance_increment(\n y_distance,\n self.numb_waypoints\n )\n\n z_distance = (Nav.find_difference_in_distance(\n start_point[2],\n end_point[2]\n ) * -1\n )\n self.z_distance_increment = (Nav.find_distance_increment(\n z_distance,\n self.numb_waypoints\n ) * -1)\n\n def find_distance_increment(distance: float, numb_waypoints: int) -> float:\n \"\"\"\n Calculate the distance increment based upon the number of\n waypoints.\n\n ## Inputs:\n - distance [float] The distance between 2 waypoints\n - numb_waypoints [integer] The total number of waypoints\n to generate.\n \n ## Outputs:\n - A distance increment in Meters to separate\n the individual waypoints.\n \"\"\"\n return float(abs(distance / numb_waypoints))\n\n def find_difference_in_distance(start_point: float, end_point: float) -> float:\n return float(\n abs(\n abs(end_point)\n - abs(start_point)\n )\n )\n\nif __name__ == \"__main__\":\n new_nav = Nav('waypoints.json')\n new_nav.build_path()\n print(\"Route was generated with {} waypoints\".format(len(new_nav.primary_route)))\n print(\"First Point [X, Y, Z, Vel]: {}\".format(new_nav.primary_route[0]))\n print(\"Last Point [X, Y, Z, Vel]: {}\".format(new_nav.primary_route[49]))\n \n","repo_name":"xxEoD2242/multi_agent_routing","sub_path":"nav/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5360,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"36025186871","text":"from types import MethodType,FunctionType\n\nclass Student:\n def func1(self):\n print(\"func1\")\n def func2(self):\n print(\"func2\")\n @classmethod\n def func3(cls):\n print(\"func3\")\n @staticmethod\n def func4():\n print(\"func4\")\n\n\n\ndef statistic(*args):\n method_flag=0\n func_flag=0\n Student_flag=0\n for n in args:\n if isinstance(n,MethodType):\n method_flag+=1\n elif isinstance(n,FunctionType):\n func_flag+=1\n elif type(n)==Student:\n Student_flag+=1\n else:\n print(\"no match\")\n print(\"number of methods: %d, number of functions: %d, number of objects: %d\" 
%(method_flag,func_flag,Student_flag))\ni=Student()\nj=Student()\nstatistic(i,j,i,j,i,j,i.func4,j.func3,i.func1)\n\n\n\n\n\n","repo_name":"Assassins-king/First_semester_in_the_third_grade","sub_path":"python项目/pythonProject/实验5/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"29679444768","text":"class Kettle(object):\n power_source = \"electricity\"\n\n def __init__(self, make, price):\n self.make = make\n self.price = price\n self.on = False\n\n def switch_on(self):\n self.on = True\n\n\nkenwood = Kettle(\"Kenwood\", 8.99)\n\nhamilton = Kettle(\"Hamilton\", 14.55)\nhamilton.price = 17\nhamilton.switch_on()\nKettle.switch_on(kenwood)\n\nkenwood.power = 1.5 # bound to an instance of the class\nprint(kenwood.power)\n\nprint(\"Models: {} = {}, {} = {}\".format(hamilton.make, hamilton.price, kenwood.make, kenwood.price))\nprint(\"Models: {0.make} = {0.price} ({0.on}), {1.make} = {1.price} ({1.on})\".format(hamilton, kenwood))\n\nprint(hamilton.__dict__)\nhamilton.power_source = \"atomic\"\nprint(hamilton.__dict__)\n\nKettle.power_source = \"coal\"\nprint(Kettle.power_source, hamilton.power_source, kenwood.power_source)","repo_name":"paweln1975/mp-basics","sub_path":"oop/oop_intro.py","file_name":"oop_intro.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70923342024","text":"import torch as t \nimport torch.nn as nn\nimport time \nimport os\nimport torch.nn.functional as F\n\n# Add load and save methods\nclass BasicModule(nn.Module):\n def __init__(self):\n super(BasicModule, self).__init__()\n\n def load(self, path):\n self.load_state_dict(t.load(path))\n\n def save(self, path):\n now = time.strftime('%Y%m%d_%H:%M:%S')\n check_path = os.path.join(path, str(self.__class__.__name__) + '_'+ now + '.pth')\n t.save(self.state_dict(), check_path)\n\n# model = BasicModule()\n# model.save('pytorch_code')\n\nclass LeNet(BasicModule):\n def __init__(self):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n # ((224 - 4) / 2 - 4) / 2\n self.fc1 = nn.Linear(16 * 53 * 53, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 6)\n \n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(x.size()[0], -1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x \n\nclass ResidualBlock(nn.Module):\n def __init__(self, c_in, c_out, stride = 1, shortcut = None):\n super(ResidualBlock, self).__init__()\n \n self.left = nn.Sequential(\n nn.Conv2d(c_in, c_out, kernel_size = 3, stride = stride, padding = 1, bias = False),\n nn.BatchNorm2d(c_out),\n nn.ReLU(inplace = True),\n nn.Conv2d(c_out, c_out, kernel_size = 3, stride = 1, padding = 1, bias = False),\n nn.BatchNorm2d(c_out)\n )\n\n self.right = shortcut\n\n def forward(self, x):\n out = self.left(x)\n residual = x if self.right is None else self.right(x)\n out += residual \n return F.relu(out)\n\n\nclass Resnet34(BasicModule):\n def __init__(self, num_classes = 6):\n super(Resnet34, self).__init__()\n self.pre = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size = 7, stride = 2, padding = 3, bias = False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace = True),\n nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1)\n )\n\n self.layer1 = self.make_layer(64, 128, num_blocks = 3, stride = 1)\n 
self.layer2 = self.make_layer(128, 256, num_blocks = 4, stride = 2)\n self.layer3 = self.make_layer(256, 512, num_blocks = 6, stride = 2)\n self.layer4 = self.make_layer(512, 512, num_blocks = 3, stride = 2)\n\n self.fc = nn.Linear(512, num_classes)\n\n\n def make_layer(self, c_in, c_out, num_blocks, stride = 1):\n shortcut = nn.Sequential(\n nn.Conv2d(c_in, c_out, kernel_size = 1, stride = stride, padding = 0, bias = False),\n nn.BatchNorm2d(c_out)\n )\n\n blocks = []\n blocks.append(ResidualBlock(c_in, c_out, stride = stride, shortcut = shortcut))\n for _ in range(1, num_blocks):\n blocks.append(ResidualBlock(c_out, c_out, stride = 1))\n\n return nn.Sequential(*blocks)\n \n def forward(self, x):\n x = self.pre(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = F.avg_pool2d(x, kernel_size = 7)\n x = x.view(x.size()[0], -1)\n return self.fc(x)\n","repo_name":"codingbock/simple-image-classification","sub_path":"TrashNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19763351570","text":"from datetime import datetime, timezone\nfrom dateutil.relativedelta import relativedelta\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import ListView, CreateView, UpdateView\n\nfrom .forms import CardStatusForm, CreateMULTCardForm\nfrom .models import BonusCard, Purchase\nfrom .filters import CardFilter\n\n\nclass CardsView(ListView):\n\n model = BonusCard\n context_object_name = 'bonuscards'\n template_name = 'main_page_with_filters_and_stats.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n filtered_cards = CardFilter(self.request.GET, queryset=self.get_queryset())\n context['filter_form'] = filtered_cards.form\n context['filter_items_count'] = filtered_cards.qs.count()\n paginator = Paginator(filtered_cards.qs, 50)\n page = self.request.GET.get('page')\n try:\n context['filter_qs'] = paginator.page(page)\n except PageNotAnInteger:\n context['filter_qs'] = paginator.page(1)\n except EmptyPage:\n context['filter_qs'] = paginator.page(paginator.num_pages)\n context['total_cards'] = BonusCard.objects.all().count\n context['active_cards'] = BonusCard.objects.filter(status='ACTIVE').count()\n context['deactive_cards'] = BonusCard.objects.filter(status='DEACTIVE').count()\n context['not_active_cards'] = BonusCard.objects.filter(status='NOT ACTIVE').count()\n return context\n\n\n# def show_card(request, card_series, card_number):\n# card = get_object_or_404(BonusCard, card_series=card_series, card_number=card_number)\n# form = CardStatusForm(request.POST or None)\n# if request.method == 'POST':\n# form = CardStatusForm(request.POST or None)\n# if form.is_valid():\n# card.status = form.cleaned_data['status']\n# card.save()\n#\n# context = {\n# 'card': card,\n# 'form': form,\n# }\n#\n# return render(request, '../templates2/../templates/single_card_view_and_change_status.html', context=context)\n\n\ndef show_card2(request, card_series):\n cards = BonusCard.objects.filter(card_series=card_series)\n if cards:\n context = {\n 'cards': cards,\n }\n else:\n raise Http404(\"There are no cards with this series\")\n\n return render(request, '../templates2/view2.html', 
context=context)\n\n\ndef create_mult_cards(request):\n form = CreateMULTCardForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n card_series = str(form.cleaned_data['card_series']).zfill(4)\n expire_in = form.cleaned_data['expires_in']\n amount = form.cleaned_data['amount']\n cards = BonusCard.objects.filter(card_series=card_series)\n if cards:\n max_number = int(\n cards.filter(card_series=card_series).values_list('card_number').order_by('card_number').last()[0]\n ) + 1\n else:\n max_number = 0\n list_cards = [BonusCard(\n card_series=card_series,\n card_number=str(max_number + i).zfill(12),\n expire_date=(datetime.now(timezone.utc) + relativedelta(months=+int(expire_in)))\n ) for i in range(amount)]\n BonusCard.objects.bulk_create(list_cards)\n return HttpResponseRedirect(reverse_lazy('index'))\n return render(request, 'create_mult_cards_form.html', {'form': form})\n\n\nclass CardUpdateView(UpdateView):\n model = BonusCard\n fields = ['status']\n template_name = 'single_card_view_and_change_status.html'\n context_object_name = 'bonuscard'\n\n def get_object(self, queryset=None):\n obj = self.model.objects.get(card_series=self.kwargs['card_series'], card_number=self.kwargs['card_number'])\n return obj\n\n def form_valid(self, form):\n self.object = form.save()\n return self.render_to_response(self.get_context_data(form=form))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['card_purchase'] = Purchase.objects.filter(card=context['bonuscard'])\n return context\n\n\nclass CardsListView(ListView):\n model = BonusCard\n template_name = ''\n\n\n\n","repo_name":"happyFrank321/bonuscards","sub_path":"bonuscards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29889482658","text":"import os\nfrom keras.models import load_model\nfrom keras.preprocessing.image import load_img, img_to_array, array_to_img\nimport numpy as np\n\n\n#load saved model file\nthisdir = os.path.abspath(os.path.dirname(__file__))\nMODEL_FOLDER = os.path.join(thisdir, 'saved_models')\nmodel_file_name = 'model_new.h5'\nmodel_path = os.path.join(MODEL_FOLDER, model_file_name)\nmodel = load_model(model_path)\nmodel._make_predict_function()\n\ndef predict(image):\n\t# dimensions of our images\n\timg_width, img_height = 128, 128\n\n\t# load the image and convert the image size\n\timg = load_img(image, target_size=(img_width, img_height))\n\n\t# convert image to numpy array\n\timg_array = img_to_array(img)\n\n\t# add dimension to image array\n\timg_array = np.expand_dims(img_array, axis=0)\n\n\t# do prediction\n\tclasses = model.predict_classes(img_array)\n\treturn classes\n\n\n\n# dimensions of our images\n# img_width, img_height = 128, 128\n\n# load the image and convert the image size\n# img = load_img('images/13_left.jpeg', target_size=(img_width, img_height))\n\n# show the image\n# img.show()\n\n# convert image to numpy array\n# img_array = img_to_array(img)\n\n\n# # add dimension to image array\n# img_array = np.expand_dims(img_array, axis=0)\n\n# # do prediction\n# classes = model.predict_classes(img_array)\n\n# # report details about the image\n# print(type(img))\n# print(img.format)\n# print(img.mode)\n# print(img.size)\n\n# # Report details about array\n# print(img_array.dtype)\n# print(img_array.shape)\n\n# # print result eg [1] \n# # This is the meaning of the results : 0 - No DR, 1 - Mild, 2 - Moderate, 3 - 
Severe, 4 - Proliferative DR\n# print (classes)\n\n\n","repo_name":"thomasbshop/VisualDiabetesDiagnoser","sub_path":"app/diagnoser/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32863880048","text":"#Prompt por MIGUEL ALEJANDRE\n# Documentacion: https://platform.openai.com/docs/introduction\n\nimport openai\nimport config\nimport typer\nfrom rich import print\nfrom rich.table import Table\n\ndef main():\n openai.api_key = config.api_key\n\n print(\"[bold green]ChatGPT en Python[/bold green]\")\n\n table = Table()\n table.add_column(\"Comando\")\n table.add_column(\"Descripción\")\n table.add_row(\"exit\", \"Salir de la aplicación\")\n table.add_row(\"new\", \"Crear nuevo contexto\")\n print(table)\n\n context ={\"role\": \"system\",\n \"content\": \"Eres un asistente muy útil\"}\n messages = [context]\n\n while True:\n content = __prompt()\n\n if content == \"new\":\n print(\"Nueva conversación: \")\n messages = [context]\n content = __prompt()\n\n messages.append({\"role\": \"user\", \"content\": content})\n\n response = openai.ChatCompletion.create(model=\"gpt-3.5-turbo\", messages=messages)\n\n response_content = response.choices[0].message.content\n\n messages.append({\"role\": \"assistant\", \"content\": response_content})\n\n print(f\"[bold green]> [/bold green] [green]{response_content}[/green]\")\n\ndef __prompt() -> str:\n prompt = typer.prompt(\"\\n¿Sobre qué quieres hablar? \")\n\n if prompt == \"exit\":\n print(\"¡Hasta luego!\")\n exit = typer.confirm(\"¿Estás seguro?\")\n if exit:\n raise typer.Abort()\n return __prompt()\n\n return prompt\n\nif __name__ == \"__main__\":\n typer.run(main)\n","repo_name":"Maskeit/Portafolio","sub_path":"pythonGPT/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39248191630","text":"#!/usr/bin/python3\n\"\"\"\nMore tests... 
State\n\"\"\"\n\nimport unittest\nimport models\nfrom models.base_model import BaseModel\nfrom models.state import State\n\n\nclass test_State(unittest.TestCase):\n \"\"\"\n Test cases for State\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Basic tests set up, create one example\n for the test\n \"\"\"\n self.example = State()\n\n def tearDown(self):\n \"\"\"\n Erase examples used for tests\n \"\"\"\n del self.example\n\n def test_init(self):\n \"\"\"\n Test if an object of the class State can be created.\n without any parameters\n \"\"\"\n new_obj = State()\n self.assertIsInstance(new_obj, State)\n\n def test_inheritance(self):\n \"\"\"\n Test if an object of the class State can be created\n and inherits from BaseModel\n without any parameters\n \"\"\"\n new_obj = State()\n self.assertIsInstance(new_obj, State)\n self.assertIsInstance(new_obj, BaseModel)\n\n def test_inheritance_parameters(self):\n \"\"\"\n Test if an object of the class State have\n Id, created_at, updated_at\n \"\"\"\n self.assertIsInstance(self.example, State)\n self.assertIsInstance(self.example, BaseModel)\n self.assertTrue(hasattr(self.example, \"id\"))\n self.assertTrue(hasattr(self.example, \"created_at\"))\n self.assertTrue(hasattr(self.example, \"updated_at\"))\n\n def test_State_name(self):\n \"\"\"\n Test if an object of the class State can be created\n and then modify the parameters.\n Test for the Name of the State.\n \"\"\"\n new_obj = State()\n new_obj.name = \"California\"\n self.assertIsInstance(new_obj, State)\n self.assertTrue(hasattr(new_obj, \"name\"))\n self.assertEqual(new_obj.name, \"California\")\n\n def test_State_save(self):\n \"\"\"\n Test if a State can be saved\n \"\"\"\n new_obj = State()\n old_time = new_obj.updated_at\n new_obj.save()\n new_time = new_obj.updated_at\n self.assertNotEqual(old_time, new_time)\n\n def test_str(self):\n \"\"\"\n Test if a State cls can use the __str__ method\n from the BaseModel class\n \"\"\"\n test_id = self.example.id\n test_dic = self.example.__dict__\n str_to_cmp = \"[State] ({}) {}\".format(test_id, test_dic)\n self.assertEqual(str(self.example), str_to_cmp)\n\n def test_to_dict(self):\n \"\"\"\n Test if State can use the to_dict method\n from the BaseModel class\n \"\"\"\n new_obj = State()\n new_obj.name = \"California\"\n\n new_dict = new_obj.to_dict()\n\n self.assertIsInstance(new_dict['__class__'], str)\n self.assertIsInstance(new_dict['id'], str)\n self.assertIsInstance(new_dict['created_at'], str)\n self.assertIsInstance(new_dict['updated_at'], str)\n self.assertIsInstance(new_dict['name'], str)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"katiacorona/holbertonschool-AirBnB_clone","sub_path":"tests/test_models/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36190118843","text":"class Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n\n # new_nums = []\n\n \n # for i in nums: #o(n)\n # if i != new_nums[-1]: #o(n)\n # new_nums.append(i)\n # return len(new_nums)\n\n #o(n^2)\n\n # print(len(x))\n # index = 0\n # new_nums = nums\n # while index < len(new_nums):\n # if nums[index] == nums[index +1]:\n # index += 1\n\n pointer = 0\n for num in nums: # usually i is index\n if num != nums[pointer]:\n pointer += 1\n nums[pointer] = num\n # pointer += 1 #1\n return pointer + 1\n\n 
\n","repo_name":"HighandLight/LeetCodeAlgorithm","sub_path":"0026-remove-duplicates-from-sorted-array/0026-remove-duplicates-from-sorted-array.py","file_name":"0026-remove-duplicates-from-sorted-array.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36517190561","text":"from functools import reduce\n\n# import json\n\nimport pickle\n\n\nimport os.path\n\n\nfrom collections import OrderedDict\n\n\nfrom hash_util import valid_proof, hash_block\n\n\nowner = 'Arthur'\n\nMINING_REWARD = 10\n\n\nGENESIS_BLOCK = {\n 'previous_block_hash': '',\n 'index': 0,\n 'processed_transactions': [],\n 'proof': 100\n}\n\nblockchain = [GENESIS_BLOCK]\n\n\nopen_transactions = []\n\n\nparticipants = {\n 'Max'\n}\n\n\ndef load_data():\n \"\"\"Loads the data of the blockchain in a file in your system's storage\"\"\"\n\n # if (not os.path.isfile('blockchain.json')):\n if (not os.path.isfile('blockchain.p')):\n print('No blockchain file detected, loading starting blockchain...')\n return\n # with open('blockchain.json', mode='r') as g: ### JSON version\n with open('blockchain.p', mode='rb') as g: #### PICKLE version (rb, read binary) (and also the blockchain.p file)\n # read_blockchain, read_transactions = g.readlines() ### JSON VERSION\n # global blockchain ### JSON VERSION\n # global open_transactions ### JSON VERSION\n # blockchain = json.loads(read_blockchain) ## JSON version\n\n file_content = pickle.loads(g.read()) # reads THE ENTIRE CONTENT OF OUR pickle FILE/the pickle data in the file it read, and then the '.loads()' METHOD will try to CONVERT ALL OF IT INTO 'python data' (objects, dicts, tuples, lists, etc)...\n print(file_content, 'LOADED DATA') ## when we resort to pickling/use PICKLE, WE DO NOT HAVE TO WORRY ABOUT 'LOSING THE INFO ABOUT WHETHER A DICT IS ORDERED OR NOT' when converting a DICT INTO BINARY DATA (unlike JSON DATA, where WE LOSE THAT INFO WHEN CONVERTING PYTHON OBJECTS INTO JSON STRINGS..) \n\n global blockchain\n global open_transactions\n blockchain = file_content['chain']\n open_transactions = file_content['ot']\n\n # updated_blockchain = []\n # for block in blockchain:\n # updated_block = {\n # 'previous_block_hash': block['previous_block_hash'],\n # 'index': block['index'],\n # 'proof': block['proof'],\n # 'processed_transactions': [OrderedDict(\n # [('amount', transaction['amount']), ('recipient', transaction['recipient']), ('sender', transaction['sender'])]) for transaction in block['processed_transactions']]\n # }\n # updated_blockchain.append(updated_block)\n # blockchain = updated_blockchain\n\n # print(json.loads(read_blockchain))\n\n # open_transactions = [OrderedDict([('amount', transaction['amount']), ('recipient', transaction['recipient']), (\n # 'sender', transaction['sender'])]) for transaction in json.loads(read_transactions)]\n\n # print(json.loads(read_transactions))\n\n\nload_data()\n\n\ndef save_data():\n \"\"\"Saves the data of the blockchain in a file in your system's storage\"\"\"\n\n # with open('blockchain.txt', mode='w') as f: # JSON version of the code\n #VVVVVV PICKLE VERSION of the code --> note the '.p' file extension, which is the convention for pickle...\n with open('blockchain.p', mode='wb') as f: ## PICKLE version of the code (will store BINARY DATA in your file).. 
--> because the default mode is 'wt' (write text), AND THE BINARY WRITE MODE IS 'wb' (write binary)...\n # f.write(json.dumps(blockchain)) # JSON version of the code...\n # f.write('\\n')\n # f.write(json.dumps(open_transactions)) \n # f.write(pickle.dumps(blockchain)) ### PICKLE version of the code --> 'pickle.dumps()' CONVERTS OUR PYTHON DATA INTO PICKLE BINARY DATA, WHICH WILL THEN BE WRITTEN TO OUR FILE via 'f.write()'....\n #### however, if you WANT TO WRITE BINARY DATA TO A FILE, YOU ARE REQUIRED TO CHANGE the 'mode' of your 'open' call to the value 'wb'...\n ## WHEN WE ARE 'PICKLING', it is NOT POSSIBLE TO ADD LINE BREAKS via '\\n', SO WE WILL NOT WRITE an 'f.write('\\n')' TO SEPARATE OUR CONTENT...\n ## INSTEAD, WE WILL USE AN OBJECT OR DICTIONARY TO SEPARATE OUR CONTENTS, BECAUSE THAT WILL WORK...\n data_to_save = { # we use this dict to SEPARATE OUR CONTENTS (line break) when we output them to the file...\n 'chain': blockchain,\n 'ot': open_transactions\n }\n f.write(pickle.dumps(data_to_save))\n\ndef get_user_choice():\n \"\"\" Returns the input of the user (either 1, 2, h or q) to proceed with the options \"\"\"\n user_input = input('Please choose an option: ')\n\n return user_input\n\n\ndef get_transaction_value():\n \"\"\" Returns the input of the user (sender, recipient, amount) as a tuple, to proceed with option 1 \"\"\"\n\n user_transaction_sender = owner\n\n user_transaction_recipient = input(\"Please enter the recipient's name: \")\n if (user_transaction_recipient == '' or not isinstance(user_transaction_sender, str)):\n return None\n user_transaction_amount = input('Please enter transaction amount: ')\n if (user_transaction_amount == '' or not user_transaction_amount.isnumeric() or isinstance(user_transaction_amount, bool)):\n print('TEST')\n return None\n user_transaction_input = (\n user_transaction_sender, user_transaction_recipient, float(user_transaction_amount))\n return user_transaction_input\n\n\ndef add_transaction(sender, recipient, amount=1.0):\n \"\"\"Appends a NEW TRANSACTION TO THE 'open_transactions' LIST, and then RETURNS TRUE OR FALSE depending on the outcome of the code --> a verification that checks whether the user can or cannot perform this operation/send of coins....\n\n Arguments:\n :sender: the sender of the transaction (name or id) \n :recipient: the receiver of the transaction (name or id)\n :amount: the amount (MUST BE A FLOAT). DEFAULT IS 1.0 coin ... 
\n \"\"\"\n new_transaction = OrderedDict([\n ('amount', amount),\n ('recipient', recipient),\n ('sender', sender)\n ])\n if not verify_transaction(new_transaction):\n print('Your funds are not enough for the chosen operation')\n return False\n else:\n open_transactions.append(new_transaction)\n participants.add(sender)\n participants.add(recipient)\n print(open_transactions)\n save_data()\n return True\n\n\ndef proof_of_work():\n last_block = blockchain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n while not valid_proof(open_transactions, last_hash, proof):\n proof += 1\n return proof\n\n\ndef mine_block():\n \"\"\"This is the function that PROCESSES OUR OPEN TRANSACTIONS and then ADDS A NEW BLOCK TO THE BLOCKCHAIN \"\"\"\n previous_block = blockchain[-1]\n hashed_block = hash_block(previous_block)\n proof = 0\n\n print(hashed_block, 'LINE51251')\n\n proof = proof_of_work()\n reward_transaction = OrderedDict([\n ('amount', MINING_REWARD),\n ('recipient', owner),\n ('sender', 'ourApp')\n ])\n copied_transactions = open_transactions[:]\n copied_transactions.append(reward_transaction)\n\n block = {'previous_block_hash': hashed_block,\n 'index': len(blockchain),\n\n 'processed_transactions': copied_transactions,\n 'proof': proof\n }\n blockchain.append(block)\n\n print(blockchain, 'TRIED TO MINE BLOCK')\n return True\n\n\ndef output_blockchain():\n for block in blockchain:\n print('Outputting block')\n print(block)\n else:\n print('-' * 20)\n\n\ndef verify_chain():\n print(blockchain)\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n if block['previous_block_hash'] != hash_block(blockchain[index - 1]):\n return False\n\n if not valid_proof(block['processed_transactions'][:-1], block['previous_block_hash'], block['proof']):\n print('Proof of work is invalid.')\n return False\n return True\n\n\ndef verify_transaction(transaction):\n \"\"\"Returns True or False DEPENDING ON THE TRANSACTION CHECK; IF THE USER DOES NOT HAVE SUFFICIENT FUNDS, IT RETURNS FALSE AND THE OPERATION/TRANSACTION IS NOT PERFORMED. 
It is called from 'add_transaction()'..\"\"\"\n sender_balance = get_balance(transaction['sender'])[2]\n print(sender_balance)\n return sender_balance >= transaction['amount']\n\n\ndef get_value(person):\n return [[transaction['amount'] for transaction in block['processed_transactions'] if transaction[person] == owner] for block in blockchain]\n\n\ndef get_balance(participant):\n transaction_sender = get_value('sender')\n open_transactions_sender = [transaction['amount']\n for transaction in open_transactions if transaction['sender'] == participant]\n transaction_sender.append(open_transactions_sender)\n\n print(transaction_sender[0])\n\n print(transaction_sender)\n\n amount_sent = reduce(lambda tx_sum, tx_amt: tx_sum + sum(tx_amt)\n if len(tx_amt) > 0 else tx_sum + 0, transaction_sender, 0)\n\n print(amount_sent)\n\n transaction_recipient = get_value('recipient')\n\n amount_received = reduce(lambda tx_sum, tx_amt: tx_sum + sum(tx_amt)\n if len(tx_amt) > 0 else tx_sum + 0, transaction_recipient, 0)\n print(amount_received)\n return (amount_sent, amount_received, amount_received - amount_sent)\n\n\ndef verify_transactions():\n\n if open_transactions == []:\n return None\n else:\n\n return all([verify_transaction(transaction) for transaction in open_transactions])\n\n\nwaiting_for_input = True\n\n\nwhile waiting_for_input:\n\n print('-' * 30)\n print('1: Add a new transaction value')\n print('2: Display current blockchain')\n print('3: Show participants')\n print(\"b: Show user's balance\")\n print('h: Manipulate the chain')\n print('m: Mine a block')\n print('q: Quit the program')\n print('v: Verify open transactions')\n print('-' * 30)\n user_input = get_user_choice()\n if(user_input == '1'):\n user_transaction = get_transaction_value()\n\n if(user_transaction == None):\n print('Please enter a valid sender, recipient and transaction value.')\n else:\n sender, recipient, amount = user_transaction\n if add_transaction(sender, recipient, amount):\n print('Added transaction!')\n else:\n print('Transaction failed')\n\n elif(user_input == '2'):\n output_blockchain()\n elif(user_input == 'q'):\n waiting_for_input = False\n\n elif(user_input == 'h'):\n print(blockchain)\n elementIndex = input(\n 'Enter the number of the element you want to manipulate: ')\n if (elementIndex.isnumeric()):\n if (len(blockchain) >= int(elementIndex) + 1):\n elementValue = input(\n ' Please Enter the value you want to insert: ')\n blockchain[int(elementIndex)]['processed_transactions'] = [\n {'sender': 'test', 'recipient': 'tested', 'amount': 1212}]\n else:\n print(\n 'No block for that index, please insert sufficient number of blocks before trying to manipulate a specific one')\n else:\n print('Invalid index entered, please try again')\n\n elif(user_input == 'm'):\n if mine_block():\n print(' MINED')\n open_transactions = []\n save_data()\n elif(user_input == '3'):\n print(participants)\n elif(user_input == 'b'):\n sent, received, balance = get_balance(owner)\n print(f'Blocks sent by {owner}: ' + '{sent:>6.2f}'.format(sent=sent))\n print(f'Blocks received by {owner}: ' +\n '{received:>6.2f}'.format(received=received))\n print(f'Total Balance of {owner}: ' +\n '{balance:>6.2f}'.format(balance=balance))\n elif(user_input == 'v'):\n if (verify_transactions()):\n print('Transactions are valid.')\n elif(verify_transactions() == None):\n print('No transactions to verify, please add a transaction.')\n else:\n print('Invalid transactions detected.')\n else:\n print('-' * 40)\n print('Invalid command, please input 1, 2 or q.')\n 
if(not verify_chain()):\n print('Blockchain was found invalid.')\n print(blockchain)\n waiting_for_input = False\n else:\n print('Blockchain is valid')\n\n\nelse:\n print('User left')\n","repo_name":"nothingnothings/projeto-python","sub_path":"MODULO7-TRABALHANDOCOMARQUIVOSEMPYTHON/novonovoprojeto9/blockchain10,versaocompickle.py","file_name":"blockchain10,versaocompickle.py","file_ext":"py","file_size_in_byte":12422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37593720494","text":"command = input()\nexams_info = {}\nlanguage_list = []\nbanned_students = []\nwhile command != 'exam finished':\n command = command.split('-')\n user = command[0]\n if len(command) == 2:\n if command[1] == 'banned':\n banned_students.append(user)\n elif len(command) == 3:\n language = command[1]\n points = int(command[2])\n language_list.append(language)\n if user not in exams_info:\n exams_info[user] = [(language, points)]\n else:\n exams_info[user].append((language, points))\n command = input()\nfor user, result in exams_info.items():\n result.sort(key=lambda x: -x[1])\nprint('Results:')\nfor user, exam in sorted(exams_info.items(), key=lambda kvp: (-kvp[1][0][1], kvp)):\n if user not in banned_students:\n highest_score = exam[0][1]\n print(f'{user} | {highest_score}')\nprint('Submissions:')\nfor language in sorted(list(set(language_list))):\n print(f'{language} - {language_list.count(language)}')","repo_name":"Veselin-Stoilov/softuni_projects-python-fundamentals","sub_path":"dictionaries_exercise/softuni_exam_results.py","file_name":"softuni_exam_results.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28952975124","text":"### Batch processing of Autocad files\n### Features\n### 1. Replace the layer prefix of dwg drawings from 'H' to 'CF'\n### 2. Re-save dwg files into another folder\n### 3. Rename dwg files with the desired prefix\n### 4. Convert files from the Autocad 2007 version to the Autocad 2021 version\n\n# import modules\nimport os\nimport win32com.client\nfrom tkinter import filedialog\nfrom tkinter import *\n\n# making interface\nroot = Tk()\nroot.withdraw()\n\n# getting application\napp = win32com.client.Dispatch(\"AutoCAD.Application.21\")\n\n# layers rename function\nold_pref = 'H'\nnew_pref = 'CF'\n\ndef lay_renam(adoc, old, new):\n for i in adoc.Layers:\n \tspt = (i.Name).split('.', 1)\n \tif spt[0] == old and len(spt) > 1:\n \t\ti.Name = new + '.' 
+ spt[1]\n\n# getting directory\ndirectory = filedialog.askdirectory(title = 'Исходные файлы')\nnew_folder = filedialog.askdirectory(title = 'Новые файлы')\n\n# getting files\nfiles = os.listdir(directory)\n\n# filtering dwg files\ndwg_files = [(i, 'new_' + i) for i in files if i.split('.')[-1] == 'dwg']\n\n# files processing\nfor j in dwg_files:\n\n file_path = os.path.join(directory,j[0])\n\n new_file_path = os.path.join(new_folder,j[1])\n\n app.Documents.Open(file_path)\n\n adoc = app.ActiveDocument\n\n lay_renam(adoc, old_pref, new_pref)\n\n adoc.SaveAs(new_file_path, 48)\n\n adoc.Close()\n\nprint('ok')\n","repo_name":"muraviev-aa/ITMO.SoftwareEng2022.Python","sub_path":"FreeTask/fileWorkDWG.py","file_name":"fileWorkDWG.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73249734985","text":"import time, math\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\n\n\ndef calc(x): return str(math.log(abs(12*math.sin(int(x)))))\n\n\nif __name__ == '__main__':\n link = \"http://suninjuly.github.io/selects1.html\"\n first = \"num1\"\n second = \"num2\"\n select_id = \"dropdown\"\n\n try:\n browser = webdriver.Chrome()\n browser.get(link)\n first = int(browser.find_element(By.ID, first).text)\n second = int(browser.find_element(By.ID, second).text)\n select = Select(browser.find_element(By.ID, select_id))\n select.select_by_value(str(first + second))\n\n button = browser.find_element(By.XPATH, \"//button[@type='submit']\")\n button.click()\n\n finally:\n time.sleep(5)\n browser.quit()\n","repo_name":"Dorivan/stepik_auto_tests_course","sub_path":"stepik_py/lesson_2/2.1 - 2.2/lesson_2_2_4.py","file_name":"lesson_2_2_4.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40936077772","text":"from locale import normalize\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Code suppressing TF warnings and messages.\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\nimport sys\nimport bm3d\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nimport subprocess\nimport argparse\nimport numpy as np\nimport nibabel as nib\nimport cv2\nimport matplotlib.pyplot as plt\nsys.path.append(\"../\")\nsys.path.append(\"/home/attilasimko/Documents/artefacts\")\nfrom testing import test_downsample\nfrom MLTK.utils import Timer\nimport scikit_posthocs as sp\nfrom MLTK.data import DataGenerator\nfrom MLTK.accelerated_mri.models import InterNetLoss, KIKI, build_model\nfrom numpy.fft import fftshift, ifftshift, fftn, ifftn, fft2, ifft2\nimport MLTK.accelerated_mri.utils as utils\nfrom MLTK.accelerated_mri.utils import IQM, znorm, save_progress, compare_vif, compare_ssim, compare_mse, mse, transform_image_to_kspace, transform_kspace_to_image\nimport tensorflow\ntensorflow.get_logger().setLevel('ERROR')\nfrom tensorflow.keras.layers import Lambda, Input, MaxPooling2D, Conv2D, BatchNormalization, Dropout, UpSampling2D, Concatenate, SpatialDropout2D\nfrom tensorflow.keras.models import load_model, Model\nimport warnings\nfrom sewar.full_ref import vifp\nimport SimpleITK as sitk\n\ndef get_mask(img):\n scale = 1\n img = np.interp(img, (img.min(), img.max()), (0, scale))\n img = img[0, :, :, 0]\n\n mask = sitk.GetImageFromArray(img)\n mask = sitk.OtsuThreshold(mask, 0, 1, 200)\n mask = sitk.BinaryFillhole(mask)\n mask = 
sitk.GetArrayFromImage(mask)\n\n return mask\n\ndef correct_itk_boxmean(img, iter):\n time_steps = [5, 10, 15, 20, 25]\n scale = 1\n img = np.interp(img, (img.min(), img.max()), (0, scale))\n img = img[0, :, :, 0]\n\n imgImage = sitk.GetImageFromArray(img)\n imgImage = sitk.Cast(imgImage, sitk.sitkFloat32)\n corrector = sitk.CurvatureAnisotropicDiffusionImageFilter()\n corrector.SetTimeStep( 0.0625)\n corrector.SetNumberOfIterations(5)\n outputSlice = corrector.Execute(imgImage)\n outputSlice = sitk.GetArrayFromImage(outputSlice)\n\n return outputSlice\n\ndef correct_unires(small_img, zero_filled_img):\n\n # img = cv2.resize(small_img, (320, 320), cv2.INTER_CUBIC)\n img = zero_filled_img[0, :, :, 0]\n size = np.shape(img)[0]\n \n # size = np.shape(img)[0]\n # img = cv2.resize(img, (320, 320), interpolation=cv2.INTER_NEAREST)\n\n img = np.interp(img, (img.min(), img.max()), (0, 1))\n size = np.shape(img)[0]\n img = np.stack([img, img], 2)\n img = nib.Nifti1Image(img, affine=np.eye(4))\n img.header.set_zooms((320 / size, 320 / size, 1.0))\n nib.save(img, os.path.join(\"/home/attilasimko/Documents/out/amri/slice.nii.gz\"))\n\n DEVNULL = open(os.devnull, 'wb')\n p = subprocess.call('/home/attilasimko/Documents/drs/AMRI/evaluations/hr.sh', shell=True, stdout=DEVNULL, stderr=DEVNULL)\n hr_image = nib.load(os.path.join(\"/home/attilasimko/Documents/out/amri/ur_slice.nii.gz\"))\n hr_image = hr_image.get_data()[:, :, 1]\n hr_image = np.interp(hr_image, (hr_image.min(), hr_image.max()), (0, 1))\n \n return hr_image\n\nwarnings.filterwarnings(\"ignore\")\nnp.random.seed(113)\ntensorflow.get_logger().setLevel('ERROR')\npid = os.getpid()\nprint(pid)\n\nparser = argparse.ArgumentParser(description='Welcome.')\n# Arguments to optimize\nparser.add_argument(\"--lr\", default=0.0001) # [0.5, 0.1, 0.05]\nparser.add_argument(\"--optimizer\", default=\"rmsprop\")\nparser.add_argument(\"--loss\", default=\"mean_squared_error\")\nparser.add_argument(\"--batch_size\", default=4) # [0.5, 0.1, 0.05]\nparser.add_argument(\"--alpha\", default=0.001) # [0.5, 0.1, 0.05]\nparser.add_argument(\"--kspace\", default=\"True\") # [0.5, 0.1, 0.05]\nparser.add_argument(\"--beta\", default=1.0) # [0.5, 0.1, 0.05]\nparser.add_argument(\"--case\", default=\"baseline\") # [0, 1, 2]\nparser.add_argument(\"--gpu\", default=None)\nparser.add_argument(\"--base\", default=None)\nargs = parser.parse_args()\n\n\n\nlrate = float(args.lr)\nbatch_size = int(args.batch_size)\nkspace = args.kspace == \"True\"\ngpu = args.gpu\ncase = str(args.case)\nbeta = float(args.beta)\nalpha = float(args.alpha) * beta\n\n# Paths\ndata_path = '/mnt/f4616a95-e470-4c0f-a21e-a75a8d283b9e/DSets/DS0059/'\nsave_path = \"/home/attilasimko/Documents/out/amri/\"\nbase = args.base\nplt_slice = 0\n\ngen = DataGenerator(data_path + 'testing',\n inputs=[['clean', False, 'float32']],\n outputs=[],\n batch_size=1,\n shuffle=False)\n\n\nnum_artefacts = 1\nkiki_path = \"/home/attilasimko/Documents/out/amri/round_two/2236123_kiki_final.h5\"\n(model, model_full) = build_model(Input(shape=(320, 320, 1)), Input(shape=(4)), num_artefacts, 64, 2, 0.1, case, \"adam\", 0.1, dict())\ncomps = []\nfor i in range(num_artefacts):\n comps.append(model.get_layer(f\"full_{i}\").output)\nmodel_comps = Model(model.inputs[0], comps)\nmodel_comps.load_weights(kiki_path)\n\n# num_artefacts = 4\n# kiki_path = \"/home/attilasimko/Documents/out/amri/round_two/2236123_kiki_final.h5\"\n# (model, model_full) = build_model(Input(shape=(320, 320, 1)), Input(shape=(4)), num_artefacts, 64, 2, 0.1, case, 
\"adam\", 0.1, dict())\n# comps = []\n# for i in range(num_artefacts):\n# comps.append(model.get_layer(f\"full_{i}\").output)\n# model_comps = Model(model.inputs[0], comps)\n# model_comps.load_weights(kiki_path)\n\nreg_zeros = np.zeros((batch_size, ))\n\n# Extra parameters\n\ntensorflow.random.set_seed(1)\ntimer = Timer()\n\ndownsample_levels = [2, 3, 4]\niter_list = [0]\nfor downsample_level in downsample_levels:\n for iter in iter_list:\n loss_kiki_ssim = []\n loss_kiki_vif = []\n loss_noise_ssim = []\n loss_noise_vif = []\n loss_ssim = []\n loss_vif = []\n loss_lanczos_ssim = []\n loss_lanczos_vif = []\n loss_unires_ssim = []\n loss_unires_vif = []\n print(\"Acceleration factor: --- \" + str(downsample_level))\n for i in range(10):#int(len(gen) / 10)):\n hr, lr =gen[i]\n clean = hr[0][0,:,:,0,0]\n small, corrupt = test_downsample(clean, downsample_level)\n\n corrupt = np.expand_dims(np.expand_dims(corrupt, 0), 3)\n clean = np.expand_dims(np.expand_dims(clean, 0), 3)\n\n mask = get_mask(corrupt)\n\n lanczos_correction = cv2.resize(small, (320, 320), cv2.INTER_CUBIC)\n lanczos_correction = lanczos_correction * mask\n\n unires_correction = correct_unires(clean, clean)\n unires_correction = unires_correction * mask\n\n kiki_correction = model_comps.predict_on_batch(corrupt)[0, :, :, 0:1] + corrupt\n kiki_correction = np.interp(kiki_correction[0, :, :, 0], (np.min(kiki_correction), np.max(kiki_correction)), (0, 1)) * mask\n\n kiki_correction = np.expand_dims(np.expand_dims(znorm(kiki_correction), 0), 3)\n lanczos_correction = np.expand_dims(np.expand_dims(znorm(lanczos_correction), 0), 3)\n unires_correction = np.expand_dims(np.expand_dims(znorm(unires_correction), 0), 3)\n\n corrupt = np.interp(corrupt[0, :, :, 0], (np.min(corrupt), np.max(corrupt)), (0, 1)) * mask\n corrupt = np.expand_dims(np.expand_dims(znorm(corrupt), 0), 3)\n\n clean = np.interp(clean[0, :, :, 0], (np.min(clean), np.max(clean)), (0, 1)) * mask\n clean = np.expand_dims(np.expand_dims(znorm(clean), 0), 3)\n\n # clean = np.interp(clean, (clean.min(), clean.max()), (0, 1))\n # kiki_correction = np.interp(kiki_correction, (kiki_correction.min(), kiki_correction.max()), (0, 1))\n # # noise_correction = np.interp(noise_correction, (noise_correction.min(), noise_correction.max()), (0, 1))\n # corrupt = np.interp(corrupt, (corrupt.min(), corrupt.max()), (0, 1))\n # lanczos_correction = np.interp(lanczos_correction, (lanczos_correction.min(), lanczos_correction.max()), (0, 1))\n # unires_correction = np.interp(unires_correction, (unires_correction.min(), unires_correction.max()), (0, 1))\n \n loss_ssim.append(compare_ssim(clean, corrupt))\n loss_vif.append(compare_vif(clean, corrupt))\n\n loss_kiki_ssim.append(compare_ssim(clean, kiki_correction))\n loss_kiki_vif.append(compare_vif(clean, kiki_correction))\n\n loss_lanczos_ssim.append(compare_ssim(clean, lanczos_correction))\n loss_lanczos_vif.append(compare_vif(clean, lanczos_correction))\n\n loss_unires_ssim.append(compare_ssim(clean, unires_correction))\n loss_unires_vif.append(compare_vif(clean, unires_correction))\n\n data_ssim = np.array([loss_ssim, loss_kiki_ssim, loss_lanczos_ssim, loss_unires_ssim])[:, :, 0]\n posthoc_ssim = sp.posthoc_nemenyi_friedman(data_ssim.T)\n print(posthoc_ssim)\n \n\n data_vif = np.array([loss_vif, loss_kiki_vif, loss_lanczos_vif, loss_unires_vif])[:, :, 0]\n posthoc_vif = sp.posthoc_nemenyi_friedman(data_vif.T)\n print(posthoc_vif)\n\n print(f\"Zero-filled --- Mean Loss:\\t{str(round(np.mean(loss_ssim), 4))}+-{str(round(np.std(loss_ssim), 
4))}\\t{str(round(np.mean(loss_vif), 4))}+-{str(round(np.std(loss_vif), 4))}\")\n print(f\"KIKI --- Mean Loss:\\t{str(round(np.mean(loss_kiki_ssim), 4))}+-{str(round(np.std(loss_kiki_ssim), 4))}\\t{str(round(np.mean(loss_kiki_vif), 4))}+-{str(round(np.std(loss_kiki_vif), 4))}\")\n # print(f\"Noise --- Mean Loss:\\t{str(round(np.mean(loss_noise_ssim), 4))}+-{str(round(np.std(loss_noise_ssim), 4))}\\t{str(round(np.mean(loss_noise_vif), 4))}+-{str(round(np.std(loss_noise_vif), 4))}\")\n print(f\"Lanczos --- Mean Loss:\\t{str(round(np.mean(loss_lanczos_ssim), 4))}+-{str(round(np.std(loss_lanczos_ssim), 4))}\\t{str(round(np.mean(loss_lanczos_vif), 4))}+-{str(round(np.std(loss_lanczos_vif), 4))}\")\n print(f\"ML --- Mean Loss:\\t{str(round(np.mean(loss_unires_ssim), 4))}+-{str(round(np.std(loss_unires_ssim), 4))}\\t{str(round(np.mean(loss_unires_vif), 4))}+-{str(round(np.std(loss_unires_vif), 4))}\")\n\n\n\n\n","repo_name":"attilasimko/drs","sub_path":"AMRI/evaluations/evaluate_downsample.py","file_name":"evaluate_downsample.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73471799305","text":"class Solution:\n def maximalSquare1(self, matrix: List[List[str]]) -> int:\n rows = len(matrix)\n if rows == 0:\n return 0\n cols = len(matrix[0])\n new_level = [[\"0\"]*cols for _ in range(rows)]\n any_four = False\n for i in range(rows-1):\n for j in range(cols-1):\n if matrix[i][j] ==\"1\" and matrix[i+1][j] == \"1\" and matrix[i][j+1] == \"1\" and matrix[i+1][j+1] == \"1\":\n any_four = True\n new_level[i][j] = \"1\"\n if any_four:\n print(\"Calling maximal square\", new_level)\n return self.maximalSquare1(new_level) + 1\n else:\n return 0\n \n def maximalSquare(self, matrix: List[List[str]]) -> int:\n max_area = self.maximalSquare1(matrix)\n if len(matrix) == 0:\n return 0\n if max_area > 0:\n max_area = max_area + 1\n else:\n any_taker = False\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == \"1\":\n any_taker = True\n if any_taker:\n max_area = 1\n return max_area**2\n","repo_name":"gauravaror/programming","sub_path":"maximum-square.py","file_name":"maximum-square.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36042037210","text":"import os\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"keys.json\"\n\nimport pandas as pd\nfrom bq_helper import BigQueryHelper\nimport plotly.graph_objs as go\nfrom plotly.offline import plot\n\nbq_assistant = BigQueryHelper('bigquery-public-data', 'san_francisco')\n\n\nQUERY = \"\"\"\n SELECT category, dayofweek, latitude, timestamp\n FROM `bigquery-public-data.san_francisco.sfpd_incidents`\n LIMIT 10\n \"\"\"\n\ndf = bq_assistant.query_to_pandas(QUERY)\nprint(df.head(5))\n\ncategory = df['category']\nlatitude = df['latitude']\n\n\ntrace1 = go.Scatter(\n x = category.index,\n y = latitude.values,\n mode = 'lines'\n )\n\nday = df['dayofweek']\ntime = df['timestamp']\n\ndata = [trace1]\n\n\nfig = dict(data = [trace1])\nplot(fig)","repo_name":"igortereshchenko/pandasplotly","sub_path":"km91/hudymvm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37238056844","text":"# !/usr/bin/env python\n# encoding: utf-8\n__author__ = 'Administrator'\nfrom random import *\n\ndef main():\n 
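# A racquetball-style simulation: only the serving player can score, and the\n    # first player to reach 15 points wins. main() prints the intro, reads the two\n    # win probabilities and the number of games, simulates them, and reports totals.\n    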
printIntro()\n    probA,probB,n=getInputs()\n    winsA,winsB=simNGames(n,probA,probB)\n    printSummary(winsA,winsB)\n\ndef printIntro():\n    print(\"This program simulates a game between two.\")\n    print(\"There are two players, A and B.\")\n    print(\"Probability (a number between 0 and 1) is used\")\n\ndef getInputs():\n    a=float(input(\"What is the prob.player A wins ?\"))\n    b=float(input(\"What is the prob.player B wins ?\"))\n    n=int(input(\"How many games to simulate ?\"))\n    return a,b,n\n\ndef simNGames(n,probA,probB):\n    winsA=0\n    winsB=0\n    for i in range(n):\n        scoreA,scoreB=simOneGame(probA,probB)\n        if scoreA > scoreB:\n            winsA = winsA + 1\n        else:\n            winsB = winsB + 1\n    return winsA,winsB\n\ndef simOneGame(probA,probB):\n    scoreA=0\n    scoreB=0\n    serving=\"A\"\n    while not gameOver(scoreA, scoreB):\n        if serving == \"A\": # A currently holds the serve\n            if random() < probA: # the random draw against probA decides whether the server wins the rally\n                scoreA = scoreA + 1\n            else:\n                serving = \"B\" # hand the serve over to B\n        else: # B currently holds the serve\n            if random() < probB:\n                scoreB = scoreB + 1\n            else:\n                serving = \"A\"\n    return scoreA, scoreB\n\ndef gameOver(a,b):\n    return a==15 or b==15\n\ndef printSummary(winsA,winsB):\n    n=winsA+winsB\n    print(\"\\nGame simulated:%d\"%n)\n    print(\"Wins for A:{0}({1:0.1%})\".format(winsA,winsA/n))\n    print(\"Wins for B:{0}({1:0.1%})\".format(winsB,winsB/n))\n\nif __name__ == '__main__':\n    main()\n\n\n# Sample run:\n# This program simulates a game between two.\n# There are two players, A and B.\n# Probability (a number between 0 and 1) is used\n# What is the prob.player A wins ?0.5\n# What is the prob.player B wins ?0.5\n# How many games to simulate ?10\n#\n# Game simulated:10\n# Wins for A:7(70.0%)\n# Wins for B:3(30.0%)\n","repo_name":"shenxiaolinZERO/PracticeOfPython","sub_path":"201803/180302PM-SimulateGame.py","file_name":"180302PM-SimulateGame.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42716266056","text":"\"\"\"\nGPACalc\nAlex\n3/4/2017\n\nThis file contains the class that handles the GUI of the program, GPACalc.\n\nFunctions included:\n\tcreateWidgets -- which handles the creation of all the widgets displayed.\n\tvalidateCred -- restricts the credit hour Entry field to only accept numbers from 1 - 240.\n\tvalidatePoint -- restricts the grade points Entry field to only accept numbers from 1 - 960.\n\tvalidateGrade -- restricts the grade Entry field to only accept the letters a,b,c,d,f and their caps variants.\n\tcaps -- produces the capitalized entry of the grade Entry field.\n\tadd -- adds the Entry fields of both credText and gradeText to the Listbox.\n\tdelete -- deletes the Listbox entries that are selected.\n\tcalculate -- performs the operations needed to calculate the GPA, total grade points and total credit hours. See the\n\tcalculate module for more information on how.\n\"\"\"\n\nfrom tkinter import *\nimport calculate\n\n\nclass GUI:\n\t\"\"\"\n\tThis class has everything related to the GUI of the program.\n\t\"\"\"\n\t\n\tdef __init__(self, master):\n\t\t\"\"\"\n\t\tThe initialization of the main window of the GUI.\n\n\t\tIncluded in this function is the size of the window, the title of the window, and the initialization of the\n\t\twidgets.\n\n\t\tparameters\n\t\t----------\n\t\tmaster: The main window to be initialized to.\n\t\t\"\"\"\n\t\t\n\t\tself.master = master\n\t\tmaster.title(\"GPA Calc\")\n\t\t\n\t\tmaster.minsize(width=600, height=480)\n\t\tself.createWidgets()\n\t\n\tdef createWidgets(self):\n\t\t\"\"\"\n\t\tThis function is in charge of creating all the widgets on the screen.\n\n\t\tThe function is segmented by the widgets, labels, 
texts, buttons, and the listbox included on the screen.\n\t\t\"\"\"\n\t\t\n\t\t# Labels (\"Text\")\n\t\tself.master.message = \"Enter the class credit hours and your grade earned.\"\n\t\tself.master.label_text = StringVar()\n\t\tself.master.label_text.set(self.master.message)\n\t\tself.master.label = Label(self.master, textvariable=self.master.label_text)\n\t\tself.master.credLabel = Label(self.master, text=\"Credit hours:\")\n\t\tself.master.gradeLabel = Label(self.master, text=\"Grade:\")\n\t\tself.master.orLabel = Label(self.master, text=\"or\")\n\t\tself.master.pointLabel = Label(self.master, text=\"Grade Points:\")\n\t\tself.master.creditOutput = Label(self.master, text=\"Total Credits:\")\n\t\tself.master.gradePointOutput = Label(self.master, text=\"Total Grade Points:\")\n\t\tself.master.gpa = Label(self.master, text=\"GPA:\")\n\t\t\n\t\tvcmdCred = self.master.register(self.validateCred)\n\t\tvcmdPoint = self.master.register(self.validatePoint)\n\t\tvcmdGrade = self.master.register(self.validateGrade)\n\t\t\n\t\t# Texts (\"Entry fields\")\n\t\tself.master.creditText = Entry(self.master, validate=\"key\", validatecommand=(vcmdCred, '%P'), width=5)\n\t\t\n\t\tself.master.var = StringVar()\n\t\tself.master.gradeText = Entry(self.master, textvariable=self.master.var, validate=\"key\",\n\t\t validatecommand=(vcmdGrade, '%P'), width=5)\n\t\tself.master.gradeText.bind(\"\", self.caps)\n\t\t\n\t\tself.master.pointText = Entry(self.master, validate=\"key\", validatecommand=(vcmdPoint, '%P'), width=5)\n\t\t\n\t\tself.master.creditOutputEntry = Entry(self.master, width=5)\n\t\tself.master.gradePointOutputEntry = Entry(self.master, width=5)\n\t\tself.master.gpaEntry = Entry(self.master, width=5)\n\t\t\n\t\t# Buttons\n\t\tself.master.addButton = Button(self.master, text=\"Add\")\n\t\tself.master.addButton.bind(\"\", self.add)\n\t\tself.master.addButton.bind(\"\", self.add)\n\t\tself.master.addButton.lift(aboveThis=self.master.gradeText)\n\t\t\n\t\tself.master.addCalcButton = Button(self.master, text=\"Calculate!\", command=self.calculate)\n\t\tself.master.addDeleteButton = Button(self.master, text='Delete', command=self.delete)\n\t\t\n\t\t# List Box\n\t\tself.master.scrollBar = Scrollbar(self.master)\n\t\tself.master.addListBox = Listbox(self.master, selectmode=EXTENDED, yscrollcommand=self.master.scrollBar.set,\n\t\t width=50)\n\t\t\n\t\t# Layout Manager for Labels\n\t\tself.master.label.grid(row=0, column=0, columnspan=2)\n\t\tself.master.credLabel.grid(row=1, column=0)\n\t\tself.master.pointLabel.grid(row=1, column=2)\n\t\tself.master.orLabel.grid(row=1, column=4)\n\t\tself.master.gradeLabel.grid(row=1, column=5, sticky=W)\n\t\t\n\t\tself.master.creditOutput.grid(row=12, column=0)\n\t\tself.master.gradePointOutput.grid(row=12, column=2)\n\t\tself.master.gpa.grid(row=12, column=6)\n\t\t\n\t\t# Layout Manager for Text Fields\n\t\tself.master.creditText.grid(row=1, column=1, sticky=W)\n\t\tself.master.pointText.grid(row=1, column=3, sticky=W)\n\t\tself.master.gradeText.grid(row=1, column=6, sticky=W)\n\t\t\n\t\tself.master.creditOutputEntry.grid(row=12, column=1, sticky=W)\n\t\tself.master.gradePointOutputEntry.grid(row=12, column=4, sticky=W)\n\t\tself.master.gpaEntry.grid(row=12, column=7, sticky=W)\n\t\t\n\t\t# Layout Manager for Buttons\n\t\tself.master.addButton.grid(row=1, column=7, sticky=E)\n\t\tself.master.addCalcButton.grid(row=5, column=2)\n\t\tself.master.addDeleteButton.grid(row=5, column=0)\n\t\t\n\t\t# Layout Manager for List Box\n\t\tself.master.addListBox.grid(row=3, 
columnspan=8)\n\t\tself.master.scrollBar.grid(row=3, column=5)\n\t\n\tdef validateCred(self, newText):\n\t\t\"\"\"\n\t\tFunction to see if the entry Field of credText is valid numbers from 1 - 240.\n\n\t\tparameters\n\t\t----------\n\t\t:param newText:\n\t\t\t\tThe text entered in the entry field for the credit hours Text.\n\n\t\treturns\n\t\t-------\n\t\t:return: True or False, if True entered text will work, if False, won't accept input.\n\t\t\"\"\"\n\t\t\n\t\tif not newText:\n\t\t\tself.master.creditHour = None\n\t\t\treturn True\n\t\t\n\t\ttry:\n\t\t\ttext = int(newText)\n\t\t\tif 1 <= text <= 240:\n\t\t\t\tself.master.creditHour = text\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept ValueError:\n\t\t\treturn False\n\t\n\tdef validatePoint(self, newText):\n\t\t\"\"\"\n\t\tFunction to see if the entry Field of pointText is valid numbers from 1 - 960.\n\n\t\tparameters\n\t\t----------\n\t\t:param newText:\n\t\t\t\tThe text entered in the entry field for the grade points Text.\n\n\t\treturns\n\t\t-------\n\t\t:return: True or False, if True entered text will work, if False, won't accept input.\n\t\t\"\"\"\n\t\t\n\t\tif self.master.gradeText.get():\n\t\t\treturn False\n\t\t\n\t\tif not newText:\n\t\t\tself.master.point = None\n\t\t\treturn True\n\t\t\n\t\ttry:\n\t\t\ttext = int(newText)\n\t\t\tif 1 <= text <= 960:\n\t\t\t\tself.master.point = text\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept ValueError:\n\t\t\treturn False\n\t\n\tdef validateGrade(self, newText):\n\t\t\"\"\"\n\t\tFunction to see if the entry Field of gradeText is valid entry.\n\n\t\tValid entry is included as A, B, C, D, F and all lowercase forms, note: although this function accepts lowercase\n\t\tthe entry will always be converted to uppercase.\n\n\t\tparameters\n\t\t----------\n\t\t:param newText:\n\t\t\t\tThe text entered in the entry field for the grade Text.\n\n\t\treturns\n\t\t-------\n\t\t:return: True or False, if True entered text will work, if False, won't accept input.\n\t\t\"\"\"\n\t\t\n\t\tif self.master.pointText.get():\n\t\t\treturn False\n\t\t\n\t\tif not newText:\n\t\t\tself.master.grade = None\n\t\t\treturn True\n\t\t\n\t\ttry:\n\t\t\tif newText in calculate.grades:\n\t\t\t\tself.master.grade = newText.upper()\n\t\t\t\t\n\t\t\t\treturn True\n\t\t\t\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\texcept ValueError:\n\t\t\treturn False\n\t\n\tdef caps(self, event):\n\t\t\"\"\"\n\t\tCapitalizes the entries of Entry Fields, of text variable currently used in the gradeText Entry field.\n\n\t\tParameters\n\t\t----------\n\t\t:param event:\n\t\t\t\tscans for the pressed key release event\n\n\t\treturns\n\t\t-------\n\t\t:return: No returns, but does convert the Entry Field applied to to uppercase entry.\n\t\t\"\"\"\n\t\tself.master.var.set(self.master.var.get().upper())\n\t\n\tdef add(self, event):\n\t\t\"\"\"\n\t\tAdd the text fields of Credit Hours and Grade Entry fields to the List Box\n\t\tNote: the weird spacing is due to the Listbox not accepting \\t formatting\n\t\t\"\"\"\n\t\t\n\t\tif self.master.creditText.get() and self.master.gradeText.get():\n\t\t\tself.master.addListBox.insert(END, \"%s hrs %s\" % (self.master.creditText.get(),\n\t\t\t self.master.gradeText.get()))\n\t\telif self.master.creditText.get() and self.master.pointText.get():\n\t\t\tself.master.addListBox.insert(END, \"%s hrs %s\" % (self.master.creditText.get(),\n\t\t\t self.master.pointText.get()))\n\t\n\tdef delete(self):\n\t\t\"\"\"\n\t\tDeletes the selected entries in the 
listbox.\n\t\t\"\"\"\n\t\t\n\t\titems = self.master.addListBox.curselection()\n\t\tpos = 0\n\t\t\n\t\tfor i in items:\n\t\t\tidx = int(i) - pos\n\t\t\tself.master.addListBox.delete(idx, idx)\n\t\t\tpos += 1\n\t\n\tdef calculate(self):\n\t\t\"\"\"\n\t\tCalculates the entirety of the listbox, summing the total credit hrs, the total grade points, and the gpa.\n\n\t\ttakes the listbox contents and outputs the results onto the gui. See the calculate module for more information\n\t\ton how the gpa and summations are being calculated.\n\t\t\"\"\"\n\t\t\n\t\titems = self.master.addListBox.get(0, END)\n\t\t\n\t\ttotalCredits = calculate.totalCredits(items)\n\t\ttotalGradePoints = calculate.totalGradePoint(items)\n\t\tgpa = calculate.gpaCalc(items)\n\t\t\n\t\tself.master.creditOutputEntry.delete(0, END)\n\t\tself.master.gradePointOutputEntry.delete(0, END)\n\t\tself.master.gpaEntry.delete(0, END)\n\t\t\n\t\tself.master.creditOutputEntry.insert(0, totalCredits)\n\t\tself.master.gradePointOutputEntry.insert(0, totalGradePoints)\n\t\tself.master.gpaEntry.insert(0, gpa)\n","repo_name":"A-Hopkins/GPACalc","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21272091698","text":"import string\n\nsymbols_low = string.ascii_lowercase\nsymbols_up = string.ascii_uppercase\nlists = [x for x in range(100)]\n\nn = int(input())\ns = input()\ns = list(s)\nk = int(input())\nres = []\nfor i in s:\n if i.isupper():\n res.append(symbols_up[(symbols_up.index(i) + k) % len(symbols_up)])\n elif i.islower():\n res.append(symbols_low[(symbols_low.index(i) + k) % len(symbols_low)])\n else:\n res.append(i)\nhellowrod = \"string\"\ns = \"\".join(res) \nprint(s)\n","repo_name":"Ni9Logic/Competitive-Programming","sub_path":"HackerRank/src/Caesar.py","file_name":"Caesar.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6176161396","text":"from socket import *\n\n\ndef main():\n # AF_INET is IPv4 and Socket_stream is TCP\n clientSocket = socket(AF_INET, SOCK_STREAM)\n\n # Before sending data we must establish a TCP connection\n SERVER_NAME = 'localhost'\n SERVER_PORT = 12000\n\n try:\n clientSocket.connect((SERVER_NAME, SERVER_PORT))\n\n # Get user input\n sentence = input('Input lowercase sentence: ')\n\n # Send sentence through the client's socket into the TCP connection\n clientSocket.send(sentence.encode())\n\n # recvfrom takes buffer size\n modifiedSentence = clientSocket.recv(1024)\n\n # Print out the modified sentence\n print(modifiedSentence.decode())\n\n # close the socket connection\n clientSocket.close()\n except:\n print('Make sure the server is open at the specified port number')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kaby2201/IKT204-G","sub_path":"assignment_2/TCPClient.py","file_name":"TCPClient.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36838938854","text":"from webapp.python_org_news import get_python_news_without_database #можно убрать\nfrom flask import Flask, render_template, flash, redirect, url_for\nfrom flask_login import LoginManager, login_required, login_user, logout_user, current_user\nfrom webapp.weather import weather_by_city\nfrom webapp.models import db, News, User\n\nfrom webapp.forms import LoginForm\n\ndef create_app():\n app = 
Flask(__name__)\n    app.config.from_pyfile('config.py')\n    db.init_app(app)\n\n    from . import models\n\n    with app.app_context():\n        db.create_all()\n\n# set up flask-login\n    login_manager = LoginManager()\n    login_manager.init_app(app)\n    login_manager.login_view = 'login'\n    \n    @login_manager.user_loader\n    def load_user(user_id):\n        return User.query.get(user_id)\n\n\n    @app.route(\"/\")\n    def index():\n        page_title = \"Новости Python\"\n        weather = weather_by_city(app.config[\"WEATHER_DEFAULT_CITY\"])\n        # news_list = get_python_news_without_database() # old version - scraped from the site\n        news_list = News.query.order_by(News.published.desc()).all() # load from the database, sorted by date\n\n        # if weather:\n        #     weather_text = f\"Сейчас {weather['temp_C']}, ощущается как {weather['FeelsLikeC']}\"\n        # else:\n        #     weather_text = \"Сервис погоды временно недоступен\"\n        return render_template(\"index.html\", weather=weather, page_title=page_title, news_list = news_list)\n\n    @app.route('/login')\n    def login():\n        if current_user.is_authenticated:\n            return redirect(url_for('index'))\n        title = \"Авторизация\"\n        login_form = LoginForm()\n        return render_template('login.html', page_title=title, form=login_form)\n\n    @app.route('/process-login', methods=['POST'])\n    def process_login():\n        form = LoginForm()\n        # if validation passed, look the user up in the database\n        if form.validate_on_submit():\n            user = User.query.filter_by(username=form.username.data).first()\n            if user and user.check_password(form.password.data):\n                login_user(user)\n                flash('Вы вошли на сайт')\n                return redirect(url_for('index'))\n            flash('Неправильное имя пользователя или пароль')\n            return redirect(url_for('login'))\n\n    @app.route('/logout')\n    def logout():\n        logout_user()\n        flash('Вы успешно разлогинились')\n        return redirect(url_for('index'))\n\n\n    @app.route('/admin')\n    @login_required\n    def admin_index():\n        if current_user.is_admin:\n            return 'Привет админ'\n        else:\n            return 'Ты не админ!'\n\n    return app\n\n\n\n\n\n\nif __name__==\"__main__\":\n    app = create_app()\n    app.run(debug=True)","repo_name":"DianaRatnikova/weather_project","sub_path":"webapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26402801407","text":"import sys\n\n\ndef main():\n    comp, guest, timer = map(int, sys.stdin.readline().split())\n    info = []\n    result = 0\n    for i in range(comp + 1):\n        info.append([])\n    for i in range(guest):\n        num, times = map(int, sys.stdin.readline().split())\n        info[num].append(times)\n\n    def counter(computer, time):\n        dp = [x for x in computer]\n        counts = 0\n        for i in range(len(computer)):\n            for j in range(len(computer)):\n                if computer[i] > computer[j]:\n                    dp[i] = max(dp[i], dp[j] + computer[i])\n        for d in range(len(dp)):\n            if dp[d] <= time and counts < dp[d]:\n                counts = dp[d]\n        return counts\n\n    for i in range(comp):\n        print(\"%d\" % (i + 1), counter(info[i + 1], timer) * 1000)\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"glossyyoon/DailyCoding","sub_path":"피씨방.py","file_name":"피씨방.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25972188896","text":"#!/usr/bin/env python3\n# We need argparse for parsing arguments from the command line\nimport argparse\n\n# Import the most basic definitions for defining a Setup which returns an Experiment\n# and has arguments parsed by a 
SetupArgumentParser\nfrom cminject.experiment import Experiment\nfrom cminject.base import Setup\nfrom cminject.utils.args import SetupArgumentParser, distribution_description\n\n# Import some concrete class implementations to be able to define a simple setup:\nfrom cminject.utils.distributions import constant, GaussianDistribution # Required types of distributions\nfrom cminject.sources import VariableDistributionSource # Generates particles from property distributions\nfrom cminject.detectors import SimpleZDetector # A detector which is positioned at some Z position\n# A device for fluid flow based on an HDF5 file, together with FlowType, an enumeration of the possible models to use\nfrom cminject.devices.fluid_flow import FluidFlowDevice, FlowType\n\n\nclass ExampleSetup(Setup):\n \"\"\"\n A simple setup which will simulate a single flow field with particles starting from a fixed position and velocity\n distribution and radius. The only parameters available are the flow field filename and the density (density) of the\n particle material. The density is there just to show how additional arguments can be added.\n \"\"\"\n @staticmethod\n def construct_experiment(main_args: argparse.Namespace, args: argparse.Namespace) -> Experiment:\n dt = 1e-5\n experiment = Experiment(number_of_dimensions=2, time_step=dt, time_interval=(0, 1))\n experiment.add_source(VariableDistributionSource(\n number_of_particles=main_args.nof_particles,\n position=args.pos,\n velocity=[GaussianDistribution(mu=0.0, sigma=1e-3), GaussianDistribution(mu=2, sigma=0.5)],\n radius=constant(5e-9), density=constant(args.rho)\n ))\n experiment.add_device(FluidFlowDevice(filename=args.filename, flow_type=FlowType.STOKES, brownian_motion=True))\n for z in [0.0, 0.01, 0.02]:\n experiment.add_detector(SimpleZDetector(z))\n\n return experiment\n\n @staticmethod\n def get_parser() -> SetupArgumentParser:\n parser = SetupArgumentParser()\n parser.add_argument('-f', '--filename', help='The filename of the flow field (HDF5).', type=str)\n parser.add_argument('--rho', help='The density of the particle material [kg/m^3].', type=float, default=1050.0)\n parser.add_argument('--pos', help='The position distributions in x/z space [m]', type=distribution_description,\n nargs=2, default=[GaussianDistribution(0, 1e-3), constant(0.0)])\n return parser\n","repo_name":"CFEL-CMI/cminject","sub_path":"examples/example_setup.py","file_name":"example_setup.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"37155726793","text":"def zigzagTraverse(array):\n # O(n) time | O(n) space where n is total elements in matrix\n height = len(array) - 1\n width = len(array[0]) - 1\n result = []\n row, col = 0, 0\n goingDown = True\n\n while not isOutOfBound(row, col, height, width):\n result.append(array[row][col])\n touchLeftBorder = col == 0\n touchRightBorder = col == width\n touchUpBorder = row == 0\n touchLowBorder = row == height\n if goingDown:\n if touchLeftBorder or touchLowBorder:\n goingDown = False\n if touchLowBorder:\n col += 1\n else:\n row += 1\n else:\n row += 1\n col -= 1\n else:\n if touchUpBorder or touchRightBorder:\n goingDown = True\n if touchRightBorder:\n row += 1\n else:\n col += 1\n else:\n row -= 1\n col += 1\n return result\n\n\ndef isOutOfBound(row, col, height, width):\n return row < 0 or col < 0 or row > height or col > 
width\n","repo_name":"tuanhiep/expertpy","sub_path":"101.zigzag_traverse.py","file_name":"101.zigzag_traverse.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38613822465","text":"from handlers import base_handlers\nfrom models import User\n\n\nclass HandleLogin(base_handlers.BaseAction):\n def handle_post(self, google_user):\n user = User.query(ancestor=User.PARENT_KEY).filter(User.email == google_user.email().lower()).get()\n if not user:\n newUser = User(parent=User.PARENT_KEY,\n name=google_user.nickname(),\n email=google_user.email().lower(),\n groups=[])\n newUser.put()\n self.redirect(\"/groups\") \n elif self.request.get(\"group-key\"):\n user.name = google_user.nickname()\n user.put()\n self.redirect('/groups?group-key='+self.request.get(\"group-key\"))\n else:\n self.redirect(\"/groups\") ","repo_name":"xniccum/chorewheel","sub_path":"handlers/login_handler.py","file_name":"login_handler.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16371947425","text":"#!/usr/bin/python3\n\n# *****************************************\n# Name: Ross Payne\n# Problem Set: Final Project\n# Due Date: Dec 16, 2021\n# *****************************************\n\n\nimport smbus2 as smbus\nimport math\nimport time\n\n# Power management registers\npower_mgmt_1 = 0x6b\npower_mgmt_2 = 0x6c\n\nbus = smbus.SMBus(1) # or bus = smbus.SMBus(1) for Revision 2 boards\naddress = 0x68 # This is the address value read via the i2cdetect command\n\n# Now wake the 6050 up as it starts in sleep mode\nbus.write_byte_data(address, power_mgmt_1, 0)\n\ndef read_byte(adr):\n return bus.read_byte_data(address, adr)\n\ndef read_word(adr):\n high = bus.read_byte_data(address, adr)\n low = bus.read_byte_data(address, adr+1)\n val = (high << 8) + low\n return val\n\ndef read_word_2c(adr):\n val = read_word(adr)\n if (val >= 0x8000):\n return -((65535 - val) + 1)\n else:\n return val\n\n# gyro_xout = read_word_2c(0x43)\n# gyro_yout = read_word_2c(0x45)\n# gyro_zout = read_word_2c(0x47)\n","repo_name":"rosspayn3/iot-final-project","sub_path":"gyrosensor.py","file_name":"gyrosensor.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7304955418","text":"#https://www.acmicpc.net/problem/2805\n#나무 자르기 [Silver 3]\nimport sys\nI = sys.stdin.readline\n\nn,m = map(int,I().split())\na = list(map(int,I().split()))\n\nstart = 0\nend = max(a)\n\nresult = -1\nmid = 0\n\nwhile start < end:\n mid = (start+end)//2\n\n tmpResult = 0\n for i in range(n):\n if (a[i]-mid) > 0:\n tmpResult += (a[i]-mid)\n\n if tmpResult == m:\n result = mid\n break\n elif tmpResult > m:\n result = mid\n start = mid+1\n\n elif tmpResult < m:\n end = mid\n\nprint(result)","repo_name":"dlwhd990/BOJ-2022","sub_path":"BOJ/[2805]나무자르기.py","file_name":"[2805]나무자르기.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14076976704","text":"# Write a function capitalize(lower_case_word) that takes the lower case word and returns the word with the first letter capitalized.\n# Eg., print(capitalize('word')) should print the word Word.\n# Then, given a line of lowercase ASCII words (text separated by a single space),\n# print it with the first letter of each word 
capitalized using the your own function capitalize().\n\n# In Python there is a function ord(character), which returns character code in the ASCII chart, and the function chr(code),\n# which returns the character itself from the ASCII code. For example, ord('a') == 97, chr(97) == 'a'.\n\ndef capitilizer(a):\n for i in range(len(a)):\n if i==0 or a[i-1]==\" \":\n a[i]=chr(ord(a[i])-32)\n return \"\".join(a)\na=[i for i in input()]\nprint(capitilizer(a))\n\n","repo_name":"furkan-durmus/Python-Functions-and-recursion","sub_path":"Simle Examples/Uppercase.py","file_name":"Uppercase.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5230992232","text":"'''\n14. Longest Common Prefix\n\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\nNote:\n\nAll given inputs are in lowercase letters a-z.\n'''\n\nclass Solution:\n def longestCommonPrefix1(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\" \n # solution 1 vertical scanning\n length = len(strs[0])\n for string in strs:\n if len(string) < length:\n length = len(string)\n\n if length == 0:\n return '' \n for i in range(length):\n currentChar = strs[0][i]\n for string in strs:\n if currentChar != string[i]:\n return string[:i]\n return strs[0][:length]\n\n def longestCommonPrefixTwo(self, strs):\n # solution 2 divide and conquer\n\n # list divided into two sublist\n length = len(strs)\n if length == 0:\n return \"\"\n if length == 1:\n return strs[0]\n midPoint = length//2 # python division /是精确除法,//是向下取整除法,%是求模\n left = strs[:midPoint]\n right = strs[midPoint:]\n # print ('here')\n leftMax = self.longestCommonPrefixTwo(left)\n rightMax = self.longestCommonPrefixTwo(right)\n return self.commonPrefix(leftMax, rightMax)\n \n def commonPrefix(self, leftString, rightString):\n length = min(len(leftString), len(rightString))\n if length == 0:\n return '' \n for i in range(length):\n currentChar = leftString[i]\n if currentChar != rightString[i]:\n return leftString[:i]\n return leftString[:length]\n\n \ns = [\"flower\",\"flow\",\"flight\"]\n\nsolution = Solution()\n# print(solution.longestCommonPrefixTwo(s))\n\n# print(solution.commonPrefix(s[0], s[1]))\n# print (\"\" != False)\n# print(not \"\" == True)","repo_name":"yiyi1026/projecteuler_ruby","sub_path":"leetcode/pythonSolutions/14.longest_common_prefix.py","file_name":"14.longest_common_prefix.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41082344006","text":"\"\"\"--------------------------------------------------------------------------\n-->Create a Slot of 10 to store Chain of Numbers that belong to each Slot\n--to efficiently search a number from a given set of number\n-->Logic -> Firstly store the numbers in the Slot. Since there are 10 Numbers\n--divide each number by 11 and the reminder put in the appropriate slot.\n--Create a Chain for each Slot to avoid Collision. If a number searched is\n--found then pop it or else push it. Use Map of Slot Numbers and Ordered\n--LinkedList to solve the problem. 
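(Note: taking n % 11 leaves remainders 0 through 10, i.e. 11 slots, matching the 11 empty nodes the code creates below.) 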
In the Figure Below, you can see number\n--77/11 reminder is 0 hence sits in the 0 slot while 26/11 remainder is 4\n--hence sits in slot 4\n-->Save the numbers in a file\n---------------------------------------------------------------------------\"\"\"\n\n\n# creating Node class\nclass Node:\n def __init__(self):\n self.data = None\n self.next = None\n# creating HashingList class\nclass HashingList:\n def __init__(self):\n # creating 11 empty nodes and inserting these empty nodes into array\n node0 = Node()\n node1 = Node()\n node2 = Node()\n node3 = Node()\n node4 = Node()\n node5 = Node()\n node6 = Node()\n node7 = Node()\n node8 = Node()\n node9 = Node()\n node10 = Node()\n # creating an array with nodes\n self.node_arr = [node0,node1,node2,node3,node4,node5,node6,node7,node8,node9,node10]\n\n # creating an insert function\n def insert(self,data):\n #creating the node and inserting the data and reffer next to None\n n = Node()\n n.data = data\n n.next = None\n # dividing the number with 11 and storing the number in index = remainder\n index = int(data%11)\n # reading the array element places the in the index\n node = self.node_arr[index]\n # if the node in that index is none adding the data to that node\n if node is None:\n node = n\n # else traverse to the end and adding the node to existing node\n # taking temporary variable to store node\n else:\n temp = node\n while temp.next is not None:\n temp = temp.next\n temp.next = n\n # creating a function to display the values\n def display(self):\n # Display the data regarding to the remainder and the index of the node array\n for i in range(len(self.node_arr)):\n # reading the node at the index of the node array\n temp = self.node_arr[i]\n if temp is not None:\n print(f'remainder {i} and the values are ',end=\"\")\n if temp.next is None:\n print(\"None\")\n while temp is not None:\n if temp.data is not None:\n print(temp.data,end=\",\")\n temp = temp.next\n print()\n # print method to store the values into a file\n def Print(self):\n # creating a file to store the values\n file = open(\"Hashing_function.txt\",'w')\n file.write(\"\")\n file.close()\n # writing the values into the file\n f = open('Hashing_function.txt',\"a+\")\n for i in range(len(self.node_arr)):\n # reading the node at the index of the node array\n temp = self.node_arr[i]\n if temp is not None:\n val =f'remainder {i} and the values are '\n f.write(val)\n if temp.next is None:\n f.write(\"None\")\n while temp is not None:\n if temp.data is not None:\n var = f',{temp.data}'\n f.write(var)\n temp = temp.next\n f.write(\"\\n\")\n\n# Driver program for above code\n# creating an object for HashingList class\nHash_obj = HashingList()\ncount = int(input(\"enter how many numbers want to insert\"))\n# reading the values from the user\nfor i in range(count):\n Hash_obj.insert(int(input(f'value{i+1}')))\n\n# Display the list\nHash_obj.display()\n\n# to store the values into a file\nHash_obj.Print()\n\n\n","repo_name":"Murali1125/Fellowship_programs","sub_path":"DataStructurePrograms/Hashing_function.py","file_name":"Hashing_function.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38301179815","text":"from setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.1.b1'\nDESCRIPTION = \"\"\"\\\nA simple django application that manages http/https statuses of different views.\n\"\"\"\n\nsetup(\n name='django-sslredirector',\n version=VERSION,\n 
description=DESCRIPTION,\n url='https://bitbucket.org/yilmazhuseyin/django-sslredirector',\n author='Huseyin Yilmaz',\n author_email='me@yilmazhuseyin.com',\n packages=find_packages(),\n include_package_data=True,\n license='GPLv3',\n keywords='django ssl redirect https http',\n install_requires=[\n 'Django>=1.3',\n ]\n)\n","repo_name":"huseyinyilmaz/django-ssl-redirector","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41944109632","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\ndef gen_graph(file, title):\n with open(file) as f:\n data = f.read()\n\n js = json.loads(data)\n lamb_dup = [js[k][0][3] for k in js]\n tau_dup = np.array([js[k][0][2] for k in js])\n tau = np.unique(tau_dup)\n lamb = np.unique(lamb_dup)\n L, T = np.meshgrid(lamb, tau)\n\n mO3 = np.ndarray((15,15))\n for t in range(15):\n ta = tau[t]\n for l in range(15):\n la = lamb[l]\n for k in js:\n if math.isclose(js[k][0][3],la) and math.isclose(js[k][0][2],ta):\n mO3[l][t] = float(k)\n\n _, ax = plt.subplots()\n levels = np.linspace(np.min(mO3[mO3 > 0]), np.max(mO3), 21)\n cs = ax.contour(T, L, mO3, levels=levels, linewidths=0.5)\n plt.clabel(cs, inline=1, fontsize=12)\n ax.set_xlabel(r'$\\tau$ ($\\frac{t}{c}$)')\n ax.set_ylabel(r'$\\lambda$ ($\\frac{c_t}{c_r}$)')\n ax.set_title(title)\n\n\ngen_graph('Optimal_ARS_3.txt', r'$m_{\\mathrm{pay}}\\Omega^3$ $(\\frac{\\mathrm{g}}{\\mathrm{s}^3})$ Contour Plot (Question 4)')\ngen_graph('Optimal_ARS_2.txt', r'$m_{\\mathrm{pay}}\\Omega^3$ $(\\frac{\\mathrm{g}}{\\mathrm{s}^3})$ Contour Plot (Question 5)')\ngen_graph('Optimal_ARS_1.txt', r'$m_{\\mathrm{pay}}\\Omega^3$ $(\\frac{\\mathrm{g}}{\\mathrm{s}^3})$ Contour Plot (Question 6)')\nplt.show()","repo_name":"wpeale/UEFC","sub_path":"Q4_5_6/contour.py","file_name":"contour.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73227743626","text":"\"\"\"\nGiven a non-empty array of integers, every element appears three times except for one, which appears exactly once. Find that single one.\n\nNote:\n\nYour algorithm should have a linear runtime complexity. 
Could you implement it without using extra memory?\n\nExample 1:\n\nInput: [2,2,3,2]\nOutput: 3\nExample 2:\n\nInput: [0,1,0,1,0,1,99]\nOutput: 99\n\n\"\"\"\n# Time Complexity: O(nlogn)\n# Space Complexity: O(1)\n\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n \n sorted_nums = sorted(nums)\n curr_num = sorted_nums[0]\n total = 0\n \n for i in range(0,len(sorted_nums),):\n if curr_num != sorted_nums[i]:\n if total < 3:\n return curr_num\n else:\n curr_num = sorted_nums[i]\n total = 0\n \n total += 1\n \n if total < 3:\n return curr_num\n ","repo_name":"christian-miljkovic/interview","sub_path":"Leetcode/Algorithms/Medium/Arrays/SingleNumberII.py","file_name":"SingleNumberII.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4253919762","text":"# ## IHTC compliance\n\n# current django version settings config docs:\n# https://docs.djangoproject.com/en/3.2/ref/settings/\n\n# Set crsf cookie to be secure\nCSRF_COOKIE_SECURE = True\n\n# Set session cookie to be secure\nSESSION_COOKIE_SECURE = True\n\n# Make browser end session when user closes browser\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\n# Set cookie expiry to 4 hours\nSESSION_COOKIE_AGE = 4 * 60 * 60 # 4 hours\n\n# Prevent client side JS from accessing CRSF token\nCSRF_COOKIE_HTTPONLY = True\n\n# Prevent client side JS from accessing session cookie (true by default)\nSESSION_COOKIE_HTTPONLY = True\n\n# Set content to no sniff\nSECURE_CONTENT_TYPE_NOSNIFF = True\n\n# Audit log middleware user field\nAUDIT_LOG_USER_FIELD = \"username\"\n\n# Default value for the X-Frame-Options header used by XFrameOptionsMiddleware.\nX_FRAME_OPTIONS = \"DENY\"\n\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n\nSECURE_HSTS_PRELOAD = True\n\n# SecurityMiddleware redirects all non-HTTPS requests\n# to HTTPS (except for those URLs matching a\n# regular expression listed in SECURE_REDIRECT_EXEMPT\nSECURE_SSL_REDIRECT = True\n\nSECURE_HSTS_SECONDS = 60 # 60 secs\n\nCSP_DEFAULT_SRC = (\"'self'\", \"https:\", \"data:\")\n\nCSP_SCRIPT_SRC_ELEM = (\"'self'\", \"https:\", \"data:\", \"'unsafe-inline'\")\n\nCSP_STYLE_SRC = (\"'self'\", \"'unsafe-inline'\", \"https:\", \"data:\", \"fonts.googleapis.com\")\n\nCSP_IMG_SRC = (\"'self'\", \"https:\", \"data:\", \"www.googletagmanager.com\", \"www.google-analytics.com\")\n\nCSP_SCRIPT_SRC = (\n \"'self'\",\n \"'unsafe-hashes'\",\n \"'unsafe-inline'\",\n \"'unsafe-eval'\",\n \"ajax.googleapis.com\",\n \"www.googletagmanager.com\",\n \"www.google-analytics.com\",\n \"https:\",\n)\n\nCSP_FONT_SRC = (\n \"'self'\",\n \"fonts.gstatic.com\",\n \"https:\",\n \"data:\",\n)\n\nCSP_INCLUDE_NONCE_IN = (\"script-src\",)\n","repo_name":"uktrade/trade-remedies-public","sub_path":"trade_remedies_public/config/settings/hardening.py","file_name":"hardening.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"32436240069","text":"import argparse\nimport os\n\nimport requests\nimport telegram\nimport vk_api\nfrom dotenv import load_dotenv\n\n\ndef post_vkontakte(message_text, image_path, vk_token, vk_album_id, vk_group_id):\n vk_session = vk_api.VkApi(token=vk_token)\n vk = vk_session.get_api()\n upload = vk_api.VkUpload(vk_session)\n\n photo = upload.photo(\n image_path,\n album_id=vk_album_id,\n group_id=vk_group_id\n )\n\n vk.wall.post(\n 
owner_id=-(vk_group_id),\n from_group=1,\n message=message_text,\n attachments=f'photo{photo[0][\"owner_id\"]}_{photo[0][\"id\"]}'\n )\n\n\ndef post_telegram(message_text, image_path, tg_token, tg_channel_id):\n bot = telegram.Bot(tg_token)\n with open(image_path, 'rb') as image:\n bot.send_photo(chat_id=tg_channel_id, photo=image)\n bot.send_message(chat_id=tg_channel_id, text=message_text)\n\n\ndef post_facebook(message_text, image_path, fb_token, fb_group_id):\n data = {\n 'access_token': fb_token,\n 'caption': message_text\n }\n\n with open(image_path, 'rb') as image:\n files = {'source': image}\n response = requests.post(\n 'https://graph.facebook.com/{}/photos'.format(fb_group_id),\n files=files,\n data=data\n )\n response.raise_for_status()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Скрипт позволяет опубликовать пост в Telegram-канале и в группах Вконтакте и Facebook'\n )\n parser.add_argument('message_text', type=str, help='Текст поста')\n parser.add_argument('image_path', type=str, help='Путь к картинке')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n load_dotenv()\n vk_token = os.getenv('VK_TOKEN')\n vk_group_id = int(os.getenv('VK_GROUP_ID'))\n vk_album_id = os.getenv('VK_ALBUM_ID')\n tg_token = os.getenv('TG_TOKEN')\n tg_channel_id = os.getenv('TG_CHANNEL_ID')\n fb_token = os.getenv('FB_TOKEN')\n fb_group_id = os.getenv('FB_GROUP_ID')\n\n args = parse_args()\n\n post_vkontakte(args.message_text, args.image_path, vk_token, vk_album_id, vk_group_id)\n post_telegram(args.message_text, args.image_path, tg_token, tg_channel_id)\n post_facebook(args.message_text, args.image_path, fb_token, fb_group_id)\n","repo_name":"tumkir/publish_posts","sub_path":"publish_posts.py","file_name":"publish_posts.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27807742682","text":"##### 这部分脚本用于从理论上给出关于前面mock数据的统计量的分析\n##### 主要的数据来源:利用mock的数据,结合SHMR关系,从mock的mstar求出\\\n### 在相应理论下的mh,在计算的时候,需要用的是反函数。此时需要利用mock给的数据中\\\n#### mh的部分计算出可用的mstar\n####链接路径引入\nimport sys\n#sys.path.insert(0,'D:/Python1/pydocument/seniorproject_quenching2/practice/_pycache_/session_1')\nsys.path.insert(0,'D:/Python1/pydocument/seniorproject_quenching2/practice')\n##导入自己编写的脚本,需要加入这两句,一句声明符号应用,然后声明需要引入文-\n##件的路径具体到文件夹\n###库函数调用\nimport os.path\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nfrom scipy import stats as st\nimport matplotlib.gridspec as gridspec\nimport pandas as pa\nfrom scipy import interpolate as sinter\n###mock_相关脚本调用,主要用于导入数据\nfrom mock_data_reshow import read_mock_hmf\n### section1:导入数据产生需要使用的四个数组:mock_mstar——theory_mh,利用此关系反插的mock_mh——theory_mstar\ndef doload_mock_data(tt):\n mockfile = 'D:/Python1/pydocument/seniorproject_quenching2/practice/iHODcatalog_bolshoi.h5'\n Mh_arr, dndlnMh_arr, use_data = read_mock_hmf(mockfile, mmin=1.e9, mmax=1.e16, \n nmbin=101, h=0.701, rcube=250.0) \n return use_data\ndef bins_dic(tt):\n###该部分需要注意mass function的求解仅仅归一化是不够的,还要注意单位转化\n###单位转化过程中mass以Msolar/h为单位,长度以Mpc/h为单位,在模拟数据中盒子边长250Mpc\n use_data = doload_mock_data(tt=True)\n N = np.array(use_data['b'].shape)\n print('size_con=',N)\n ###查看数据量:529308*8~=4000000\n ###把主晕取出来,并把主晕对应的物理量也取出来\n ix = use_data['d']>0\n _halos = use_data['d']\n ##取出主晕\n main_halo = np.array(_halos[ix])\n ##下面把与主晕有关的物理量取出来\n #g_color = use_data['c']\n ##取出主晕下星系颜色\n #gcolor = np.array(g_color[ix])\n ###尝试读取数据的处理\n M_star = use_data['e']\n 
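## column 'e' holds the stellar masses; like the halo masses above they are kept\n    ## only where the ix (main-halo) mask is True, and the plot labels below treat\n    ## them as lg(M*) in units of Msolar h^-2\n    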
##取出恒星质量\n mstar = np.array(M_star[ix])\n ###求出中央星系的比例\n n = np.array(main_halo.shape)\n print('size_mainhalo=',n)\n frac_ = n[0]/N[0]\n print('f_c=',frac_)\n #con_c = use_data['b']\n #con = np.array(con_c[ix])\n ###盒子长度\n #h = 0.7\n #L = 250*h##单位Mpc/h\n L = 1\n###下面为质量函数\n####mh-function\n #value_mh = plt.hist(main_halo,101,normed=True)\n #plt.show()\n value_mh = st.binned_statistic(main_halo,main_halo,statistic='count',bins=100)\n f_N_mh = np.array(value_mh[0])\n x_N_mh = np.array(value_mh[1])\n #print(np.sum(value_mh[0]*(x_N_mh[2]-x_N_mh[1])))\n S0 = np.sum(value_mh[0]*(x_N_mh[2]-x_N_mh[1]))\n media_mh = np.zeros(value_mh[0].size,dtype=np.float)\n dn_dlgMh = np.zeros(value_mh[0].size,dtype=np.float)\n for k in range(1,value_mh[0].size):\n media_mh[k] = (x_N_mh[k]+x_N_mh[k-1])/2\n dn_dlgMh[k] = f_N_mh[k]/(x_N_mh[1]-x_N_mh[0])\n media_mh[0] = media_mh[1]\n dn_dlgMh[0] = f_N_mh[0]/(x_N_mh[1]-x_N_mh[0])\n dn_dlgMh = dn_dlgMh*np.log(10)/L**3\n f_N_mh1 = f_N_mh/S0\n p_lgMh = f_N_mh1###已完成归一化\n####mstar-function \n #value_ms = plt.hist(mstar,101,normed=True)\n #plt.show()\n value_ms = st.binned_statistic(mstar,mstar,statistic='count',bins=100)\n f_N_ms = np.array(value_ms[0])\n x_N_ms = np.array(value_ms[1])\n #print(np.sum(value_ms[0]*(x_N_ms[2]-x_N_ms[1])))\n S1 = np.sum(value_ms[0]*(x_N_ms[2]-x_N_ms[1]))\n media_ms = np.zeros(value_ms[0].size,dtype=np.float)\n dn_dlgms = np.zeros(value_ms[0].size,dtype=np.float)\n for k in range(1,value_ms[0].size):\n media_ms[k] = (x_N_ms[k]+x_N_ms[k-1])/2\n dn_dlgms[k] = f_N_ms[k]/(x_N_ms[1]-x_N_ms[0])\n media_ms[0] = media_ms[1]\n dn_dlgms[0] = f_N_ms[0]/(x_N_ms[1]-x_N_ms[0])\n dn_dlgms = dn_dlgms*np.log(10)/L**3\n f_N_ms1 = f_N_ms/S1\n p_lgms = f_N_ms1##已完成归一化\n print('load mass function successfully')\n return media_mh,dn_dlgMh,media_ms,dn_dlgms,p_lgMh,p_lgms\ndef mass_function_use(uu):\n media_mh,dn_dlgMh,media_ms,dn_dlgms,p_lgMh,p_lgm = bins_dic(tt=True)\n plt.plot(media_mh,dn_dlgMh,label='dn_dlgMh')\n plt.legend(loc=1) \n plt.yscale('log')\n plt.xlabel(r'$lgM_h[M_\\odot h^{-1}]$')\n plt.ylabel(r'$dn/dlgM_h$')\n plt.grid()\n plt.title('Mh_fun')\n #plt.savefig('fun_Mh',dpi=600)\n plt.show()\n plt.plot(media_ms,dn_dlgms,label=r'$dn_dlgM_\\ast$')\n plt.legend(loc=1) \n plt.yscale('log')\n plt.xlabel(r'$lgM_\\ast[M_\\odot h^{-2}]$')\n plt.ylabel(r'$dn/dlgM_\\ast$')\n plt.grid()\n plt.title('stellarmass_fun')\n #plt.savefig('fun_Ms',dpi=600)\n plt.show() \n return\ndef the_probability(pp):\n ####自假设数据\n mhalo = np.logspace(10,15,1000)\n ms = np.logspace(5,12,950)\n M0 = 2*10**10###单位是Msolar/h^2\n M1 = 1.3*10**12###单位是Msolar/h\n belta = 0.33\n sigma = 0.42\n gamma = 1.21\n ##下面计算为了和模拟数据保持一直,把SHMR的质量转为以10为底的对数\n Theory_Mh = 10**(np.log10(M1)+belta*(np.log10(ms)-np.log10(M0))+\\\n ((ms/M0)**sigma/(1+(ms/M0)**(-gamma))-1/2))\n ###反过来求出需要使用的理论上的mstar\n Theory_mstar = np.interp(mhalo,Theory_Mh,ms)\n ita = -0.04\n sigma_ms = np.zeros(len(mhalo),dtype=np.float)\n for k in range(0,len(mhalo)):\n if mhalo[k]\n dmh = (10**_halo[-1]-10**_halo[0])/len(_halo)\n Mh_ms = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n #dMh = (np.log(10)*10**_halo)*(_halo[-1]-_halo[0])/len(_halo)\n Mh_ms[k] = np.sum(p_Mh_ms[:,k]*10**_halo*dmh)/(np.sum(p_Mh_ms[:,k]*dmh))\n ###下面求errorbar\n Mh_err = np.zeros((len(ms_use),2),dtype=np.float)\n for k in range(len(ms_use)):\n mh_err = _halo\n p_m = p_Mh_ms[:,k]\n F_m1 = np.zeros(len(mh_err),dtype=np.float)\n F_m1[0] = 0\n for t in range(len(_halo)):\n F_m1[t] = F_m1[t-1]+p_m[t]*dmh/np.sum(p_m*dmh)\n va_err1 = 
np.interp(0.1585,F_m1,mh_err)-np.log10(Mh_ms[k])\n va_err2 = np.interp(0.8415,F_m1,mh_err)-np.log10(Mh_ms[k])\n Mh_err[k,:] = np.array([va_err1,va_err2]) \n ###尝试求解\n lnMh_ms = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n #dMh = (np.log(10)*10**_halo)*(_halo[-1]-_halo[0])/len(_halo)\n lnMh_ms[k] = np.sum(p_Mh_ms[:,k]*np.log(10**_halo)*dmh)/(np.sum(p_Mh_ms[:,k]*dmh))\n lnMh_ms = lnMh_ms/np.log(10)\n ###下面求errorbar\n lnMh_err = np.zeros((len(ms_use),2),dtype=np.float)\n for k in range(len(ms_use)):\n mh_err = _halo\n p_m = p_Mh_ms[:,k]\n F_m2 = np.zeros(len(mh_err),dtype=np.float)\n F_m2[0] = 0\n for t in range(len(_halo)):\n F_m2[t] = F_m2[t-1]+p_m[t]*dmh/np.sum(p_m*dmh)\n va_err3 = np.interp(0.1585,F_m2,mh_err)-lnMh_ms[k]\n va_err4 = np.interp(0.8415,F_m2,mh_err)-lnMh_ms[k]\n lnMh_err[k,:] = np.array([va_err3,va_err4]) \n Mh_err[np.isnan(Mh_err)]=0\n Mh_err[np.isinf(Mh_err)]=0\n lnMh_err[np.isnan(lnMh_err)]=0\n lnMh_err[np.isinf(lnMh_err)]=0\n return ms_use,_halo,Mh_ms,Mh_err,lnMh_ms,lnMh_err\ndef fig_R_Mh_Ms(d):\n ms_use,_halo,Mh_ms,Mh_err,lnMh_ms,lnMh_err = R_Mh_Ms(c=True) \n plt.plot(ms_use,np.log10(Mh_ms))\n #plt.xscale('log')\n #plt.yscale('log')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\\odot h^{-1}]$')\n plt.show()\n plt.errorbar(ms_use,np.log10(Mh_ms),yerr=abs(Mh_err.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$< M_h-M_\\ast >$')\n #plt.xscale('log')\n #plt.yscale('log')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h[M_\\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory-Mh-Ms',dpi=600)\n plt.show()\n plt.plot(ms_use,lnMh_ms)\n #plt.xscale('log')\n #plt.yscale('log')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lg(lnM_h[M_\\odot h^{-1}])$')\n plt.show() \n plt.errorbar(ms_use,lnMh_ms,yerr=abs(lnMh_err.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$< lnM_h-M_\\ast >$')\n #plt.xscale('log')\n #plt.yscale('log')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lg(lnM_h[M_\\odot h^{-1}])$')\n plt.legend(loc=2)\n #plt.savefig('Theory-lnMh-Ms',dpi=600)\n plt.show()\n plt.figure()\n plt.plot(ms_use,np.log10(Mh_ms),'r-',label=r'$$')\n plt.fill_between(ms_use,np.log10(Mh_ms)+Mh_err[:,0],np.log10(Mh_ms)+Mh_err[:,1],\n facecolor='r',alpha=0.2)\n plt.plot(ms_use,lnMh_ms,'b--',label=r'$$')\n plt.fill_between(ms_use,lnMh_ms+lnMh_err[:,0],lnMh_ms+lnMh_err[:,1],\n facecolor='b',alpha=0.2)\n plt.legend(loc=4)\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lg(lnM_h[M_\\odot h^{-1}])$')\n #plt.savefig('Theory-Mh_ms-comparation',dpi=600)\n plt.show()\n return\n#########################\n#下面分析红蓝两��序列的Mh-M*质量关系\ndef func_fred(f):\n _halo,ms_use,N_ms_Mh,p_joint,p_Mh_ms,p_ms_Mh,P_Joint3,P_Joint3_1,p_ms_Mh1,p_Mh_ms1,p_joint1\\\n = Theory_fun3(ff3=True)\n##考虑恒星质量是主要quenching的原因:f_red_ms,ms_q表示quenching的临界质量\n##下面的代码中Mh,Ms中M大写的表示条件概率,小写的msmh连在一起表示联合分布概率,分开的表示边界分布\n##此外,标号1的表示以恒星质量为主要quenching机制,标号2的表示以暗晕质量为机制\n ms_q = 10.55#单位为 Msolar h^-2\n miu_ms = 0.69\n f_red_ms = 1-np.exp(-(10**ms_use/10**ms_q)**miu_ms)\n dms = (10**ms_use[-1]-10**ms_use[0])/len(ms_use)\n S1 = np.zeros(len(_halo),dtype=np.float)\n for k in range(len(_halo)):\n S1[k] = np.sum(f_red_ms*p_joint[k,:]*dms) \n dmh = (10**_halo[-1]-10**_halo[0])/len(_halo)\n S2 = np.sum(S1*dmh)\n tot_f_red_ms = S2\n ###for red sequence,the joint distribution as flow\n p_red_msmh1 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k 
in range(len(_halo)):\n p_red_msmh1[k,:] = p_joint[k,:]*f_red_ms/tot_f_red_ms\n #求恒星质量的边界分布\n p_red_ms1 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n p_red_ms1[k] = np.sum(p_red_msmh1[:,k]*dmh)\n #the condition distribution:p_red(Mh|m*) as red_p_Mh_ms\n red_p_Mh_ms1 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n red_p_Mh_ms1[:,k] = p_red_msmh1[:,k]/p_red_ms1[k]\n red_p_Mh_ms1[np.isnan(red_p_Mh_ms1)]=0\n red_p_Mh_ms1[np.isinf(red_p_Mh_ms1)]=0\n ###对条件概率做归一化\n red_pMhms1 = np.zeros((len(_halo),len(ms_use)),dtype=np.float) \n for k in range(len(ms_use)):\n s = np.sum(red_p_Mh_ms1[:,k]*dmh)\n red_pMhms1[:,k] = red_p_Mh_ms1[:,k]/s\n #############\n ###下面求各个恒星质量区间的理论上的暗晕质量\n red_Mh_ms1 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n red_Mh_ms1[k] = np.sum(red_p_Mh_ms1[:,k]*10**_halo*dmh)/(np.sum(red_p_Mh_ms1[:,k]*dmh))\n ###下面求errorbar\n '''\n red_Mh_err1 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)): \n mh_err = _halo\n p_m = red_p_Mh_ms1[:,k]\n mh = np.sum(dMh*p_m*(10**mh_err-red_Mh_ms1[k])**2)/np.sum(p_m*dMh)\n mh = np.sqrt(mh)\n red_Mh_err1[k] = np.log10(mh)\n '''\n red_Mh_err1 = np.zeros((len(ms_use),2),dtype=np.float)\n for k in range(len(ms_use)):\n mh_err = _halo\n p_m = red_p_Mh_ms1[:,k]\n F_m1 = np.zeros(len(mh_err),dtype=np.float)\n F_m1[0] = 0\n for t in range(len(_halo)):\n F_m1[t] = F_m1[t-1]+p_m[t]*dmh/np.sum(p_m*dmh)\n va_err1 = np.interp(0.1585,F_m1,mh_err)-np.log10(red_Mh_ms1[k])\n va_err2 = np.interp(0.8415,F_m1,mh_err)-np.log10(red_Mh_ms1[k])\n red_Mh_err1[k,:] = np.array([va_err1,va_err2]) \n ####此时对blue sequence的求解\n S3 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n S3[k] = np.sum(p_joint[:,k]*dmh)\n P_tot1 = np.sum(S3*dms)\n #求解该情况下联合分布概率密度\n p_blue_msmh1 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n f_blue_ms = 1 - f_red_ms\n for k in range(len(_halo)):\n p_blue_msmh1[k,:] = f_blue_ms*p_joint[k,:]/(P_tot1 - tot_f_red_ms)\n #求解恒星质量的边界分布\n p_blue_ms1 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n p_blue_ms1[k] = np.sum(p_blue_msmh1[:,k]*dmh)\n #the condition distribution:p_blue(Mh|m*) as blue_p_Mh_ms\n blue_p_Mh_ms1 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n blue_p_Mh_ms1[:,k] = p_blue_msmh1[:,k]/p_blue_ms1[k]\n blue_p_Mh_ms1[np.isnan(blue_p_Mh_ms1)]=0\n blue_p_Mh_ms1[np.isinf(blue_p_Mh_ms1)]=0\n #对条件概率归一化\n blue_pMhms1 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n s = np.sum(blue_p_Mh_ms1[:,k]*dmh)\n blue_pMhms1[:,k] = blue_p_Mh_ms1[:,k]/s\n #########\n ###下面求各个恒星质量区间的理论上的暗晕质量\n blue_Mh_ms1 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n blue_Mh_ms1[k] = np.sum(blue_p_Mh_ms1[:,k]*10**_halo*dmh)/(np.sum(blue_p_Mh_ms1[:,k]*dmh))\n ###下面求errorbar\n '''\n blue_Mh_err1 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)): \n mh_err = _halo\n p_m = blue_p_Mh_ms1[:,k]\n mh = np.sum(dMh*(10**mh_err-blue_Mh_ms1[k])**2*p_m)/np.sum(p_m*dMh)\n mh = np.sqrt(mh)\n blue_Mh_err1[k] = np.log10(mh)\n '''\n blue_Mh_err1 = np.zeros((len(ms_use),2),dtype=np.float)\n for k in range(len(ms_use)):\n mh_err = _halo\n p_m = blue_p_Mh_ms1[:,k]\n F_m2 = np.zeros(len(mh_err),dtype=np.float)\n F_m2[0] = 0\n for t in range(len(_halo)):\n F_m2[t] = F_m2[t-1]+p_m[t]*dmh/np.sum(p_m*dmh)\n va_err3 = np.interp(0.1585,F_m2,mh_err)-np.log10(blue_Mh_ms1[k])\n va_err4 = np.interp(0.8415,F_m2,mh_err)-np.log10(blue_Mh_ms1[k])\n 
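# Halo-mass quenching case below: the red fraction now depends on halo mass,\n#     f_red(Mh) = 1 - exp[-(Mh / 10**mh_q)**miu_mh],\n# the same functional form as f_red_ms above with (mh_q, miu_mh) replacing\n# (ms_q, miu_ms); the red/blue joint, marginal and conditional distributions\n# are then rebuilt from this weight in the same way as in the stellar-mass case.\n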
blue_Mh_err1[k,:] = np.array([va_err3,va_err4]) \n####################\n##考虑暗晕质量是主要的quenching的原因:f_red_mh,mh_q表示quenching的临界质量\n #mh_q = 13.5\n #miu_mh = 1.25###for M16 comparation\n mh_q = 11.25\n miu_mh = 0.6###for mock data\n f_red_mh = 1-np.exp(-(10**_halo/10**mh_q)**miu_mh)\n S4 = np.zeros(len(_halo),dtype=np.float)\n for k in range(len(ms_use)):\n S4[k] = np.sum(f_red_mh*p_joint[:,k]*dmh) \n tot_f_red_mh = np.sum(S4*dms)\n #for red sequence,the joint distribution as flow \n p_red_msmh2 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n p_red_msmh2[:,k] = p_joint[:,k]*f_red_mh/tot_f_red_mh\n #求解横行质量的边界分布\n p_red_ms2 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n p_red_ms2[k] = np.sum(p_red_msmh2[:,k]*dmh)\n #the condition distribution:p_red(Mh|m*) as red_p_Mh_ms\n red_p_Mh_ms2 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n red_p_Mh_ms2[:,k] = p_red_msmh2[:,k]/p_red_ms2[k]\n red_p_Mh_ms2[np.isnan(red_p_Mh_ms2)]=0\n red_p_Mh_ms2[np.isinf(red_p_Mh_ms2)]=0\n ###对条件概率做归一化\n red_pMhms2 = np.zeros((len(_halo),len(ms_use)),dtype=np.float) \n for k in range(len(ms_use)):\n s = np.sum(red_p_Mh_ms2[:,k]*dmh)\n red_pMhms2[:,k] = red_p_Mh_ms2[:,k]/s\n #############\n ###下面求各个恒星质量区间的理论上的暗晕质量\n red_Mh_ms2 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n red_Mh_ms2[k] = np.sum(red_p_Mh_ms2[:,k]*10**_halo*dmh)/(np.sum(red_p_Mh_ms2[:,k]*dmh))\n ###下面求errorbar\n '''\n red_Mh_err2 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)): \n mh_err = _halo\n p_m = red_p_Mh_ms2[:,k]\n mh = np.sum(dMh*(10**mh_err-red_Mh_ms2[k])**2*p_m)/np.sum(p_m*dMh)\n mh = np.sqrt(mh)\n red_Mh_err2[k] = np.log10(mh)\n '''\n red_Mh_err2 = np.zeros((len(ms_use),2),dtype=np.float)\n for k in range(len(ms_use)):\n mh_err = _halo\n p_m = red_p_Mh_ms2[:,k]\n F_m3 = np.zeros(len(mh_err),dtype=np.float)\n F_m3[0] = 0\n for t in range(len(_halo)):\n F_m3[t] = F_m3[t-1]+p_m[t]*dmh/np.sum(p_m*dmh)\n va_err5 = np.interp(0.1585,F_m3,mh_err)-np.log10(red_Mh_ms2[k])\n va_err6 = np.interp(0.8415,F_m3,mh_err)-np.log10(red_Mh_ms2[k])\n red_Mh_err2[k,:] = np.array([va_err5,va_err6]) \n ####此时对blue sequence的求解\n S5 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n S5[k] = np.sum(p_joint[:,k]*dmh)\n P_tot2 = np.sum(S5*dms)\n #求解该情况下联合分布概率密度\n f_blue_mh = 1-f_red_mh\n p_blue_msmh2 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n p_blue_msmh2[:,k] = f_blue_mh*p_joint[:,k]/(P_tot2 - tot_f_red_mh)\n #求解恒星质量的边界分布\n p_blue_ms2 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n p_blue_ms2[k] = np.sum(p_blue_msmh2[:,k]*dmh)\n #the condition distribution:p_blue(Mh|m*) as blue_p_Mh_ms\n blue_p_Mh_ms2 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n blue_p_Mh_ms2[:,k] = p_blue_msmh2[:,k]/p_blue_ms2[k]\n blue_p_Mh_ms2[np.isnan(blue_p_Mh_ms2)]=0\n blue_p_Mh_ms2[np.isinf(blue_p_Mh_ms2)]=0\n #对条件概率归一化\n blue_pMhms2 = np.zeros((len(_halo),len(ms_use)),dtype=np.float)\n for k in range(len(ms_use)):\n s = np.sum(blue_p_Mh_ms2[:,k]*dmh)\n blue_pMhms2[:,k] = blue_p_Mh_ms2[:,k]/s\n #########\n ###下面求各个恒星质量区间的理论上的暗晕质量\n blue_Mh_ms2 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)):\n blue_Mh_ms2[k] = np.sum(blue_p_Mh_ms2[:,k]*10**_halo*dmh)/(np.sum(blue_p_Mh_ms2[:,k]*dmh))\n ###下面求errorbar\n '''\n blue_Mh_err2 = np.zeros(len(ms_use),dtype=np.float)\n for k in range(len(ms_use)): \n mh_err = _halo\n p_m 
= blue_p_Mh_ms2[:,k]\n mh = np.sum(dMh*(10**mh_err-blue_Mh_ms2[k])**2*p_m)/np.sum(p_m*dMh)\n mh = np.sqrt(mh)\n blue_Mh_err2[k] = np.log10(mh)\n '''\n blue_Mh_err2 = np.zeros((len(ms_use),2),dtype=np.float)\n for k in range(len(ms_use)):\n mh_err = _halo\n p_m = blue_p_Mh_ms2[:,k]\n F_m4 = np.zeros(len(mh_err),dtype=np.float)\n F_m4[0] = 0\n for t in range(len(_halo)):\n F_m4[t] = F_m4[t-1]+p_m[t]*dmh/np.sum(p_m*dmh)\n va_err7 = np.interp(0.1585,F_m4,mh_err)-np.log10(blue_Mh_ms2[k])\n va_err8 = np.interp(0.8415,F_m4,mh_err)-np.log10(blue_Mh_ms2[k])\n blue_Mh_err2[k,:] = np.array([va_err7,va_err8]) \n return f_red_ms,tot_f_red_ms,p_red_msmh1,p_red_ms1,red_p_Mh_ms1,red_pMhms1,red_Mh_ms1,red_Mh_err1,\\\n f_blue_ms,p_blue_msmh1,p_blue_ms1,blue_p_Mh_ms1,blue_pMhms1,blue_Mh_ms1,blue_Mh_err1,\\\n f_red_mh,tot_f_red_mh,p_red_msmh2,p_red_ms2,red_p_Mh_ms2,red_pMhms2,red_Mh_ms2,red_Mh_err2,\\\n f_blue_mh,p_blue_msmh2,p_blue_ms2,blue_p_Mh_ms2,blue_pMhms2,blue_Mh_ms2,blue_Mh_err2,\\\n _halo,ms_use\ndef fig_func_fred(g):\n f_red_ms,tot_f_red_ms,p_red_msmh1,p_red_ms1,red_p_Mh_ms1,red_pMhms1,red_Mh_ms1,red_Mh_err1,\\\n f_blue_ms,p_blue_msmh1,p_blue_ms1,blue_p_Mh_ms1,blue_pMhms1,blue_Mh_ms1,blue_Mh_err1,\\\n f_red_mh,tot_f_red_mh,p_red_msmh2,p_red_ms2,red_p_Mh_ms2,red_pMhms2,red_Mh_ms2,red_Mh_err2,\\\n f_blue_mh,p_blue_msmh2,p_blue_ms2,blue_p_Mh_ms2,blue_pMhms2,blue_Mh_ms2,blue_Mh_err2,\\\n _halo,ms_use = func_fred(f=True)\n###作图显示两类quenching主导机制下的f_red,f_blue的变化情况\n plt.plot(ms_use,f_red_ms,'r',label=r'$f_{M_\\ast}^{red}$')\n plt.plot(ms_use,f_blue_ms,'b',label=r'$f_{M_\\ast}^{blue}$')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel('fractio-stellar-mass')\n plt.legend(loc=4)\n #plt.savefig('Theory_f_red_stellar_mass',dpi=600)\n plt.show()\n plt.plot(_halo,f_red_mh,'r',label=r'$f_{M_h}^{red}$')\n plt.plot(_halo,f_blue_mh,'b',label=r'$f_{M_h}^{blue}$')\n plt.xlabel(r'$lgM_h [M_\\odot h^{-1}]$')\n plt.ylabel('fractio-halo-mass')\n plt.legend(loc=4)\n #plt.savefig('Theory_f_red_halo_mass',dpi=600)\n plt.show()\n###做图显示stellar mass主导quenching的情况\n plt.plot(ms_use,np.log10(red_Mh_ms1),label=r'$QE-M_\\ast-red$')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\\odot h^{-1}]$')\n plt.legend(loc=2)\n plt.show()\n plt.errorbar(ms_use,np.log10(red_Mh_ms1),yerr=abs(red_Mh_err1.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\\ast > ms$')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_ms_red',dpi=600)\n plt.show()\n\n plt.plot(ms_use,np.log10(blue_Mh_ms1),label=r'$QE-M_\\ast-blue$')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\\odot h^{-1}]$')\n plt.legend(loc=2)\n plt.show()\n plt.errorbar(ms_use,np.log10(blue_Mh_ms1),yerr=abs(blue_Mh_err1.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\\ast > ms$')\n plt.xlabel(r'$lgM_\\ast [M_\\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_ms_blue',dpi=600)\n plt.show()\n \n plt.plot(ms_use,np.log10(red_Mh_ms1),'r-',label=r'$QE-M_\\ast-red$')\n plt.fill_between(ms_use,np.log10(red_Mh_ms1)+red_Mh_err1[:,0],np.log10(red_Mh_ms1)+red_Mh_err1[:,1],\n facecolor='r',alpha=0.2)\n plt.plot(ms_use,np.log10(blue_Mh_ms1),'b--',label=r'$QE-M_\\ast-blue$')\n plt.fill_between(ms_use,np.log10(blue_Mh_ms1)+blue_Mh_err1[:,0],np.log10(blue_Mh_ms1)+blue_Mh_err1[:,1],\n 
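#Both quenching cases in func_fred above use a red fraction of the form 1-exp(-(M/M_q)**mu); a minimal standalone sketch of that ingredient (the helper name is illustrative; the parameter values in the example are the ones adopted in this file):\ndef red_fraction(lg_mass,lg_mq,miu):\n #red (quenched) fraction as a function of log10 mass\n return 1-np.exp(-(10**lg_mass/10**lg_mq)**miu)\n#e.g. f_red_ms = red_fraction(ms_use,10.55,0.69) and f_red_mh = red_fraction(_halo,11.25,0.6)\n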
def fig_func_fred(g):\n f_red_ms,tot_f_red_ms,p_red_msmh1,p_red_ms1,red_p_Mh_ms1,red_pMhms1,red_Mh_ms1,red_Mh_err1,\\\n f_blue_ms,p_blue_msmh1,p_blue_ms1,blue_p_Mh_ms1,blue_pMhms1,blue_Mh_ms1,blue_Mh_err1,\\\n f_red_mh,tot_f_red_mh,p_red_msmh2,p_red_ms2,red_p_Mh_ms2,red_pMhms2,red_Mh_ms2,red_Mh_err2,\\\n f_blue_mh,p_blue_msmh2,p_blue_ms2,blue_p_Mh_ms2,blue_pMhms2,blue_Mh_ms2,blue_Mh_err2,\\\n _halo,ms_use = func_fred(f=True)\n###plot f_red and f_blue under the two quenching-dominant mechanisms\n plt.plot(ms_use,f_red_ms,'r',label=r'$f_{M_\ast}^{red}$')\n plt.plot(ms_use,f_blue_ms,'b',label=r'$f_{M_\ast}^{blue}$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel('fraction-stellar-mass')\n plt.legend(loc=4)\n #plt.savefig('Theory_f_red_stellar_mass',dpi=600)\n plt.show()\n plt.plot(_halo,f_red_mh,'r',label=r'$f_{M_h}^{red}$')\n plt.plot(_halo,f_blue_mh,'b',label=r'$f_{M_h}^{blue}$')\n plt.xlabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.ylabel('fraction-halo-mass')\n plt.legend(loc=4)\n #plt.savefig('Theory_f_red_halo_mass',dpi=600)\n plt.show()\n###plots for the case where stellar mass drives quenching\n plt.plot(ms_use,np.log10(red_Mh_ms1),label=r'$QE-M_\ast-red$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n plt.show()\n plt.errorbar(ms_use,np.log10(red_Mh_ms1),yerr=abs(red_Mh_err1.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\ast > ms$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_ms_red',dpi=600)\n plt.show()\n\n plt.plot(ms_use,np.log10(blue_Mh_ms1),label=r'$QE-M_\ast-blue$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n plt.show()\n plt.errorbar(ms_use,np.log10(blue_Mh_ms1),yerr=abs(blue_Mh_err1.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\ast > ms$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_ms_blue',dpi=600)\n plt.show()\n \n plt.plot(ms_use,np.log10(red_Mh_ms1),'r-',label=r'$QE-M_\ast-red$')\n plt.fill_between(ms_use,np.log10(red_Mh_ms1)+red_Mh_err1[:,0],np.log10(red_Mh_ms1)+red_Mh_err1[:,1],\n facecolor='r',alpha=0.2)\n plt.plot(ms_use,np.log10(blue_Mh_ms1),'b--',label=r'$QE-M_\ast-blue$')\n plt.fill_between(ms_use,np.log10(blue_Mh_ms1)+blue_Mh_err1[:,0],np.log10(blue_Mh_ms1)+blue_Mh_err1[:,1],\n facecolor='b',alpha=0.2)\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_ms_comparation',dpi=600)\n plt.show()\n###plots for the case where halo mass drives quenching\n plt.plot(ms_use,np.log10(red_Mh_ms2),label=r'$QE-M_h-red$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n plt.show()\n plt.errorbar(ms_use,np.log10(red_Mh_ms2),yerr=abs(red_Mh_err2.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\ast > mh$') \n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_mh_red',dpi=600)\n plt.show()\n \n plt.plot(ms_use,np.log10(blue_Mh_ms2),label=r'$QE-M_h-blue$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n plt.show()\n plt.errorbar(ms_use,np.log10(blue_Mh_ms2),yerr=abs(blue_Mh_err2.T),fmt=\"k^-\",linewidth=0.5,\n elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\ast > mh$')\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_mh_blue',dpi=600)\n plt.show() \n \n plt.plot(ms_use,np.log10(red_Mh_ms2),'r-',label=r'$QE-M_h-red$')\n plt.fill_between(ms_use,np.log10(red_Mh_ms2)+red_Mh_err2[:,0],np.log10(red_Mh_ms2)+red_Mh_err2[:,1],\n facecolor='r',alpha=0.2)\n plt.plot(ms_use,np.log10(blue_Mh_ms2),'b--',label=r'$QE-M_h-blue$')\n plt.fill_between(ms_use,np.log10(blue_Mh_ms2)+blue_Mh_err2[:,0],np.log10(blue_Mh_ms2)+blue_Mh_err2[:,1],\n facecolor='b',alpha=0.2)\n plt.xlabel(r'$lgM_\ast [M_\odot h^{-2}]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Theory_QE_mh_comparation',dpi=600)\n plt.show()\n return \n
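#fig_func_fred above draws its 1-sigma bands by adding the signed 16th/84th-percentile offsets to the log-mass curve; a compact sketch of that plotting pattern (helper name and signature are illustrative):\ndef plot_band(x,y,err2col,color):\n #err2col[:,0] holds the (negative) lower offset, err2col[:,1] the (positive) upper one\n plt.plot(x,y,color)\n plt.fill_between(x,y+err2col[:,0],y+err2col[:,1],facecolor=color[0],alpha=0.2)\n#e.g. plot_band(ms_use,np.log10(red_Mh_ms1),red_Mh_err1,'r-')\n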
def comparation_M16(g):\n h = 0.72\n delta_value = np.log10(h)\n t = g\n###load the data and compare: g=0 compares against the M16 data, g=1 and g=2 against mock data\n f_red_ms,tot_f_red_ms,p_red_msmh1,p_red_ms1,red_p_Mh_ms1,red_pMhms1,red_Mh_ms1,red_Mh_err1,\\\n f_blue_ms,p_blue_msmh1,p_blue_ms1,blue_p_Mh_ms1,blue_pMhms1,blue_Mh_ms1,blue_Mh_err1,\\\n f_red_mh,tot_f_red_mh,p_red_msmh2,p_red_ms2,red_p_Mh_ms2,red_pMhms2,red_Mh_ms2,red_Mh_err2,\\\n f_blue_mh,p_blue_msmh2,p_blue_ms2,blue_p_Mh_ms2,blue_pMhms2,blue_Mh_ms2,blue_Mh_err2,\\\n _halo,ms_use = func_fred(f=True)\n ###load the data\n if t==0:\n ####M16 observational data\n mh_r = np.array([12.17,12.14,12.50,12.89,13.25,13.63,14.05])\n mh_r_err = np.array([[0.19,0.12,0.04,0.04,0.03,0.03,0.05],\n [-0.24,-0.14,-0.05,-0.04,-0.03,-0.03,-0.05]])\n ms_r = np.array([10.28,10.58,10.86,11.10,11.29,11.48,11.68])\n mh_b = np.array([11.80,11.73,12.15,12.61,12.69,12.79,12.79])\n mh_b_err = np.array([[0.16,0.13,0.08,0.10,0.19,0.43,0.58],\n [-0.20,-0.17,-0.10,-0.11,-0.25,-1.01,-2.23]])\n ms_b = np.array([10.24,10.56,10.85,11.10,11.28,11.47,11.68])\n elif t==1:\n ####mock observational data (many data points)\n mh_r= np.array([11.84144843,11.84144843,11.89535823,11.95580005,12.02537811,\\\n 12.10329057,12.19181432,12.29147487,12.40331193,12.5262711,\\\n 12.66457476,12.80943267,12.96171603,13.12992385,13.30603297,\\\n 13.49122876,13.65855888,13.85375991,14.01142053,14.05900916,\\\n 14.18654906])\n ms_r = np.array([9.55516636,9.66545281,9.77573927,9.88602573,9.99631218,\\\n 10.10659864,10.2168851,10.32717155,10.43745801,10.54774447,\\\n 10.65803092,10.76831738,10.87860384,10.98889029,11.09917675,\\\n 11.20946321,11.31974967,11.43003612,11.54032258,11.65060904,11.76089549])\n mh_r_err= np.array([0.01,0.51726065,0.513776,0.51061946,0.50786479,0.50636558,\\\n 0.5054495,0.505326,0.5058986,0.50688928,0.50698652,0.5107636,\\\n 0.51497867,0.51649112,0.51241142,0.49964074,0.48506296,0.47290132,\\\n 0.45835034,0.48461951,0.04812004])\n mh_b= np.array([11.56549616,11.56549616,11.61073136,11.66002609,11.71386246,\\\n 11.77294388,11.83778009,11.90955185,11.98354181,12.06735616,\\\n 12.15251732,12.24396081,12.33427442,12.4449957,12.54540254,\\\n 12.66468538,12.82667239,13.01481881,13.08768458,13.21836804,\\\n 13.27014179])\n ms_b= np.array([9.54956774,9.64870165,9.74783556,9.84696947,9.94610338,\\\n 10.0452373,10.14437121,10.24350512,10.34263903,10.44177294,\\\n 10.54090685,10.64004076,10.73917467,10.83830858,10.93744249,\\\n 11.0365764,11.13571031,11.23484422,11.33397813,11.43311204,11.53224595])\n mh_b_err= np.array([0.01,0.31073456,0.31014416,0.31049053,0.31156874,0.31375724,\\\n 0.3163361,0.3207385,0.32540602,0.32897889,0.33508065,0.34308683,\\\n 0.34839135,0.3588951,0.36576389,0.39988588,0.40469186,0.34897021,\\\n 0.34346791,0.22070814,0.28744449]) \n else:\n ####mock observational data (few data points)\n mh_r= np.array([11.93213772,11.93213772,12.12434067,12.38838582,12.73441978,\\\n 13.15024239,13.62083619,14.05767997])\n ms_r = np.array([9.6447741,9.93427605,10.223778,10.51327995,10.8027819,\\\n 11.09228385,11.3817858,11.67128775])\n mh_r_err = np.array([0.01,0.51185669,0.50611527,0.50602658,0.5087693,0.51648376,\\\n 0.48655984,0.42089061])\n mh_b = np.array([11.64123409,11.64123409,11.7885786,11.97499485,12.19929409,\\\n 12.45631101,12.75363835,13.02485297])\n ms_b = np.array([9.63011405,9.89034056,10.15056708,10.41079359,10.67102011,\\\n 10.93124662,11.19147314,11.45169965])\n mh_b_err = np.array([0.01,0.31022394,0.31428295,0.32420174,0.33966039,0.36085605,\\\n 0.4146244,0.333523])\n ##comparison plots below\n if t==0:\n line2,caps2,bars2=plt.errorbar(ms_r,mh_r,yerr=abs(mh_r_err)[::-1],fmt=\"ro--\",linewidth=1,\n elinewidth=0.5,ecolor='r',capsize=1,capthick=0.5,label='red(M16)')\n line4,caps4,bars3=plt.errorbar(ms_b,mh_b,yerr=abs(mh_b_err)[::-1],fmt=\"bo--\",linewidth=1,\n elinewidth=0.5,ecolor='b',capsize=1,capthick=0.5,label='blue(M16)')\n #plt.errorbar(ms_use,np.log10(red_Mh_ms1),yerr=abs(red_Mh_err1.T),fmt=\"r^-\",linewidth=0.5,\n # elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\ast > ms$')\n #plt.errorbar(ms_use,np.log10(blue_Mh_ms1),yerr=abs(blue_Mh_err1.T),fmt=\"bs-\",linewidth=0.5,\n # elinewidth=0.5,ecolor='b',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\ast > ms$')\n #plt.errorbar(ms_use,np.log10(red_Mh_ms2),yerr=abs(red_Mh_err2.T),fmt=\"r^-.\",linewidth=0.5,\n # elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\ast > mh$') \n #plt.errorbar(ms_use,np.log10(blue_Mh_ms2),yerr=abs(blue_Mh_err2.T),fmt=\"bs-.\",linewidth=0.5,\n # elinewidth=0.5,ecolor='b',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\ast > mh$')\n #plt.plot(ms_use-2*delta_value,np.log10(red_Mh_ms1),'r-',label=r'$QE-M_\ast-red$')\n #plt.plot(ms_use-2*delta_value,np.log10(blue_Mh_ms1),'b-',label=r'$QE-M_\ast-blue$')\n plt.plot(ms_use-2*delta_value,np.log10(red_Mh_ms2),'r-.',label=r'$QE-M_h-red$')\n plt.plot(ms_use-2*delta_value,np.log10(blue_Mh_ms2),'b-.',label=r'$QE-M_h-blue$')\n plt.xlabel(r'$lgM_\ast [M_\odot ]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2)\n #plt.savefig('Correct_Theory_compare_data',dpi=600)\n plt.show()\n else:\n line2,caps2,bars2=plt.errorbar(ms_r-2*delta_value,mh_r,yerr=[abs(mh_r_err),abs(mh_r_err)],fmt=\"ro--\",linewidth=1,\n elinewidth=0.5,ecolor='r',capsize=1,capthick=0.5,label='red(M16)')\n line4,caps4,bars3=plt.errorbar(ms_b-2*delta_value,mh_b,yerr=[abs(mh_b_err),abs(mh_b_err)],fmt=\"bo--\",linewidth=1,\n elinewidth=0.5,ecolor='b',capsize=1,capthick=0.5,label='blue(M16)')\n #plt.errorbar(ms_use,np.log10(red_Mh_ms1),yerr=abs(red_Mh_err1.T),fmt=\"r^-\",linewidth=0.5,\n # elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\ast > ms$')\n #plt.errorbar(ms_use,np.log10(blue_Mh_ms1),yerr=abs(blue_Mh_err1.T),fmt=\"bs-\",linewidth=0.5,\n # elinewidth=0.5,ecolor='b',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\ast > ms$')\n #plt.errorbar(ms_use,np.log10(red_Mh_ms2),yerr=abs(red_Mh_err2.T),fmt=\"r^-.\",linewidth=0.5,\n # elinewidth=0.5,ecolor='r',capsize=0.5,capthick=0.5,label=r'$red < M_h-M_\ast > mh$') \n #plt.errorbar(ms_use,np.log10(blue_Mh_ms2),yerr=abs(blue_Mh_err2.T),fmt=\"bs-.\",linewidth=0.5,\n # elinewidth=0.5,ecolor='b',capsize=0.5,capthick=0.5,label=r'$blue < M_h-M_\ast > mh$')\n #plt.plot(ms_use-2*delta_value,np.log10(red_Mh_ms1),'r-',label=r'$QE-M_\ast-red$')\n #plt.plot(ms_use-2*delta_value,np.log10(blue_Mh_ms1),'b-',label=r'$QE-M_\ast-blue$')\n plt.plot(ms_use-2*delta_value,np.log10(red_Mh_ms2),'r-.',label=r'$QE-M_h-red$')\n plt.plot(ms_use-2*delta_value,np.log10(blue_Mh_ms2),'b-.',label=r'$QE-M_h-blue$')\n plt.xlabel(r'$lgM_\ast [M_\odot ]$')\n plt.ylabel(r'$lgM_h [M_\odot h^{-1}]$')\n plt.legend(loc=2) \n #plt.savefig('Correct_parameter_data',dpi=600)\n #plt.savefig('Mock_parameter_data',dpi=600)\n plt.show()\n return\n
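#comparation_M16 above shifts the model stellar masses by -2*lg(h) before overplotting, converting lgM* from Msolar h^-2 units into Msolar (with h=0.72 as adopted in this file); the conversion in isolation, as a sketch with an illustrative helper name:\ndef to_msun(lg_ms_h2,h=0.72):\n #lgM* in Msolar h^-2 -> lgM* in Msolar\n return lg_ms_h2-2*np.log10(h)\n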
#########################\ndef control_file(R):\n #doload_mock_data(tt=True)###load the mock data\n #bins_dic(tt=True)###compute the marginal probability distributions (log-space densities) and normalize them\n #mass_function_use(uu=True)####express the mass functions actually used from the mock data\n #the_probability(pp=True)###step 1 of the theory calculation: compute P(m*|Mh)\n #figure_1(a=True)###plot the result of the previous step\n #Theory_fun3(ff3=True)###compute the joint probability distribution and P(Mh|m*)\n #figure_2(b=True)#####plot the result of the previous step\n #R_Mh_Ms(c=True)####solve for the mh-m* relation\n #fig_R_Mh_Ms(d=True)####plot the result of the previous step\n #func_fred(f=True)###solve the mass relations of the red and blue galaxies\n #fig_func_fred(g=True)###plot the result of the previous step\n comparation_M16(g=2)###load data and compare: g=0 compares against the M16 data, g=1 and g=2 against mock data\n return\ncontrol_file(R=True)","repo_name":"Kein-Cary/seniorproject_quenching_2","sub_path":"Theory_calculation.py","file_name":"Theory_calculation.py","file_ext":"py","file_size_in_byte":46283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29093312504","text":"from typing import (List, Optional) #pylint: disable-msg=W0611\n\nfrom urllib.parse import (urlparse, urljoin)\nfrom html import unescape\nfrom re import compile as _compile\n\nfrom requests import RequestException\n\nfrom .request import request\nfrom .status import Status\nfrom .exceptions import (\n DeadlinksIgnoredURL,\n DeadlinksRedirectionURL,\n)\n\n# -- Constants -----------------------------------------------------------------\n\n__RE_LINKS__ = _compile(r'<a\s{1}([^>]+)>') # pylint: disable=W1401\n\n# filters\nCLEANER = lambda x: x.strip(\"\\\"'\\n \") # removes quotes, spaces and new lines\nANCHORS = lambda x: x.split(\"#\")[0] # removes the part after an anchor\nUNESCPE = lambda x: unescape(x) # pylint: disable=W0108\n\n\nclass URL:\n \"\"\" URL abstraction representation. 
\"\"\"\n\n def __init__(self, location: str) -> None:\n # print(urlparse(location))\n self._url = urlparse(location)\n self._status = Status.UNDEFINED # type: Status\n\n # some predefined states\n self._referrers = [] # type: List[str]\n self._text = None # type: Optional[str]\n self._links = [] # type: List[str]\n\n # internal error or mesage field, used to store ignore message\n # or error or status code.\n # TODO - rethink logic behind this value.\n self._message = \"\" # type: str\n\n # Basic properties of the URL Link\n @property\n def domain(self) -> str:\n \"\"\" Short netlocation prop. \"\"\"\n return self._url.netloc\n\n @property\n def scheme(self) -> str:\n \"\"\" Short scheme prop. \"\"\"\n return self._url.scheme\n\n @property\n def path(self) -> str:\n \"\"\" Short path prop. \"\"\"\n return self._url.path\n\n @property\n def status(self) -> Status:\n \"\"\" Return one of 4 statuses of the URL .\"\"\"\n return self._status\n\n @status.setter\n def status(self, value: Status) -> None:\n \"\"\" Setter for status property. \"\"\"\n if not isinstance(value, Status):\n raise TypeError(\"URL Status value can have only Status type \")\n self._status = value\n\n @property\n def message(self) -> str:\n return self._message\n\n @message.setter\n def message(self, value: str) -> None:\n if not isinstance(value, str):\n raise TypeError(\"message can be only string\")\n self._message = value\n\n def is_valid(self) -> bool:\n \"\"\" Check if url looks \"valid\". \"\"\"\n\n return (self.domain != \"\" and self.scheme != \"\") and self.is_crawlable()\n\n def is_crawlable(self) -> bool:\n \"\"\" is url possible to crawl ? \"\"\"\n\n return self.scheme in {\"http\", \"https\"}\n\n def is_schema_valid(self) -> bool:\n return self._url.scheme in [\n \"http\",\n \"https\",\n \"ftp\",\n \"sftp\",\n \"ws\",\n \"ssh\",\n \"mailto\",\n \"news\",\n ]\n\n def add_referrer(self, url: str) -> None:\n \"\"\" Add a page that links (referrer) to self object. \"\"\"\n if url in self._referrers:\n return\n self._referrers.append(url)\n\n def get_referrers(self) -> List[str]:\n \"\"\" Return URL refferers list. \"\"\"\n return self._referrers\n\n def match_domains(self, domains: List[str]) -> bool:\n \"\"\" Match ignored pathes (argument pathes) to `url.netloc`. \"\"\"\n for domain in domains:\n if domain in self._url.netloc:\n return True\n return False\n\n def match_pathes(self, pathes: List[str]) -> bool:\n \"\"\" Match ignored pathes (argument pathes) to `url.path`. \"\"\"\n for path in pathes:\n if path in self._url.path:\n return True\n return False\n\n def exists(self, is_external: bool = False, retries: int = 0) -> bool:\n \"\"\" Return \"found\" (or \"not found\") status of the page as bool. \"\"\"\n\n if self.status == Status.FOUND:\n return True\n\n if self.status == Status.NOT_FOUND:\n return False\n\n if self.status == Status.IGNORED:\n error = \"This URL <{}> ignored\"\n raise DeadlinksIgnoredURL(error.format(self.url()))\n\n try:\n response = request(self.url(), is_external, retries)\n except RequestException as exception:\n self.message = str(exception)\n return False\n\n # Group of 2XX responses. 
In general we think its OK to mark URL as\n # reachable and exists\n if response.status_code // 100 == 2:\n self._text = response.text\n return True\n\n # redirections catching.\n if response.status_code // 100 == 3:\n raise DeadlinksRedirectionURL(response.headers['location'])\n\n self.message = str(response.status_code)\n return False\n\n def url(self) -> str:\n \"\"\" Return url based on abstraction, minus ending slash. \"\"\"\n return self._url.geturl()\n\n def __str__(self) -> str:\n \"\"\" Converts URL object to string (actual URL). \"\"\"\n return self.url()\n\n def __repr__(self) -> str:\n \"\"\" Object stringer representation. \"\"\"\n\n return \"{}<{}>\".format(self.__class__.__name__, self.url())\n\n def _consume_links(self) -> None:\n \"\"\" Parse response text into list of links. \"\"\"\n\n links = []\n for attr in __RE_LINKS__.findall(self._text):\n pos = attr.find(\"href=\")\n if pos == -1:\n \"href not found\"\n continue\n\n href = attr[pos + 5:].strip()\n\n if not href:\n continue\n\n link = \"\" # type: str\n\n quoted = href[0] in {'\"', \"'\"}\n if quoted:\n end_pos = href[1:].find(href[0])\n if end_pos == -1:\n \"unquoted link\"\n continue\n link = href[1:end_pos + 1]\n else:\n end_pos = href[0:].find(\" \")\n link = href if end_pos == -1 else href[:end_pos + 1]\n\n if not link:\n \"empty link\"\n continue\n\n links.append(link.replace(\"\\n\", \"\"))\n\n self._links = list(links)\n self._links = list(map(CLEANER, self._links))\n self._links = list(map(ANCHORS, self._links))\n self._links = list(map(UNESCPE, self._links))\n\n @property\n def links(self) -> List[str]:\n \"\"\" Return links found at the page. \"\"\"\n\n if not self._text or self._links:\n return self._links\n\n self._consume_links()\n\n return list(set(self._links))\n\n def link(self, href: str) -> str:\n \"\"\"\n Construct a full (“absolute”) URL by combining a\n “URL” object as base with another URL (url).\n\n avoiding using urljoin if self._url and href are same.\n \"\"\"\n\n if self._url.geturl() == href:\n return href\n\n return urljoin(self._url.geturl(), href)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n urls = set()\n urls.add(URL(\"http://google.com\"))\n urls.add(URL(\"http://google.com\"))\n urls.add(URL(\"https://google.com\"))\n\n print(len(urls), urls)\n","repo_name":"butuzov/deadlinks","sub_path":"deadlinks/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"81"} +{"seq_id":"33986788437","text":"import re\nimport base64\nimport uuid\nimport imageio\nimport numpy as np\nfrom PIL import Image,ImageOps\nimport cv2\nimport math\nimport pytesseract\nimport sys\nimport NO_recog\nimport Comp\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom flask import _request_ctx_stack\nimport pathlib\nimport glob, os\nimport TextRecog\nimport threading\nimport time\nfrom os import walk\nimport os\nimport json\nimport ast\nimport requests\nimport PartCut\nfrom scipy import ndimage\nfrom pdf2image import convert_from_path,convert_from_bytes\n#from fuzzywuzzy import fuzz\nfrom thefuzz import fuzz\nimport pdfkit\nfrom skimage import morphology as morphology_sk\nfrom scipy.ndimage import morphology as morphology_sci\nfrom scipy.ndimage import label\nfrom torchvision import models,transforms\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nimport argparse\nimport 
string\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torch.nn.functional as torchF\nimport craft_utils\nimport imgproc\nimport file_utils\nimport merge\nimport merge2\nfrom craft import CRAFT\nfrom collections import OrderedDict\nimport keywordFinder\nimport noteFinder\nimport messageParser\nimport pdfSplit\nimport orderformParser\nfrom orderParserV2 import orderParser\nimport OrderParserV3\nimport statistics\nimport keywordFinder_v2\n\nfrom utilss import CTCLabelConverter, AttnLabelConverter\nfrom dataset import RawDataset, AlignCollate, RawDatasetWithBB\nfrom recmodel import Model as recModel\n\nimport pytesseract\nsys.path.append(\"logo\")\nfrom logo import Logo\nfrom component.component import Component\n\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\npathlib.Path(__file__).parent.absolute()\nsys.argv = ['-f']\n\n# logo = Logo()\n# component = Component()\n\n\nmodel = models.vgg11_bn(pretrained=False)\nmodel.classifier[6] = nn.Linear(in_features=4096, out_features=2)\ncheckpoint = torch.load(\"best_checkpoint_3.pth\", map_location=torch.device(device))\nmodel.load_state_dict(checkpoint)\nmodel.eval()\n\nmodel2 = models.vgg11_bn(pretrained=False)\nmodel2.classifier[6] = nn.Linear(in_features=4096, out_features=447)\ncheckpoint2 = torch.load(\"best_checkpoint_last_para.pth\", map_location=torch.device(device))\nmodel2.load_state_dict(checkpoint2)\nmodel2.eval()\n#model = model.to(device)\ntorch_size = 224\ntorch_mean = (0.5, 0.5, 0.5)\ntorch_std = (0.5, 0.5, 0.5)\npreprocess = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(torch_mean, torch_std)\n ])\n\ndef copyStateDict(state_dict):\n if list(state_dict.keys())[0].startswith(\"module\"):\n start_idx = 1\n else:\n start_idx = 0\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \".\".join(k.split(\".\")[start_idx:])\n new_state_dict[name] = v\n return new_state_dict\n\nmodel_textbox = CRAFT()\nmodel_textbox.load_state_dict(copyStateDict(torch.load(\"craft_mlt_25k.pth\", map_location=torch.device(device))))\nif torch.cuda.is_available() :\n\tmodel_textbox = model_textbox.cuda()\nmodel_textbox = torch.nn.DataParallel(model_textbox)\ncudnn.benchmark = False\nmodel_textbox.eval()\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--image_folder', default=\"CutTexts\", help='path to image_folder which contains text images')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--batch_size', type=int, default=400, help='input batch size')\nparser.add_argument('--saved_model', default=\"TPS-ResNet-BiLSTM-Attn.pth\", help=\"path to saved_model to evaluation\")\n\"\"\" Data processing \"\"\"\nparser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\nparser.add_argument('--imgH', type=int, default=32, help='the height of the input image')\nparser.add_argument('--imgW', type=int, default=100, help='the width of the input image')\nparser.add_argument('--rgb', action='store_true', help='use rgb input')\nparser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\nparser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\nparser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n\"\"\" Model Architecture 
\"\"\"\nparser.add_argument('--Transformation', type=str, default=\"TPS\", help='Transformation stage. None|TPS')\nparser.add_argument('--FeatureExtraction', type=str, default=\"ResNet\", help='FeatureExtraction stage. VGG|RCNN|ResNet')\nparser.add_argument('--SequenceModeling', type=str, default=\"BiLSTM\", help='SequenceModeling stage. None|BiLSTM')\nparser.add_argument('--Prediction', type=str, default=\"Attn\", help='Prediction stage. CTC|Attn')\nparser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\nparser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')\nparser.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\nparser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\nopt = parser.parse_args()\n\nif opt.sensitive:\n\topt.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n\ncudnn.benchmark = True\ncudnn.deterministic = True\nopt.num_gpu = torch.cuda.device_count()\n\"\"\" model configuration \"\"\"\nif 'CTC' in opt.Prediction:\n\tconverter = CTCLabelConverter(opt.character)\nelse:\n\tconverter = AttnLabelConverter(opt.character)\nopt.num_class = len(converter.character)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ncudnn.deterministic = True\nmodel_textrec = recModel(opt)\nmodel_textrec = torch.nn.DataParallel(model_textrec).to(device)\n# load model\nmodel_textrec.load_state_dict(torch.load(\"TPS-ResNet-BiLSTM-Attn.pth\", map_location=device))\nmodel_textrec.eval()\nkf2 = keywordFinder_v2.Keywordfinder()\nx=15000\nsys.setrecursionlimit(x)\napp = Flask(__name__)\n\ndef getRotate(imgPath):\n\tim = cv2.imread(imgPath)\n\tim = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\t#im = ndimage.zoom(im,0.3)\n\n\tgray = im\n\tedges = cv2.Canny(gray,50,150,apertureSize = 3)\n\tlines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)\n\tmax_dist = 0\n\tmax_coord1 = []\n\tmax_coord2 = []\n\trAngle = 0\n\n\n\tfor [line] in lines:\n\t\ta = np.array([line[0],line[1]])\n\t\tb = np.array([line[2],line[3]])\n\t\tdist = np.linalg.norm(a-b)\n\t\tif(dist > max_dist):\n\t\t\tmax_dist = dist\n\t\t\tmax_coord1 = a\n\t\t\tmax_coord2 = b\n\n\trise = (max_coord1[1]-max_coord2[1])\n\trun = max_coord1[0]-max_coord2[0]\n\n\tif np.isclose(run,0) or np.isclose(rise,0):\n\t\trAngle = 0\n\telse:\n\t\trAngle = math.atan2(rise, run)\n\t#print(max_coord1,max_coord2,rAngle)\n\tim_mod = ndimage.rotate(im, rAngle,cval = 255)\n\tnewdata=pytesseract.image_to_osd(im_mod)\n\trAngle2 = 360-int(re.search('(?<=Rotate: )\\d+', newdata).group(0))\n\treturn rAngle+rAngle2\ndef testBlack(red,green,blue):\n\tif red < 210 and green < 210 and blue < 210 and abs(int(red)-int(green)) < 100 and abs(int(red)-int(blue)) < 100 and abs(int(green)-int(blue)) < 100:\n\t\treturn True\n\telse:\n\t\treturn False\ndef rotateImage(image, angle):\n\timage_center = tuple(np.array(image.shape[1::-1]) / 2)\n\trot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n\tresult = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR,borderValue=(255,255,255))\n\treturn result\ndef rotateImageBlackBorder(image, angle):\n\timage_center = tuple(np.array(image.shape[1::-1]) / 2)\n\trot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n\tresult = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR,borderValue=(0,0,0))\n\treturn 
result\ndef init_feature(name):\n chunks = name.split('-')\n if chunks[0] == 'sift':\n detector = cv2.xfeatures2d.SIFT_create()\n norm = cv2.NORM_L2\n elif chunks[0] == 'surf':\n detector = cv2.xfeatures2d.SURF_create(800)\n norm = cv2.NORM_L2\n elif chunks[0] == 'orb':\n detector = cv2.ORB_create(400)\n norm = cv2.NORM_HAMMING\n elif chunks[0] == 'akaze':\n detector = cv2.AKAZE_create()\n norm = cv2.NORM_HAMMING\n elif chunks[0] == 'brisk':\n detector = cv2.BRISK_create()\n norm = cv2.NORM_HAMMING\n else:\n return None, None\n if 'flann' in chunks:\n if norm == cv2.NORM_L2:\n flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n else:\n flann_params = dict(algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1) # 2\n # bug : need to pass empty dict (#1329)\n matcher = cv2.FlannBasedMatcher(flann_params, {})\n else:\n matcher = cv2.BFMatcher(norm)\n return detector, matcher\n\n\ndef filter_matches(kp1, kp2, matches, ratio=0.75):\n mkp1, mkp2 = [], []\n for m in matches:\n if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n m = m[0]\n mkp1.append(kp1[m.queryIdx])\n mkp2.append(kp2[m.trainIdx])\n p1 = np.float32([kp.pt for kp in mkp1])\n p2 = np.float32([kp.pt for kp in mkp2])\n kp_pairs = zip(mkp1, mkp2)\n return p1, p2, list(kp_pairs)\n\ndef moveVer(imageR,imageG,imageB,start,dir,times):\n\tx = start[0]\n\ty = start[1]\n\twhile times > 0:\n\t\ty = y+dir*15\n\t\twhile True:\n\t\t\t#print(\"now at\",[x,y])\n\t\t\tisline = True\n\t\t\tfor i in range(15):\n\t\t\t\tif not (testBlack(imageR[y][x+i],imageG[y][x+i],imageB[y][x+i]) or testBlack(imageR[y][x-i],imageG[y][x-i],imageB[y][x-i])):\n\t\t\t\t\t#print(\"not black\",[x+i,y],\"or\",[x-i,y])\n\t\t\t\t\t#print([x+i,y],testBlack(imageR[y][x+i],imageG[y][x+i],imageB[y][x+i]))\n\t\t\t\t\t#print([x-i,y],testBlack(imageR[y][x-i],imageG[y][x-i],imageB[y][x-i]))\n\t\t\t\t\tisline = False\n\t\t\t\t\tbreak\n\t\t\tif isline:\n\t\t\t\tbreak\n\t\t\t\"\"\"\n\t\t\tif (testBlack(imageR[y][x+5],imageG[y][x+5],imageB[y][x+5]) and testBlack(imageR[y][x+4],imageG[y][x+4],imageB[y][x+4]) and testBlack(imageR[y][x+3],imageG[y][x+3],imageB[y][x+3]) and testBlack(imageR[y][x+2],imageG[y][x+2],imageB[y][x+2]) and testBlack(imageR[y][x+1],imageG[y][x+1],imageB[y][x+1])) or (testBlack(imageR[y][x-5],imageG[y][x-5],imageB[y][x-5]) and testBlack(imageR[y][x-4],imageG[y][x-4],imageB[y][x-4]) and testBlack(imageR[y][x-3],imageG[y][x-3],imageB[y][x-3]) and testBlack(imageR[y][x-2],imageG[y][x-2],imageB[y][x-2]) and testBlack(imageR[y][x-1],imageG[y][x-1],imageB[y][x-1])) :\n\t\t\t\tbreak\n\t\t\t\"\"\"\n\t\t\ty = y + dir\n\t\t\tif not testBlack(imageR[y][x],imageG[y][x],imageB[y][x]):\n\t\t\t\t[x,y] = findLine(imageR,imageG,imageB,[x,y],1,0)\n\t\t#print(\"RUN VER\",[x,y])\n\t\ttimes -= 1\n\n\treturn [x,y]\ndef moveHor(imageR,imageG,imageB,start,dir,times):\n\tx = start[0]\n\ty = start[1]\n\n\twhile times > 0:\n\t\tx = x+dir*15\n\t\twhile True:\n\t\t\t#print(\"now at\",[x,y])\n\t\t\tisline = True\n\t\t\tfor i in range(15):\n\t\t\t\tif not (testBlack(imageR[y+i][x],imageG[y+i][x],imageB[y+i][x]) or testBlack(imageR[y-i][x],imageG[y-i][x],imageB[y-i][x])):\n\n\t\t\t\t\tisline = False\n\t\t\t\t\tbreak\n\t\t\tif isline:\n\t\t\t\tbreak\n\t\t\t\"\"\"\n\t\t\tif (testBlack(imageR[y+5][x],imageG[y+5][x],imageB[y+5][x]) and testBlack(imageR[y+4][x],imageG[y+4][x],imageB[y+4][x]) and testBlack(imageR[y+3][x],imageG[y+3][x],imageB[y+3][x]) and testBlack(imageR[y+2][x],imageG[y+2][x],imageB[y+2][x]) and 
testBlack(imageR[y+1][x],imageG[y+1][x],imageB[y+1][x])) or (testBlack(imageR[y-5][x],imageG[y-5][x],imageB[y-5][x]) and testBlack(imageR[y-4][x],imageG[y-4][x],imageB[y-4][x]) and testBlack(imageR[y-3][x],imageG[y-3][x],imageB[y-3][x]) and testBlack(imageR[y-2][x],imageG[y-2][x],imageB[y-2][x]) and testBlack(imageR[y-1][x],imageG[y-1][x],imageB[y-1][x])) :\n\t\t\t\tbreak\n\t\t\t\"\"\"\n\t\t\tx = x + dir\n\t\t\tif not testBlack(imageR[y][x],imageG[y][x],imageB[y][x]):\n\t\t\t\t#print(\"line lost at\",[x,y])\n\t\t\t\t[x,y] = findLine(imageR,imageG,imageB,[x,y],0,1)\n\t\t#print(\"RUN HOR\",[x,y])\n\t\ttimes -= 1\n\treturn [x,y]\ndef findLine(imageR,imageG,imageB,curr,dirX,dirY):\n\tx = curr[0]\n\ty = curr[1]\n\tcount = 1\n\t#print(\"looking for line at\",curr)\n\twhile True:\n\t\tif testBlack(imageR[y+dirY*count][x+dirX*count],imageG[y+dirY*count][x+dirX*count],imageB[y+dirY*count][x+dirX*count]):\n\t\t\tx = x+dirX*count\n\t\t\ty = y+dirY*count\n\t\t\tbreak\n\t\telif testBlack(imageR[y-dirY*count][x-dirX*count],imageG[y-dirY*count][x-dirX*count],imageB[y-dirY*count][x-dirX*count]):\n\t\t\tx = x-dirX*count\n\t\t\ty = y-dirY*count\n\t\t\tbreak\n\t\tcount += 1\n\treturn [x,y]\ndef getDensity(image,TL,BR):\n\td_x = -(TL[0] - BR[0])\n\td_y = -(TL[1] - BR[1])\n\tcount = 0\n\tfor i in range (d_x):\n\t\tfor j in range(d_y):\n\t\t\tif testBlack(image[j,i,2],image[j,i,1],image[j,i,0]):\n\t\t\t\tcount += 1\n\treturn count/(d_x*d_y)\ndef getConnectedShape(image,canvas,x,y,threshold):\n\td_x = image.shape[1]\n\td_y = image.shape[0]\n\t#print(\"start at\",x,y)\n\t#print(\"Count\",count)\n\tfor i in range(-threshold,threshold+1,1):\n\t\tfor j in range(-threshold,threshold+1,1):\n\t\t\tif(x+i > 0 and y+j > 0 and x+i < d_x and y+j < d_y and (i !=0 or j !=0)):\n\t\t\t\tif canvas[y+j][x+i] == 255:\n\t\t\t\t\tif image[y+j][x+i][0] < 210:\n\t\t\t\t\t\tcanvas[y+j][x+i] = 0\n\t\t\t\t\t\timage[y+j][x+i][:] = 255\n\t\t\t\t\t\t#print(i,j)\n\t\t\t\t\t\tgetConnectedShape(image,canvas,x+i,y+j,threshold)\n\treturn\ndef getConnectedShape_Opti(image,canvas,x,y,threshold,call_i,call_j):\n\t#print(\"OPTI \"+str(image.shape),file=sys.stderr)\n\td_x = image.shape[1]\n\td_y = image.shape[0]\n\tif(call_i == 0 and call_j == 0):\n\t\tfor i in range(-threshold,threshold+1,1):\n\t\t\tfor j in range(-threshold,threshold+1,1):\n\t\t\t\tif(x+i > 0 and y+j > 0 and x+i < d_x and y+j < d_y and (i !=0 or j !=0)):\n\t\t\t\t\tif canvas[y+j][x+i] == 255:\n\t\t\t\t\t\tif image[y+j][x+i][0] < 210:\n\t\t\t\t\t\t\tcanvas[y+j][x+i] = 120\n\t\t\t\t\t\t\timage[y+j][x+i][:] = 255\n\t\t\t\t\t\t\t#print(i,j)\n\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,x+i,y+j,threshold,i,j)\n\t\n\telse:\n\t\tif(call_i < 0):\n\t\t\tfor i in range(x-threshold,x-threshold-call_i):\n\t\t\t\tif(call_j < 0):\n\t\t\t\t\tfor j in range(y-threshold,y+threshold+1):\n\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\t\t\t\telse:\n\t\t\t\t\tfor j in range(y-threshold,y+threshold+1):\n\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\t\t\tif(call_j < 0):\n\t\t\t\tfor i in 
range(x-call_i-threshold,x+threshold+1):\n\t\t\t\t\t\tfor j in range(y-threshold,y-call_j-threshold):\n\t\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\n\t\t\telse:\n\t\t\t\tfor i in range(x-call_i-threshold,x+threshold+1):\n\t\t\t\t\t\tfor j in range(y+threshold+1-call_j,y+threshold+1):\n\t\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\t\telse :\n\t\t\tfor i in range(x+threshold-call_i+1,x+threshold+1):\n\t\t\t\tif(call_j < 0):\n\t\t\t\t\tfor j in range(y-threshold,y+threshold+1):\n\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\t\t\t\telse:\n\t\t\t\t\tfor j in range(y-threshold,y+threshold+1):\n\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\t\t\tif(call_j < 0):\n\t\t\t\tfor i in range(x-call_i-threshold,x+threshold+1):\n\t\t\t\t\t\tfor j in range(y-threshold,y-call_j-threshold):\n\t\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\t\t\telse:\n\t\t\t\tfor i in range(x-call_i-threshold,x+threshold+1):\n\t\t\t\t\t\tfor j in range(y+threshold+1-call_j,y+threshold+1):\n\t\t\t\t\t\t\tif(i > 0 and j > 0 and i < d_x and j < d_y):\n\t\t\t\t\t\t\t\tif canvas[j][i] == 255:\n\t\t\t\t\t\t\t\t\tif image[j][i][0] < 210:\n\t\t\t\t\t\t\t\t\t\tcanvas[j][i] = 120\n\t\t\t\t\t\t\t\t\t\timage[j][i][:] = 255\n\t\t\t\t\t\t\t\t\t\tgetConnectedShape_Opti(image,canvas,i,j,threshold,i-x,j-y)\n\n\n\treturn\ndef naiveInter(image,scale):\n\td_x = image.shape[1]\n\td_y = image.shape[0]\n\t#print(\"scale:\",scale,\"x:\",d_x,\"y:\",d_y)\n\toutput = np.full([math.floor(d_y*scale+4),math.floor(d_x*scale+4)],255)\n\tscalar_flag = np.isscalar(image[0,0])\n\tfor i in range(d_x):\n\t\tfor j in range(d_y):\n\t\t\tif(scalar_flag):\n\t\t\t\tif(image[j,i] == 120):\n\t\t\t\t\t#if(j*scale+scale*4 < d_y*scale and i*scale+scale*4 < d_x):\n\t\t\t\t\toutput[j*scale:j*scale+scale*4,i*scale:i*scale+scale*4] = image[j,i]\n\t\t\telse:\n\t\t\t\tif(any(image[j,i] == 120)):\n\t\t\t\t\toutput[j*scale:j*scale+scale*4,i*scale:i*scale+scale*4,:] = image[j,i,:]\n\treturn output\ndef naiveReduction(image,scale):\n\td_x = image.shape[1]\n\td_y = image.shape[0]\n\t#cv2.imwrite('shapes/TMP/RREEEEE.png', image)\n\toutput = np.full([math.ceil(d_y/scale),math.ceil(d_x/scale),3],255)\n\tprint(\"O_S:\",output.shape)\n\tfor i in range(0,d_x,scale):\n\t\tfor j in 
range(0,d_y,scale):\n\t\t\t#print(image[j:j+scale][i:i+scale])\n\t\t\ts_o = 255\n\t\t\t#print(np.count_nonzero(image[j:j+scale,i:i+scale,:] != 255))\n\t\t\tif(np.count_nonzero(image[j:j+scale,i:i+scale,:] != 255) > 0):\n\t\t\t\ts_o = 0\n\t\t\t#print(s_o)\n\t\t\t#print(i/scale,j/scale)\n\t\t\toutput[int(j/scale)][int(i/scale)] = s_o\n\treturn output\n\ndef job():\n i = 0\n while(True):\n # print(\"Child thread:\", i, file=sys.stderr)\n i+=1\n time.sleep(1)\ndef compare(parts,compFiles,pid,hess,nL,thres):\n\tfilepath = \"../uploads/Crop/\"\n\t#print(parts)\n\n\t#print(parts)\n\toutput = {\"data\":[]}\n\tserial = 0\n\tfor part in parts:\n\t\tpart = part.get('filename')\n\t\tfor fn in compFiles:\n\t\t\t# tmp = Comp.compare(filepath+fn,filepath+part,hess,nL,thres)\n\t\t\timg1 = cv2.imread(filepath+fn, 0)\n\t\t\timg2 = cv2.imread(filepath+part, 0)\n\t\t\tfeature_name = 'sift'\n\t\t\tdetector, matcher = init_feature(feature_name)\n\t\t\tif img1 is None:\n\t\t\t\tcontinue\n\t\t\t\t# print('Failed to load fn1:', fn1)\n\t\t\t\t# sys.exit(1)\n\n\t\t\tif img2 is None:\n\t\t\t\tcontinue\n\t\t\t\t# print('Failed to load fn2:', fn2)\n\t\t\t\t# sys.exit(1)\n\n\t\t\tif detector is None:\n\t\t\t\tcontinue\n\t\t\t\t# print('unknown feature:', feature_name)\n\t\t\t\t# sys.exit(1)\n\n\t\t\t# print('using', feature_name)\n\n\t\t\tkp1, desc1 = detector.detectAndCompute(img1, None)\n\t\t\tkp2, desc2 = detector.detectAndCompute(img2, None)\n\t\t\t# print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))\n\n\t\t\tdef match_and_draw(win):\n\t\t\t\tstatus=[]\n\t\t\t\ttry:\n\t\t\t\t\traw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n\t\t\t\texcept:\n\t\t\t\t\treturn 0\n\t\t\t\tp1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\t\t\t\tif len(p1) >= 4:\n\t\t\t\t\tH, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n\t\t\t\t\t# print(status)\n\t\t\t\t\treturn np.sum(status) * 100 / len(status)\n\t\t\t\treturn 0\n\n\t\t\t\t# _vis = explore_match(win, img1, img2, kp_pairs, status, H)\n\t\t\tresult = match_and_draw('find_obj')\n\t\t\tserial += 1\n\t\t\toutput.get('data').append({\"source\":part,\"process_id\":pid,\"filename\":fn,\"confidence\":result,\"finish\":serial,\"total\":len(parts)*len(compFiles)})\n\t\t\tr = requests.post(\"http://172.25.25.33:8082/result\",json = output)\n\t\t\t# print(r.content,file=sys.stderr)\n\t\t\tdata = r.json()\n\t\t\tif data['status']==\"stop\":\n\t\t\t\treturn\n\t\t\toutput = {\"data\":[]}\n\tr = requests.patch(\"http://172.25.25.33:8082/process/stop\",json = {\"id\":[pid]})\n\t\"\"\"\n\toutput = {\"data\":[]}\n\tthres = (1.5/9)*(thres-1)\n\tserial = 0\n\tfor fn in compFiles:\n\t\tfor part in parts:\n\t\t\tpart = part.get('filename')\n\t\t\t#print(filepath+fn,filepath+part)\n\t\t\t#print(Comp.compare(filepath+fn,filepath+part))\n\t\t\ttmp = Comp.compare(filepath+fn,filepath+part,hess,nL,thres)\n\t\t\tserial += 1\n\t\t\toutput.get('data').append({\"process_id\":pid,\"filename\":fn,\"confidence\":tmp[1],\"finish\":serial,\"total\":len(parts)*len(compFiles)})\n\t\t\t# print(output,file=sys.stderr)\n\t\t\tr = requests.post(\"http://172.25.25.33:8082/result\",json = output)\n\t\t\tprint(r.content,file=sys.stderr)\n\t\t\tdata = r.json()\n\t\t\tif data['status']==\"stop\":\n\t\t\t\treturn\n\t\t\toutput = {\"data\":[]}\n\t\"\"\"\n\t\t#tmp = Comp.compare(filepath+fn,filepath+part)\n\t\"\"\"\n\tfor fn in compFiles:\n\t\tprint(fn,file=sys.stderr)\n\t\tif isinstance(parts, list):\n\t\t\tfor part in 
parts:\n\t\t\t\t#print(Comp.compare(filepath+fn,filepath+part),file=sys.stderr)\n\t\t\t\t\n\t\t\t\ttmp = Comp.compare(filepath+fn,filepath+part.get(\"filename\"))\n\t\t\t\tif(tmp[0]):\n\t\t\t\t\tprint(pid,fn,part,tmp[1])\n\t\t\t\t\n\t\t\t\t#print(\"A\")\n\t\t\t\t#print(filepath+\"Crop/\"+part,filepath+\"Crop/\"+fn)\n\t\t\t\t#print(Comp.compare(filepath+part,filepath+fn))\n\t\telse:\n\t\t\ttmp = Comp.compare(filepath+fn,filepath+parts.get(filename))\n\t\t\tif(tmp[0]):\n\t\t\t\tprint(pid,fn,parts,tmp[1])\n\t\t\t#print(Comp.compare(filepath+fn,filepath+parts),file=sys.stderr)\n\t#print(pid,file=sys.stderr)\n\t\"\"\"\n\t# print(output,file=sys.stderr)\n\n\t#print(type(output))\n\t#r = requests.post(\"http://172.25.25.33:8082/result\",data = {\"data\":[{\"process_id\":1,\"filename\":\"123\",\"confidence\":0}]})\n\t# r = requests.post(\"http://172.25.25.33:8082/result\",json = output)\n\t# print(r.content,file=sys.stderr)\n\treturn\n\ndef match(parts,compFiles,pid,hess,nL,thres):\n\tfilepath = \"../uploads/\"\n\t#print(parts)\n\toutput = {\"data\":[]}\n\tserial = 0\n\tfor part in parts:\n\t\tpart = part.get('filename')\n\t\tfor fn in compFiles:\n\t\t\t# tmp = Comp.compare(filepath+fn,filepath+part,hess,nL,thres)\n\t\t\timg1 = cv2.imread(filepath+fn, 0)\n\t\t\timg2 = cv2.imread(filepath+part, 0)\n\t\t\tfeature_name = 'sift'\n\t\t\tdetector, matcher = init_feature(feature_name)\n\t\t\tif img1 is None:\n\t\t\t\tcontinue\n\t\t\t\t# print('Failed to load fn1:', fn1)\n\t\t\t\t# sys.exit(1)\n\n\t\t\tif img2 is None:\n\t\t\t\tcontinue\n\t\t\t\t# print('Failed to load fn2:', fn2)\n\t\t\t\t# sys.exit(1)\n\n\t\t\tif detector is None:\n\t\t\t\tcontinue\n\t\t\t\t# print('unknown feature:', feature_name)\n\t\t\t\t# sys.exit(1)\n\n\t\t\t# print('using', feature_name)\n\n\t\t\tkp1, desc1 = detector.detectAndCompute(img1, None)\n\t\t\tkp2, desc2 = detector.detectAndCompute(img2, None)\n\t\t\t# print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))\n\n\t\t\tdef match_and_draw(win):\n\t\t\t\tstatus=[]\n\t\t\t\traw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n\t\t\t\tp1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\t\t\t\tif len(p1) >= 4:\n\t\t\t\t\tH, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n\t\t\t\t\t# print(status)\n\t\t\t\t\treturn np.sum(status) * 100 / len(status)\n\t\t\t\treturn 0\n\n\t\t\t\t# _vis = explore_match(win, img1, img2, kp_pairs, status, H)\n\t\t\tresult = match_and_draw('find_obj')\n\t\t\tserial += 1\n\t\t\toutput.get('data').append({\"process_id\":pid,\"filename\":fn,\"confidence\":result,\"finish\":serial,\"total\":len(parts)*len(compFiles)})\n\t\t\t# print(output,file=sys.stderr)\n\t\t\tr = requests.post(\"http://172.25.25.33:8082/resultMatch\",json = output)\n\t\t\t# print(r.content,file=sys.stderr)\n\t\t\tdata = r.json()\n\t\t\tif data['status']==\"stop\":\n\t\t\t\treturn\n\t\t\toutput = {\"data\":[]}\n\t\t#tmp = Comp.compare(filepath+fn,filepath+part)\n\tr = requests.patch(\"http://172.25.25.33:8082/process/stop\",json = {\"id\":[pid]})\n\t\"\"\"\n\tfor fn in compFiles:\n\t\tprint(fn,file=sys.stderr)\n\t\tif isinstance(parts, list):\n\t\t\tfor part in parts:\n\t\t\t\t#print(Comp.compare(filepath+fn,filepath+part),file=sys.stderr)\n\t\t\t\t\n\t\t\t\ttmp = 
Comp.compare(filepath+fn,filepath+part.get(\"filename\"))\n\t\t\t\tif(tmp[0]):\n\t\t\t\t\tprint(pid,fn,part,tmp[1])\n\t\t\t\t\n\t\t\t\t#print(\"A\")\n\t\t\t\t#print(filepath+\"Crop/\"+part,filepath+\"Crop/\"+fn)\n\t\t\t\t#print(Comp.compare(filepath+part,filepath+fn))\n\t\telse:\n\t\t\ttmp = Comp.compare(filepath+fn,filepath+parts.get(filename))\n\t\t\tif(tmp[0]):\n\t\t\t\tprint(pid,fn,parts,tmp[1])\n\t\t\t#print(Comp.compare(filepath+fn,filepath+parts),file=sys.stderr)\n\t#print(pid,file=sys.stderr)\n\t\"\"\"\n\t# print(output,file=sys.stderr)\n\n\t#print(type(output))\n\t#r = requests.post(\"http://172.25.25.33:8082/result\",data = {\"data\":[{\"process_id\":1,\"filename\":\"123\",\"confidence\":0}]})\n\treturn\ndef removeNoise(im):\n\tim = morphology_sci.grey_dilation(im, (3, 3)) - im\n\tglobal rotation_process\n\trotation_process = np.array(im.shape)\n\t# Binarize.\n\tmean, std = im.mean(), im.std()\n\tt = mean + std\n\tim[im < t] = 0\n\tim[im >= t] = 1\n\n\t# Connected components.\n\ts = [[1,1,1],\n\t [1,1,1],\n\t [1,1,1]]\n\tlbl, numcc = label(im,structure=s)\n\t#lbls = np.arange(1, numcc + 1)\n\tunique, counts = np.unique(lbl, return_counts=True)\n\tfailed = []\n\tfor i in range(1,numcc+1):\n\t\tif counts[i] < 200:\n\t\t\tfailed.append(i)\n\tfor i in failed:\n\t\tlbl[lbl == i] = 0\n\treturn np.array(lbl,dtype=bool)\ndef RecogNet(Boxes,ipath,rAngle,mode):\n lookup = [\n\t\t\t\"1.23479\",\n\t\t\t\"1.2365\",\n\t\t\t\"1.2767\",\n\t\t\t\"4140\",\n\t\t\t\"sus420j2\",\n\t\t\t\"4340\",\n\t\t\t\"8620\",\n\t\t\t\"sus303\",\n\t\t\t\"asp23\",\n\t\t\t\"asp30\",\n\t\t\t\"asp60\",\n\t\t\t\"c1100 copper\",\n\t\t\t\"c3604 brass\",\n\t\t\t\"c90700 bronze\",\n\t\t\t\"c93210 bronze\",\n\t\t\t\"carbide\",\n\t\t\t\"cpm-10v\",\n\t\t\t\"cpm-3v\",\n\t\t\t\"cpm-m4\",\n\t\t\t\"d2\",\n\t\t\t\"g10\",\n\t\t\t\"g15\",\n\t\t\t\"g20\",\n\t\t\t\"g30\",\n\t\t\t\"g40\",\n\t\t\t\"g50\",\n\t\t\t\"g55\",\n\t\t\t\"hap10\",\n\t\t\t\"k340\",\n\t\t\t\"k890\",\n\t\t\t\"mil-60s\",\n\t\t\t\"m7\",\n\t\t\t\"mil-tip\",\n\t\t\t\"s390\",\n\t\t\t\"s45c\",\n\t\t\t\"s7\",\n\t\t\t\"sae64(c93700 bronze)\",\n\t\t\t\"sae660 bronze(c93200 bronze)\",\n\t\t\t\"sae841 bronze\",\n\t\t\t\"scm415\",\n\t\t\t\"scm435\",\n\t\t\t\"scm440\",\n\t\t\t\"d2\",\n\t\t\t\"h13\",\n\t\t\t\"mil-60\",\n\t\t\t\"solide carbride\",\n\t\t\t\"stelite\",\n\t\t\t\"suj2\",\n\t\t\t\"sus304\",\n\t\t\t\"t15\",\n\t\t\t\"v4\",\n\t\t\t\"w360\",\n\t\t\t\"mil-60r\",\n\t\t\t\"sus316\",\n\t\t\t\"sus420j1\",\n\t\t\t\"jiscac406c bronze\",\n\t\t\t\"c83600 bronze\",\n\t\t\t\"c95500 bronze\",\n\t\t\t\"red copper\",\n\t\t\t\"c17200 beryllium copper\",\n\t\t\t\"oxygen-free copper\",\n\t\t\t\"brass\",\n\t\t\t\"cucrzr copper\",\n\t\t\t\"aluminum\",\n\t\t\t\"c95400 bronze\",\n\t\t\t\"c95800 bronze\",\n\t\t\t\"c95810 bronze\",\n\t\t\t\"aluminum bar\",\n\t\t\t\"jis-c-5191 bronze\",\n\t\t\t\"cac502c bronze\",\n\t\t\t\"cusn6 bronze(c51900 bronze)\"\n\t\t\t]\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--image_folder', default=\"CutTexts\", help='path to image_folder which contains text images')\n parser.add_argument('--workers', type=int, help='number of data loading workers', default=1)\n parser.add_argument('--batch_size', type=int, default=192, help='input batch size')\n parser.add_argument('--saved_model', default=\"TPS-ResNet-BiLSTM-Attn.pth\", help=\"path to saved_model to evaluation\")\n \"\"\" Data processing \"\"\"\n parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser.add_argument('--imgH', type=int, default=32, 
help='the height of the input image')\n parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser.add_argument('--rgb', action='store_true', help='use rgb input')\n parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n \"\"\" Model Architecture \"\"\"\n parser.add_argument('--Transformation', type=str, default=\"TPS\", help='Transformation stage. None|TPS')\n parser.add_argument('--FeatureExtraction', type=str, default=\"ResNet\", help='FeatureExtraction stage. VGG|RCNN|ResNet')\n parser.add_argument('--SequenceModeling', type=str, default=\"BiLSTM\", help='SequenceModeling stage. None|BiLSTM')\n parser.add_argument('--Prediction', type=str, default=\"Attn\", help='Prediction stage. CTC|Attn')\n parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')\n parser.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\n parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\n opt = parser.parse_args()\n print(\"RC0\")\n \"\"\" vocab / character number configuration \"\"\"\n if opt.sensitive:\n opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n opt.num_gpu = torch.cuda.device_count()\n \"\"\" model configuration \"\"\"\n if 'CTC' in opt.Prediction:\n converter = CTCLabelConverter(opt.character)\n else:\n converter = AttnLabelConverter(opt.character)\n opt.num_class = len(converter.character)\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n if opt.rgb:\n opt.input_channel = 3\n\n # prepare data. 
two demo images from https://github.com/bgshih/crnn#run-demo\n print(\"RC1\")\n AlignCollate_demo = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n image = cv2.imread(ipath)\n Height,Width,chn = image.shape\n image = ndimage.rotate(image, rAngle,cval = 255)\n image = Image.fromarray(image)\n demo_data = RawDatasetWithBB(image=image, opt=opt,BB = Boxes) # use RawDataset\n demo_loader = torch.utils.data.DataLoader(\n demo_data, batch_size=opt.batch_size,\n shuffle=False,\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_demo, pin_memory=True)\n\n # predict\n model_textrec.eval()\n with torch.no_grad():\n print(\"RC2\")\n minfuzz = 80\n wordlist = []\n count = 0\n matAt = []\n for image_tensors, BoundingBox in demo_loader:\n batch_size = image_tensors.size(0)\n print(\"RC2.1\")\n image = image_tensors.to(device)\n # For max length prediction\n length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)\n text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)\n print(\"RC2.2\")\n if 'CTC' in opt.Prediction:\n preds = model_textrec(image, text_for_pred)\n\n # Select max probabilty (greedy decoding) then decode index to character\n preds_size = torch.IntTensor([preds.size(1)] * batch_size)\n _, preds_index = preds.max(2)\n # preds_index = preds_index.view(-1)\n preds_str = converter.decode(preds_index, preds_size)\n\n else:\n preds = model_textrec(image, text_for_pred, is_train=False)\n\n # select max probabilty (greedy decoding) then decode index to character\n _, preds_index = preds.max(2)\n preds_str = converter.decode(preds_index, length_for_pred)\n\n print(\"RC2.5\")\n preds_prob = torchF.softmax(preds, dim=2)\n preds_max_prob, _ = preds_prob.max(dim=2)\n print(\"RC3\")\n for BB,pred, pred_max_prob in zip(BoundingBox, preds_str, preds_max_prob):\n if 'Attn' in opt.Prediction:\n pred_EOS = pred.find('[s]')\n pred = pred[:pred_EOS] # prune after \"end of sentence\" token ([s])\n pred_max_prob = pred_max_prob[:pred_EOS]\n\n # calculate confidence score (= multiply of pred_max_prob)\n confidence_score = pred_max_prob.cumprod(dim=0)[-1]\n if(confidence_score > 0):\n #fu = fuzz.ratio(pred,\"material\")\n #fu2 = fuzz.ratio(pred,\"werkstoff\")\n #fu = max(fu,fu2)\n #wordlist.append([BB,pred,confidence_score,fu])\n wordlist.append([BB,pred,confidence_score])\n \"\"\"\n if(fu > minfuzz):\n matAt.append(count)\n count+=1\n \"\"\"\n if(mode == 'material'):\n materials,coatings,matlit,coatlit = keywordFinder.getPossibleMaterialsCoatings(wordlist,Height // 12,Width // 3,80)\n return materials,coatings,matlit,coatlit\n if(mode == 'note'):\n return noteFinder.getNoteBox_cluster(wordlist,Height,Width)\n \"\"\"\n matLit = []\n possLit = []\n for at in matAt:\n wordlist[at].append(True)\n matLit.append(wordlist[at])\n #print(oim.shape)\n Height = Height // 12\n Width = Width // 6\n for mat in matLit:\n print(mat)\n TL = np.array(mat[0][0:2])\n for word in wordlist:\n if(len(word) > 4):\n continue\n if(abs(word[0][0] - TL[0]) < Width and abs(word[0][1] - TL[1]) < Height):\n possLit.append(word)\n for l in possLit:\n #print(l[1])\n mf = 0\n ma = -1\n for i in range(len(lookup)):\n cf = fuzz.token_sort_ratio(lookup[i],l[1])\n if(cf > mf):\n mf = cf\n ma = i\n l.append([lookup[ma],mf])\n print(l)\n return [matLit,possLit]\n\t\t\"\"\"\ndef getTextBox(net, image, refine_net=None):\n print(\"TB0\")\n t0 = time.time()\n text_threshold = 0.6\n link_threshold = 0.4\n low_text = 0.4\n cuda = torch.cuda.is_available()\n poly = 
False\n\n # resize\n img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)\n ratio_h = ratio_w = 1 / target_ratio\n\n # preprocessing\n x = imgproc.normalizeMeanVariance(img_resized)\n x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]\n x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]\n if cuda:\n x = x.cuda()\n\n # forward pass\n with torch.no_grad():\n y, feature = net(x)\n\n # make score and link map\n score_text = y[0,:,:,0].cpu().data.numpy()\n score_link = y[0,:,:,1].cpu().data.numpy()\n print(\"TB1\")\n # refine link\n if refine_net is not None:\n with torch.no_grad():\n y_refiner = refine_net(y, feature)\n score_link = y_refiner[0,:,:,0].cpu().data.numpy()\n\n t0 = time.time() - t0\n t1 = time.time()\n\n # Post-processing\n boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)\n\n # coordinate adjustment\n boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)\n polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)\n for k in range(len(polys)):\n if polys[k] is None: polys[k] = boxes[k]\n\n t1 = time.time() - t1\n\n # render results (optional)\n render_img = score_text.copy()\n render_img = np.hstack((render_img, score_link))\n ret_score_text = imgproc.cvt2HeatmapImg(render_img)\n\n print(\"\\ninfer/postproc time : {:.3f}/{:.3f}\".format(t0, t1))\n \n polys2 = []\n for poly in polys:\n poly2 = [poly[0][0],poly[0][1],poly[2][0],poly[2][1]]\n polys2.append(poly2)\n polys2 = np.array(polys2)\n polys2 = merge.non_max_suppression_fast(polys2,0.8)\n polys2 = merge2.non_max_suppression_fast(polys2,0,5)\n #polys2 = merge2.non_max_suppression_fast(polys2,0,3)\n #polys2 = merge2.non_max_suppression_fast(polys2,0,5)\n print(\"TB2\")\n polys3 = []\n for poly2 in polys2:\n poly3 = [[poly2[0],poly2[1]],[poly2[2],poly2[1]],[poly2[2],poly2[3]],[poly2[0],poly2[3]]]\n polys3.append(np.array(poly3))\n print(\"TB3\")\n return boxes, polys3, ret_score_text\n\n@app.route('/rotate')\ndef rotate():\n\tfilename = request.args.get('filename')\n\t#Area = request.args.get('Area')\n\tfilepath = \"../uploads/\"\n\t# print(filepath+filename, file=sys.stderr)\n\tim = cv2.imread(filepath+filename)##'111.jpg'\n\tim_tmp = np.array(im)\n\tx_tmp = im_tmp.shape[1]\n\ty_tmp = im_tmp.shape[0]\n\tim_tmp = Image.fromarray(im)\n\tim_tmp = ImageOps.grayscale(im_tmp)\n\tim_tmp = np.array(im_tmp)\n\tim_tmp = removeNoise(im_tmp)\n\tim_tmp = np.logical_not(im_tmp)\n\tim_tmp = 255*im_tmp\n\t#cv2.imwrite('C:/Users/Hilton/Desktop/Pre.jpg', 255*im_tmp)\n\t\"\"\"\n\tim2=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\tim2=im2.astype(np.uint8)\n\t#cv2.imwrite('C:/Users/Hilton/Desktop/Pre.jpg', im2)\n\tnb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im2, connectivity=8)\n\tsizes = stats[1:, -1]; nb_components = nb_components - 1\n\tim3 = np.zeros((output.shape))\n\tfor i in range(0, nb_components):\n\t if sizes[i] >= 0:\n\t im3[output == i + 1] = 255 \n\tcv2.imwrite('C:/Users/Hilton/Desktop/Pre.jpg', im3)\n\tim3 = cv2.cvtColor(im3.astype('uint8'), cv2.COLOR_GRAY2RGB)\n\tim_N = np.array(im3)\n\t\"\"\"\n\n\tim_N = np.array(im_tmp)\n\t#print(im_N.shape)\n\td_x = im_N.shape[1]\n\td_y = im_N.shape[0]\n\t#im_r = im_N[:,:,0]\n\t#im_g = im_N[:,:,1]\n\t#im_b = im_N[:,:,2]\n\t# if(d_y > d_x):\n\t# \tim_r = np.rot90(im_r,3)\n\t# \tim_g = np.rot90(im_g,3)\n\t# \tim_b = np.rot90(im_b,3)\n\t# \ttmp = d_x\n\t# \td_x = d_y\n\t# \td_y = 
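# A hedged sketch (editor's addition): merge.non_max_suppression_fast and merge2.non_max_suppression_fast above are project-local helpers not shown in this file; a minimal IoU-based NMS over [x1, y1, x2, y2] boxes (their assumed box layout) would be:\nimport numpy as np\ndef nms(boxes, iou_thresh):\n    if len(boxes) == 0:\n        return boxes\n    boxes = boxes.astype(float)\n    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]\n    areas = (x2 - x1) * (y2 - y1)\n    order = areas.argsort()[::-1]  # visit larger boxes first\n    keep = []\n    while order.size > 0:\n        i = order[0]\n        keep.append(i)\n        xx1 = np.maximum(x1[i], x1[order[1:]])\n        yy1 = np.maximum(y1[i], y1[order[1:]])\n        xx2 = np.minimum(x2[i], x2[order[1:]])\n        yy2 = np.minimum(y2[i], y2[order[1:]])\n        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)\n        iou = inter / (areas[i] + areas[order[1:]] - inter)\n        order = order[1:][iou <= iou_thresh]\n    return boxes[keep]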
tmp\n\n\t#outputImg = rotateImage(outputImg,-0.6)\n\t#cv2.imwrite('color_img.jpg', outputImg)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = 0\n\t\tfor k in range(i):\n\t\t\tif im_N[j+k][i-k] < 210: \n\t\t\t\tprint(im_N[j+k][i-k])\n\t\t\t\ttopLeft = [i-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\t#print(\"TL\",topLeft)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y - i - 1\n\t\tfor k in range(i):\n\t\t\t#print(j+k,k)\n\t\t\tif im_N[j+k][k] < 210: \n\t\t\t\tbotLeft = [k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\t#print(\"BL First:\",firstGuess,\"Second\",secondGuess)\n\t#print(\"BL\",botLeft)\n\n\trise = botLeft[1]-topLeft[1]\n\trun = botLeft[0]-topLeft[0]\n\t#print(rise,run)\n\tif run == 0:\n\t\trAngle = 0\n\telse:\n\t\trAngle = np.arctan(rise/run)*180/np.pi\n\t\t\n\tim = ndimage.rotate(im, rAngle,cval = 255)\n\tnewdata=pytesseract.image_to_osd(im)\n\tim = ndimage.rotate(im, 360-int(re.search('(?<=Rotate: )\\d+', newdata).group(0)),cval = 255)\n\t# if(rAngle > 0):\n\t# \trAngle = rAngle-90\n\t# else:\n\t# \trAngle = rAngle+90\n\n\t#im4 = ndimage.rotate(im, rAngle-90,cval = 255)\n\t#cv2.imwrite('C:/Users/Hilton/Desktop/Post.jpg', im4)\n\n\t#im = cv2.imread(filepath+filename)\n\t#im = ndimage.rotate(im, rAngle+(int(re.search('(?<=Rotate: )\\d+', newdata).group(0))),cval = 255)\n\t#cv2.imwrite('C:/Users/Hilton/Desktop/Post_'+filename, im)\n\t\n\t#im_org = im\n\t#anything = []\n\t#angle = 0\n\t\n\t# for i in range(4):\n\t# \tim = ndimage.rotate(im_org, rAngle+90*i)\n\n\n\t# \tim = Image.fromarray(im)\n\t# \tcustom_oem_psm_config = r'--psm 11'\n\t# \tOCRTest = im\n\t# \tArea_not_given = True\n\t# \tif(Area is not None):\n\t# \t\tArea = json.loads(Area)\n\t# \t\tTL = Area[0]\n\t# \t\tBR = Area[1]\n\t# \t\tOCRTest = OCRTest.crop((TL[0],TL[1],BR[0],BR[1]))\n\t# \t\tArea_not_given = False\n\t# \tdata = pytesseract.image_to_data(OCRTest,config=custom_oem_psm_config)\n\t# \tdata = data.split(\"\\n\")\n\t# \tdata.pop(0)\n\t# \teverything = []\n\t# \tfor dt in data:\n\t# \t\tdt = dt.split(\"\\t\")\n\t# \t\tif(len(dt) >= 11):\n\t# \t\t\tif(Area_not_given):\n\t# \t\t\t\tnumbers = sum(c.isdigit() for c in dt[11])\n\t# \t\t\t\tif(numbers >= 6):\n\t# \t\t\t\t\tbox = [int(i) for i in dt[6:10]]\n\t# \t\t\t\t\teverything.append({\"bounding_box\":box,\"text\":dt[11]})\n\t# \t\t\telse:\n\t# \t\t\t\tif(len(dt[11]) > 2):\n\t# \t\t\t\t\tbox = [int(i) for i in dt[6:10]]\n\t# \t\t\t\t\tbox2 = [TL[0],TL[1],0,0]\n\t# \t\t\t\t\tbox = np.add(box,box2)\n\t# \t\t\t\t\teverything.append({\"bounding_box\":box.tolist(),\"text\":dt[11]})\n\t# \tif len(everything) > len(anything):\n\t# \t\tanything = everything\n\t# \t\tangle=i\n\tprint(rAngle,(360-int(re.search('(?<=Rotate: )\\d+', newdata).group(0))))\n\treturn jsonify({\"rotate\":rAngle+(360-int(re.search('(?<=Rotate: )\\d+', newdata).group(0)))})\n\n@app.route('/cut')\ndef main0():\n\t#Main\n\treduction = 4\n\tfilename = request.args.get('filename')\n\tfilepath = \"../uploads/\"\n\t# print(filepath+filename, file=sys.stderr)\n\tim = cv2.imread(filepath+filename)##'111.jpg'\n\tim_N = np.array(im)\n\n\t#print(im_N.shape, file=sys.stderr)\n\td_x = im_N.shape[1]\n\td_y = im_N.shape[0]\n\tim_r = im_N[:,:,0]\n\tim_g = im_N[:,:,1]\n\tim_b = im_N[:,:,2]\n\tif(d_y > d_x):\n\t\tim_r = np.rot90(im_r,3)\n\t\tim_g = np.rot90(im_g,3)\n\t\tim_b = np.rot90(im_b,3)\n\t\ttmp = d_x\n\t\td_x = d_y\n\t\td_y = tmp\n\n\t#outputImg = rotateImage(outputImg,-0.6)\n\t#cv2.imwrite('color_img.jpg', outputImg)\n\n\tfound = 
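# A hedged sketch (editor's addition): /rotate above estimates the page skew from two dark border points via arctan(rise/run) and then squares the page up with pytesseract's OSD; the angle computation as a guarded helper:\nimport numpy as np\ndef skew_angle_deg(top_left, bot_left):\n    rise = bot_left[1] - top_left[1]\n    run = bot_left[0] - top_left[0]\n    if run == 0:\n        return 0.0  # a vertical left edge means there is no skew to correct\n    return float(np.arctan(rise / run) * 180 / np.pi)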
False\n\tfor i in range(d_x):\n\t\tj = 0\n\t\tfor k in range(i):\n\t\t\tif im_r[j+k][i-k] < 210 and im_g[j+k][i-k] < 210 and im_b[j+k][i-k] < 210: \n\t\t\t\ttopLeft = [i-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"TL\",topLeft)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y-i\n\t\tfor k in range(i):\n\t\t\tif im_r[j+k][d_x-1-k] < 210 and im_g[j+k][d_x-1-k] < 210 and im_b[j+k][d_x-1-k] < 210: \n\t\t\t\tbotRight = [d_x-1-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"BR\",botRight)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y - i - 1\n\t\tfor k in range(i):\n\t\t\t#print(j+k,k)\n\t\t\tif im_r[j+k][k] < 210 and im_g[j+k][k] < 210 and im_b[j+k][k] < 210: \n\t\t\t\tbotLeft = [k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\t#print(\"BL First:\",firstGuess,\"Second\",secondGuess)\n\tprint(\"BL\",botLeft)\n\n\n\toutputImg = np.zeros([d_y,d_x,3])\n\toutputImg[:,:,0] = im_r\n\toutputImg[:,:,1] = im_g\n\toutputImg[:,:,2] = im_b\n\trise = botLeft[1]-topLeft[1]\n\trun = botLeft[0]-topLeft[0]\n\trAngle = np.arctan(rise/run)*180/np.pi\n\toutputImg = rotateImage(outputImg,rAngle-90)\n\tcv2.imwrite(filepath+\"rot.png\", outputImg)\n\tnew_b = outputImg[:,:,0]\n\tnew_g = outputImg[:,:,1]\n\tnew_r = outputImg[:,:,2]\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = 0\n\t\tfor k in range(i):\n\t\t\tif new_r[j+k][i-k] < 210 and new_g[j+k][i-k] < 210 and new_b[j+k][i-k] < 210: \n\t\t\t\ttopLeft = [i-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"Start at\",topLeft)\n\n\tnextPos = moveHor(new_r,new_g,new_b,topLeft,1,1)\n\tprint(\"First move\",nextPos)\n\tnextPos = moveVer(new_r,new_g,new_b,nextPos,1,3)\n\tprint(\"TL\",nextPos)\n\tBPTopLeft = nextPos\n\tnextPos = moveHor(new_r,new_g,new_b,nextPos,1,4)\n\tprint(\"Third move\",nextPos)\n\tnextPos = moveVer(new_r,new_g,new_b,nextPos,1,1)\n\tprint(\"BR\",nextPos)\n\tBPBotRight = nextPos\n\n\tBP_d_x = BPBotRight[0]-BPTopLeft[0]\n\tBP_d_y = BPBotRight[1]-BPTopLeft[1]\n\tBPImg = np.zeros([BP_d_y,BP_d_x,3])\n\tSplits = np.zeros([BP_d_y,BP_d_x,3])\n\tBPImg[:,:,0] = new_b[BPTopLeft[1]:BPBotRight[1],BPTopLeft[0]:BPBotRight[0]]\n\tBPImg[:,:,1] = new_g[BPTopLeft[1]:BPBotRight[1],BPTopLeft[0]:BPBotRight[0]]\n\tBPImg[:,:,2] = new_r[BPTopLeft[1]:BPBotRight[1],BPTopLeft[0]:BPBotRight[0]]\n\tcv2.imwrite(filepath+'BP.png', BPImg)\n\t# whiten a 5 px frame so the sheet border does not bleed into the components\n\tBPImg[0:5,:,:] = 255\n\tBPImg[:,0:5,:] = 255\n\tBPImg[BP_d_y-5:BP_d_y,:,:] = 255\n\tBPImg[:,BP_d_x-5:BP_d_x,:] = 255\n\t\"\"\"\n\tfor i in range(BP_d_x):\n\t\tfor j in range(BP_d_y):\n\t\t\tif not testBlack(new_r[j+BPTopLeft[1],i+BPTopLeft[0]],new_g[j+BPTopLeft[1],i+BPTopLeft[0]],new_b[j+BPTopLeft[1],i+BPTopLeft[0]]):\n\t\t\t\tBPImg[j,i,:] = [255,255,255]\n\t\t\telse:\n\t\t\t\tBPImg[j,i,:] = [0,0,0]\n\t\"\"\"\n\t#cv2.imwrite(filepath+\"BIN0_\"+filename, BPImg)#noncolorful cutPic\n\tBPImg = cv2.cvtColor(BPImg.astype('uint8'), cv2.COLOR_BGR2GRAY)\n\tret,BPImg = cv2.threshold(BPImg,170,255,cv2.THRESH_BINARY)\n\tBPImg = np.repeat(BPImg[:, :, np.newaxis], 3, axis=2)\n\n\tBPImg[0:5*reduction,:,:] = [255,255,255]\n\tBPImg[:,0:5*reduction,:] = [255,255,255]\n\tBPImg[BP_d_y-(5*reduction):BP_d_y,:,:] = [255,255,255]\n\tBPImg[:,BP_d_x-(5*reduction):BP_d_x,:] = [255,255,255]\n\n\t#cv2.imwrite(filepath+\"BIN_\"+filename, BPImg)#noncolorful cutPic\n\t#BPImg_reduced = cv2.resize(BPImg, (math.floor(BP_d_x/reduction), math.floor(BP_d_y/reduction)), interpolation=cv2.INTER_AREA)\n\tnaive_red = 
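# A hedged sketch (editor's addition): the three corner searches above all walk growing anti-diagonals until a pixel is dark on every channel; the top-left variant, factored out, with a bounds guard the inline version relies on the landscape orientation to avoid:\ndef first_dark_on_diagonals(im_r, im_g, im_b, d_x, d_y, thresh=210):\n    for i in range(d_x):\n        for k in range(i):\n            y, x = k, i - k  # k-th pixel on the i-th anti-diagonal from the top edge\n            if y < d_y and im_r[y][x] < thresh and im_g[y][x] < thresh and im_b[y][x] < thresh:\n                return [x, y]\n    return None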
naiveReduction(BPImg,reduction)\n\tBPImg_reduced = naive_red\n\t#cv2.imwrite(filepath+\"Crop/TEST.png\", BPImg_reduced)\n\tBPCanvas = np.full([math.floor(BP_d_y/reduction),math.floor(BP_d_x/reduction)],255)\n\tBPFullCanvas = np.full([BP_d_y,BP_d_x],255)\n\t#BPCanvas = np.full([BP_d_y,BP_d_x],255)\n\n\tnextPos = moveVer(new_r,new_g,new_b,nextPos,1,2)\n\tNOTopLeft = moveHor(new_r,new_g,new_b,nextPos,-1,1)\n\tprint(NOTopLeft)\n\tNOBotRight = moveVer(new_r,new_g,new_b,nextPos,1,1)\n\tprint(NOBotRight)\n\n\tNO_d_x = NOBotRight[0]-NOTopLeft[0]\n\tNO_d_y = NOBotRight[1]-NOTopLeft[1]\n\tNOImg = np.zeros([NO_d_y,NO_d_x,3])\n\tNOImg[:,:,0] = new_b[NOTopLeft[1]:NOBotRight[1],NOTopLeft[0]:NOBotRight[0]]\n\tNOImg[:,:,1] = new_g[NOTopLeft[1]:NOBotRight[1],NOTopLeft[0]:NOBotRight[0]]\n\tNOImg[:,:,2] = new_r[NOTopLeft[1]:NOBotRight[1],NOTopLeft[0]:NOBotRight[0]]\n\tserial=\"fixed\"+filename\n\tcv2.imwrite(filepath+\"Crop/\"+serial, NOImg)\n\n\t#BPCanvas = np.full([BP_d_y,BP_d_x],255)\n\tparts_count = 1\n\tcrop_names = []\n\tboxes = {}\n\t\"\"\"\n\tfor i in range(BP_d_x):\n\t\tfor j in range(BP_d_y):\n\t\t\tif BPImg[j,i,1] < 210:\n\t\t\t\tBPCanvas = np.full([BP_d_y,BP_d_x],255)\n\t\t\t\tgetConnectedShape_Opti(BPImg,BPCanvas,i,j,1,0,0)\n\t\t\t\tcount = np.count_nonzero(BPCanvas == 0)\n\t\t\t\tif count > 1000:\n\t\t\t\t\tnons = np.nonzero(BPCanvas == 0)\n\t\t\t\t\tTLY = min(nons[0])\n\t\t\t\t\tTLX = min(nons[1])\n\t\t\t\t\tBRY = max(nons[0])\n\t\t\t\t\tBRX = max(nons[1])\n\t\t\t\t\tBPCanvas = BPCanvas[TLY:BRY,TLX:BRX]\n\t\t\t\t\tcv2.imwrite(filepath+\"Crop/\"+str(parts_count)+\"_\"+filename, BPCanvas)\n\t\t\t\t\tcrop_names.append(str(parts_count)+\"_\"+filename)\n\t\t\t\t\tparts_count+=1\n\t\"\"\"\n\ttst_c = 0\n\tfor i in range(math.floor(BP_d_x/reduction)):\n\t\tfor j in range(math.floor(BP_d_y/reduction)):\n\t\t\tif BPImg_reduced[j,i,0] < 210:\n\t\t\t\tBPCanvas = np.full([math.ceil(BP_d_y/reduction),math.ceil(BP_d_x/reduction)],255)\n\t\t\t\tgetConnectedShape_Opti(BPImg_reduced,BPCanvas,i,j,2,0,0)\n\t\t\t\tcount = np.count_nonzero(BPCanvas == 120)\n\t\t\t\t#cv2.imwrite(filepath+\"Crop/\"+str(tst_c)+\"_TST_\"+filename, BPCanvas)\n\t\t\t\ttst_c+=1\n\t\t\t\tif count > 250:\n\t\t\t\t\tBPCanvas = naiveInter(BPCanvas,reduction)\n\t\t\t\t\tBPCanvasOut = np.full([BP_d_y,BP_d_x,3],255)\n\t\t\t\t\tTMP = np.logical_and(BPImg[:,:,0] == 0,BPCanvas[0:BP_d_y,0:BP_d_x] == 120)\n\t\t\t\t\tTMP = np.logical_not(TMP)\n\t\t\t\t\t# write the mask into each colour channel; the channel loop must not reuse i, which is the outer column index\n\t\t\t\t\tfor ch in range(3):\n\t\t\t\t\t\tBPCanvasOut[:,:,ch] = TMP\n\t\t\t\t\tBPCanvasOut = BPCanvasOut*255\n\t\t\t\t\tnons = np.nonzero(BPCanvasOut == 0)\n\t\t\t\t\tTLY = min(nons[0])\n\t\t\t\t\tTLX = min(nons[1])\n\t\t\t\t\tBRY = max(nons[0])\n\t\t\t\t\tBRX = max(nons[1])\n\t\t\t\t\tprint([TLX,TLY],[BRX,BRY])\n\t\t\t\t\tBPCanvasOut = BPCanvasOut[TLY:BRY,TLX:BRX]\n\t\t\t\t\tcv2.imwrite(filepath+\"Crop/\"+str(parts_count)+\"_\"+filename, BPCanvasOut)\n\t\t\t\t\tcrop_names.append(str(parts_count)+\"_\"+filename)\n\t\t\t\t\tboxes[str(parts_count)+\"_\"+filename] = [int(TLX),int(TLY),int(BRX)-int(TLX),int(BRY)-int(TLY)]\n\t\t\t\t\tparts_count+=1\n\n\t# print(crop_names,file=sys.stderr)\n\treturn jsonify({\"No_file\":serial,\"No_recog\":NO_recog.getNO_img(NOImg),\"Crop_file\":crop_names,\"Bounding_boxes\":boxes})\n\n# @app.route('/recog')\n# @app.route('/test')\n# def main1():\n# \tfilename = request.args.get('filename')\n# \tfilepath = str(pathlib.Path(__file__).parent.absolute())+\"/../uploads/\"\n# \treturn(NO_recog.getNO(filepath+filename))\n@app.route('/textrecog')\ndef main2():\n\tfilename = 
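# A hedged sketch (editor's addition): getConnectedShape_Opti, naiveReduction and naiveInter are project-local flood-fill/rescale helpers that are not in this file. OpenCV's connected components can express the same 'crop every big ink blob' idea; this is an alternative formulation, not the author's implementation:\nimport cv2\nimport numpy as np\ndef crop_components(binary_img, min_area=1000):\n    # binary_img: uint8, 0 = ink, 255 = background\n    inverted = cv2.bitwise_not(binary_img)\n    n, labels, stats, _ = cv2.connectedComponentsWithStats(inverted, connectivity=8)\n    crops = []\n    for i in range(1, n):  # label 0 is the background\n        x, y, w, h, area = stats[i]\n        if area >= min_area:\n            crops.append(((x, y, w, h), binary_img[y:y + h, x:x + w]))\n    return crops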
request.args.get('filename')\n\treturn jsonify(TextRecog.Recog(filename))\n@app.route('/match')\ndef main3():\n\tjson_data = request.args.get('data')\n\tdata = ast.literal_eval(json_data)\n\tthres_multi = request.args.get('threshold')\n\tif(thres_multi is None):\n\t\tthres_multi = 5\n\telse:\n\t\tthres_multi = int(thres_multi)\n\t\tif(thres_multi > 10):\n\t\t\tthres_multi = 10\n\t\tif(thres_multi < 1):\n\t\t\tthres_multi = 1\n\tnL = request.args.get('layers')\n\tif(nL is None):\n\t\tnL = 1\n\telse:\n\t\tnL = int(nL)\n\t\tif(nL < 1):\n\t\t\tnL = 1\n\t\tif(nL > 3):\n\t\t\tnL = 3\n\thess = request.args.get('hessian')\n\tif(hess is None):\n\t\thess = 400\n\telse:\n\t\thess = int(hess)\n\t\tif(hess < 200):\n\t\t\thess = 200\n\t\tif(hess > 800):\n\t\t\thess = 800\n\tprocess_id = data[\"process_id\"]\n\tcomponents = data[\"components\"]\n\tfile_id = data[\"file_id\"]\n\tfilepath = \"../uploads/\"\n\t# _, _, compFiles = next(walk(filepath), (None, None, []))\n\tr = requests.get(\"http://172.25.25.33:8082/recognition/files/\"+str(file_id))\n\tcompFiles = r.json()['data']\n\t#print(type(compFiles[0]))\n\t# for parts in components:\n\t\t# print(parts.get('filename') in compFiles,file=sys.stderr)\n\t\t# if(parts.get('filename') in compFiles):\n\t\t# \tcompFiles.remove(parts.get('filename'))\n\t# compFiles = [compFiles[0]]\t\t\t\n\t# print(compFiles,file=sys.stderr)\n\tt = threading.Thread(target = match, args=(components,compFiles,process_id,hess,nL,thres_multi))\n\tt.start()\n\t\"\"\"\n\tfor parts in components:\n\t\t#print(parts,file=sys.stderr)\n\t\tt = threading.Thread(target = compare, args=(parts,compFiles,process_id))\n\t\tt.start()\n\t\"\"\"\n\t#print(data[\"process_id\"])\n\t#data = json_data['data']\n\t#process_id\n\t#components\n\t\"\"\"\n\tfilename = request.args.get('filename')\n\t#filepath = \"../uploads/\"\n\t_, _, compFiles = next(walk(filepath+\"Crop/\"), (None, None, []))\n\tfor fn in compFiles:\n\t\ttmp = Comp.compare(filepath+filename,filepath+\"Crop/\"+fn)\n\t\tif(tmp[0]):\n\t\t\tsucc.append([tmp[1],fn])\n\t\"\"\"\n\t#print(filenames,file=sys.stderr)\n\treturn json.dumps({'success':True}), 200, {'ContentType':'application/json'} \n\n@app.route('/compare')\ndef cmp():\n\tjson_data = request.args.get('data')\n\tdata = ast.literal_eval(json_data)\n\tthres_multi = request.args.get('threshold')\n\tif(thres_multi is None):\n\t\tthres_multi = 5\n\telse:\n\t\tthres_multi = int(thres_multi)\n\t\tif(thres_multi > 10):\n\t\t\tthres_multi = 10\n\t\tif(thres_multi < 1):\n\t\t\tthres_multi = 1\n\tnL = request.args.get('layers')\n\tif(nL is None):\n\t\tnL = 1\n\telse:\n\t\tnL = int(nL)\n\t\tif(nL < 1):\n\t\t\tnL = 1\n\t\tif(nL > 3):\n\t\t\tnL = 3\n\thess = request.args.get('hessian')\n\tif(hess is None):\n\t\thess = 400\n\telse:\n\t\thess = int(hess)\n\t\tif(hess < 200):\n\t\t\thess = 200\n\t\tif(hess > 800):\n\t\t\thess = 800\n\tprocess_id = data[\"process_id\"]\n\tcomponent_id = data[\"component_id\"]\n\tcomponents = data[\"components\"]\n\tfilepath = \"../uploads/\"\n\tr = requests.get(\"http://172.25.25.33:8082/recognition/crops/\"+str(component_id))\n\tcompFiles = r.json()['data']\n\t# _, _, compFiles = next(walk(filepath+\"Crop/\"), (None, None, []))\n\t#print(type(compFiles[0]))\n\t# for parts in components:\n\t# \tfor part in parts:\n\t# \t\t#print(type(part),file=sys.stderr)\n\t# \t\tif(part in compFiles):\n\t# \t\t\tcompFiles.remove(part)\n\t#print(compFiles)\n\tt = threading.Thread(target = compare, 
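# A hedged sketch (editor's addition): /match and /compare clamp threshold to [1, 10], layers to [1, 3] and hessian to [200, 800] with repeated if-chains; the same behaviour as one helper:\ndef clamped_int_arg(raw, default, lo, hi):\n    if raw is None:\n        return default\n    return max(lo, min(hi, int(raw)))\n# e.g. thres_multi = clamped_int_arg(request.args.get('threshold'), 5, 1, 10)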
args=(components,compFiles,process_id,hess,nL,thres_multi))\n\tt.start()\n\t\"\"\"\n\tfor parts in components:\n\t\t#print(parts,file=sys.stderr)\n\t\tt = threading.Thread(target = compare, args=(parts,compFiles,process_id))\n\t\tt.start()\n\t\"\"\"\n\t#print(data[\"process_id\"])\n\t#data = json_data['data']\n\t#process_id\n\t#components\n\t\"\"\"\n\tfilename = request.args.get('filename')\n\t#filepath = \"../uploads/\"\n\t_, _, compFiles = next(walk(filepath+\"Crop/\"), (None, None, []))\n\tfor fn in compFiles:\n\t\ttmp = Comp.compare(filepath+filename,filepath+\"Crop/\"+fn)\n\t\tif(tmp[0]):\n\t\t\tsucc.append([tmp[1],fn])\n\t\"\"\"\n\t#print(filenames,file=sys.stderr)\n\treturn \"\"\n\n@app.route('/')\ndef index():\n\tt = threading.Thread(target = job)\n\tt.start()\n\treturn '
hello
'\n\n@app.route('/recog')\ndef main4():\n\t# print(\"LMAO\", file=sys.stderr)\n\t# return jsonify({\"No_file\":Fname})\n\tFname = request.args.get('filename')\n\tangle = float(request.args.get('rotate'))\n\tpath = \"../uploads/\"\n\tcustom_oem_psm_config = r'--psm 11 digits'\n\n\tim = cv2.imread(path+Fname)##'111.jpg'\n\tim_N = np.array(im)\n\t#print(im_N.shape)\n\td_x = im_N.shape[1]\n\td_y = im_N.shape[0]\n\tim_r = im_N[:,:,0]\n\tim_g = im_N[:,:,1]\n\tim_b = im_N[:,:,2]\n\tif(d_y > d_x):\n\t\tim_r = np.rot90(im_r,3)\n\t\tim_g = np.rot90(im_g,3)\n\t\tim_b = np.rot90(im_b,3)\n\t\ttmp = d_x\n\t\td_x = d_y\n\t\td_y = tmp\n\n\t#outputImg = rotateImage(outputImg,-0.6)\n\t#cv2.imwrite('color_img.jpg', outputImg)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = 0\n\t\tfor k in range(i):\n\t\t\tif im_r[j+k][i-k] < 210 and im_g[j+k][i-k] < 210 and im_b[j+k][i-k] < 210: \n\t\t\t\ttopLeft = [i-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"TL\",topLeft)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y-i\n\t\tfor k in range(i):\n\t\t\tif im_r[j+k][d_x-1-k] < 210 and im_g[j+k][d_x-1-k] < 210 and im_b[j+k][d_x-1-k] < 210: \n\t\t\t\tbotRight = [d_x-1-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"BR\",botRight)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y - i - 1\n\t\tfor k in range(i):\n\t\t\t#print(j+k,k)\n\t\t\tif im_r[j+k][k] < 210 and im_g[j+k][k] < 210 and im_b[j+k][k] < 210: \n\t\t\t\tbotLeft = [k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\t#print(\"BL First:\",firstGuess,\"Second\",secondGuess)\n\tprint(\"BL\",botLeft)\n\n\n\toutputImg = np.zeros([d_y,d_x,3])\n\toutputImg[:,:,0] = im_b\n\toutputImg[:,:,1] = im_g\n\toutputImg[:,:,2] = im_r\n\trise = botLeft[1]-topLeft[1]\n\trun = botLeft[0]-topLeft[0]\n\n\tim = cv2.imread(path+Fname)\n\n\t\n\tim = ndimage.rotate(im, angle)\n\n\tcv2.imwrite(path+\"recog1_\"+Fname, im)\n\tim = Image.fromarray(im)\n\n\n\tdata = pytesseract.image_to_data(im,config=custom_oem_psm_config)\n\tdata = data.split(\"\\n\")\n\tdata.pop(0)\n\tdata.pop()\n\teverything = []\n\tbox_list_new = []\n\t# return jsonify({\"No_file\":Fname})\n\n\tfor st in data:\n\t\tli = st.split(\"\\t\")\n\t\tif(len(li[11]) != 0):\n\t\t\tbox_list_new.append([li[6],li[7],li[8],li[9]])\n\t\t\tbox = [int(i) for i in li[6:10]]\n\t\t\teverything.append({\"bounding_box\":box,\"text\":li[11]})\n\tim_N = np.array(im)\n\tim_Box = im_N.copy()\n\n\tfor crds in box_list_new:\n\t\tcv2.rectangle(im_Box,(int(crds[0]),int(crds[1])),(int(crds[0])+int(crds[2]),int(crds[1])+int(crds[3])),(255,0,0),2)\n\t\t#cv2.rectangle(im_N,(int(crds[0]),int(crds[1])),(int(crds[0])+int(crds[2]),int(crds[1])+int(crds[3])),(255,255,255),-1)\n\t#cv2.imwrite('shapes/recog.png', im_N)\n\t# cv2.imwrite(path+\"recog_\"+Fname, im_Box)\n\treturn jsonify(everything)\n@app.route('/pdf')\ndef Pdf():\n\tFname = request.args.get('filename')\n\tpages = convert_from_path('../uploads/'+Fname, 200)\n\tfor page in pages:\n\t\tpage.save('../uploads/'+os.path.splitext(Fname)[0]+'.jpg', 'JPEG')\n\treturn jsonify({\"filename\":os.path.splitext(Fname)[0]+'.jpg'})\n@app.route('/CustomerPlan')\ndef CPlan():\n\tFname = request.args.get('fileName')\n\tArea = request.args.get('area')\n\tangle = (request.args.get('rotate'))\n\tkeywords = request.args.get('keywords')\n\tpath = \"../uploads/\"\n\tim_org = cv2.imread(path+Fname)\n\t# print(im_org)\n\tif(angle is not None):\n\t\tangle = float(angle)\n\t\tim = ndimage.rotate(im_org, 
angle)\n\telse:\n\t\tim = im_org\n\t\tangle = 0\t# default so the rotate below never receives None\n\t#rAngle = getRotate(Fname)\n\trAngle = angle\n\tim = ndimage.rotate(im_org, rAngle,cval = 255)\n\tim = Image.fromarray(im)\n\tcustom_oem_psm_config = r'--psm 11'\n\tOCRTest = im\n\tArea_not_given = True\n\tkeywords_not_given = True\n\tif(Area is not None):\n\t\tArea = json.loads(Area)\n\t\tTL = Area[0]\n\t\tBR = Area[1]\n\t\tOCRTest = OCRTest.crop((TL[0],TL[1],BR[0],BR[1]))\n\t\tArea_not_given = False\n\tif(keywords is not None):\n\t\tkeywords = json.loads(keywords)\n\t\tkeywords_not_given = False\n\tdata = pytesseract.image_to_data(OCRTest,config=custom_oem_psm_config)\n\tdata = data.split(\"\\n\")\n\tdata.pop(0)\n\tpossibleIds = []\n\tkeywords_ita = {'material':['materiale'],'hardness':['durezza'],'coating':['trattamenti termici','rivestimenti']}\n\tkeywords_en = {'materiale':['material'],'durezza':['hardness'],'trattamenti termici':['coating'],'rivestimenti':['coating']}\n\tif(keywords_not_given):\n\t\tfor dt in data:\n\t\t\tdt = dt.split(\"\\t\")\n\t\t\tif(len(dt) >= 11):\n\t\t\t\tnumbers = sum(c.isdigit() for c in dt[11])\n\t\t\t\tif(Area_not_given):\n\t\t\t\t\tif(numbers >= 6):\n\t\t\t\t\t\tbox = [int(i) for i in dt[6:10]]\n\t\t\t\t\t\tpossibleIds.append({\"bounding_box\":box,\"text\":dt[11]})\n\t\t\t\telse:\n\t\t\t\t\tif(len(dt[11]) > 6 and numbers >= 3):\n\t\t\t\t\t\tbox = [int(i) for i in dt[6:10]]\n\t\t\t\t\t\tbox2 = [TL[0],TL[1],0,0]\n\t\t\t\t\t\tbox = np.add(box,box2)\n\t\t\t\t\t\tpossibleIds.append({\"bounding_box\":box.tolist(),\"text\":dt[11]})\n\t\treturn jsonify(possibleIds)\n\tif(not keywords_not_given):\n\t\tallkeys = []\n\t\tallkeys.extend(keywords)\n\t\tfor keys in keywords:\n\t\t\tif(keys in keywords_ita):\n\t\t\t\tallkeys.extend(keywords_ita[keys])\n\t\t\tif(keys in keywords_en):\n\t\t\t\tallkeys.extend(keywords_en[keys])\n\t\tpossbleMatches = []\n\t\tfor dt in data:\n\t\t\tdt = dt.split(\"\\t\")\n\t\t\tif(len(dt) >= 11):\n\t\t\t\t#matching = max(fuzz.ratio('materiale',dt[11]),fuzz.ratio('trattamenti termici',dt[11]),fuzz.ratio('durezza',dt[11]),fuzz.ratio('rivestimenti',dt[11]))\n\t\t\t\tmatching = 0\n\t\t\t\tmatched = \"\"\n\t\t\t\to_text = \"\"\n\t\t\t\tfor key in allkeys:\n\t\t\t\t\t#new_match = fuzz.ratio(key,dt[11])\n\t\t\t\t\tif(len(dt[11]) >= 4):\n\t\t\t\t\t\tnew_match = fuzz.partial_ratio(key,dt[11].lower())\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_match = 0\n\t\t\t\t\tif(new_match > matching):\n\t\t\t\t\t\tmatching = new_match\n\t\t\t\t\t\tmatched = key\n\t\t\t\t\t\to_text = dt[11]\n\t\t\t\t# if(matching > 70):\n\t\t\t\t# \tprint([matching,matched,o_text],file=sys.stderr)\n\t\t\t\tif(matching > 70):\n\t\t\t\t\t#print([matching,matched,o_text],file=sys.stderr)\n\t\t\t\t\tif(Area_not_given):\n\t\t\t\t\t\tbox = [int(i) for i in dt[6:10]]\n\t\t\t\t\t\tbox3 = box\n\t\t\t\t\telse:\n\t\t\t\t\t\tbox = [int(i) for i in dt[6:10]]\n\t\t\t\t\t\tbox2 = [TL[0],TL[1],0,0]\n\t\t\t\t\t\tbox3 = np.add(box,box2)\n\t\t\t\t\t\tbox3 = box3.tolist()\n\n\n\t\t\t\t\tif(matched in keywords_en):\n\t\t\t\t\t\tmatched_en = keywords_en[matched]\n\t\t\t\t\telse:\n\t\t\t\t\t\tmatched_en = matched\n\t\t\t\t\tpossbleMatches.append([box,box3,o_text,matched,matched_en])\n\t\t\t\t\t#print(box,file=sys.stderr)\n\t\t#print(possbleMatches,file=sys.stderr)\n\t\tfor matches in possbleMatches:\n\t\t\tbox = matches[0]\n\t\t\tbox_TL = box[0:2]\n\t\t\t#print(box,file=sys.stderr)\n\t\t\tfor dt in data:\n\t\t\t\tdt = dt.split(\"\\t\")\n\t\t\t\tif(len(dt) >= 11):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tocr_percent = int(dt[10])\n\t\t\t\t\t\tif(ocr_percent > 0 and 
(abs(int(dt[7])-box_TL[1]) < 20 and abs(box[0]+box[2]-int(dt[6])) < 20)):\n\t\t\t\t\t\t\t#matches.append(dt[6:12])\n\t\t\t\t\t\t\t#print(dt,file=sys.stderr)\n\t\t\t\t\t\t\t#dt_TL = dt[6:8]\n\t\t\t\t\t\t\tif(Area_not_given):\n\t\t\t\t\t\t\t\tvalueBox = [int(i) for i in dt[6:10]]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tvalueBox = [int(i) for i in dt[6:10]]\n\t\t\t\t\t\t\t\tvalueBox2 = [TL[0],TL[1],0,0]\n\t\t\t\t\t\t\t\tvalueBox = np.add(valueBox,valueBox2)\n\t\t\t\t\t\t\t\tvalueBox = valueBox.tolist()\n\t\t\t\t\t\t\tvalue = dt[11]\n\t\t\t\t\t\t\tif('hardness'in matches[4][0]):\n\t\t\t\t\t\t\t\tprint(\"Is HR not in value? \",\"HR\" not in value)\n\t\t\t\t\t\t\tif(\"HR\" not in value and 'hardness'in matches[4][0]):\n\t\t\t\t\t\t\t\tprint(\"No hardness scale, looking:\",value)\n\t\t\t\t\t\t\t\tfor dt2 in data:\n\t\t\t\t\t\t\t\t\tdt2 = dt2.split(\"\\t\")\n\t\t\t\t\t\t\t\t\tif(len(dt2) >= 11):\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\tocr_percent = int(dt2[10])\n\t\t\t\t\t\t\t\t\t\t\tif(ocr_percent > 0 and (abs(int(dt2[7])-int(dt[7])) < 20 and abs(int(dt[6])-int(dt2[6])) < 200)):\n\t\t\t\t\t\t\t\t\t\t\t\tif(\"HR\" in dt2[11]):\n\t\t\t\t\t\t\t\t\t\t\t\t\tvalue = value + dt2[11]\n\t\t\t\t\t\t\t\t\t\t\t\t\tprint(\"found scale:\",dt2[11])\n\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\tprint(\"found other:\",dt2[11])\n\t\t\t\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tmatches.append([valueBox,value])\n\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tcontinue\n\t\tmatchesOutput = []\n\t\tmatchedY = []\n\t\tY_count = 0\n\t\tfor matches in possbleMatches:\n\t\t\tY_found = False\n\t\t\tval = \"\"\n\t\t\tvalBox = []\n\t\t\tif(len(matches) > 5):\n\t\t\t\tval = matches[5][1]\n\t\t\t\tvalBox = matches[5][0]\n\t\t\tfor Ys in matchedY:\n\t\t\t\tif(abs(Ys[0] - matches[1][1]) < 20):\n\t\t\t\t\tmatchesOutput[Ys[1]].append({\"keyword\":matches[2],\"Bounding_box\":matches[1],\"value\":val,\"Value_box\":valBox})\n\t\t\t\t\tY_found = True\n\t\t\tif(not Y_found):\n\t\t\t\tmatchesOutput.append([{\"keyword\":matches[2],\"Bounding_box\":matches[1],\"value\":val,\"Value_box\":valBox}])\n\t\t\t\tmatchedY.append([matches[1][1],Y_count])\n\t\t\t\tY_count+=1\n\n\t\t\t\n\t\treturn jsonify(matchesOutput)\n@app.route('/CustomerParts')\ndef CPart():\n\tFname = request.args.get('fileName')\n\tArea = request.args.get('area')\n\tangle_str = request.args.get('rotate')\n\tif angle_str is None:\n\t\tangle = 0\n\telse:\n\t\tangle = float(request.args.get('rotate'))\n\tpath = \"../uploads/\"\n\tprint(path+Fname)\n\tim = cv2.imread(path+Fname)##'111.jpg'\n\tim_N = np.array(im)\n\t#print(im_N.shape)\n\td_x = im_N.shape[1]\n\td_y = im_N.shape[0]\n\tim_r = im_N[:,:,0]\n\tim_g = im_N[:,:,1]\n\tim_b = im_N[:,:,2]\n\t\"\"\"\n\tif(d_y > d_x):\n\t\tim_r = np.rot90(im_r,3)\n\t\tim_g = np.rot90(im_g,3)\n\t\tim_b = np.rot90(im_b,3)\n\t\ttmp = d_x\n\t\td_x = d_y\n\t\td_y = tmp\n\t\"\"\"\n\t#outputImg = rotateImage(outputImg,-0.6)\n\t#cv2.imwrite('color_img.jpg', outputImg)\n\n\t\"\"\"\t\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = 0\n\t\tfor k in range(i):\n\t\t\tif im_r[j+k][i-k] < 210 and im_g[j+k][i-k] < 210 and im_b[j+k][i-k] < 210: \n\t\t\t\ttopLeft = [i-k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"TL\",topLeft)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y-i\n\t\tfor k in range(i):\n\t\t\tif im_r[j+k][d_x-1-k] < 210 and im_g[j+k][d_x-1-k] < 210 and im_b[j+k][d_x-1-k] < 210: \n\t\t\t\tbotRight = [d_x-1-k,j+k]\n\t\t\t\tfound = 
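# A hedged sketch (editor's addition): the matchesOutput loop above buckets keyword matches into table rows whenever their bounding-box Y coordinates differ by less than 20 px; the grouping rule on its own:\ndef group_by_y(items, tol=20):\n    # items: iterable of (y, payload) pairs; returns one list of payloads per row\n    rows = []  # each entry: [anchor_y, [payloads]]\n    for y, payload in items:\n        for row in rows:\n            if abs(row[0] - y) < tol:\n                row[1].append(payload)\n                break\n        else:\n            rows.append([y, [payload]])\n    return [payloads for _, payloads in rows]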
True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\tprint(\"BR\",botRight)\n\n\tfound = False\n\tfor i in range(d_x):\n\t\tj = d_y - i - 1\n\t\tfor k in range(i):\n\t\t\t#print(j+k,k)\n\t\t\tif im_r[j+k][k] < 210 and im_g[j+k][k] < 210 and im_b[j+k][k] < 210: \n\t\t\t\tbotLeft = [k,j+k]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tbreak\n\t#print(\"BL First:\",firstGuess,\"Second\",secondGuess)\n\tprint(\"BL\",botLeft)\n\t\"\"\"\n\n\tim = ndimage.rotate(im, angle)\n\tArea_not_given = False\n\tif(Area is None):\n\t\t\n\t\t#Area = []\n\t\t#Area.append([topLeft[0],topLeft[1]])\n\t\t#Area.append([botRight[0],botRight[1]])\n\t\tArea = [[0,0],[d_x,d_y]]\n\t\tArea_not_given = True\n\t#cv2.imwrite('C:/Users/Hilton/Desktop/scanTest/WTF.jpg', im)\n\tprint(Area)\n\tif(Area_not_given):\n\t\t# print(jsonify(PartCut.getParts(Fname,Area,im)))\n\t\treturn jsonify(PartCut.getParts(Fname,Area,im,angle))\n\telse:\n\t\treturn jsonify(PartCut.getParts(Fname,json.loads(Area),im,angle))\n@app.route('/CustomerCut')\ndef CCut():\n\tFname = request.args.get('fileName')\n\tpath = \"../uploads/\"\n\tfilepath = \"../uploads/\"\n\tim = cv2.imread(path+Fname)\n\tim = np.rot90(im,3)\n\tBP_d_x = im.shape[1]\n\tBP_d_y = im.shape[0]\n\tparts_count = 1\n\tcrop_names = []\n\tboxes = {}\n\treduction = 5\n\tnaive_red = naiveReduction(im,reduction)\n\tBPImg_reduced = naive_red\n\tim = cv2.cvtColor(im.astype('uint8'), cv2.COLOR_BGR2GRAY)\n\tret,im = cv2.threshold(im,170,255,cv2.THRESH_BINARY)\n\t#cv2.imwrite(\"C:/Users/Hilton/Desktop/scanTest/TEST.png\", BPImg_reduced)\n\t#BPCanvas = np.full([math.floor(BP_d_y/reduction),math.floor(BP_d_x/reduction)],255)\n\tfor i in range(math.floor(BP_d_x/reduction)):\n\t\tfor j in range(math.floor(BP_d_y/reduction)):\n\t\t\tif BPImg_reduced[j,i,0] < 210:\n\t\t\t\tBPCanvas = np.full([math.ceil(BP_d_y/reduction),math.ceil(BP_d_x/reduction)],255)\n\t\t\t\tgetConnectedShape_Opti(BPImg_reduced,BPCanvas,i,j,1,0,0)\n\t\t\t\tcount = np.count_nonzero(BPCanvas == 120)\n\t\t\t\tif count > 250:\n\t\t\t\t\t#cv2.imwrite(filepath+\"Crop/\"+str(parts_count)+\"_1_\"+Fname, BPCanvas)\n\t\t\t\t\tBPCanvas = naiveInter(BPCanvas,reduction)\n\t\t\t\t\t#cv2.imwrite(filepath+\"Crop/\"+str(parts_count)+\"_2_\"+Fname, BPCanvas)\n\t\t\t\t\tBPCanvasOut = np.full([BP_d_y,BP_d_x,3],255)\n\t\t\t\t\tTMP = np.logical_and(im[:,:] == 0,BPCanvas[0:BP_d_y,0:BP_d_x] == 120)\n\t\t\t\t\tTMP = np.logical_not(TMP)\n\t\t\t\t\t# write the mask into each colour channel; the channel loop must not reuse i, which is the outer column index\n\t\t\t\t\tfor ch in range(3):\n\t\t\t\t\t\tBPCanvasOut[:,:,ch] = TMP\n\t\t\t\t\tBPCanvasOut = BPCanvasOut*255\n\t\t\t\t\t#cv2.imwrite(filepath+\"Crop/\"+str(parts_count)+\"_3_\"+Fname, BPCanvasOut)\n\t\t\t\t\tnons = np.nonzero(BPCanvasOut == 0)\n\t\t\t\t\tTLY = min(nons[0])\n\t\t\t\t\tTLX = min(nons[1])\n\t\t\t\t\tBRY = max(nons[0])\n\t\t\t\t\tBRX = max(nons[1])\n\t\t\t\t\tprint([TLX,TLY],[BRX,BRY])\n\t\t\t\t\tBPCanvasOut = BPCanvasOut[TLY:BRY,TLX:BRX]\n\t\t\t\t\tcv2.imwrite(filepath+\"Crop/\"+str(parts_count)+\"_\"+Fname, BPCanvasOut)\n\t\t\t\t\tcrop_names.append(str(parts_count)+\"_\"+Fname)\n\t\t\t\t\tboxes[str(parts_count)+\"_\"+Fname] = [int(TLX),int(TLY),int(BRX)-int(TLX),int(BRY)-int(TLY)]\n\t\t\t\t\tparts_count+=1\n\treturn jsonify({\"No_file\":\"\",\"No_recog\":\"\",\"Crop_file\":crop_names,\"Bounding_boxes\":boxes})\n@app.route('/cutComponent')## rAngle rotate still not satisfactory, weird artifact if the image is rotated first\ndef CC():\n\tFname = request.args.get('fileName')\n\tfilepath = \"../uploads/\"\n\treturn 
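# A hedged sketch (editor's addition): naiveReduction/naiveInter appear to downscale by an integer factor for the flood fill and then scale the mask back up; if that reading is right, a nearest-neighbour round trip with cv2.resize expresses the idea (an assumption about those helpers, not their actual code):\nimport cv2\ndef reduce_then_restore(img, reduction):\n    h, w = img.shape[:2]\n    small = cv2.resize(img, (w // reduction, h // reduction), interpolation=cv2.INTER_NEAREST)\n    restored = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)\n    return small, restored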
jsonify({\"id\":component.run(source=filepath+Fname)})\t\n@app.route('/matchCustomer')## rAngle rotate still not satisfactory, weird artifact if the image is rotated first\ndef MC():\n\tFname = request.args.get('fileName')\n\tlogos = request.args.get('logo')\n\tfilepath = \"../uploads/\"\n\tprint(filepath+Fname)\n\treturn jsonify({\"value\":logo.run(source=filepath+Fname)})\n\n\t\"\"\"\n\tif(logos is None):\n\t\treturn \"\"\n\telse:\n\t\tmaxID = \"\"\n\t\tlogos = json.loads(logos)\n\t\tmaxMatch=0\n\t\tfor logo in logos:\n\t\t\tlogoName = logo['name']\n\t\t\timg1 = cv2.imread(filepath+Fname, 0)\n\t\t\timg2 = cv2.imread(filepath+logoName, 0)\n\n\t\t\tfeature_name = 'sift'\n\t\t\tdetector, matcher = init_feature(feature_name)\n\t\t\tif img1 is None:\n\t\t\t\tcontinue\n\n\t\t\tif img2 is None:\n\n\t\t\t\tcontinue\n\n\t\t\tif detector is None:\n\n\t\t\t\tcontinue\n\n\n\t\t\tkp1, desc1 = detector.detectAndCompute(img1, None)\n\t\t\tkp2, desc2 = detector.detectAndCompute(img2, None)\n\n\t\t\tdef match_and_draw(win):\n\t\t\t\tstatus=[]\n\t\t\t\traw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n\t\t\t\tp1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\t\t\t\tif len(p1) >= 4:\n\t\t\t\t\tH, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n\t\t\t\t\t# print(status)\n\t\t\t\t\treturn np.sum(status) * 100 / len(status)\n\t\t\t\treturn 0\n\n\t\t\t\t# _vis = explore_match(win, img1, img2, kp_pairs, status, H)\n\t\t\tresult = match_and_draw('find_obj')\n\t\t\tif result > maxMatch:\n\t\t\t\tmaxMatch = result\n\t\t\t\tmaxID = logo['file_id']\n\t\tprint (maxID)\n\t\treturn jsonify({\"id\":maxID})\n\t\"\"\"\n\n@app.route('/cutLogo')## rAngle rotate still not satisfactory, weird artifact if the image is rotated first\ndef CL():\n\tFname = request.args.get('fileName')\n\tbox = request.args.get('box')\n\tthreshold = request.args.get('threshold')\n\tArea = request.args.get('area')\n\ttmptype = request.args.get('type')\n\tangle = float(request.args.get('rotate'))\n\tif threshold is None:\n\t\tthreshold = 1\n\telif int(threshold) > 1:\n\t\tthreshold = 1\n\tpath = \"../uploads/\"\n\tim = cv2.imread(path+Fname)\n\tim = ndimage.rotate(im, angle)\n\tim = cv2.cvtColor(im.astype('uint8'), cv2.COLOR_BGR2GRAY)\n\tret,im = cv2.threshold(im,170,255,cv2.THRESH_BINARY)\n\tim_N = np.array([im,im,im])\n\tim_N = np.moveaxis(im_N, [0,1,2],[2,0,1])\n\tim_N = im_N.astype(np.float32)\n\n\tif(box is None):\n\t\treturn \"\"\n\telse:\n\t\tbox = json.loads(box)\n\t\tboxCount = 0\n\t\t# outBoxes = {}\n\t\t# outNames = ''\n\t\t# return BBox\n\t\tboxCount += 1\n\t\t#-----#\n\t\tcurrName = tmptype+\"_\"+Fname\n\t\t# currName = \"111111111111\"\n\t\tTL = [int(box[0]),int(box[1])]\n\t\tBR = [(int(box[0])+int(box[2])),(int(box[1])+int(box[3]))]\n\t\tcv2.imwrite(path+currName, im_N[TL[1]:BR[1],TL[0]:BR[0],:].astype(np.uint8))\n\t\t# outNames.append(currName)\n\t\t# outBoxes[currName] = [box[0],box[1],box[2],box[3]]\n\t\n\n\n\treturn jsonify({\"name\":currName})\n\n@app.route('/PartsWithBox')## rAngle rotate still not satisfactory, weird artifact if the image is rotated first\ndef PWB():\n\tFname = request.args.get('fileName')\n\tBBox = request.args.get('bounding_box')\n\tthreshold = request.args.get('threshold')\n\tArea = request.args.get('area')\n\tangle_str = request.args.get('rotate')\n\tif angle_str is None:\n\t\tangle = 0\n\telse:\n\t\tangle = float(request.args.get('rotate'))\n\tif threshold is None:\n\t\tthreshold = 1\n\telif int(threshold) > 1:\n\t\tthreshold = 1\n\tpath = \"../uploads/\"\n\tim = 
cv2.imread(path+Fname)\n\tim = ndimage.rotate(im, angle)\n\tim = cv2.cvtColor(im.astype('uint8'), cv2.COLOR_BGR2GRAY)\n\tret,im = cv2.threshold(im,170,255,cv2.THRESH_BINARY)\n\tim_N = np.array([im,im,im])\n\tim_N = np.moveaxis(im_N, [0,1,2],[2,0,1])\n\tim_N = im_N.astype(np.float32)\n\t#print(im_N.shape,file=sys.stderr)\n\t#cv2.imwrite('C:/Users/Hilton/Desktop/scanTest/REEE.png', im_N)\n\tif(BBox is None):\n\t\treturn \"\"\n\telse:\n\t\tBoxes = json.loads(BBox)\n\t\tboxCount = 0\n\t\toutBoxes = {}\n\t\toutNames = []\n\t\t# return BBox\n\t\tfor box in Boxes:\n\t\t\tboxCount += 1\n\t\t\t#-----#\n\t\t\tcurrName = str(boxCount)+\"_\"+Fname\n\t\t\tTL = [int(box[0]),int(box[1])]\n\t\t\tBR = [(int(box[0])+int(box[2])),(int(box[1])+int(box[3]))]\n\t\t\tcv2.imwrite(path+\"Crop/\"+currName, im_N[TL[1]:BR[1],TL[0]:BR[0],:].astype(np.uint8))\n\t\t\toutNames.append(currName)\n\t\t\toutBoxes[currName] = [box[0],box[1],box[2],box[3]]\n\t\t\t#-----#\n\t\t\t\"\"\"\n\t\t\t#print(box,file=sys.stderr)\n\t\t\treduction = math.ceil(math.sqrt(int(box[2])*int(box[3])/100000))\n\t\t\t#print(\"reduction \"+str(reduction),file=sys.stderr)\n\t\t\t#reduction = 1\n\t\t\t#TL = [math.floor(box[0]/reduction),math.floor(box[1]/reduction)]\n\t\t\t#BR = [math.floor((int(box[0])+int(box[2]))/reduction),math.floor((int(box[1])+int(box[3]))/reduction)]\n\t\t\tif(box[0] is None and box[1] is None and box[2] is None and box[3] is None):\n\t\t\t\tcontinue\n\t\t\tTL = [int(box[0]),int(box[1])]\n\t\t\tBR = [(int(box[0])+int(box[2])),(int(box[1])+int(box[3]))]\n\t\t\tsmallBP = im_N[TL[1]:BR[1],TL[0]:BR[0],:]\n\t\t\toriBP = smallBP\n\t\t\tori_x = int(box[2])\n\t\t\tori_y = int(box[3])\n\t\t\t#cv2.imwrite(\"C:/Users/Hilton/Desktop/scanTest/TESTs\"+str(boxCount)+\"_1.png\",smallBP)\n\t\t\t#print(\"PPBIG \"+str(smallBP.shape),file=sys.stderr)\n\t\t\tsmallBP = naiveReduction(smallBP,reduction)\n\t\t\t#print(\"PPSMALL \"+str(smallBP.shape),file=sys.stderr)\n\t\t\t#cv2.imwrite(\"C:/Users/Hilton/Desktop/scanTest/TESTs\"+str(boxCount)+\"_2.png\",smallBP)\n\t\t\t#continue\n\t\t\t#TL = [math.floor(box[0]/reduction),math.floor(box[1]/reduction)]\n\t\t\t#BR = [math.floor((int(box[0])+int(box[2]))/reduction),math.floor((int(box[1])+int(box[3]))/reduction)]\n\t\t\tsmall_d_x = smallBP.shape[1]\n\t\t\tsmall_d_y = smallBP.shape[0]\n\t\t\t#small_d_x = box[2]\n\t\t\t#small_d_y = box[3]\n\t\t\tprint(small_d_x,small_d_y,file=sys.stderr)\n\t\t\tsubBoxCount = 0\n\t\t\tfor i in range(small_d_x):\n\t\t\t\tfor j in range(small_d_y):\n\t\t\t\t\tif smallBP[j,i].any() < 210:\n\t\t\t\t\t\tsmallCanvas = np.full([small_d_y,small_d_x],255,dtype = np.uint8)\n\t\t\t\t\t\tgetConnectedShape_Opti(smallBP,smallCanvas,i,j,math.ceil(threshold/reduction),0,0)\n\t\t\t\t\t\tcount = np.count_nonzero(smallCanvas == 120)\n\t\t\t\t\t\tif count > 1500/reduction:\n\t\t\t\t\t\t\tsubBoxCount += 1\n\t\t\t\t\t\t\tsmallCanvas = naiveInter(smallCanvas,reduction)\n\t\t\t\t\t\t\t#overmarginX = smallCanvas.shape[1] - ori_x\n\t\t\t\t\t\t\t#overmarginY = smallCanvas.shape[0] - ori_y\n\t\t\t\t\t\t\t#cv2.imwrite(\"C:/Users/Hilton/Desktop/scanTest/TESTs\"+str(boxCount)+\"_\"+str(subBoxCount)+\"_3.png\",smallCanvas)\n\t\t\t\t\t\t\t#cv2.imwrite(\"C:/Users/Hilton/Desktop/scanTest/TESTs\"+str(boxCount)+\"_\"+str(subBoxCount)+\"_4.png\",smallCanvas[0:ori_y,0:ori_x])\n\t\t\t\t\t\t\t#print( [math.floor(overmarginY/2),ori_y+math.ceil(overmarginY/2),math.floor(overmarginX/2),ori_x+math.ceil(overmarginX/2)],file=sys.stderr)\n\t\t\t\t\t\t\t#TMP = np.logical_and(oriBP[:,:,0] == 
0,smallCanvas[math.floor(overmarginY/2):ori_y+math.ceil(overmarginY/2),math.floor(overmarginX/2):ori_x+math.ceil(overmarginX/2)] == 120)\n\t\t\t\t\t\t\tTMP = np.logical_and(oriBP[:,:,0] == 0,smallCanvas[0:ori_y,0:ori_x] == 120)\n\t\t\t\t\t\t\tBPCanvasOut = np.full([ori_y,ori_x,3],255)\n\t\t\t\t\t\t\tfor i in range(3):\n\t\t\t\t\t\t\t\tBPCanvasOut[:,:,i] = TMP\n\t\t\t\t\t\t\tBPCanvasOut = BPCanvasOut*255\n\t\t\t\t\t\t\tnons = np.nonzero(BPCanvasOut == 255)\n\t\t\t\t\t\t\tTLY = min(nons[0])\n\t\t\t\t\t\t\tTLX = min(nons[1])\n\t\t\t\t\t\t\tBRY = max(nons[0])\n\t\t\t\t\t\t\tBRX = max(nons[1])\n\t\t\t\t\t\t\tprint([TLX,TLY],[BRX,BRY])\n\t\t\t\t\t\t\tBPCanvasOut = BPCanvasOut[TLY:BRY,TLX:BRX]\n\t\t\t\t\t\t\tBPCanvasOut = rotateImageBlackBorder(BPCanvasOut.astype(np.float32),angle)\n\t\t\t\t\t\t\t#BPCanvasOut = rotateImage(BPCanvasOut.astype(np.float32),rAngle-90)\n\t\t\t\t\t\t\t#nons = np.nonzero(smallCanvas == 120)\n\t\t\t\t\t\t\t#nons = np.nonzero(smallCanvas == 120)\n\t\t\t\t\t\t\t#TLY = min(nons[0])\n\t\t\t\t\t\t\t#TLX = min(nons[1])\n\t\t\t\t\t\t\t#BRY = max(nons[0])\n\t\t\t\t\t\t\t#BRX = max(nons[1])\n\t\t\t\t\t\t\t#print([TLX,TLY],[BRX,BRY])\n\t\t\t\t\t\t\t#smallCanvas = smallCanvas[TLY:BRY,TLX:BRX]\n\t\t\t\t\t\t\t#smallCanvas = cv2.bitwise_not(smallCanvas)\n\t\t\t\t\t\t\t#ret,smallCanvas = cv2.threshold(smallCanvas,115,255,cv2.THRESH_BINARY)\n\t\t\t\t\t\t\tcurrName = str(boxCount)+\"_\"+str(subBoxCount)+\"_\"+Fname\n\t\t\t\t\t\t\tcv2.imwrite(path+\"Crop/\"+currName, BPCanvasOut)\n\t\t\t\t\t\t\toutNames.append(currName)\n\t\t\t\t\t\t\toutBoxes[currName] = [int(TLX)*reduction+TL[0],int(TLY)*reduction+TL[1],(int(BRX)-int(TLX))*reduction,(int(BRY)-int(TLY))*reduction]\n\n\t\t\t#print(smallBP.shape[:],file=sys.stderr)\n\t\t#print(Boxes,file=sys.stderr)\n\t\t\"\"\"\n\t\treturn jsonify({\"Crop_file\":outNames,\"Bounding_boxes\":outBoxes})\ndef QuotationPDF(files,self):\n\n\tfiles = json.loads(files)\n\tcolumns = ['order_name','comment','comment','material','titanizing','num','cost','cost2','remark']\n\thtml_file = ''\n\tcustomer = ''\n\tenquiry = ''\n\tdeadline = ''\n\tnow = ''\n\tfor num,file in enumerate(files):\n\t\tprint(file)\n\t\tif 'customer' not in file:\n\t\t\tcustomer = ''\n\t\telse:\n\t\t\tcustomer = file['customer']\n\t\tif 'customer' not in file:\n\t\t\tenquiry = ''\n\t\telse:\n\t\t\tenquiry = file['customer']\n\t\tif 'deadline' not in file:\n\t\t\tdeadline = ''\n\t\telse:\n\t\t\tdeadline = file['deadline']\n\t\tif 'update_time' not in file:\n\t\t\tnow = ''\n\t\telse:\n\t\t\tnow = file['update_time']\n\n\t\thtml_file += ''\n\t\thtml_file += f'{(num+1)}'\n\t\tfor column in columns:\n\t\t\tif column in file:\n\t\t\t\thtml_file += f'{file[column]}'\n\t\t\telse:\n\t\t\t\thtml_file += f''\n\t\thtml_file += f'ASAP'\n\t\thtml_file += ''\n\tprint(html_file)\n\n\tbody = f\"\"\"\n\t\t\n\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\tMajor Industries Ltd.
\n\t\t\t\t\t\t1,Chang-Tai St.,Hsiao-Kang
\n\t\t\t\t\t\tKaohsiung, Taiwan, R.O.C.
\n\t\t\t\t\t
\n\t\t\t\t\t\tTel: 886-7-8716711
\n\t\t\t\t\t\tFax: 886-7-8715935
\n\t\t\t\t\t\teMail: milmajor@mil.com.tw
\n\t\t\t\t\t
\n\t\t

Date:2021/07/29

\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t
\n\t\t\t\t\t\t

Customer:{customer}

\n\t\t\t\t\t
\n\t\t\t\t\t\t

enquiry:{enquiry}

\n\t\t\t\t\t
\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\t{html_file}\n\t\t\t\n\t\t
Pos. | Ident-No. | Date of Drawing | Description | Material | PVD | Qty | €/Pc. | €/Pc. | Delivery date arriving CIF Beckingen | UPDATE PRICE | REMARK
\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\t

Delivery: {deadline}

\n\t\t\t\t\t\t

\n\t\t\t\t\t\t

\n\t\t\t\t\t\tThank you very much.
\n\t\t\t\t\t\tBest regards,
\n\t\t\t\t\t\tCathy {now}
\n\t\t\t\t\t

\n\t\t\n\t\t\"\"\"\n\n\t# print(body,file=sys.stderr)\n\n\tts = time.time()\n\tfileName='out.pdf'\n\tprint(fileName)\n\n\n\n\tpdfkit.from_string(body, '../uploads/out.pdf') #with --page-size=Legal and --orientation=Landscape\n\tprint(fileName)\n\tsys.exit(0)\n\treturn fileName\n\n@app.route('/quotation', methods=['GET', 'POST'])## rAngle rotate still not satisfactory, weird artifact if the image is rotated first\ndef Quotation():\n\tfiles = request.form.get('files')\n\tprint(files)\n\t\n\tt = threading.Thread(target = QuotationPDF, args=(files,''))\n\tt.start()\n\n\treturn jsonify({\"name\":\"out.pdf\"})\n@app.route('/CNNPartFilter')\ndef CNNPartFilter():\n\tjson_data = request.args.get('crops')\n\tif json_data is not None:\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify(\"Input name error\")\n\tif 'paths' in data:\n\t\tFPaths = data['paths']\n\telse:\n\t\treturn jsonify(\"File name error\")\n\toutput = []\n\tfor FPath in FPaths:\n\t\tprint(FPath)\n\n\t\tim = cv2.imread(FPath)\n\t\tim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\t\tim_N = np.array(im)\n\t\twidth, height = im_N.shape\n\t\tif(width > height):\n\t\t\tpadded = cv2.copyMakeBorder(im_N, 0, width-height, 0, 0,cv2.BORDER_CONSTANT,value = 255)\n\t\telse:\n\t\t\tpadded = cv2.copyMakeBorder(im_N, 0, 0, 0, height-width,cv2.BORDER_CONSTANT,value = 255)\n\t\tpadded = cv2.resize(padded, (1000,1000), interpolation = cv2.INTER_AREA)\n\t\tpadded = cv2.cvtColor(padded, cv2.COLOR_GRAY2BGR)\n\t\tpadded_pil = Image.fromarray(padded)\n\t\t#cv2.imshow('A',part)\n\t\t#cv2.waitKey(0)\n\t\ta = preprocess(padded_pil)\n\t\ta = Variable(torch.unsqueeze(a, dim=0), requires_grad=False)\n\t\t#print(a)\n\t\tresult = model(a)\n\t\tresult = result.tolist()\n\t\tif(result[0][0] > result[0][1] or result[0][1] < 0):\n\t\t\toutput.append({\"path\":FPath,\"isPart\":False})\n\t\telse:\n\t\t\toutput.append({\"path\":FPath,\"isPart\":True})\n\t\tprint(result)\n\n\treturn jsonify(output)\n\n@app.route('/recognition//')\ndef get_checkpoint(customer=None, checkpoint=None):\n\tcheckpoint_list = os.listdir('test/checkpoint')\n\tannotation_list = os.listdir('test/annotation')\n\tannotation_dict = {}\n\tfor anno in annotation_list:\n\t\tcheck_anno = '_'.join(anno.split('_')[0:2])\n\t\tif (customer+'_'+checkpoint) == check_anno:\n\t\t\t# print(anno)\n\t\t\twith open('test/annotation/' + anno, 'r', encoding='utf-8') as R:\n\t\t\t\tfor line in R.readlines():\n\t\t\t\t\tline = line.split('\\n')[0].split(',')\n\t\t\t\t\tindex = line[1]\n\t\t\t\t\tanno_class = line[2]\n\t\t\t\t\tif index not in annotation_dict:\n\t\t\t\t\t\tannotation_dict[index] = anno_class\n\t# print(annotation_dict)\n\tcheckpoint_exist = False\n\tfor pth in checkpoint_list:\n\t\tcheck = customer + '_' + checkpoint\n\t\tif check in pth:\n\t\t\tcheckpoint_path = 'test/checkpoint/' + pth\n\t\t\tcheckpoint_class = int(pth.split('_')[2])\n\t\t\tcheckpoint_exist = True\n\tif checkpoint_exist == True:\n\t\tprint(checkpoint_exist)\n\t\t# os.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\t\tcustomer_model = models.vgg11_bn(pretrained=False)\n\t\tcustomer_model.classifier[6] = nn.Linear(in_features=4096, out_features=checkpoint_class)\n\t\tcheckpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n\t\tcustomer_model.load_state_dict(checkpoint)\n\t\tcustomer_model.eval()\n\telse:\n\t\treturn 'Checkpoint is not exist'\n\tjson_data = request.args.get('crops')\n\tif json_data is not None:\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify(\"Input name error\")\n\tif 'paths' 
in data:\n\t\tFPaths = data['paths']\n\telse:\n\t\treturn jsonify(\"File name error\")\n\ttop_k = request.args.get('top_k')\n\tif(top_k is None):\n\t\ttop_k = 5\n\telse:\n\t\ttop_k = int(top_k)\n\n\tGoodPaths = []\n\tBoxes = []\n\tcurrIm = None\n\tfor FPath in FPaths:\n\t\tprint(FPath)\n\n\t\tim = cv2.imread(FPath)\n\t\tim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\t\tim_N = np.array(im)\n\t\twidth, height = im_N.shape\n\t\tif(width > height):\n\t\t\tpadded = cv2.copyMakeBorder(im_N, 0, width-height, 0, 0,cv2.BORDER_CONSTANT,value = 255)\n\t\telse:\n\t\t\tpadded = cv2.copyMakeBorder(im_N, 0, 0, 0, height-width,cv2.BORDER_CONSTANT,value = 255)\n\t\tpadded = cv2.resize(padded, (1000,1000), interpolation = cv2.INTER_AREA)\n\t\tpadded = cv2.cvtColor(padded, cv2.COLOR_GRAY2BGR)\n\t\tpadded_pil = Image.fromarray(padded)\n\t\t#cv2.imshow('A',part)\n\t\t#cv2.waitKey(0)\n\t\ta = preprocess(padded_pil)\n\t\ta = Variable(torch.unsqueeze(a, dim=0), requires_grad=False)\n\t\t#print(a)\n\t\tresult = model(a)\n\t\tresult = result.tolist()\n\t\tif(result[0][0] < result[0][1] and result[0][1] > 0):\n\t\t\tGoodPaths.append([FPath,im_N.shape])\n\t\t\tBoxes.append(im_N.shape)\n\tndboxes = np.array(Boxes)\n\tif(ndboxes.shape[0] > 0):\n\t\tymax = max(ndboxes[:,0])\n\telse:\n\t\treturn jsonify([])\n\tcurrIm = None\n\tfor FPath,Box in GoodPaths:\n\t\tim = cv2.imread(FPath)\n\t\tim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\t\tpart = np.array(im)\t\n\t\tif(Box[0] < ymax):\n\t\t\tpart = cv2.copyMakeBorder(part,0,ymax-Box[0],0,0,cv2.BORDER_CONSTANT,value = 255)\n\t\tif(currIm is None):\n\t\t\tcurrIm = part\n\t\telse:\n\t\t\tcurrIm = cv2.hconcat([currIm,part])\n\n\twidth, height = currIm.shape\n\tif(width > height):\n\t\tpadded = cv2.copyMakeBorder(currIm, 0, width-height, 0, 0,cv2.BORDER_CONSTANT,value = 255)\n\telse:\n\t\tpadded = cv2.copyMakeBorder(currIm, 0, 0, 0, height-width,cv2.BORDER_CONSTANT,value = 255)\n\tpadded = cv2.resize(padded, (1000,1000), interpolation = cv2.INTER_AREA)\n\tpadded = cv2.cvtColor(padded, cv2.COLOR_GRAY2BGR)\n\tpadded_pil = Image.fromarray(padded)\n\t#cv2.imshow('A',part)\n\t#cv2.waitKey(0)\n\ta = preprocess(padded_pil)\n\ta = Variable(torch.unsqueeze(a, dim=0), requires_grad=False)\n\t#print(a)\n\tresult = customer_model(a)\n\tprob,ind = torch.sort(result,descending=True)\n\tind = ind.tolist()\n\tprob = prob.tolist()\n\toutput = [ind[0][0:top_k],prob[0][0:top_k]]\n\tprint(output)\n\tfor i, n in enumerate(output[0]):\n\t\toutput[0][i] = annotation_dict[str(n)]\n\treturn jsonify(output)\n@app.route('/CNNPartSuggestion')\ndef CNNPartSuggestion():\n\tjson_data = request.args.get('crops')\n\tif json_data is not None:\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify(\"Input name error\")\n\tif 'paths' in data:\n\t\tFPaths = data['paths']\n\telse:\n\t\treturn jsonify(\"File name error\")\n\ttop_k = request.args.get('top_k')\n\tif(top_k is None):\n\t\ttop_k = 5\n\telse:\n\t\ttop_k = int(top_k)\n\n\tGoodPaths = []\n\tBoxes = []\n\tcurrIm = None\n\tfor FPath in FPaths:\n\t\tprint(FPath)\n\n\t\tim = cv2.imread(FPath)\n\t\tim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\t\tim_N = np.array(im)\n\t\twidth, height = im_N.shape\n\t\tif(width > height):\n\t\t\tpadded = cv2.copyMakeBorder(im_N, 0, width-height, 0, 0,cv2.BORDER_CONSTANT,value = 255)\n\t\telse:\n\t\t\tpadded = cv2.copyMakeBorder(im_N, 0, 0, 0, height-width,cv2.BORDER_CONSTANT,value = 255)\n\t\tpadded = cv2.resize(padded, (1000,1000), interpolation = cv2.INTER_AREA)\n\t\tpadded = cv2.cvtColor(padded, 
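# A hedged sketch (editor's addition): the top-k extraction above sorts every logit and slices; torch.topk expresses the same directly:\nimport torch\ndef top_k(logits, k):\n    prob, ind = torch.topk(logits, k, dim=1)\n    return ind[0].tolist(), prob[0].tolist()\n# equivalent to prob, ind = torch.sort(result, descending=True) followed by ind[0][0:top_k], prob[0][0:top_k]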
cv2.COLOR_GRAY2BGR)\n\t\tpadded_pil = Image.fromarray(padded)\n\t\t#cv2.imshow('A',part)\n\t\t#cv2.waitKey(0)\n\t\ta = preprocess(padded_pil)\n\t\ta = Variable(torch.unsqueeze(a, dim=0), requires_grad=False)\n\t\t#print(a)\n\t\tresult = model(a)\n\t\tresult = result.tolist()\n\t\tif(result[0][0] < result[0][1] and result[0][1] > 0):\n\t\t\tGoodPaths.append([FPath,im_N.shape])\n\t\t\tBoxes.append(im_N.shape)\n\tndboxes = np.array(Boxes)\n\tif(ndboxes.shape[0] > 0):\n\t\tymax = max(ndboxes[:,0])\n\telse:\n\t\treturn jsonify([])\n\tcurrIm = None\n\tfor FPath,Box in GoodPaths:\n\t\tim = cv2.imread(FPath)\n\t\tim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\t\tpart = np.array(im)\t\n\t\tif(Box[0] < ymax):\n\t\t\tpart = cv2.copyMakeBorder(part,0,ymax-Box[0],0,0,cv2.BORDER_CONSTANT,value = 255)\n\t\tif(currIm is None):\n\t\t\tcurrIm = part\n\t\telse:\n\t\t\tcurrIm = cv2.hconcat([currIm,part])\n\n\twidth, height = currIm.shape\n\tif(width > height):\n\t\tpadded = cv2.copyMakeBorder(currIm, 0, width-height, 0, 0,cv2.BORDER_CONSTANT,value = 255)\n\telse:\n\t\tpadded = cv2.copyMakeBorder(currIm, 0, 0, 0, height-width,cv2.BORDER_CONSTANT,value = 255)\n\tpadded = cv2.resize(padded, (1000,1000), interpolation = cv2.INTER_AREA)\n\tpadded = cv2.cvtColor(padded, cv2.COLOR_GRAY2BGR)\n\tpadded_pil = Image.fromarray(padded)\n\t#cv2.imshow('A',part)\n\t#cv2.waitKey(0)\n\ta = preprocess(padded_pil)\n\ta = Variable(torch.unsqueeze(a, dim=0), requires_grad=False)\n\t#print(a)\n\tresult = model2(a)\n\tprob,ind = torch.sort(result,descending=True)\n\tind = ind.tolist()\n\tprob = prob.tolist()\n\toutput = [ind[0][0:top_k],prob[0][0:top_k]]\n\tprint(output)\n\n\treturn jsonify(output)\n@app.route('/CNNTextRec')\ndef CNNTextRec():\n\tjson_data = request.args.get('Files')\n\tif json_data is not None:\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify(\"Input name error\")\n\tposs_mats = {}\n\tposs_coats = {}\n\tout = {}\n\tfor file in data:\n\t\trAngle = 0\n# \t\trAngle = getRotate(file)\n\t\t# image = imgproc.loadImage(file)\n# \t\timage = ndimage.rotate(image, rAngle,cval = 255)\n\t\tmaterial,coating = kf2.get_material_coating(file)\n\t\tout[file] = {'coating':[coating],'material':[material]}\n\t\t# bboxes, polys, score_text = getTextBox(model_textbox, image)\n\t\t\"\"\"\n\t\tmaterial,matlist = RecogNet(polys,file,rAngle)\n\t\tif(len(matlist) == 0):\n\t\t poss_mats[file] = \"None\"\n\t\t continue\n\t\tbest = 0\n\t\tpromat = \"\"\n\t\tfor m in matlist:\n\t\t if(m[4][1] > best):\n\t\t \tbest = m[4][1]\n\t\t \tpromat = m[4][0]\n\t\tposs_mats[file] = promat\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tmaterials,coatings,matlit,coatlit = RecogNet(polys,file,rAngle,'material')\n\t\tout[file] = {'matlit':[],'material':[],'coatlit':[],'coating':[]}\n\t\tfor mat in materials:\n\t\t\tout[file]['material'].append(mat[-1])\n\t\tfor ml in matlit:\n\t\t\tout[file]['matlit'].append([[int(tmp) for tmp in ml[0]],ml[1]])\n\t\tfor coat in coatings:\n\t\t\tout[file]['coating'].append(coat[-1])\n\t\tfor cl in coatlit:\n\t\t\tout[file]['coatlit'].append([[int(tmp) for tmp in cl[0]],cl[1]])\n\t\t\"\"\"\n\t\t\n\treturn jsonify(out)\n\t#print(material,matlist)\n\t#return jsonify(poss_mats)\n@app.route('/NoteRec')\ndef NoteRec():\n\tjson_data = request.args.get('Files')\n\tif json_data is not None:\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify(\"Input name error\")\n\tnoteBoxes = {}\n\tfor file in data:\n\t\timage = imgproc.loadImage(file)\n\t\tbboxes, polys, score_text = getTextBox(model_textbox, 
image)\n\t\tnoteBoxes[file] = RecogNet(polys,file,0,'note')\n\treturn jsonify(noteBoxes)\n\n@app.route('/messageParse')\ndef MsgParse():\n\tjson_data = request.args.get('Files')\n\tif(json_data is not None):\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify('Input name error')\n\treturn jsonify(messageParser.msgParse(data))\n\n@app.route('/pdfSplit')\ndef PSplit():\n\tjson_data = request.args.get('Files')\n\tif(json_data is not None):\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify('Input name error')\n\treturn jsonify(pdfSplit.split_jpg(data))\n@app.route('/orderParse3')\ndef OParse():\n\tjson_data = request.args.get('Files')\n\tif(json_data is not None):\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify('Input name error')\n\treturn jsonify(orderformParser.extract_cell_images_from_table(cv2.imread(\"../uploads/\"+data[0], cv2.IMREAD_GRAYSCALE)))\n@app.route('/orderParse2')\ndef OParse2():\n\tjson_data = request.args.get('Files')\n\tif(json_data is not None):\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify('Input name error')\n\timg = cv2.imread(\"../uploads/\"+data[0])\t# read in colour; the BGR2GRAY conversion below needs a 3-channel image\n\ttemplate = img\n\ttemplate = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n\ttemplate = cv2.Canny(template, 50, 200)\n\t(h, w) = template.shape[:2]\n\n\ttodump = []\t# table coordinates reported for each scanned page\n\tfor imagePath in glob.glob(\"img2\" + \"/*.jpg\"):\n\t\timage = cv2.imread(imagePath)\n\t\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t\tfound = None\n\n\t\tfor scale in np.linspace(0.2, 1.0, 20)[::-1]:\n\t\t\tresized = imutils.resize(gray, width=int(gray.shape[1] * scale))\n\t\t\tr = gray.shape[1] / float(resized.shape[1])\n\n\t\t\tif resized.shape[0] < h or resized.shape[1] < w:\n\t\t\t\tbreak\n\n\t\t\tedged = cv2.Canny(resized, 50, 200)\n\t\t\tresult = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF)\n\t\t\t(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)\n\n\t\t\tif found is None or maxVal > found[0]:\n\t\t\t\tfound = (maxVal, maxLoc, r)\n\n\t\t(_, maxLoc, r) = found\n\t\t(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))\n\t\t(endX, endY) = (int((maxLoc[0] + w) * r), int((maxLoc[1] + h) * r))\n\n\t\tcv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)\n\t\tcv2.imwrite(\"out.png\", image)\n\t\tprint(\"Table coordinates: ({}, {}, {}, {})\".format(startX, startY, endX, endY))\n\t\ttodump.append({\"image\": imagePath, \"box\": [startX, startY, endX, endY]})\n\treturn jsonify(todump)\n\n@app.route('/orderParse')\ndef OParse3():\n\tjson_data = request.args.get('Files')\n\tif(json_data is not None):\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify('Input name error')\n\tfast = request.args.get('fast')\n\tif fast:\n\t\treturn jsonify(OrderParserV3.order_parser(\"../uploads/\"+data[0]).fast_cell_recognition())\n\tscale_data = request.args.get(\"scale\")\n\tif(scale_data is not None):\n\t\tscale = ast.literal_eval(scale_data)\n\telse:\n\t\tscale = 5\n\ttable_bounding_box = request.args.get(\"table_box\")\n\tif(table_bounding_box is not None):\n\t\ttable_box = ast.literal_eval(table_bounding_box)\n\t\top = orderParser(model_textbox,\"../uploads/\"+data[0],True,scale)\n\t\top.no_sep_method(rect = table_box)\n\t\top.OCR()\n\telse:\n\t\top = orderParser(model_textbox,\"../uploads/\"+data[0],False,scale)\n\t\tif(op.largest_rect is None):\n\t\t\treturn jsonify([])\n\treturn jsonify(op.output_data())\n\n@app.route('/dwgTojpg')\ndef dwgConvert():\n\tjson_data = request.args.get('Files')\n\tif(json_data is not None):\n\t\tdata = ast.literal_eval(json_data)\n\telse:\n\t\treturn jsonify(\"Input name error\")\n\tconverterPath = 
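# A hedged sketch (editor's addition): every route above repeats the same None-check around ast.literal_eval on a query parameter; a shared guard makes that pattern explicit:\nimport ast\ndef parse_literal_arg(raw):\n    if raw is None:\n        return None\n    try:\n        return ast.literal_eval(raw)\n    except (ValueError, SyntaxError):\n        return None\n# e.g. data = parse_literal_arg(request.args.get('Files'))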
\"https://vector.express/api/v2/public/convert/dwg/cad2svg/svg/librsvg/pdf/gs/pdf?cad2pdf-auto-orientation=true&cad2pdf-auto-fit=true\"\n\tout = {}\n\tfor fname in data:\n\t\tfilePath = f\"../uploads/{fname}\"\n\t\tout[fname] = []\n\t\twith open(filePath,'rb') as dwgFile:\n\t\t\tdwg_response = requests.post(converterPath,data = dwgFile)\n\t\t\tpdfUrl = json.loads(dwg_response.text)['resultUrl']\n\t\t\tpdf = requests.get(pdfUrl)\n\t\t\tpages = convert_from_bytes(pdf.content, 200)\n\t\t\tfor page in pages:\n\t\t\t\tunique_name = str(uuid.uuid4())\n\t\t\t\tpage.save(f'../uploads/{unique_name}.jpg', 'JPEG')\n\t\t\t\tout[fname].append(f'{unique_name}.jpg')\n\treturn jsonify(out)\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=8090, threaded=True)\n # app.run(host='0.0.0.0', port=6001, threaded=True)\n","repo_name":"ryanwu1717/online-ordering","sub_path":"recognition/Splice.py","file_name":"Splice.py","file_ext":"py","file_size_in_byte":87902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6462840163","text":"import io\nimport zipfile\n\nimport requests\n\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom work_db.models import Bank\n\n\nclass WorkWithBanks:\n\n @staticmethod\n def get_Content():\n response = requests.get(settings.URL)\n if response.status_code != requests.codes.ok:\n raise CommandError('Cannot download file')\n return response.content\n\n @staticmethod\n def read_zip_bytes(content):\n b_bytes = io.BytesIO(content)\n with zipfile.ZipFile(b_bytes) as zf:\n with zf.open('bnkseek.txt') as file:\n byte = file.read()\n text = byte.decode('windows-1251')\n return text\n\n @staticmethod\n def save_InfoBanks(text):\n text = text.split('\\n')\n text.pop(-1) # delete because last line text = ['']\n for line in text:\n line = line.split('\\t')\n _, city, _, name, _, bik, account = line\n Bank.objects.get_or_create(\n city=city,\n name=name,\n bik=bik,\n account=account,\n )\n\n @staticmethod\n def load_and_save_infoBank():\n content = WorkWithBanks.get_Content()\n text = WorkWithBanks.read_zip_bytes(content)\n WorkWithBanks.save_InfoBanks(text)\n","repo_name":"Slava0708/register_of_banks","sub_path":"work_db/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2221630229","text":"import datetime, re\nfrom car_framework.context import context\nfrom car_framework.data_handler import BaseDataHandler\n\n\n# maps asset-server endpoints to CAR service endpoints\nendpoint_mapping = \\\n {'vulnerabilities' : 'vulnerability', 'sites' : 'site', 'assets' : 'asset', 'ip_addresses' : 'ipaddress',\n 'mac_addresses' : 'macaddress', 'hosts' : 'hostname', 'apps' : 'application', 'ports' : 'port'}\n\n# helper functions\ndef extract_id(url):\n m = re.search(r'/(\\d+)/$', url)\n return int(m.group(1))\n\n\ndef find_by_id(collection, url):\n id = extract_id(url)\n for obj in collection:\n if obj['pk'] == id:\n return obj\n\n\ndef filter_out(source, *fields):\n res = {}\n for key in source.keys():\n if not key in fields:\n res[key] = str(source[key])\n return res\n\ndef get_report_time():\n delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n milliseconds = delta.total_seconds() * 1000\n return milliseconds\n\nclass DataHandler(BaseDataHandler):\n \n xrefproperties = []\n def __init__(self):\n super().__init__()\n\n\n def 
create_source_report_object(self):\n        if not (self.source and self.report):\n            # create the source and report entries; they are compulsory for each imports API call\n            self.source = {'_key': context().args.CONNECTION_NAME, 'name': context().args.CONFIGURATION_AUTH_URL, 'description': 'Reference Asset server'}\n            self.report = {'_key': str(self.timestamp), 'timestamp' : self.timestamp, 'type': 'Reference Asset server', 'description': 'Reference Asset server'}\n\n        return {'source': self.source, 'report': self.report}\n\n    # Copies the source object to the CAR data model object if the attributes have the same name\n    def copy_fields(self, obj, *fields):\n        res = {}\n        for field in fields:\n            res[field] = obj[field]\n        return res\n\n    # Handlers\n    # Each endpoint defined in the above endpoint_mapping object should have a handle_* method\n\n    # Create vulnerability Object as per CAR data model from data source\n    def handle_vulnerabilities(self, obj):\n        res = self.copy_fields(obj, 'name', 'published_on', 'disclosed_on', 'updated_on', 'vcvssbmid', 'base_score', )\n        res['external_id'] = str(obj['pk'])\n        res['vcvssbmid'] = str(obj['vcvssbmid'])\n        res['xref_properties'] = []\n        for xref in obj['xref_properties']:\n            res['xref_properties'].append(filter_out(find_by_id(self.xrefproperties, xref), 'pk'))\n        self.add_collection('vulnerability', res, 'external_id')\n\n    # Create asset Object as per CAR data model from data source\n    def handle_assets(self, obj):\n        res = self.copy_fields(obj, 'name', 'initial_value', )\n        res['external_id'] = str(obj['pk'])\n        res['assetid'] = str(obj['pk'])\n        res['asset_type'] = str(obj['type'])\n\n        for vuln in obj.get('vulnerabilities', []):\n            self.add_edge('asset_vulnerability', {'_from_external_id': res['external_id'], '_to_external_id': str(extract_id(vuln))})\n\n        if (obj.get('site')):\n            self.add_edge('site_asset', {'_from_external_id': str(extract_id(obj['site'])), '_to_external_id': res['external_id'],\n                                         'source': context().args.CONNECTION_NAME})\n\n        self.add_collection('asset', res, 'external_id')\n\n    # Create ipaddress Object as per CAR data model from data source\n    def handle_ip_addresses(self, obj):\n        res = {}\n        res['_key'] = str(obj['address'])\n        self.add_edge('asset_ipaddress', {'_from_external_id': str(extract_id(obj['asset'])), '_to': 'ipaddress/' + res['_key']})\n        self.add_collection('ipaddress', res, '_key')\n\n    # Create mac address Object as per CAR data model from data source\n    def handle_mac_addresses(self, obj):\n        res = {}\n        res['_key'] = str(obj['address'])\n        self.add_edge('asset_macaddress', {'_from_external_id': str(extract_id(obj['asset'])), '_to': 'macaddress/' + res['_key']})\n        self.add_collection('macaddress', res, '_key')\n\n    # Create hostname Object as per CAR data model from data source\n    def handle_hosts(self, obj):\n        res = {}\n        res['_key'] = str(obj['host'])\n        self.add_edge('asset_hostname', {'_from_external_id': str(extract_id(obj['asset'])), '_to': 'hostname/' + res['_key']})\n        self.add_collection('hostname', res, '_key')\n\n    # Create application Object as per CAR data model from data source\n    def handle_apps(self, obj):\n        res = self.copy_fields(obj, 'name', )\n        res['external_id'] = str(obj['pk'])\n\n        for asset_url in obj.get('assets', []):\n            asset = context().asset_server.get_object(asset_url)\n            for vuln in asset.get('vulnerabilities', []):\n                self.add_edge('application_vulnerability', {'_from_external_id': res['external_id'], '_to_external_id': str(extract_id(vuln))})\n\n        self.add_collection('application', res, 'external_id')\n\n    # Create port Object as per CAR data model 
from data source\n    def handle_ports(self, obj):\n        res = self.copy_fields(obj, 'port_number', 'layer7application', 'protocol', )\n        res['external_id'] = str(obj['pk'])\n\n        for app in obj.get('apps', []):\n            self.add_edge('application_port', {'_from_external_id': str(extract_id(app)), '_to_external_id': res['external_id']})\n\n        ids = []\n        for ip_ref in obj.get('ip_addresses', []):\n            ids.append(extract_id(ip_ref))\n\n        for ip in context().asset_server.get_objects('ip_addresses', ids):\n            self.add_edge('ipaddress_port', {'_from': 'ipaddress/' + str(ip['address']), '_to_external_id': res['external_id']})\n\n        self.add_collection('port', res, 'external_id')\n\n    def handle_sites(self, obj):\n        res = self.copy_fields(obj, 'name', 'address', )\n        res['external_id'] = str(obj['pk'])\n        self.add_collection('site', res, 'external_id')\n","repo_name":"IBM/cp4s-car-connectors","sub_path":"connectors/reference_connector/connector/data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"73387009546","text":"import telebot_calendar\n\nfrom app.tbot import bot\nfrom app.tbot.resources.review_period_views.review_period_views import first_date_period, \\\n    second_date_period\nfrom app.tbot.services.forms.review_period_form import ReviewPeriodForm\n\n\ndef calendar_handler(request):\n    call = request.args['call']\n    call_data, action, year, month, day = call.data.split(':')\n    # Handle the calendar widget. Returns a date, or None when the pressed buttons are of another type\n    date = telebot_calendar.calendar_query_handler(\n        bot=bot, call=call, name=call_data, action=action, year=year, month=month, day=day\n    )\n    if action == \"DAY\":\n        if call_data == 'first_date_period':\n            return first_date_period(request, date)\n        elif 'date_period_2' in call_data:\n            return second_date_period(request, date)\n    elif action == \"CANCEL\":\n        return ReviewPeriodForm(cancel=True)\n","repo_name":"gagpa/PerformanceReview__mark","sub_path":"app/tbot/resources/calendar_views.py","file_name":"calendar_views.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32125273292","text":"import pytest\nimport operator\nimport string\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\n\nDIRS = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n\ndef read_raw(data:str):\n    data = data.replace(' ', '#') # make spaces impassable\n    return [s.strip() for s in data.splitlines()]\n\n\ndef find_connectors(data):\n    labels = defaultdict(set)\n    nrows = len(data)\n    ncols = len(data[0])\n    for row in range(nrows-1):\n        for col in range(ncols-1):\n            pos = None\n            outer = True\n            c1 = data[row][col]\n            c2 = data[row][col + 1]\n            c3 = data[row + 1][col]\n            if c1 not in string.ascii_uppercase \\\n               or (c2 not in string.ascii_uppercase \n                   and c3 not in string.ascii_uppercase):\n                continue\n            label = c1 + c2 if c2 != '#' else c1 + c3\n            if row == 0:\n                pos = (col, row + 2)\n            elif row == nrows - 2:\n                pos = (col, row - 1)\n            elif col == 0:\n                pos = (col + 2, row)\n            elif col == ncols - 2:\n                pos = (col - 1, row)\n            elif c3 in string.ascii_uppercase:\n                outer = False\n                if row < nrows/2:\n                    pos = (col, row)\n                else:\n                    pos = (col, row + 1)\n            elif c2 in string.ascii_uppercase:\n                outer = False\n                if col < ncols/2:\n                    pos = (col, row)\n                else:\n                    pos = (col + 1, row)\n\n            labels[label].add((pos, outer))\n            (x, y) = pos\n            data[y] = data[y][:x] + '.' 
+ data[y][x+1:]\n return dict(labels)\n \n\ndef parse_maze(maze):\n connectors = find_connectors(maze)\n edges = defaultdict(lambda:defaultdict(lambda:(1000000, (0, 0))))\n con_locations = {k:(v, o) \n for (v, locs) in connectors.items()\n for (k, o) in locs}\n\n start_locations = con_locations.keys()\n for start_loc in start_locations:\n seen = set()\n stack = []\n heappush(stack, (0, start_loc))\n while stack:\n (d, loc) = heappop(stack)\n seen.add(loc)\n if loc != start_loc and loc in con_locations:\n [(l1, d1), (l2, d2)] = sorted((con_locations[l][0],level_shift(con_locations, l)) \n for l in [start_loc, loc])\n edges[l1][l2] = (min(d, edges[l1][l2][0]), (d1, d2))\n continue\n new_locs = [(x, y) for d in DIRS for (x, y) in [t_add(loc, d)] \n if maze[y][x] == '.' and (x, y) not in seen]\n for nloc in new_locs:\n heappush(stack,(d + 1, nloc))\n \n return edges\n\n\ndef level_shift(labels, location):\n (label, is_outer) = labels[location]\n if label in ['AA', 'ZZ']:\n return 0\n else:\n return -1 if is_outer else 1\n\ndef t_add(t1, t2):\n return tuple(map(operator.add, t1, t2))\n\n\n\ndef shortest_path(maze, start, finish):\n stack = []\n seen = set()\n heappush(stack, (0, [start]))\n while stack:\n (d, path) = heappop(stack)\n loc = path[-1]\n if loc == finish:\n return (d, path)\n seen.add(loc)\n for nloc in [l for l in neighbors(maze, loc) if l not in seen]:\n heappush(stack, (d + distance(maze, loc, nloc), path + [nloc]))\n\n\n\ndef distance(maze, l1, l2):\n return maze[l1][l2][0] if l1 <= l2 else maze[l2][l1][0]\n\ndef neighbors(maze, loc):\n direct = list(maze[loc].keys())\n indirect = [k for k in maze if loc in maze[k]]\n return direct + indirect\n\n\ndef shortest_path_recursive(maze, start, finish):\n stack = []\n seen = set()\n heappush(stack, (0, 0, 0, [(start, 0)]))\n while stack:\n (d, lvl, ldl, path) = heappop(stack)\n loc = path[-1][0]\n if lvl == 0 and loc == finish:\n print(f'Path recursive : {path}')\n return (d, path)\n seen.add((loc, lvl))\n candidates = [(l, dl) \n for l in neighbors(maze, loc) \n for dl in [level_change(maze, loc, l)]\n if (l, lvl + d) not in seen\n if not(l in {'AA', 'ZZ'} and lvl != 0)\n if level_change(maze, l, loc) == -ldl ]\n for (nloc, dl) in candidates:\n nlvl = lvl + dl\n if lvl >= 0:\n heappush(stack, (d + distance(maze, loc, nloc), \n nlvl, dl,\n path + [(nloc, nlvl)]))\n return (-1, [])\n\ndef level_change(maze, l1, l2):\n return maze[l1][l2][1][1] if l1 <= l2 else maze[l2][l1][1][0]\n\n\n#######################################\n\n\nSMALL_TEST_INPUT = ''' A \n A \n #######.######### \n #######.........# \n #######.#######.# \n #######.#######.# \n #######.#######.# \n ##### B ###.# \nBC...## C ###.# \n ##.## ###.# \n ##...DE F ###.# \n ##### G ###.# \n #########.#####.# \nDE..#######...###.# \n #.#########.###.# \nFG..#########.....# \n ###########.##### \n Z \n Z '''\n\nLARGER_INPUT = ''' A \n A \n #################.############# \n #.#...#...................#.#.# \n #.#.#.###.###.###.#########.#.# \n #.#.#.......#...#.....#.#.#...# \n #.#########.###.#####.#.#.###.# \n #.............#.#.....#.......# \n ###.###########.###.#####.#.#.# \n #.....# A C #.#.#.# \n ####### S P #####.# \n #.#...# #......VT\n #.#.#.# #.##### \n #...#.# YN....#.# \n #.###.# #####.# \nDI....#.# #.....# \n #####.# #.###.# \nZZ......# QG....#..AS\n ###.### ####### \nJO..#.#.# #.....# \n #.#.#.# ###.#.# \n #...#..DI BU....#..LF\n #####.# #.##### \nYN......# VT..#....QG\n #.###.# #.###.# \n #.#...# #.....# \n ###.### J L J #.#.### \n #.....# O F P #.#...# \n 
#.###.#####.#.#####.#####.###.# \n #...#.#.#...#.....#.....#.#...# \n #.#####.###.###.#.#.#########.# \n #...#.#.....#...#.#.#.#.....#.# \n #.###.#####.###.###.#.#.####### \n #.#.........#...#.............# \n #########.###.###.############# \n B J C \n U P P '''\n\nPART2_INPUT = ''' Z L X W C \n Z P Q B K \n ###########.#.#.#.#######.############### \n #...#.......#.#.......#.#.......#.#.#...# \n ###.#.#.#.#.#.#.#.###.#.#.#######.#.#.### \n #.#...#.#.#...#.#.#...#...#...#.#.......# \n #.###.#######.###.###.#.###.###.#.####### \n #...#.......#.#...#...#.............#...# \n #.#########.#######.#.#######.#######.### \n #...#.# F R I Z #.#.#.# \n #.###.# D E C H #.#.#.# \n #.#...# #...#.# \n #.###.# #.###.# \n #.#....OA WB..#.#..ZH\n #.###.# #.#.#.# \nCJ......# #.....# \n ####### ####### \n #.#....CK #......IC\n #.###.# #.###.# \n #.....# #...#.# \n ###.### #.#.#.# \nXF....#.# RF..#.#.# \n #####.# ####### \n #......CJ NM..#...# \n ###.#.# #.###.# \nRE....#.# #......RF\n ###.### X X L #.#.#.# \n #.....# F Q P #.#.#.# \n ###.###########.###.#######.#########.### \n #.....#...#.....#.......#...#.....#.#...# \n #####.#.###.#######.#######.###.###.#.#.# \n #.......#.......#.#.#.#.#...#...#...#.#.# \n #####.###.#####.#.#.#.#.###.###.#.###.### \n #.......#.....#.#...#...............#...# \n #############.#.#.###.################### \n A O F N \n A A D M '''\n \n\n@pytest.fixture\ndef small_input():\n return read_raw(SMALL_TEST_INPUT)\n\n@pytest.fixture\ndef larger_input():\n return read_raw(LARGER_INPUT)\n\n@pytest.fixture\ndef part2_input():\n return read_raw(PART2_INPUT)\n\ndef test_find_connectors(small_input):\n actual = find_connectors(small_input)\n expected = {\n 'AA':{((9,2), True)}, \n 'BC': {((2,8), True),((9,7), False)},\n 'DE': {((2,13), True), ((7,10), False)},\n 'FG': {((11,11), False), ((2,15), True)},\n 'ZZ': {((13,16), True)}\n }\n assert actual == expected\n\n\ndef test_parse_maze(small_input):\n actual = parse_maze(small_input)\n expected = {\n 'AA': {'BC': (5, (0, 1)), 'ZZ': (26, (0, 0)), 'FG': (31, (0, 1)),},\n 'BC': {'DE': (7, (-1, 1)), 'FG': (34, (1, 1)), 'ZZ': (29, (1, 0)),},\n 'DE': {'FG': (4, (-1, -1))},\n 'FG': {'ZZ': (7, (1, 0))},\n }\n assert actual == expected\n\ndef test_shortest_path(small_input):\n maze = parse_maze(small_input)\n (distance, path) = shortest_path(maze, 'AA', 'ZZ')\n assert distance == 23\n assert path == ['AA', 'BC', 'DE', 'FG', 'ZZ']\n\n\ndef test_shortest_path_recursive(part2_input):\n maze = parse_maze(part2_input)\n (distance, path) = shortest_path_recursive(maze, 'AA', 'ZZ')\n assert distance == 396\n\nif __name__ == '__main__':\n pytest.main([__file__])\n\n with open('day20_input.txt', 'r') as f:\n maze = parse_maze(read_raw(f.read()))\n\n (dist, path) = shortest_path(maze, 'AA', 'ZZ')\n print(f'Part1: distance {dist} found for path {path}')\n\n (dist2, path2) = shortest_path_recursive(maze, 'AA', 'ZZ')\n print(f'Part2: distance {dist2} found for path {path2}')","repo_name":"ptillemans/AdventOfCode2019","sub_path":"day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":9949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4160399218","text":"import numpy\nimport sys\n\nif(len(sys.argv)<4):\n print(\"syntax:\",sys.argv[0],\"[FLASK info infile] [bias, one for each galaxy/matter field] [outfile]\")\n sys.exit(1)\n\nt=numpy.genfromtxt(sys.argv[1], dtype=(int,int,float,float,int,float,float))\n\nbias=[float(s) for s in sys.argv[2:-1]]\n\nbi=0\n\nfor f in t:\n if f[4]==1: # 
galaxy field\n        print(\"multiplying by bias\",bias[bi],bi)\n        print(f)\n        f[3]*=bias[bi]\n        bi+=1\n        print(f)\n\nnumpy.savetxt(sys.argv[-1],t,fmt='%i %i %f %f %i %f %f ')\n","repo_name":"danielgruen/flaskpipe","sub_path":"flask_bias_info.py","file_name":"flask_bias_info.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5985093387","text":"from django.db import models\n\n# Create your models here.\n\n\nclass Category(models.Model):\n    id = models.AutoField(primary_key=True, serialize=True)\n    name = models.CharField(max_length=30, blank=True, default='')\n    description = models.TextField(blank=True, default='')\n\n\nclass Product(models.Model):\n    id = models.AutoField(primary_key=True, serialize=True)\n    name = models.CharField(max_length=100, blank=True, default='')\n    code = models.CharField(max_length=8, blank=True, default='N/A')\n    description = models.TextField()\n    price = models.DecimalField(max_digits=6, decimal_places=2)\n    categories = models.ManyToManyField(Category)","repo_name":"cosminpopa1897/AE_SPA","sub_path":"aeSpaServer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72490231946","text":"'''\nCreated on 05 Nov 2019\n\n@author: danhbuithi\n'''\n\nimport sys\nimport numpy as np\n\nfrom common.CommandArgs import CommandArgs\nfrom common.DataSet import DataSet\nfrom rulefit.rulefit import RuleFit\nfrom sklearn.metrics import f1_score\nfrom sklearn.ensemble import GradientBoostingClassifier\nimport time\n\n\ndef evaluateByF1(y_pred, y_true):\n    a = f1_score(y_true, y_pred, average='micro')\n    b = f1_score(y_true, y_pred, average='macro')\n    return (a, b)\n\nif __name__ == '__main__':\n    config = CommandArgs({\n                        'train'  : ('', 'Path of training data file'),\n                        'test'   : ('', 'Path of testing data file'),\n                        'class'  : (None, 'Class index'),\n                        'n'      : (250, 'Maximum number of rules')\n                        })\n\n    if not config.load(sys.argv):\n        print('Argument is not correct. 
Please try again')\n sys.exit(2)\n \n \n class_index = int(config.get_value('class'))\n nrules = int(config.get_value('n'))\n\n \n for i in range(5):\n train_data = DataSet()\n train_data.load(config.get_value('train')+'.'+str(i), class_index)\n \n test_data = DataSet()\n test_data.load(config.get_value('test')+'.'+str(i), class_index)\n print(train_data.size())\n \n '''\n Convert data into binary\n '''\n rel_train_X = train_data.get_X_in_binary()\n rel_train_Y = train_data.get_Y_in_numeric()\n train_X = rel_train_X.relation_matrix\n train_Y = rel_train_Y.values\n train_Y[train_Y < 1] = -1\n train_Y[train_Y >= 1] = 1\n print(np.unique(train_Y, return_counts=True))\n \n test_X = test_data.get_X_in_binary_with(rel_train_X.item_dict)\n test_Y = test_data.get_Y_in_numeric_with(rel_train_Y.item_dict)\n test_Y[test_Y < 1] = -1\n test_Y[test_Y >= 1] = 1\n print(np.unique(test_Y, return_counts=True))\n \n gb = GradientBoostingClassifier(n_estimators=100, max_depth=10, learning_rate=0.01,random_state=1)\n rf = RuleFit(tree_generator=gb,rfmode='classify',max_rules=nrules,random_state=1)\n N=train_X.shape[0]\n start = time.time()\n rf.fit(train_X, train_Y)\n rules = rf.get_rules()\n print('# rules', len(rules))\n print('executing time', time.time()-start)\n \n print('Predict for training data')\n y_pred=rf.predict(train_X)\n print(evaluateByF1(y_pred, train_Y))\n \n print('Predict for testing data')\n y_pred=rf.predict(test_X)\n print(evaluateByF1(y_pred, test_Y))\n","repo_name":"banhdzui/MoMAC-v1","sub_path":"TestRuleFit.py","file_name":"TestRuleFit.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30168438346","text":"from flask import (Blueprint, render_template, current_app, request, flash,\n redirect, url_for, abort)\nfrom flask_login import login_required, current_user\n\nfrom wtforms import FormField, TextField\nfrom sqlalchemy.orm.session import make_transient\n\nfrom ..extensions import db\nfrom ..settings import Setting\nfrom ..zones import Zone\nfrom .forms import (CreateNotificationForm, EditNotificationForm,\n EditNotificationMessageForm,\n EmailNotificationForm, PushoverNotificationForm,\n TwilioNotificationForm, TwiMLNotificationForm, ProwlNotificationForm,\n GrowlNotificationForm, CustomPostForm, ZoneFilterForm, ReviewNotificationForm,\n MatrixNotificationForm, UPNPPushNotificationForm)\n\nfrom .models import Notification, NotificationSetting, NotificationMessage\n\nfrom .constants import (EVENT_TYPES, NOTIFICATION_TYPES, DEFAULT_SUBSCRIPTIONS,\n EMAIL, PUSHOVER, TWILIO, PROWL, GROWL,\n CUSTOM, TWIML, MATRIX, ZONE_FAULT, ZONE_RESTORE,\n UPNPPUSH)\n\nNOTIFICATION_TYPE_DETAILS = {\n 'email': (EMAIL, EmailNotificationForm),\n 'pushover': (PUSHOVER, PushoverNotificationForm),\n 'twilio': (TWILIO, TwilioNotificationForm),\n 'prowl': (PROWL, ProwlNotificationForm),\n 'growl': (GROWL, GrowlNotificationForm),\n 'custom': (CUSTOM, CustomPostForm),\n 'twiml': (TWIML, TwiMLNotificationForm),\n 'upnppush': (UPNPPUSH, UPNPPushNotificationForm),\n 'matrix': (MATRIX, MatrixNotificationForm)\n}\n\nnotifications = Blueprint('notifications',\n __name__,\n url_prefix='/settings/notifications')\n\n@notifications.context_processor\ndef notifications_context_processor():\n return {\n 'TYPES': NOTIFICATION_TYPES,\n 'TYPE_DETAILS': NOTIFICATION_TYPE_DETAILS,\n 'EVENT_TYPES': EVENT_TYPES,\n }\n\n@notifications.route('/')\n@login_required\ndef index():\n use_ssl = Setting.get_by_name('use_ssl', 
default=False).value\n    notification_list = Notification.query.all()\n\n    return render_template('notifications/index.html',\n                           notifications=notification_list,\n                           active='notifications',\n                           ssl=use_ssl)\n\n@notifications.route('/<int:id>/edit', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n    notification = Notification.query.filter_by(id=id).first_or_404()\n    if notification.user != current_user and not current_user.is_admin():\n        abort(403)\n\n    type_id, form_type = NOTIFICATION_TYPE_DETAILS[NOTIFICATION_TYPES[notification.type]]\n    obj = notification\n    if request.method == 'POST':\n        obj = None\n\n    form = form_type(obj=obj)\n\n    if not form.is_submitted():\n        form.populate_from_settings(id)\n\n    if form.validate_on_submit():\n        notification.description = form.description.data\n        form.populate_settings(notification.settings, id=id)\n        notification.enabled = 1\n        db.session.add(notification)\n        db.session.commit()\n\n        current_app.decoder.refresh_notifier(id)\n\n        if str(ZONE_FAULT) in form.subscriptions.data or str(ZONE_RESTORE) in form.subscriptions.data:\n            return redirect(url_for('notifications.zone_filter', id=notification.id))\n\n        return redirect(url_for('notifications.review', id=notification.id))\n\n    use_ssl = Setting.get_by_name('use_ssl', default=False).value\n\n    return render_template('notifications/edit.html',\n                           form=form,\n                           id=id,\n                           notification=notification,\n                           active='notifications',\n                           ssl=use_ssl, legend=form.legend)\n\n@notifications.route('/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n    form = CreateNotificationForm()\n\n    if form.validate_on_submit():\n        return redirect(url_for('notifications.create_by_type',\n                                type=form.type.data))\n\n    use_ssl = Setting.get_by_name('use_ssl', default=False).value\n\n    return render_template('notifications/create.html',\n                           form=form,\n                           active='notifications',\n                           ssl=use_ssl)\n\n@notifications.route('/create/<type>', methods=['GET', 'POST'])\n@login_required\ndef create_by_type(type):\n    if type not in NOTIFICATION_TYPE_DETAILS.keys():\n        abort(404)\n\n    type_id, form_type = NOTIFICATION_TYPE_DETAILS[type]\n    form = form_type()\n    form.type.data = type_id\n\n    if not form.is_submitted():\n        form.subscriptions.data = [str(k) for k in DEFAULT_SUBSCRIPTIONS]\n\n    if form.validate_on_submit():\n        obj = Notification()\n\n        obj.type = form.type.data\n        obj.description = form.description.data\n        obj.user = current_user\n        form.populate_settings(obj.settings)\n\n        db.session.add(obj)\n        db.session.commit()\n\n        current_app.decoder.refresh_notifier(obj.id)\n\n        if str(ZONE_FAULT) in form.subscriptions.data or str(ZONE_RESTORE) in form.subscriptions.data:\n            return redirect(url_for('notifications.zone_filter', id=obj.id))\n\n        return redirect(url_for('notifications.review', id=obj.id))\n\n    use_ssl = Setting.get_by_name('use_ssl', default=False).value\n\n    return render_template('notifications/create_by_type.html',\n                           form=form,\n                           type=type,\n                           active='notifications',\n                           ssl=use_ssl, legend=form.legend)\n\ndef build_zone_list():\n    zone_list = [(str(i), \"Zone {0:02d}\".format(i)) for i in range(1, 100)]\n\n    zones = Zone.query.all()\n    zone_list_len = len(zone_list)\n    for z in zones:\n        if z.zone_id <= zone_list_len - 1:\n            zone_list[z.zone_id - 1] = (str(z.zone_id), 'Zone {0:02d} - {1}'.format(z.zone_id, z.name))\n\n    return zone_list\n\n@notifications.route('/<int:id>/zones', methods=['GET', 'POST'])\n@login_required\ndef zone_filter(id):\n    form = ZoneFilterForm()\n    form.zones.choices = build_zone_list()\n\n    if not form.is_submitted():\n        form.populate_from_settings(id=id)\n\n    if 
form.validate_on_submit():\n        obj = Notification.query.filter_by(id=id).first_or_404()\n        form.populate_settings(obj.settings)\n\n        db.session.add(obj)\n        db.session.commit()\n\n        return redirect(url_for('notifications.review', id=id))\n\n    return render_template('notifications/zone_filter.html', id=id, form=form, active='notifications')\n\n@notifications.route('/<int:id>/remove', methods=['GET', 'POST'])\n@login_required\ndef remove(id):\n    notification = Notification.query.filter_by(id=id).first_or_404()\n    if notification.user != current_user and not current_user.is_admin():\n        abort(403)\n\n    db.session.delete(notification)\n    db.session.commit()\n\n    current_app.decoder.refresh_notifier(id)\n\n    flash('Notification deleted.', 'success')\n    return redirect(url_for('notifications.index'))\n\n@notifications.route('/<int:id>/copy', methods=['GET', 'POST'])\n@login_required\ndef copy_notification(id):\n    notification = Notification.query.filter_by(id=id).first_or_404()\n    desc = notification.description\n\n    if notification.user != current_user and not current_user.is_admin():\n        abort(403)\n\n    notification.id = None\n    notification.description = desc + ' Clone'\n    make_transient(notification)\n\n    db.session.add(notification)\n    db.session.commit()\n\n    old_settings = NotificationSetting.query.filter_by(notification_id=id).all()\n\n    for s in old_settings:\n        s.id = None\n        s.notification_id = notification.id\n        make_transient(s)\n        db.session.add(s)\n\n    db.session.commit()\n\n    current_app.decoder.refresh_notifier(notification.id)\n\n    flash('Notification cloned.', 'success')\n    return redirect(url_for('notifications.index'))\n\n@notifications.route('/<int:id>/toggle', methods=['GET', 'POST'])\n@login_required\ndef toggle_notification(id):\n    notification = Notification.query.filter_by(id=id).first_or_404()\n\n    if notification.user != current_user and not current_user.is_admin():\n        abort(403)\n\n    status = \"Enabled\"\n\n    if notification.enabled == 0:\n        notification.enabled = 1\n        status = \"Enabled\"\n    else:\n        notification.enabled = 0\n        status = \"Disabled\"\n\n    db.session.add(notification)\n    db.session.commit()\n\n    current_app.decoder.refresh_notifier(id)\n\n    flash('Notification ' + status, 'success')\n    return redirect(url_for('notifications.index'))\n\n@notifications.route('/<int:id>/review', methods=['GET', 'POST'])\n@login_required\ndef review(id):\n    form = ReviewNotificationForm()\n\n    notification = Notification.query.filter_by(id=id).first_or_404()\n    if notification.user != current_user and not current_user.is_admin():\n        abort(403)\n\n    if form.validate_on_submit():\n        error = None\n        if form.buttons.test.data:\n            error = current_app.decoder.test_notifier(notification.id)\n\n            if error:\n                flash('Error sending test notification: {0}'.format(error), 'error')\n            else:\n                flash('Test notification sent.', 'success')\n        else:\n            flash('Notification saved.', 'success')\n\n        if error is None:\n            return redirect(url_for('notifications.index'))\n\n    return render_template('notifications/review.html', notification=notification, form=form, active='notifications')\n\n@notifications.route('/messages', methods=['GET'])\n@login_required\ndef messages():\n    if not current_user.is_admin():\n        abort(403)\n\n    messages = NotificationMessage.query.all()\n\n    return render_template('notifications/messages.html',\n                           messages=messages,\n                           active='notifications')\n\n@notifications.route('/messages/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_message(id):\n    if not current_user.is_admin():\n        abort(403)\n\n    message = NotificationMessage.query.filter_by(id=id).first_or_404()\n    
form = EditNotificationMessageForm()\n\n    if not form.is_submitted():\n        form.id.data = message.id\n        form.text.data = message.text\n    else:\n        message.text = form.text.data\n\n        db.session.add(message)\n        db.session.commit()\n\n        flash('The notification message has been updated.', 'success')\n\n        return redirect(url_for('notifications.messages'))\n\n    return render_template('notifications/edit_message.html',\n                           form=form,\n                           message_id=message.id,\n                           active='notifications')\n","repo_name":"nutechsoftware/alarmdecoder-webapp","sub_path":"ad2web/notifications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10711,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"81"} +{"seq_id":"26355928250","text":"import nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\nimport numpy\nimport tflearn\nimport tensorflow\nfrom tensorflow.python.framework import ops\nimport random\nimport json\nimport pickle\nfrom mysql_python_interface import read_from_db\nfrom dotenv import dotenv_values\n\nenv = dotenv_values(\".env\")\nPATTERNS_TABLE = env[\"patterns_table\"]\nRESPONSES_TABLE = env[\"responses_table\"]\n\ntry:\n    with open(\"data.pickle\", \"rb\") as f:\n        words, labels, training, output = pickle.load(f)\nexcept:\n    words = []\n    labels = []\n    docs_x = []\n    docs_y = []\n\n    patterns_from_db = read_from_db(PATTERNS_TABLE,\"*\")\n\n    for pattern_tag in patterns_from_db:\n        wrds = nltk.word_tokenize(pattern_tag[1])\n        words.extend(wrds)\n        docs_x.append(wrds)\n        docs_y.append(pattern_tag[0])\n\n        if pattern_tag[0] not in labels:\n            labels.append(pattern_tag[0])\n\n    words = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\n    words = sorted(set(words))\n\n    labels = sorted(labels)\n\n    training = []\n    output = []\n\n    out_empty = [0 for _ in range(len(labels))]\n\n    for x,doc in enumerate(docs_x):\n        bag = []\n\n        wrds = [stemmer.stem(w) for w in doc]\n\n        for w in words:\n            if w in wrds:\n                bag.append(1)\n            else:\n                bag.append(0)\n\n        output_row = out_empty[:]\n        output_row[labels.index(docs_y[x])] = 1\n\n        training.append(bag)\n        output.append(output_row)\n\n    training = numpy.array(training)\n    output = numpy.array(output)\n\n    with open(\"data.pickle\",\"wb\") as f:\n        pickle.dump((words,labels, training, output), f)\n\nops.reset_default_graph()\n\nnet = tflearn.input_data(shape=[None, len(training[0])])\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\nnet = tflearn.regression(net)\n\nmodel = tflearn.DNN(net)\n\nmodel.fit(training, output, n_epoch=5000, batch_size=8, show_metric=True)\nmodel.save(\"model.tflearn\")\n\ndef bag_of_words(s,words):\n    bag = [0 for _ in range(len(words))]\n\n    s_words = nltk.word_tokenize(s)\n    s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n    for se in s_words:\n        for i, w in enumerate(words):\n            if w == se:\n                bag[i] = 1\n\n    return numpy.array(bag)\n\ndef create_response(inp):\n    results = model.predict([bag_of_words(inp,words)]) # render probabilities\n    results_index = numpy.argmax(results) # choose the highest probability in the array\n\n    print(results)\n\n    if results[0][results_index] > 0.7:\n        tag = labels[results_index]\n\n        try:\n            with open(\"data_response.pickle\",\"rb\") as f:\n                responses_from_db = pickle.load(f)\n        except:\n            responses_from_db = read_from_db(RESPONSES_TABLE,\"*\")\n\n            with open(\"data_response.pickle\",\"wb\") as f:\n                pickle.dump((responses_from_db),f)\n\n        responses = []\n        
if len(responses_from_db) == 0:\n return \"no response\"\n\n for tag_response in responses_from_db:\n print(tag_response[0])\n print(tag)\n print(tag_response[0] == tag)\n if tag_response[0] == tag:\n responses.append(tag_response[1])\n\n return random.choice(responses)\n else:\n return \"no response\"\n","repo_name":"usersmagic/discord.usersmagic","sub_path":"chat_bot.py","file_name":"chat_bot.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2701722422","text":"from code.utils import parse_input\nimport re\ninput = parse_input(19, '\\n\\n', sample=False)\nrules = input[0].split('\\n')\nlines = input[1].split('\\n')\n\nmeta_rules = {}\nstraight_rules = {}\ncache = {}\n\ndef parse_rules(rules):\n for rule in rules:\n index, logic = rule.split(': ')\n if \"\\\"\" in logic:\n straight_rules[index] = logic[1]\n else:\n options = logic.split(\" | \")\n for op in options:\n op = op.split(\" \")\n if index not in meta_rules:\n meta_rules[index] = []\n meta_rules[index].append(op)\n\n\ndef add_to_cache(rule_num, matched):\n if rule_num not in cache:\n cache[rule_num] = {matched}\n else:\n cache[rule_num].add(matched)\n\n\ndef match_return_remainder(line, rule_num, verbose=False):\n if rule_num in cache:\n for mop in cache[rule_num]:\n if line.startswith(mop):\n return True, mop, line[len(mop):]\n\n if line == '':\n return True, '', line\n\n if rule_num in meta_rules:\n for option in meta_rules[rule_num]:\n option_copy = line\n matched = ''\n for part in option:\n is_match, submatch, remainder = match_return_remainder(\n option_copy, part, verbose=verbose)\n if not is_match:\n break\n else:\n matched += submatch\n option_copy = remainder\n\n if is_match:\n add_to_cache(rule_num, matched)\n return True, matched, remainder\n return False, '', line\n elif rule_num in straight_rules:\n if line[0] == straight_rules[rule_num]:\n add_to_cache(rule_num, line[0])\n return True, line[0], line[1:]\n else:\n return False, '', line\n\n\n# part one\nparse_rules(rules)\nmatch0 = 0\nfor line in lines:\n\n is_match, matched, remainder = match_return_remainder(line, '0')\n if is_match and remainder == '':\n match0 += 1\n\nprint(match0)\n\n\n# part two\nmatch1 = 0\nfor line in lines:\n remaining = line\n keep_going = True\n num42s = 0\n while remaining and keep_going:\n start_len = len(remaining)\n for mop in cache['42']:\n if remaining.startswith(mop):\n num42s += 1\n remaining = remaining[len(mop):]\n if len(remaining) == start_len:\n keep_going = False\n\n keep_going = True\n num31s = 0\n while remaining and keep_going:\n start_len = len(remaining)\n for mop in cache['31']:\n if remaining.startswith(mop):\n num31s += 1\n remaining = remaining[len(mop):]\n if len(remaining) == start_len:\n keep_going = False\n\n if not remaining and num42s >= 1 and num31s >= 1 and num42s - num31s >= 1:\n match1 += 1\n\nprint(match1)\n","repo_name":"trisha-queue/advent2020","sub_path":"code/solutions/aoc19.py","file_name":"aoc19.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26099614288","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import metrics #Import scikit-learn metrics module for accuracy calculation\nfrom sklearn import tree\nfrom sklearn.tree import 
export_graphviz,DecisionTreeClassifier \nfrom sklearn.tree import _tree\nimport time\n\ndef tree_to_code(tree, feature_names,used_feature):\n tree_ = tree.tree_\n def recurse(node, depth):\n indent = \" \" * depth\n #print(indent, node)\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n name = \"feature[\"+str(tree_.feature[node])+\"]\" #translate to a compact feature code\n threshold = int(tree_.threshold[node])\n print(\"{}if {} <= {}:\".format(indent, name, threshold))\n recurse(tree_.children_left[node], depth + 1)\n print(\"{}else: # if {} > {}\".format(indent, name, threshold))\n recurse(tree_.children_right[node], depth + 1)\n else:\n print(indent,\"return \",np.argmax(tree_.value[node]))\n recurse(0, 1)\n\ndef node_translator(node, used_node, value):\n if node in used_node:\n return used_node.index(node)\n else:\n return value \n\ndef write_file(name,mylist):\n f = open(name, \"w\")\n for line in mylist:\n # write line to output file\n f.write(line)\n f.write(\"\\n\")\n f.close()\n\ndef tree_to_mem(tree,used_feature, used_node):\n tree_ = tree.tree_\n Mem_fea=[]\n Mem_thd=[]\n Mem_child=[]\n print(\"\\n\"+\"final architecture\")\n for i in range(256):\n Mem_fea.append('00')\n Mem_thd.append('00')\n Mem_child.append('00')\n Mem_child.append('00') ## each node has two child\n def recurse(node, depth):\n indent = \" \" * depth\n cur_node=node_translator(node, used_node,value=\"leaf\")\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n fea = used_feature.index(tree_.feature[node]) #translate to a compact feature code\n Mem_fea[cur_node]=\"0x{:02x}\".format(fea)[2:]\n threshold = int(tree_.threshold[node])\n Mem_thd[cur_node]=\"0x{:02x}\".format(threshold)[2:]\n print(indent,\"Node: \", cur_node,\"Feature: \", fea,\"Threshold: \" , threshold)\n \n l_node=tree_.children_left[node]\n l_value=np.argmax(tree_.value[l_node])\n l_node=node_translator(l_node, used_node, l_value)\n Mem_child[cur_node*2]=\"0x{:02x}\".format(l_node)[2:]\n print(indent+\" \",\"Left: \", l_node)\n recurse(tree_.children_left[node], depth + 1)\n \n r_node=tree_.children_right[node]\n r_value=np.argmax(tree_.value[r_node])\n r_node=node_translator(r_node, used_node, r_value)\n Mem_child[cur_node*2+1]=\"0x{:02x}\".format(r_node)[2:]\n print(indent+\" \",\"Right: \", r_node)\n recurse(tree_.children_right[node], depth + 1)\n \n else:\n print(indent,cur_node,np.argmax(tree_.value[node]))\n recurse(0, 1)\n #write out model parameters\n write_file(\"fea.hex\", Mem_fea)\n write_file(\"thd.hex\", Mem_thd)\n write_file(\"child.hex\", Mem_child)\n \n# pre-process\nloans = pd.read_csv('loan_data.csv')\nfinal_data = pd.get_dummies(loans, columns = ['purpose'], drop_first = True)\nfinal_data.head(1)\nX = final_data.drop('not.fully.paid', axis= 1)\nX=(X-X.min())*255.0//(X.max()-X.min())\nX=X.astype(int)\ny= final_data['not.fully.paid']\n\n#spliting\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)\n\n\n#training and \nweights = {0:np.sum(y_train[:])/(len(y_train[:])*0.5), 1:1.0}\ndtc = DecisionTreeClassifier(max_depth=6,class_weight=weights,max_leaf_nodes=16)\ndtc.fit(X_train, y_train)\ntime_start=time.time()\npredictions = dtc.predict(X_test)\nprint(\"Total inference time using cpu: \"+str(time.time()-time_start))\n#pruning, code modified from \n#https://stackoverflow.com/questions/51397109/prune-unnecessary-leaves-in-sklearn-decisiontreeclassifier\nfrom sklearn.tree._tree import TREE_LEAF, TREE_UNDEFINED\n\ndef is_leaf(inner_tree, index):\n # Check whether node is leaf node\n return 
(inner_tree.children_left[index] == TREE_LEAF and \n inner_tree.children_right[index] == TREE_LEAF)\n\ndef prune_index(inner_tree, decisions, index=0):\n # Start pruning from the bottom - if we start from the top, we might miss\n # nodes that become leaves during pruning.\n # Do not use this directly - use prune_duplicate_leaves instead.\n used_feature_l=[]\n used_feature_r=[]\n used_node_l=[]\n used_node_r=[]\n if not is_leaf(inner_tree, inner_tree.children_left[index]):\n used_feature_l, used_node_l = prune_index(inner_tree, decisions, inner_tree.children_left[index])\n else:\n used_node_l=[-np.argmax(inner_tree.value[inner_tree.children_left[index]])-1]\n if not is_leaf(inner_tree, inner_tree.children_right[index]):\n used_feature_r, used_node_r = prune_index(inner_tree, decisions, inner_tree.children_right[index])\n else:\n used_node_r=[-np.argmax(inner_tree.value[inner_tree.children_right[index]])-1]\n used_feature_child = used_feature_l+used_feature_r\n used_node_child = used_node_l+used_node_r\n # Prune children if both children are leaves now and make the same decision: \n if (is_leaf(inner_tree, inner_tree.children_left[index]) and\n is_leaf(inner_tree, inner_tree.children_right[index]) and\n (decisions[index] == decisions[inner_tree.children_left[index]]) and \n (decisions[index] == decisions[inner_tree.children_right[index]])):\n # turn node into a leaf by \"unlinking\" its children\n inner_tree.children_left[index] = TREE_LEAF\n inner_tree.children_right[index] = TREE_LEAF\n inner_tree.feature[index] = TREE_UNDEFINED\n ##print(\"Pruned {}\".format(index))\n return [], [-np.argmax(inner_tree.value[index])-1]\n return used_feature_child+[inner_tree.feature[index]], used_node_child+[index]\ndef prune_duplicate_leaves(mdl):\n # Remove leaves if both \n decisions = mdl.tree_.value.argmax(axis=2).flatten().tolist() # Decision for each node\n used_feature,used_node=prune_index(mdl.tree_, decisions)\n used_feature.sort()\n used_node.sort()\n used_feature2 = []\n used_node2 = []\n for fea in used_feature:\n if fea not in used_feature2:\n used_feature2.append(fea)\n for node in used_node:\n if node not in used_node2:\n used_node2.append(node) \n return used_feature2,used_node2 \n\nused_feature, used_node = prune_duplicate_leaves(dtc)\nprint(\"feature compress\")\nfor i in range(len(used_feature)):\n print(\"original index:\",used_feature[i], \"new index\", i,\"feature name:\",X_test.keys()[used_feature[i]])\n#print(\"node compress\", used_node)\n\n#write out data to be fed\ndata_out=[]\nans_out=[]\ntemp=X_test.to_numpy()\ntemp2=predictions\nfor i in range(len(temp)):\n ans_out.append(\"0x{:01x}\".format(temp2[i])[2:])\n for j in range(len(used_feature)): \n data_out.append(\"0x{:02x}\".format(temp[i,used_feature[j]])[2:])\nwrite_file(\"data.hex\", data_out)\nwrite_file(\"ans.hex\", ans_out)\n\n'''\n# verification for tree conversion\n# code modified from: https://stackoverflow.com/questions/56334210/how-to-extract-sklearn-decision-tree-rules-to-pandas-boolean-conditions \ntree_to_code(dtc, X_train.columns,used_feature)\ndef tree_pred(feature): #paste the result here\n if feature[0] <= 127:\n if feature[1] <= 54:\n if feature[3] <= 75:\n return 1\n else: # if feature[3] > 75\n return 0\n else: # if feature[1] > 54\n return 1\n else: # if feature[0] > 127\n if feature[1] <= 55:\n if feature[2] <= 150:\n if feature[5] <= 157:\n if feature[8] <= 161:\n return 0\n else: # if feature[8] > 161\n return 1\n else: # if feature[5] > 157\n return 0\n else: # if feature[2] > 150\n return 1\n else: # 
if feature[1] > 55\n        if feature[17] <= 127:\n            return 0\n        else: # if feature[17] > 127\n            if feature[6] <= 73:\n                return 1\n            else: # if feature[6] > 73\n                return 0\ncounter=0\nfor i in range(len(y_test)):\n    if (tree_pred(X_test.to_numpy()[i])!=predictions[i]):\n        counter+=1\n        print(\"result different!!\",counter)\n'''\n#write out model parameters\ntree_to_mem(dtc, used_feature, used_node)\n\n#visualization\nplt.figure(figsize=(36,12))\ntree.plot_tree(dtc, fontsize=6)\nplt.savefig('tree_high_dpi', dpi=100)\n\n# performance\nfrom sklearn.metrics import classification_report, confusion_matrix\n#print(classification_report(y_test, predictions))\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, predictions))\nprint(\"F1:\",metrics.f1_score(y_test, predictions))\nprint(\"Confusion Matrix:\\n\",confusion_matrix(y_test, predictions))\n ","repo_name":"brandon9838/Verilog-Implementation-of-Decision-Tree-Accelerator","sub_path":"software/dec.py","file_name":"dec.py","file_ext":"py","file_size_in_byte":9135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18217828032","text":"'''\ncoding=utf-8\nUser:Tina\ndate:2018-08-22\nAdded: stored-value card settings\n'''\n\nfrom pageObject.add_store_card_object import AddStoreCard\nimport time\n\n\nclass StoreCardSetting:\n\n    def __init__(self):\n        print(\"Add the stored-value card settings\")\n\n    @staticmethod\n    def card_setting(driver,cardname):\n        addcardsetting = AddStoreCard(driver)\n        driver.implicitly_wait(30)\n\n\n        # Open the left-side menu: Purchase\n        addcardsetting.go_purchase_menu_obj().click()\n\n        # Switch to the menu: stored-value card settings\n        addcardsetting.go_store_card_setting_obj().click()\n\n        # Add\n        addcardsetting.add_card_obj().click()\n        time.sleep(2)\n\n\n        # Card name\n        addcardsetting.name_obj().send_keys(cardname)\n\n        # Validity period\n        addcardsetting.valid_days_obj().send_keys(1000)\n\n        # Base selling price\n        addcardsetting.price_obj().send_keys(10)\n\n        # Actual value\n        addcardsetting.real_price_obj().send_keys(1000)\n\n        # Transfer fee\n        addcardsetting.fee_obj().send_keys(1)\n\n        # Store\n        addcardsetting.shop_select_obj().click()\n\n        # Supported consumption methods\n        addcardsetting.consumption_patterns1().click()\n        addcardsetting.consumption_patterns2().click()\n        addcardsetting.consumption_patterns3().click()\n        addcardsetting.consumption_patterns4().click()\n\n        # Sale period\n        addcardsetting.sale_time_from_obj().send_keys(\"2018-10-17\")\n        addcardsetting.sale_time_to_obj().send_keys(\"2019-12-17\")\n        time.sleep(2)\n\n        # Available for online purchase\n        addcardsetting.purchase_obj().click()\n        time.sleep(2)\n\n        # Set the weight\n        addcardsetting.weights_obj().send_keys(10)\n        time.sleep(3)\n\n        # Save\n        addcardsetting.confirm_obj().click()\n        time.sleep(2)\n\n        # Confirm\n        addcardsetting.confirm_button_obj().click()\n\n        # Switch to the menu: stored-value card settings\n        addcardsetting.go_store_card_setting_obj().click()\n\n\n\n\n\n\n\n","repo_name":"tsjcxd/saas1.0_UI_python","sub_path":"appModules/store_card_setting.py","file_name":"store_card_setting.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8518777565","text":"\r\n\r\n# Read the data from file1.txt and file2.txt\r\n\r\n# and display all the words from file1 which start with h or m\r\n\r\n# and display all the words from file2 which start with c or s\r\n\r\npath = \".\\dependecies\\\\file1.txt\" # Relative path for file1\r\n\r\nf = open(path, 'r')\r\n\r\ntext1 = f.read()\r\nf.close()\r\n\r\nwords = text1.split()\r\n\r\nwords_from_file1 = []\r\nfor word in words:\r\n    #print(word)\r\n    if word[0] == \"h\" or word[0] == 'm':\r\n        words_from_file1.append(word)\r\nprint(\"Data from 
file1\")\r\nprint(words_from_file1)\r\n\r\nprint(\"=\"*100)\r\n\r\npath2 = \".\\dependecies\\\\file2.txt\"\r\n\r\nf = open(path, 'r')\r\ntext2 = f.read()\r\nf.close()\r\n\r\n\r\nwords = text2.split()\r\n\r\nwords_from_file2 = []\r\nfor word in words:\r\n if word[0] == 'c' or word[0] == 's':\r\n words_from_file2.append(word)\r\nprint(\"Data from file2\")\r\nprint(words_from_file2)\r\n\r\n\r\n\r\n\r\n","repo_name":"mupputur/RohithMission20","sub_path":"FileOperations/file9.py","file_name":"file9.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23128450111","text":"import yadisk\r\n\r\ny = yadisk.YaDisk(token=\"y0_AgAAAAA6Qd7JAAoMJgAAAADlfHsiBIK6yTKwQduJv4vG9Sg4ZH8z2ug\")\r\n#y.mkdir('/test')\r\n# или\r\n# y = yadisk.YaDisk(\"\", \"\", \"<токен>\")\r\ny.upload('images/check.png', '/test/check.png')\r\n\r\n# Проверяет, валиден ли токен\r\nfiles = list(y.listdir('/test')) \r\nfor p in files:\r\n print(p[\"name\"]) \r\n\r\n","repo_name":"zdarova1/zxczxc","sub_path":"t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15828271441","text":"#!/usr/bin/env python\n\n# this script can check the dimension of each case so that \n# we can set a reasonable cropping/padding size\n\nimport os\nimport numpy as np\nimport U_Net_function_list as ff\nimport nibabel as nib\nimport segcnn\n\ncg = segcnn.Experiment()\n\npatient_list = ff.get_patient_list_from_csv(os.path.join(cg.spreadsheet_dir,'Final_patient_list_include.csv'))\nprint(len(patient_list))\n\nx_size = []\ny_size = []\nz_size = []\nfor p in patient_list:\n patient_id = p[1]\n patient_class = p[0]\n vol = os.path.join(cg.image_data_dir,patient_class,patient_id,'img-nii-0.625/0.nii.gz')\n vol_data = nib.load(vol).get_fdata()\n dimension = vol_data.shape\n x_size.append(dimension[0])\n y_size.append(dimension[1])\n z_size.append(dimension[-1])\n print(patient_class,patient_id,dimension)\nx_size = np.asarray(x_size)\ny_size = np.asarray(y_size)\nz_size = np.asarray(z_size)\nprint(np.mean(x_size),np.std(x_size),np.median(x_size),np.min(x_size),np.max(x_size))\nprint(np.mean(y_size),np.std(y_size),np.median(y_size),np.min(y_size),np.max(y_size))\nprint(np.mean(z_size),np.std(z_size),np.median(z_size),np.min(z_size),np.max(z_size))\n\n\n\n# for VR dataset\n# x_dim: mean - 358, median - 352, min - 240, max - 656\n# y_dim: mean - 358, median - 352, min - 240, max - 656\n# z_dim: mean - 262, median - 356, min - 192, max - 488","repo_name":"zhennongchen/U-Net-2D","sub_path":"tool_check_image_size.py","file_name":"tool_check_image_size.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3848205892","text":"import asyncio\nimport datetime\nimport websockets\nimport json\nfrom cryptotrading import Crypto_bot\n\nclass Bot(Crypto_bot):\n def run(self):\n self.on_init()\n\n def on_init(self):\n tickers = [\"BNBBTC,1m\",\n \"BTCUSDT,1m\",\n \"ETHBTC,1m\"]\n\n self.subscribe_tickers(tickers)\n\n self.main_loop()\n\n def on_bar_close(self,bar):print(\"Ticker: {}\\nDatetime: {}\\nOpentime: {}\\nClosetime: {}\\nInterval: {}\\nOpen: {}\\nClose: {}\\nClosed: {}\\n\\n\".format(bar['ticker'],\n bar[\"datetime\"],\n bar['open_time'],\n bar['close_time'],\n bar['interval'],\n bar['open'],\n bar['close'],\n bar['closed']))\n\n\nif __name__ == 
\"__main__\":\n bot = Bot()\n bot.run()\n","repo_name":"nicholascomuni/Crypto-Trading-Bot-With-Binance-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15042455275","text":"import os, warnings\nos.environ[\"CLTK_DATA\"] = os.getcwd()+\"/Data/texts/\"\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=UserWarning)\n from cltk.tokenizers.lat.lat import LatinWordTokenizer as WordTokenizer\n from cltk.tokenizers.lat.lat import LatinPunktSentenceTokenizer as SentenceTokenizer\n from cltk.lemmatize.lat import LatinBackoffLemmatizer\nimport re\nfrom unidecode import unidecode\n\nclass PreProcessor():\n \"\"\" A PreProcessing class so as to not instantiate multiple versions of the tokenizer in short\n succession needlessly\"\"\"\n def __init__(self):\n self.wt = WordTokenizer()\n self.st = SentenceTokenizer()\n self.lt = LatinBackoffLemmatizer()\n \n\n def preprocess(self, inputText: str, keepPunct: bool = True, shouldTokenize: bool = True, shouldLemma: bool = True) -> list[str]:\n text = []\n # include only unicode characters\n inputText = unidecode(inputText).lower()\n if shouldTokenize:\n sents = self.st.tokenize(inputText)\n for sent in sents:\n tmpSent = []\n if not keepPunct:\n sent = re.sub(r'[^\\w\\s]', '', sent)\n wordToks = self.wt.tokenize(sent)\n if shouldLemma:\n res = self.lt.lemmatize(wordToks)\n lemmToks = [''.join([i for i in tmp[1] if not i.isdigit()]) for tmp in res]\n text+=lemmToks\n else:\n text+=wordToks\n return text\n return inputText \n\nif __name__==\"__main__\":\n pp = PreProcessor()\n text = 'atque haec Παρὰ τοῦ πάππου Οὐήρου τὸ καλόηθες καὶ ἀόργητον. abuterque puerve paterne nihil mecum. animiæger dicatur ut Seneca in Epistolis dixit.'\n text = \"nautas viam puer\"\n print(pp.preprocess(text, False))\n","repo_name":"SufurElite/LatinAuthorshipAttribution","sub_path":"Data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13036237137","text":"from collections import namedtuple\nfrom .. utils.handlers import eventHandler\nfrom .. 
utils.operators import makeOperator\n\nidKeysInFile = []\nunremovableIDKeys = set()\n\n@eventHandler(\"FILE_LOAD_POST\")\n@eventHandler(\"ADDON_LOAD_POST\")\n@makeOperator(\"an.update_id_keys_list\", \"Update ID Key List\", redraw = True)\ndef updateIdKeysList():\n    idKeysInFile.clear()\n    unremovableIDKeys.clear()\n\n    keys, unremovable = findIDKeysInCurrentFile()\n\n    idKeysInFile.extend(keys)\n    unremovableIDKeys.update(unremovable)\n\ndef getAllIDKeys():\n    return idKeysInFile\n\ndef getUnremovableIDKeys():\n    return unremovableIDKeys\n\ndef findIDKeysInCurrentFile():\n    foundKeys = set()\n    unremovableKeys = set()\n\n    for findKeys, removable in findIDKeysFunctions:\n        keys = findKeys()\n        foundKeys.update(keys)\n        if not removable:\n            unremovableKeys.update(keys)\n\n    # default keys should stay in order\n    allKeys = list()\n    allKeys.extend(defaultIDKeys)\n    unremovableKeys.update(defaultIDKeys)\n    for key in foundKeys:\n        if key not in allKeys:\n            allKeys.append(key)\n    return allKeys, unremovableKeys\n\nIDKey = namedtuple(\"IDKey\", [\"type\", \"name\"])\n\ndefaultIDKeys = [\n    IDKey(\"Transforms\", \"Initial Transforms\"),\n    IDKey(\"Integer\", \"Index\")\n]\n\nfindIDKeysFunctions = []\ndef findsIDKeys(removable = True):\n    def findsIDKeysDecorator(function):\n        findIDKeysFunctions.append((function, removable))\n        return function\n    return findsIDKeysDecorator\n\nremoveIDKeyFunctions = []\ndef removesIDKey(function):\n    removeIDKeyFunctions.append(function)\n    return function\n\n@makeOperator(\"an.remove_id_key\", \"Remove ID Key\", arguments = [\"String\", \"String\"],\n    redraw = True, confirm = True,\n    description = \"Remove this ID Key from the whole file\")\ndef removeIDKey(dataType, propertyName):\n    idKey = IDKey(dataType, propertyName)\n    for removeFunction in removeIDKeyFunctions:\n        removeFunction(idKey)\n    updateIdKeysList()\n","repo_name":"JacquesLucke/animation_nodes","sub_path":"animation_nodes/id_keys/existing_keys.py","file_name":"existing_keys.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":2231,"dataset":"github-code","pt":"81"} +{"seq_id":"37712543258","text":"# 91. Decode Ways\n# Similar problem: Jian Zhi Offer 46: translate a number into strings (translateNum)\n# Problem 46 always has a corresponding string, while this one may be impossible to decode and returns 0, e.g. inputs with leading zeros such as 1002\nclass Solution:\n    # def numDecodings(self, s: str) -> int:\n    #     if s.startswith('0'):\n    #         return 0\n    #\n    #     n = len(s)\n    #     dp = [0] * (n+1)\n    #     dp[0], dp[1] = 1, 1\n    #\n    #     for i in range(2, n+1):\n    #         # 'and' has higher precedence than 'or'\n    #         if s[i-1] == '0' and (s[i-2] > '2' or s[i-2] == '0'):\n    #             return 0\n    #         if s[i-2] == '0' or s[i-2:i] > '26':\n    #             dp[i] = dp[i-1]\n    #         elif s[i-1] != '0':\n    #             dp[i] = dp[i-1] + dp[i-2]\n    #         else:\n    #             dp[i] = dp[i-2]\n    #     return dp[n]\n\n    # # In the first two approaches, dp[i] is the total number of ways to decode the first i characters.\n    # Saying the first i characters are already decodable while still checking whether they can be decoded feels a bit off.\n    # A more precise statement: if the first i characters can be decoded, dp[i] is the total number of ways to decode them.\n    # def numDecodings(self, s: str) -> int:\n    #     if s.startswith('0'):\n    #         return 0\n    #\n    #     n = len(s)\n    #     dp = [1] * (n+1)\n    #\n    #     for i in range(2, n+1):\n    #         if s[i-1] == '0' and s[i-2] not in '12':  # this really does need 'not in'\n    #             return 0\n    #         if s[i-2:i] in ['10', '20']:  # only valid when the two digits are combined\n    #             dp[i] = dp[i-2]\n    #         elif '10' < s[i-2:i] <= '26':\n    #             dp[i] = dp[i-1] + dp[i-2]\n    #         else:  # '01' to '09', or > '26'\n    #             dp[i] = dp[i-1]\n    #     return dp[n]\n\n    # Use the possible two-character patterns as the dp cases; there are four outcomes: 0, dp[i-1], dp[i-2], dp[i-1] + dp[i-2]\n    # '00', '01'~'09', '10', '11'~'19', '20', '21'~'26', >'26'\n    # def numDecodings(self, s: str) -> int:\n    #     if s.startswith('0'):\n    #         return 0\n    #\n    #     n = len(s)\n    #     dp = [1] * (n+1)\n    #\n    #     for i in range(2, n+1):\n    #         if s[i-1] == '0' 
and s[i-2] not in '12':\n # return 0\n # if s[i-2:i] in ['10', '20']:\n # dp[i] = dp[i-2]\n # elif '10' < s[i-2:i] <= '26': # 已经剔除了 '20'\n # dp[i] = dp[i-1] + dp[i-2]\n # else:\n # dp[i] = dp[i-1]\n # return dp[n]\n\n\n # def numDecodings(self, s: str) -> int:\n # if s.startswith('0'):\n # return 0\n # n = len(s)\n # dp = [1] * (n+1)\n # for i in range(2, n+1):\n # if s[i-1] == '0' and s[i-2] not in '12':\n # return 0\n # if s[i-2:i] in ['10', '20']:\n # dp[i] = dp[i-2]\n # elif '10' < s[i-2:i] <= '26':\n # dp[i] = dp[i-1] + dp[i-2]\n # else:\n # dp[i] = dp[i-1]\n # return dp[n]\n\n # def numDecodings(self, s: str) -> int:\n # s = '0' + s\n # n = len(s)\n # dp = [1] * n\n # for i in range(1, n):\n # if s[i-1:i+1] in ['10', '20']:\n # dp[i] = dp[i-2]\n # elif '10' < s[i-1:i+1] <= '26':\n # dp[i] = dp[i-1] + dp[i-2]\n # elif '01' <= s[i-1:i+1] <= '09' or s[i] != '0' and s[i-1:i+1] > '26':\n # dp[i] = dp[i-1]\n # else:\n # return 0\n # return dp[n-1]\n\n # 官方题解,看起来更为简洁\n # 只分为 2 种情况,可以单独的,可以组合的\n # dp[i] = dp[i-1](单独,否则为 0) + dp[i-2](结合,否则为 0)\n # def numDecodings(self, s: str) -> int:\n # n = len(s)\n # dp = [1] + [0] * n\n # for i in range(1, n+1):\n # if s[i-1] != '0': # 至少 1 种解法\n # dp[i] += dp[i-1]\n # if i > 1 and s[i-2] != '0' and s[i-2:i] <= '26':\n # dp[i] += dp[i-2]\n # return dp[n]\n\n # def numDecodings(self, s: str) -> int:\n # if s.startswith('0'):\n # return 0\n # n = len(s)\n # dp = [0] * (n+1)\n # dp[0] = dp[1] = 1\n # for i in range(2, n+1):\n # c = s[i-2:i]\n # if c[1] == '0':\n # if c in ['10', '20']:\n # dp[i] = dp[i - 2]\n # else:\n # return 0\n # elif '01' <= c <= '09' or c > '26':\n # dp[i] = dp[i-1]\n # else:\n # dp[i] = dp[i-1] + dp[i-2]\n # return dp[n]\n\n def numDecodings(self, s: str) -> int:\n if s.startswith('0'):\n return 0\n\n n = len(s)\n dp = [0] * (n+1)\n dp[0] = dp[1] = 1\n for i in range(2, n+1):\n if s[i-1] != '0':\n dp[i] = dp[i-1]\n if '10' <= s[i-2:i] <= '26':\n dp[i] += dp[i-2] # 注意这点\n return dp[n]\n\n\nobj = Solution()\ns = \"12\"\nprint(obj.numDecodings(s))\n\ns = \"226\"\nprint(obj.numDecodings(s))\n\ns = \"0\"\nprint(obj.numDecodings(s)) # 输出 0\n\ns = \"1\"\nprint(obj.numDecodings(s))\n\ns = \"2\"\nprint(obj.numDecodings(s))\n\ns = \"12901\"\nprint(obj.numDecodings(s)) # 返回 0,不能解码\n\ns = \"200\"\nprint(obj.numDecodings(s))\n\ns = \"2101\"\nprint(obj.numDecodings(s)) # 输出 1\n\ns = \"10\"\nprint(obj.numDecodings(s)) # 输出 1\n\n# 正确结果\n# 2\n# 3\n# 0\n# 1\n# 1\n# 0\n# 0\n# 1\n# 1","repo_name":"BruceHi/leetcode","sub_path":"month12/numDecodings.py","file_name":"numDecodings.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71615950345","text":"import os\nimport unittest\nimport tempfile\nimport hiro\nimport jiracli.cache\n\n\nclass CacheTests(unittest.TestCase):\n def setUp(self):\n self.cache_dir = tempfile.mkdtemp()\n jiracli.cache.CACHE_DIR = self.cache_dir\n def test_cache_data_not_exist(self):\n data = jiracli.cache.CachedData(\"foobar\")\n self.assertTrue(data.get()==None)\n data.update({\"foo\":\"bar\"})\n self.assertEqual(jiracli.cache.CachedData(\"foobar\").get(),\n {\"foo\": \"bar\"})\n def test_cache_invalidate(self):\n with hiro.Timeline().freeze() as timeline:\n data = jiracli.cache.CachedData(\"foobar\")\n data.update({\"foo\":\"bar\"})\n timeline.forward(1 + 60*60*24)\n self.assertTrue(data.get()==None)\n\n def test_clear_cache(self):\n data = jiracli.cache.CachedData(\"foobar\")\n data.update({\"foo\":\"bar\"})\n 
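# the update() call above writes the entry to disk, so the cache file\n        # is expected to exist here before clear_cache() removes it below\n        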
self.assertTrue(os.path.isfile(data.path))\n jiracli.cache.clear_cache(data)\n self.assertFalse(os.path.isfile(data.path))\n jiracli.cache.clear_cache()\n self.assertFalse(os.path.isdir(self.cache_dir))\n\n def test_decorated(self):\n @jiracli.cache.cached(\"foo\")\n def func(a,b):\n return a+b\n\n self.assertEqual(func(1,2), func(1,2))\n self.assertNotEqual(func(1,2), func(3,4))\n\n self.assertEqual(len(os.listdir(self.cache_dir)), 2)\n","repo_name":"alisaifee/jira-cli","sub_path":"tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"81"} +{"seq_id":"72150306504","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom odoo.http import content_disposition, Controller, request, route, Response\nimport json\nimport logging\nfrom html import unescape\n\n_logger = logging.getLogger(__name__)\n\n\nclass ConsultaStockMateriales(Controller):\n\n @route('/check_connection', methods=['GET',], auth='public', csrf=False, cors=\"*\")\n def check_connection(self):\n res_data = {\"Connection\":\"OK\", \"TimeStamp\":str(datetime.now())}\n return json.dumps(res_data)\n\n @route('/restapi/private/stock_movement/confirmation', methods=['POST',], type='json', auth='public', csrf=False, cors=\"*\")\n def stock_movement_confirmation(self):\n res_data = {\"Connection\":\"OK\", \"TimeStamp\":str(datetime.now())}\n req = request.httprequest\n res_data[\"headers\"] = {}\n for head in req.headers.keys():\n res_data[\"headers\"][head] = req.headers.get(head)\n payload = json.loads(req.data.decode('utf-8'))\n _logger.info(\"DICT TYPE %s\", type(payload))\n new_log = request.env['integration.request.log'].sudo()\n request_id = request.env['integration.request'].sudo().search(\n [\n ('ref','=',req.headers.get('CODIGO_INTERFAZ','NONE')),\n ], limit=1)\n log = new_log.create_log(request_id=request_id.id if request else None,\n header=req.headers,\n payload=payload,\n traffic='inbound')\n return json.dumps(res_data)\n\n @route('/stock_consult', methods=[\"POST\"], auth='public', csrf=False, cors=\"*\")\n def consulta_stock_materiales(self, **kw):\n res_data = {\n \"ResponseConsultaStockMateriales_Inb\": {\n \"HEADER\": {\n \"ID_MENSAJE\": \"0 \",\n \"MENSAJE\": \"OK\",\n \"FECHA\": \"20210212\",\n \"SOCIEDAD\": \"OK\",\n \"LEGADO\": \"BUS\",\n \"CODIGO_INTERFAZ\": \"ITD_009\"\n },\n \"CosulMat\": [\n {\n \"Centro\": \"2301\",\n \"Almacen\": \"PT01\",\n \"TabMate\": [\n {\n \"Material\": \"450229\",\n \"Stock\": \"1784.000 \",\n \"Codigo\": \"0 \",\n \"Mensaje\": \"OK\"\n },\n {\n \"Material\": \"450230\",\n \"Stock\": \"314.000 \",\n \"Codigo\": \"0 \",\n \"Mensaje\": \"OK\"\n }\n ]\n },\n {\n \"Centro\": \"9001\",\n \"Almacen\": \"PT01\",\n \"TabMate\": {\n \"Material\": \"ET11754\",\n \"Stock\": \"91.000 \",\n \"Codigo\": \"0 \",\n \"Mensaje\": \"OK\"\n }\n }\n ]\n }\n }\n return json.dumps(res_data)\n\n @route('/restapi/private/account_move/return', methods=['POST'], type='json', auth='public', csrf=False, cors=\"*\")\n def stock_movement_confirmation(self):\n res_data = {\"Connection\":\"OK\", \"TimeStamp\":str(datetime.now())}\n req = request.httprequest\n res_data[\"headers\"] = {}\n for head in req.headers.keys():\n res_data[\"headers\"][head] = req.headers.get(head)\n payload = json.loads(req.data.decode('utf-8'))\n _logger.info(\"DICT TYPE %s\", type(payload))\n new_log = request.env['integration.request.log'].sudo()\n request_id = 
request.env['integration.request'].sudo().search(\n [\n ('ref','=',req.headers.get('CODIGO_INTERFAZ','NONE')),\n ], limit=1)\n log = new_log.create_log(request_id=request_id.id if request else None,\n header=req.headers,\n payload=payload,\n traffic='inbound')\n return json.dumps(res_data)\n\n# ejemplo respuesta sap\n# {\n# \"SOCIEDAD\": \"A031\",\n# \"LEGADO\": \"TRUCK\",\n# \"ID_MENSAJE\": \"2c7f616d-d99e-3aea-90c0-acc89df336b0\",\n# \"FECHA\": 20210424,\n# \"MENSAJE\": \"Registro asiento contable\",\n# \"CODIGO_INTERFAZ\": \"RTR_038\",\n# \"respuesta\": [\n# {\n# \"id_documento\": \"SE210228162703000O\",\n# \"id_referencia\": {\n# \"codigo\": \"P-001\",\n# \"mensaje\": \"Documento ya se encuentra creado en la BD. Documento: 3200010401\"\n# }\n# }\n# ]\n# }\n\n","repo_name":"marcobustamanteab/odoo-pos","sub_path":"src/custom-addons/ccu_integration/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32241911691","text":"# -*- encoding: utf-8 -*-\n'''\n@File: main.py\n@Describe: \n@Create Time: 2020/09/17 16:56:32\n@Author: Lookback\n@Version: 1.0\n'''\nfrom es_sender import es_sender\nimport parse as p\nimport argparse\nimport configparser\nimport os\nimport commons\n\n\ndef get_args():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '--prefix', '-pre', help='Decide the prefix of parsed log file name')\n arg_parser.add_argument(\n '--file', '-f', help='Define origin log file to parse.')\n arg_parser.add_argument(\n '--dir', '-d', help='Define origin log document to parse.')\n arg_parser.add_argument(\n '--parse', '-p', help='Parsing.', action=\"store_true\") \n arg_parser.add_argument(\n '--send', '-s', help='Send to ES.', action=\"store_true\")\n arg_parser.add_argument(\n \"--logtype\",help='See rule_list.py to choose specific log type.')\n arg_parser.add_argument(\"--test\",'-t',help=\"Run testing.\", action=\"store_true\")\n args = arg_parser.parse_args()\n return args\n\n\ndef main():\n script_path=os.path.dirname(\n os.path.realpath(__file__))\n conf_file=os.path.join(script_path, 'config.conf')\n config = configparser.ConfigParser()\n config.read(conf_file)\n args = get_args()\n # args.prefix\n # args.format\n if args.test:\n test_path=os.path.join(script_path,'testing/example.log')\n with open(test_path) as infile:\n example = infile.readline()\n test_p = p.log_parser('splunk')\n result = test_p.entry_parser(example)\n print(example)\n # print log\n commons.print_result(result)\n print('\\n\\n====response_body==========================')\n\n print(result['response_body'])\n print('====response_body End==========================\\n')\n print('====full_request==========================')\n print(result['full_request'])\n print('====full_request End==========================')\n return\n if not args.file:\n print('Please define file path with argument --file/f. 
Use --help to see details.')\n return \n log_type=''\n if config['parser']['log_type']:\n log_type=config['parser']['log_type']\n elif args.logtype:\n log_type=args.logtype\n else:\n log_type=p.get_file_log_type(args.file)\n \n \n # args.parse\n if args.parse:\n new_parser=p.log_parser(log_type)\n parsed_file = new_parser.file_parser(args.file)\n else:\n parsed_file=args.file\n # args.send\n if args.send:\n sender = es_sender(parsed_file,conf_file)\n sender.send_file()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kanitan/LogComb","sub_path":"logcomb.py","file_name":"logcomb.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70178599626","text":"from rest_framework import serializers\nfrom .......models.tax_map_control_model import TaxMapControl\nfrom ..serializers.assessment_serializer import DisplayAssessmentSerializer\n\nclass DisplayTaxMapControlSerializer(serializers.ModelSerializer):\n class Meta:\n model = TaxMapControl\n fields = ['id','prov_city', 'prov_city_index_no', 'mun_city', 'mun_city_index_no', 'barangay', \n 'barangay_index_no', 'section_index_no',\n ############ Static ######################\n 'date_modified'\n ]\n \n def to_representation(self, instance):\n rep = super().to_representation(instance)\n rep['assessments'] = DisplayAssessmentSerializer(instance.assessment_set.all(),many=True).data\n rep['total_assessments'] = instance.assessment_set.count()\n return rep\n","repo_name":"Ronuel-R/Digital_Dexterity_Backend","sub_path":"digital_dex_admin_web/versions/v1p0/features/tax_map_control/display_tax_map_control/serializers/display_tax_map_control_serializer.py","file_name":"display_tax_map_control_serializer.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71300732105","text":"'''\r\nCreated on May 25, 2012\r\n\r\n@author: Alex Marich\r\n'''\r\n\r\ntax_Rate = .065;\r\n\r\n'''\r\nPython allows you to set multiple variable values on the same line. In this case, \r\nwe are setting both item_Price and sum equal to 0. We do this by seperating the two\r\nvariable names with a comma(','), use one equal sign and seperate the values of each \r\nvariable with a comma. The value of the first variable is going to be the value that\r\nyou stated first, and the second variables value will be the value that you stated\r\nsecond.\r\n'''\r\nitem_Price, sum = 0, 0;\r\n\r\n'''\r\nVariables that defined are capable of being redefined at almost any time you need \r\nit to change. In this case, we took the original value of item_Price, 0, and \r\nredeclared the value to be 5.5. \r\n'''\r\nitem_Price = 5.5;\r\n'''\r\nThere are different ways we can accomplish adding a number to an already defined\r\nvariable. The variable sum was defined as being 0, however we want to take the \r\nold value of sum and add an extra number on top of it so that the value will \r\nincrease by the number that was specified. You can accomplish this by just \r\nre-defining the value of sum in a mathematical expression as shown below.\r\n'''\r\nsum = sum + item_Price;\r\n\r\n'Giving item_Price a new value'\r\nitem_Price = 7.89;\r\n'''\r\nAdditionally, you can take the variable sum and add, subtract, multiply, and divide\r\nit by itself by just changing the way the variable defines itself. 
In order to \r\naccomplish this you can do:\r\nsum += item_Price;\r\nThis will take the value of sum and add the value of item_Price to it and set the\r\nvariable sum back to the sum of those two values.\r\n'''\r\nsum += item_Price;\r\n\r\n'Find the price of both items including tax'\r\nfinal_Price = sum*(1 + tax_Rate);\r\n\r\n'Print out the purchase with tax'\r\nprint(\"Your purchase cost \" + str(final_Price) + \" with tax.\")","repo_name":"youngclick/advGIS","sub_path":"Purchase2.py","file_name":"Purchase2.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10581327085","text":"# '''\n# A small Tensorflow XLA benchmark\n# \n# Original Author: Aymeric Damien\n# Project: https://github.com/aymericdamien/TensorFlow-Examples/\n# '''\n\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.framework import dtypes\n\nimport tensorflow.compat.v1.nn.rnn_cell as rnn\n\nimport time\n\ntf.compat.v1.disable_eager_execution()\n\nmnist = tf.keras.datasets.mnist.load_data()\n\n\n# '''\n# To classify images using a recurrent neural network, we consider every image\n# row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then\n# handle 28 sequences of 28 steps for every sample.\n# '''\n\n# In[2]:\n\n# Parameters\nlearning_rate = 0.001\ntraining_iters = 10\nbatch_size = 128\ndisplay_step = 10\n\n# Network Parameters\nn_input = 28 # MNIST data input (img shape: 28*28)\nn_steps = 28 # timesteps\nn_hidden = 128 # hidden layer num of features\nn_classes = 10 # MNIST total classes (0-9 digits)\n\n# tf Graph input\nx = tf.compat.v1.placeholder(\"float\", [None, n_steps, n_input])\ny = tf.compat.v1.placeholder(\"float\", [None, n_classes])\n\n# Define weights\nweights = {\n    'out': tf.compat.v1.Variable(tf.compat.v1.random_normal([n_hidden, n_classes]))\n    }\nbiases = {\n    'out': tf.compat.v1.Variable(tf.compat.v1.random_normal([n_classes]))\n    }\n\n\n# In[3]:\n\ndef RNN(x, weights, biases):\n\n    # Prepare data shape to match `rnn` function requirements\n    # Current data input shape: (batch_size, n_steps, n_input)\n    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)\n    x = tf.transpose(x, [1, 0, 2])\n    # Reshaping to (n_steps*batch_size, n_input)\n    x = tf.reshape(x, [-1, n_input])\n    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n    x = tf.split(x, n_steps, 0)\n\n    # Define a lstm cell with tensorflow\n    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n\n    # Get lstm cell output\n    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n\n    # Linear activation, using rnn inner loop last output\n    return tf.matmul(outputs[-1], weights['out']) + biases['out']\n\npred = RNN(x, weights, biases)\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n\n\ndef benchmark(use_xla, use_gpu):\n\n    # Launch the graph\n    config = tf.ConfigProto(\n        device_count = {'GPU': 0 if not use_gpu else 1}\n    )\n\n    if use_xla:\n        config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n\n    with tf.Session(config=config) as sess:\n        sess.run(init)\n        step = 1\n        # Keep 
training until reach max iterations\n while step * batch_size < training_iters:\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Reshape data to get 28 seq of 28 elements\n batch_x = batch_x.reshape((batch_size, n_steps, n_input))\n # Run optimization op (backprop)\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n if step % display_step == 0:\n # Calculate batch accuracy\n acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n # Calculate batch loss\n loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n print (\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n step += 1\n\n # Calculate accuracy for 128 mnist test images\n test_len = 128\n test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))\n test_label = mnist.test.labels[:test_len]\n\n t_start = time.time()\n total_steps = 500\n for i in range(total_steps):\n outs = sess.run(accuracy, feed_dict={x: test_data, y: test_label})\n tdiff = time.time() - t_start\n print( \"{} inference steps took: {:.2f}\".format(total_steps, tdiff))\n\nbenchmark(True, True)\nbenchmark(False, True)\nbenchmark(True, False)\nbenchmark(False, False)\n\n# 500 inference steps took: 1.51\n# 500 inference steps took: 2.20\n# 500 inference steps took: 5.35\n# 500 inference steps took: 5.35\n","repo_name":"backyes/experiment_tvm","sub_path":"Apple-M1-BERT/xla_tutorial/cans/xla_benchmark.py","file_name":"xla_benchmark.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32337016641","text":"#Escribir el código para Invertir un array de string, por ejemplo si la entrada es\r\n#[\"1\",\"4\",\"3\",\"2\"] la salida debe ser [\"2\",\"3\",\"4\",\"1\"]. 
El código debe cumplir con los\r\n#siguientes parámetros:\r\n    # •Se debe solicitar la cantidad de elementos del array.\r\n    # •Se debe solicitar por pantalla el valor de cada uno de los elementos\r\n    #•Se debe crear una funcion que reciba el array y retorne el array invertido.\r\n    #•Se debe mostrar en pantalla el array original y el invertido array\r\nawarded = []\r\nfor i in range(int(input(\"cuantos numeros desea meter a la lista?\"))):\r\n    awarded.append(int(input(\"Introduce un número : \")))\r\nawarded.sort()\r\nprint(\"Los números ingresados \" + str(awarded))\r\nidx = len(awarded) - 1\r\nnewList = []\r\nwhile (idx >= 0):\r\n    newList.append(awarded[idx])\r\n    idx = idx - 1\r\n\r\nprint(newList)\r\n\r\n\r\n\r\n","repo_name":"nickvillamizar/parcial-2-corte","sub_path":"parcial corte 2 programacion,py.py","file_name":"parcial corte 2 programacion,py.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32999620215","text":"from django import forms\nfrom django.forms import TextInput\nfrom .models import ClassOfBusiness\n\nclass classOfBusinessForm(forms.ModelForm):\n    class Meta:\n        model = ClassOfBusiness\n        fields = [\"classOfBusiness_text\"]\n        labels = {\"classOfBusiness_text\": \"Class of Business name\"}\n        widgets = {\n            'classOfBusiness_text': TextInput(attrs={\n                'class': \"form-control\",\n                'placeholder': \"Type here...\"\n            }) \n        }\n","repo_name":"Lukaszesque/BusinessWebsite","sub_path":"settings/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43436220267","text":"#-*- coding: utf-8 -*-\n# Bilingual dictionary: take a phrase in one\n# language and translate it one word at a time\n# into another. 
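Words missing from the\n# dictionary are passed through unchanged. 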
In this case, we're using:\n# Spanish\n#\nfrom __future__ import print_function\ntry:\n input = raw_input\nexcept NameError:\n pass\n\ntranslations = {\n \"merry\": \"feliz\",\n \"christmas\": \"navidad\",\n \"and\": \"y\",\n \"happy\": \"feliz\",\n \"new\": \"nuevo\",\n \"year\": \"año\",\n}\n\nphrase = input(\"Please enter a phrase: \")\n\nspanish_phrase = []\nfor word in phrase.split():\n word = word.lower()\n spanish_word = translations.get(word, word)\n spanish_phrase.append(spanish_word)\n\nseparator = \" \"\nspanish_phrase = separator.join(spanish_phrase)\nprint(\"In Spanish, that's:\", spanish_phrase)\n","repo_name":"dcpylab/classes","sub_path":"2016-03/26/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"81"} +{"seq_id":"40453212099","text":"import argparse\nfrom socketserver import ThreadingUDPServer\nimport helper_modules.combine_expanded_indels_and_create_csv as combine_expanded_indels\nimport helper_modules.create_result_vcf as write_result\nimport helper_modules.convert_vcf_to_csv as convert_vcf\nimport logging\nimport os\nimport pandas as pd\nimport time\nimport variant_scoring.score_variants as predict\nimport variant_prioritization.prioritize_variants as prio\nimport yaml\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description = \"AIdiva -- Augmented Intelligence Disease Variant Analysis\")\n parser.add_argument(\"--snp_vcf\", type=str, dest=\"snp_vcf\", metavar=\"snp.vcf\", required=True, help=\"VCF file with the annotated SNP variants [required]\")\n parser.add_argument(\"--indel_vcf\", type=str, dest=\"indel_vcf\", metavar=\"indel.vcf\", required=True, help=\"VCF file with the annotated (only basic annotation) InDel variants [required]\")\n parser.add_argument(\"--expanded_indel_vcf\", type=str, dest=\"expanded_indel_vcf\", metavar=\"expanded_indel.vcf\", required=True, help=\"VCF file with the annotated expanded InDel variants [required]\")\n parser.add_argument(\"--out_prefix\", type=str, dest=\"out_prefix\", metavar=\"result\", required=True, help=\"Prefix that is used to save the results [required]\")\n parser.add_argument(\"--workdir\", type=str, dest=\"workdir\", metavar=\"workdir/\", required=True, help=\"Path to the working directory (here all results are saved) [required]\")\n parser.add_argument(\"--hpo_list\", type=str, dest=\"hpo_list\", metavar=\"hpo.txt\", required=False, help=\"TXT file containing the HPO terms reported for the current patient\")\n parser.add_argument(\"--gene_exclusion\", type=str, dest=\"gene_exclusion\", metavar=\"gene_exclusion.txt\", required=False, help=\"Tab separated file containing the genes to exclude in the analysis. 
Genes are assumed to be in the first column.\")\n parser.add_argument(\"--family_file\", type=str, dest=\"family_file\", metavar=\"family.txt\", required=False, help=\"TXT file showing the sample relations of the current data\")\n parser.add_argument(\"--family_type\", type=str, dest=\"family_type\", metavar=\"SINGLE\", required=False, help=\"In case of multisample data the kind of sample relation [SINGLE, TRIO, MULTI]\")\n parser.add_argument(\"--config\", type=str, dest=\"config\", metavar=\"config.yaml\", required=True, help=\"Config file specifying the parameters for AIdiva [required]\")\n parser.add_argument(\"--skip_db_check\", dest=\"skip_db_check\", action=\"store_true\", required=False, help=\"Flag to skip database (ClinVar, HGMD) lookup\")\n parser.add_argument(\"--only_top_results\", dest=\"only_top_results\", action=\"store_true\", required=False, help=\"Report only the top 25 variants as result\")\n parser.add_argument(\"--threads\", type=int, dest=\"threads\", metavar=\"1\", required=False, help=\"Number of threads to use (default: 1)\")\n parser.add_argument(\"--log_level\", type=str, dest=\"log_level\", metavar=\"INFO\", required=False, help=\"Define logging level, if unsure just leave the default [DEBUG, INFO, WARN, ERROR, CRITICAL] (default: INFO)\")\n args = parser.parse_args()\n \n if args.log_level is not None:\n if args.log_level == \"DEBUG\":\n log_level = logging.DEBUG\n elif args.log_level == \"INFO\":\n log_level = logging.INFO\n elif args.log_level == \"WARN\":\n log_level = logging.WARN\n elif args.log_level == \"ERROR\":\n log_level = logging.ERROR\n elif args.log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n else:\n log_level = logging.INFO\n else:\n log_level = logging.INFO\n\n # set up logger\n timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n logging.basicConfig(filename=str(args.workdir + \"/\" + args.out_prefix + \"_aidiva_\" + timestamp + \".log\"),\n filemode='a',\n format='%(asctime)s -- %(name)s - %(levelname)s - %(message)s',\n datefmt='%H:%M:%S',\n level=log_level)\n logger = logging.getLogger()\n logger.info(\"Running AIdiva on annotated data\")\n logger.info(\"Start program\")\n\n\n if args.threads is not None:\n num_cores = int(args.threads)\n else:\n num_cores = 1\n\n # parse configuration file\n with open(args.config, \"r\") as config_file:\n configuration = yaml.load(config_file, Loader=yaml.SafeLoader)\n \n working_directory = args.workdir\n\n if not working_directory.endswith(\"/\"):\n working_directory = working_directory + \"/\"\n\n # parse input files\n snp_vcf = args.snp_vcf\n indel_vcf = args.indel_vcf\n expanded_indel_vcf = args.expanded_indel_vcf\n\n # load SNP ML model\n scoring_model_snp = configuration[\"Analysis-Input\"][\"scoring-model-snp\"]\n \n # load InDel ML model\n scoring_model_indel = configuration[\"Analysis-Input\"][\"scoring-model-indel\"]\n\n # load internal parameters\n internal_parameter_dict = configuration[\"Internal-Parameters\"]\n\n # parse output files\n output_filename = args.out_prefix\n\n # parse disease and inheritance information\n if args.hpo_list is not None:\n hpo_file = args.hpo_list\n else:\n hpo_file = None\n\n if args.gene_exclusion is not None:\n gene_exclusion_file = args.gene_exclusion\n else:\n gene_exclusion_file = None\n\n if (args.family_file is not None) and (args.family_type is not None):\n family_file = args.family_file\n family_type = args.family_type\n else:\n family_file = None\n family_type = \"SINGLE\"\n\n only_top_results = args.only_top_results\n \n skip_db_check = 
args.skip_db_check\n\n \n\n allele_frequency_list = configuration[\"Model-Features\"][\"allele-frequency-list\"]\n feature_list = configuration[\"Model-Features\"][\"feature-list\"]\n assembly_build = configuration[\"Assembly-Build\"]\n ref_path = configuration[\"Analysis-Input\"][\"ref-path\"]\n\n # convert splitted input data to vcf and annotate\n input_data_snp = convert_vcf.convert_vcf_to_pandas_dataframe(snp_vcf, False, num_cores)\n input_data_indel = convert_vcf.convert_vcf_to_pandas_dataframe(indel_vcf, True, num_cores)\n input_data_expanded_indel = convert_vcf.convert_vcf_to_pandas_dataframe(expanded_indel_vcf, True, num_cores)\n\n logger.debug(f\"Condition-Check: {input_data_snp.dropna(how='all').empty}, {input_data_indel.dropna(how='all').empty}, {input_data_expanded_indel.dropna(how='all').empty}\")\n logger.debug(f\"Condition: {(not input_data_snp.dropna(how='all').empty) or ((not input_data_indel.dropna(how='all').empty) and (not input_data_expanded_indel.dropna(how='all').empty))}\")\n\n if (not input_data_snp.dropna(how='all').empty) or ((not input_data_indel.dropna(how='all').empty) and (not input_data_expanded_indel.dropna(how='all').empty)):\n if ((not input_data_indel.empty) and (not input_data_expanded_indel.empty)):\n logger.info(\"Combine InDel variants ...\")\n input_data_combined_indel = combine_expanded_indels.parallelized_indel_combination(input_data_indel, input_data_expanded_indel, feature_list, num_cores)\n \n else:\n logger.info(\"No InDel variants given move on to SNP processing!\")\n input_data_combined_indel = pd.DataFrame()\n\n # predict pathogenicity score\n logger.info(\"Score variants ...\")\n \n if not input_data_snp.dropna(how='all').empty:\n predicted_data_snp = predict.perform_pathogenicity_score_prediction(scoring_model_snp, input_data_snp, allele_frequency_list, feature_list, num_cores)\n \n else:\n logger.info(\"No SNP variants, skip SNP prediction!\")\n predicted_data_snp = pd.DataFrame()\n\n if not input_data_combined_indel.dropna(how='all').empty:\n predicted_data_indel = predict.perform_pathogenicity_score_prediction(scoring_model_indel, input_data_combined_indel, allele_frequency_list, feature_list, num_cores)\n \n else:\n logger.info(\"No InDel variants, skip InDel prediction!\")\n predicted_data_indel = pd.DataFrame()\n\n if (not predicted_data_snp.dropna(how='all').empty) and (not predicted_data_indel.dropna(how='all').empty):\n predicted_data = pd.concat([predicted_data_snp, predicted_data_indel])\n predicted_data.sort_values([\"CHROM\", \"POS\"], ascending=[True, True], inplace=True)\n predicted_data.reset_index(inplace=True, drop=True)\n predicted_data = predicted_data[predicted_data_snp.columns]\n\n elif (predicted_data_snp.dropna(how='all').empty) and (not predicted_data_indel.dropna(how='all').empty):\n predicted_data = predicted_data_indel\n\n elif (predicted_data_indel.dropna(how='all').empty) and (not predicted_data_snp.dropna(how='all').empty):\n predicted_data = predicted_data_snp\n\n else:\n logger.critical(\"Something went terribly wrong!\")\n\n \n\n # prioritize and filter variants\n logger.info(\"Prioritize variants and finalize score ...\")\n prioritized_data = prio.prioritize_variants(predicted_data, internal_parameter_dict, ref_path, num_cores, assembly_build, skip_db_check, family_file, family_type, hpo_file, gene_exclusion_file)\n\n if only_top_results:\n prioritized_data[prioritized_data[\"FILTER_PASSED\"] == 1].head(n=25).to_csv(str(working_directory + output_filename + \"_filtered.tsv\"), sep=\"\\t\", index=False)\n 
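# in the top-results mode only this filtered top-25 TSV is written; the\n            # full VCF/TSV exports in the else branch below are skipped\n            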
logger.info(\"Only 25 best variants are reported as result!\")\n else:\n write_result.write_result_vcf(prioritized_data, str(working_directory + output_filename + \".vcf\"), assembly_build, bool(family_type == \"SINGLE\"))\n write_result.write_result_vcf(prioritized_data[prioritized_data[\"FILTER_PASSED\"] == 1], str(working_directory + output_filename + \"_filtered.vcf\"), assembly_build, bool(family_type == \"SINGLE\"))\n prioritized_data = prioritized_data.rename(columns={\"CHROM\": \"#CHROM\"})\n prioritized_data.to_csv(str(working_directory + output_filename + \".tsv\"), sep=\"\\t\", index=False)\n prioritized_data[prioritized_data[\"FILTER_PASSED\"] == 1].to_csv(str(working_directory + output_filename + \"_filtered.tsv\"), sep=\"\\t\", index=False)\n logger.info(\"Pipeline successfully finsished!\")\n\n else:\n write_result.write_result_vcf(None, str(working_directory + output_filename + \".vcf\"), assembly_build, bool(family_type == \"SINGLE\"))\n write_result.write_result_vcf(None, str(working_directory + output_filename + \"_filtered.vcf\"), assembly_build, bool(family_type == \"SINGLE\"))\n logger.warn(\"The given input files were empty!\")\n","repo_name":"imgag/aiDIVA","sub_path":"aidiva/run_AIdiva.py","file_name":"run_AIdiva.py","file_ext":"py","file_size_in_byte":10685,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"23847912127","text":"score = []\np = 0\nf = 0\n\nnum = int(input(\"수강 과목 수 입력 :\"))\n\nfor i in range(num):\n print(\"score\", i+1, \":\", end = \"\")\n score.append(int(input()))\n\nfor i in score:\n if(i >= 80):\n p = p + 1\n else:\n f = f + 1\nprint(\"--------------------\")\nprint(\"합격과목 수 :\", p)\nprint(\"불합격과목 수 :\", f)","repo_name":"somflower/2022_summer","sub_path":"Week4/HW4_1.py","file_name":"HW4_1.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4374297258","text":"\"\"\"\nradix sort\ncomplexity: O(nk + n) . 
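A stable bucket pass over all elements runs once per digit position. 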
n is the size of input list and k is the digit length\nof the number\n\"\"\"\n\n\ndef radix_sort(arr: list, simulation=False):\n position = 1\n max_number = max(arr)\n\n iteration = 0\n if simulation:\n print(\"iteration\", iteration, \":\", *arr)\n\n while position < max_number:\n queue_list = [list() for _ in range(10)]\n\n for num in arr:\n digit_number = num // position % 10\n queue_list[digit_number].append(num)\n\n index = 0\n for numbers in queue_list:\n for num in numbers:\n arr[index] = num\n index += 1\n\n if simulation:\n iteration = iteration + 1\n print(\"iteration\", iteration, \":\", *arr)\n\n position *= 10\n return arr\n\n\nif __name__ == '__main__':\n import random\n\n lst = random.sample(range(10 ** 4), k=10 ** 1)\n lst_sorted = radix_sort(lst, simulation=True)\n print(all(\n map(lambda x: x[0] <= x[1],\n zip(lst_sorted[:-1],\n lst_sorted[1:]\n )\n )\n )\n )\n","repo_name":"Irlirion/data_structures_and_algorithms","sub_path":"sort/radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6175749910","text":"# https://www.hackinscience.org/exercises/student-class:\n# Student, School, and City have a name attribute, given at initialization time.\n# A Student have an add_exam(grade) method, recording a new grade for him, as a float.\n# A School have an add_student(student) method.\n# A City have an add_school(school) method.\n# Student, School, and City have a get_mean() method giving:\n# For the Student, the average of its results.\n# For the School, the average of the students averages.\n# For the City the average of the School averages.\n# School have a get_best_student() method, returning the best Student.\n# Cities have a get_best_school() and a get_best_student() methods, returning respectively a School and a Student.\n\nclass Student:\n def __init__(self, name:str) -> None:\n \"\"\"Initialize the Student with a name.\"\"\"\n self.name = name\n self.grades: list[int] = []\n\n def add_exam(self, grade: int) -> None:\n \"\"\"Record a new grade for the student.\"\"\"\n self.grades.append(grade)\n\n def get_mean(self) -> float:\n \"\"\"Return the average of the student's grades.\"\"\"\n return round(sum(self.grades)/len(self.grades), 2) if self.grades else 0.0\n\nclass School:\n def __init__(self, name:str) -> None:\n \"\"\"Initialize the School with a name.\"\"\"\n self.name = name\n self.students: list[Student] = []\n\n def add_student(self, student: Student) -> None:\n \"\"\"Add a student to the school.\"\"\"\n self.students.append(student)\n\n def get_mean(self) -> float:\n \"\"\"Return the average of the students' averages.\"\"\"\n total_avg = sum(student.get_mean() for student in self.students)\n return round(total_avg/len(self.students), 2) if self.students else 0.0\n\n def get_best_student(self) -> str:\n \"\"\"Return the student with the highest average grade.\"\"\"\n if not self.students:\n return None\n best_student: Student = max(self.students, key=lambda student: student.get_mean())\n return best_student\n\nclass City:\n def __init__(self, name:str) -> None:\n \"\"\"Initialize the City with a name.\"\"\"\n self.name = name\n self.schools: list[School] = []\n\n def add_school(self, school: School) -> None:\n \"\"\"Add a school to the city.\"\"\"\n self.schools.append(school)\n\n def get_mean(self) -> float:\n \"\"\"Return the average of the schools' averages.\"\"\"\n total_avg = sum(school.get_mean() for school in 
self.schools)\n return round(total_avg/len(self.schools), 2) if self.schools else 0.0\n\n def get_best_school(self) -> str:\n \"\"\"Return the school with the highest average grade.\"\"\"\n if not self.schools:\n return None\n best_school: School = max(self.schools, key=lambda school: school.get_mean())\n return best_school\n\n def get_best_student(self) -> str:\n \"\"\"Return the best student among all schools in the city.\"\"\"\n if not self.schools:\n return None\n best_student: Student = max((student for school in self.schools for student in school.students),\n key=lambda student: student.get_mean(), default=None)\n return best_student if best_student else None\n\ndef test_classes():\n # Test Student class\n s1 = Student(\"John\")\n s1.add_exam(90)\n s1.add_exam(100)\n s1.add_exam(95)\n assert s1.get_mean() == 95\n\n s2 = Student(\"Jane\")\n s2.add_exam(85)\n s2.add_exam(95)\n s2.add_exam(100)\n assert s2.get_mean() == 93.33\n\n # Test School class\n school1 = School(\"High School\")\n school1.add_student(s1)\n school1.add_student(s2)\n assert school1.get_mean() == 94.16 # (95 + 93.33) / 2\n assert school1.get_best_student().name == \"John\" # s1 has higher average\n\n s3 = Student(\"Johnathon\")\n s3.add_exam(73)\n s3.add_exam(100)\n s3.add_exam(95)\n assert s3.get_mean() == 89.33\n\n s4 = Student(\"Janeka\")\n s4.add_exam(85)\n s4.add_exam(95)\n s4.add_exam(96)\n assert s4.get_mean() == 92\n\n # Test School class\n school2 = School(\"High School South\")\n school2.add_student(s3)\n school2.add_student(s4)\n assert school2.get_mean() == 90.66 # (95 + 93.33) / 2\n assert school2.get_best_student().name == \"Janeka\" # s1 has higher average\n\n # Test City class\n city = City(\"City1\")\n city.add_school(school1)\n city.add_school(school2)\n assert city.get_mean() == 92.41 # there's only one school\n assert city.get_best_school().name == \"High School\" # only one school in city\n assert city.get_best_student().name == \"John\" # s1 has higher average\n print(\"All tests passed!\")\n\ndef main() -> None:\n test_classes()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jasn-armstrng/hackinscience-problems-solutions","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33665260720","text":"from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\n\nfrom app import schemas, services\nfrom app.api import deps\nfrom app.core.security import verify_api_key\nfrom app.models import Message\nfrom app.schemas.api import ErrorResponse\n\nrouter = APIRouter()\nresponses = {\n 200: {\"description\": \"Success response.\", \"model\": schemas.Message},\n 400: {\n \"description\": \"Bad request, check required request body.\",\n \"model\": ErrorResponse,\n },\n 401: {\"description\": \"Not authorized\", \"model\": ErrorResponse},\n 404: {\n \"description\": \"Message with given id does not exist.\",\n \"model\": ErrorResponse,\n },\n}\n\n\n@router.get(\"/{id}\", response_model=schemas.Message, responses=responses)\nasync def get_message(id: int, db: Session = Depends(deps.get_db)) -> Message:\n \"\"\"\n Retrieves message by ID. 
Increments message's views count.\n \"\"\"\n\n # increment view count\n services.messages.increment_view_count_by_id(db=db, _id=id)\n\n return services.messages.get_by_id(db=db, _id=id)\n\n\n@router.post(\"\", response_model=schemas.Message, responses=responses)\nasync def create_message(\n *,\n db: Session = Depends(deps.get_db),\n message_create_dto: schemas.MessageCreateDto,\n _: bool = Depends(verify_api_key)\n) -> Message:\n \"\"\"\n Creates new message.\n \"\"\"\n return services.messages.create(db=db, message_create_dto=message_create_dto)\n\n\n@router.put(\"/{id}\", response_model=schemas.Message, responses=responses)\nasync def update_message(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n message_update_dto: schemas.MessageUpdateDto,\n _: bool = Depends(verify_api_key)\n) -> Message:\n \"\"\"\n Updates existing message with given ID.\n \"\"\"\n return services.messages.update_by_id(\n db=db, _id=id, message_update_dto=message_update_dto\n )\n\n\n@router.delete(\"/{id}\", response_model=schemas.Message, responses=responses)\nasync def delete_message(\n *, db: Session = Depends(deps.get_db), id: int, _: bool = Depends(verify_api_key)\n) -> Message:\n \"\"\"\n Deletes existing message with given ID.\n \"\"\"\n return services.messages.delete_by_id(db=db, _id=id)\n","repo_name":"BartlomiejRasztabiga/evox","sub_path":"app/api/api_v1/endpoints/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31624949515","text":"def product(p, q):\n r = []\n for i in range(len(q)):\n r.append(p[q[i]-1])\n return r\n #return tuple(q[p[i] - 1] for i in range(len(p)))\n\n\ndef inverse(p):\n q = [0] * len(p)\n\n for i in range(len(p)):\n q[p[i]-1] = i + 1\n return q\n\n\ndef sift(tableau, p):\n IDENTITY = tuple(range(1, len(p)+1))\n q = p\n\n while q != IDENTITY:\n i = min(x for x in range(len(q)) if q[x] != x+1)\n j = q[i] - 1\n if tableau[i][j] == IDENTITY:\n tableau[i][j] = q\n return q\n else:\n q = product(q, inverse(tableau[i][j]))\n return None\n\n\ndef composition(permutations, r):\n IDENTITY = tuple(range(1, len(r)+1))\n tableau = [[IDENTITY] * len(r) for _ in range(len(r))]\n \n # Tamisage initial / Initial sift\n for p in permutations:\n sift(tableau, p)\n \n # Remplir tableau / Fill table\n to_sift = [(p, q) for p in permutations for q in permutations]\n \n while len(to_sift) > 0:\n p, q = to_sift.pop()\n q = sift(tableau, product(p, q))\n \n if q is not None:\n # q est une nouvelle permutation ajoutee au tableau\n to_sift.extend([(p, q) for p in permutations])\n to_sift.extend([(q, p) for p in permutations])\n \n # Genere r? 
/ Generates r?\n return sift(tableau, r) is None\n \n# Exemple / Example\n# a = tuple([2, 1, 3, 4, 5]) # (12)(3)(4)(5)\n# b = tuple([2, 3, 4, 5, 1]) # (12345)\n# r = tuple([2, 1, 4, 5, 3]) # (12)(345)\n# print(appartenance_intelligent(set([a, b]), r))\n\nf51 = tuple([3, 3, 5, 5, 7, 7, 1, 1, 11, 12, 12, 1])\nf52 = tuple([3, 3, 1, 1, 5, 5, 7, 7, 7, 4, 12, 11])\nf53 = tuple([1, 1, 3, 3, 5, 5, 9, 7, 9, 9, 10, 12])\ns5 = set([f51, f52, f53])\nf5 = tuple([7, 7, 3, 3, 1, 1, 5, 5, 12, 11, 10, 4])\n\n\nprint(composition(s5, f5))\n\n","repo_name":"olivierhsta/2125_Devoir1","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38844029889","text":"import subprocess\nimport glob\nimport shutil\nimport datetime as dtm\nimport os\n\nos.chdir('/home/dkivits/STILT/WRF-ARL-converter/metprog/grib2arl/')\n\nfiledir = '/projects/0/ctdas/PARIS/DATA/meteo/STILT/combined_20lvls/'\n#filedir = '/projects/0/ctdas/PARIS/DATA/meteo/STILT/testfiles/'\nfiles = sorted(glob.glob(filedir + '*.grb2'))\n\nfor f in files:\n datestr = f.split('/')[-1][5:-4]\n\n #outfile = '/projects/0/ctdas/PARIS/DATA/meteo/STILT/combined/arl/ecmw.{}.arl'.format(datestr)\n outfile = './arl/ecmw.{}.arl'.format(datestr)\n #outfile = './arl/ecmw.test.arl'.format(datestr)\n\n #command = ['/home/dkivits/STILT/WRF-ARL-converter/metprog/grib2arl/grib2arl'] + [r'-i' + f ]\n command = ['./grib2arl'] + [r'-i' + f ]\n command = ' '.join(command)\n print(command)\n p = subprocess.Popen(command, shell=True)\n p.communicate()\n shutil.move('DATA.ARL', outfile)\n \n","repo_name":"DaanKivits/STILT_v1.2_sparse","sub_path":"WRF-ARL-converter/metprog/grib2arl/process_multiple.py","file_name":"process_multiple.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71345256906","text":"# Body Mass Index Loop Program \n# Name: Yuqi Zhou \n# \n# Description \n# This application is to calculate a person's body mass index and determine the category of it. \n# We will prompt the user to enter height and weight, \n# then check if those values are numeric, and if they are in the valid range. \n# When the input are valid, we will calculate the BMI \n# and then determine its category depending on the value of BMI. \n# At this point, we will ask the user if they want to enter a new set of height and weight. \n# We will validate the answer. If it's neither yes nor no, we will notify the user to enter a valid choice. \n# If the answer is yes, the whole calculation process will start over again. \n# If the answer is no, the application will be ended. \n\n# Declaration \n# Initialize a Boolean variable to store the determination of \n# if the user want to enter a new set of values. \nenter_another = True \n# Declare three constants for value validation \nMIN_HEIGHT = 24.0 \nMAX_HEIGHT = 120.0 \nMIN_WEIGHT = 25.0 \n\n# This function is to check if the input of height is numeric. \n# If not, prompt the user to enter a numeric value. \ndef Valid_Height(user_entry): \n try: \n input_check = float(user_entry) \n return True\n except: \n print(\"Please enter the height as a numeric value.\") \n return False \n\n# This function is to check if the input is in valid range. \n# If not, prompt the user to enter a value in valid range. 
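\n# Example (hypothetical calls): Height_Range(\"70\") returns True, while\n# Height_Range(\"10\") prints the range prompt and returns False. 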
\ndef Height_Range(user_entry): \n if MIN_HEIGHT <= float(user_entry) <= MAX_HEIGHT: \n return True \n else: \n print(\"Please enter the height between 24 and 120 inches.\")\n return False \n\n# This function is to check if the input of weight is numeric. \n# If not, prompt the user to enter a numeric value. \ndef Valid_Weight(user_entry): \n try: \n input_check = float(user_entry) \n return True\n except: \n print(\"Please enter the weight as a numeric value.\") \n return False \n\n# This function is to check if the input is in valid range. \n# If not, prompt the user to enter a value in valid range. \ndef Weight_Range(user_entry): \n if MIN_WEIGHT <= float(user_entry): \n return True \n else: \n print(\"Please enter a weight of at least 25 pounds.\") \n return False \n\n# A while loop, condition is met when the user is willing to enter a new set. \nwhile enter_another == True: \n # Input \n # Prompt the user to enter value of height, and check if the input is valid. \n # If not, ask the user to enter again. \n height = input(\"Please enter the person's height in inches: \") \n while Valid_Height(height) == False or Height_Range(height) == False: \n height = input(\"Please enter the person's height in inches: \") \n \n # Prompt the user to enter value of weight, and check if the input is valid. \n # If not, ask the user to enter again. \n weight = input(\"Please enter the person's weight in pounds: \") \n while Valid_Weight(weight) == False or Weight_Range(weight) == False: \n weight = input(\"Please enter the person's weight in pounds: \") \n \n # Initialize a Boolean variable to store True or False value \n # of checking if new weight is less than 85% of original weight. \n weight_check = True \n # Declare a float variable to store renewed weight value. The first one is equal to the original. \n weight_new = float(weight) \n # Declare a float variable to store the limit we set as an exit point. \n weight_limit = float(weight)*0.85 \n # Initialize a string variable to store the category of BMI. \n category_bmi = \"\" \n # Set up a constant for conversion factor. \n CONVERSION_FACTOR = 703 \n # Set up 4 constants to set the range of categories of BMI. \n BMI_SEVERELY_UNDER = 16 \n BMI_UNDER = 18.5 \n BMI_HEALTY = 25 \n BMI_OVER = 30 \n \n # A while loop to calculate BMI and determine the category of BMI, \n # when new weight is less than 85% of original weight. \n while weight_check == True: \n bmi = (float(weight_new)/float(height)**2)*CONVERSION_FACTOR \n if bmi < BMI_SEVERELY_UNDER: \n category_bmi = \"severely underweight\" \n elif BMI_SEVERELY_UNDER <= bmi < BMI_UNDER: \n category_bmi = \"underweight\" \n elif BMI_UNDER <= bmi < BMI_HEALTY: \n category_bmi = \"healthy\" \n elif BMI_HEALTY <= bmi < BMI_OVER: \n category_bmi = \"overweight\" \n else: \n category_bmi = \"obese\" \n \n # Print out height, weight, result of BMI and its category altogether as a string. \n print('The BMI for a ' + str(height) + '\" tall person who weighs ' + str(weight_new) + ' lb. is ' + str(round(bmi,1)) + ', which is categorized as ' + str(category_bmi) + '.') \n\n # Set the increment of weight change as a constant. \n WEIGHT_INCREMENT = -5 \n # Calculate new weight. \n weight_new = weight_new + WEIGHT_INCREMENT \n # Check if new weight is less than 85% of original weight. If yes, end the loop. \n if weight_new < weight_limit: \n weight_check = False \n \n # Ask the user if they want to enter a new set of data. \n new_input = input(\"Would you like to enter data for another person? 
(yes/no): \") \n \n # If the answer is 'yes', start a new validation and calculation process. \n # If the answer is 'no', end the application. \n # If the answer is neither 'yes' nor 'no', prompt the user to enter 'yes' or 'no'. \n while new_input.strip().lower() != 'yes' and new_input.strip().lower() != 'no': \n new_input = input(\"Please enter 'yes' or 'no'. Would you like enter data for another person?: \") \n if new_input.strip().lower() == \"no\": \n enter_another = False \n else: \n enter_another = True \n ","repo_name":"miracleyuqi/BMI_Calculator","sub_path":"Body Mass Index Calculator.py","file_name":"Body Mass Index Calculator.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38850001686","text":"import argparse\nimport os\nimport logging\nimport torch\n\nfrom memcnn.config import Config\nfrom memcnn.experiment.manager import ExperimentManager\nfrom memcnn.experiment.factory import load_experiment_config, experiment_config_parser\n\nimport memcnn.utils.log\n\n\nlogger = logging.getLogger('train')\n\n\ndef run_experiment(experiment_tags, data_dir, results_dir, start_fresh=False, use_cuda=False, workers=None,\n experiments_file=None, *args, **kwargs):\n if not os.path.exists(data_dir):\n raise RuntimeError('Cannot find data_dir directory: {}'.format(data_dir))\n\n if not os.path.exists(results_dir):\n raise RuntimeError('Cannot find results_dir directory: {}'.format(results_dir))\n\n cfg = load_experiment_config(experiments_file, experiment_tags)\n logger.info(cfg)\n\n model, optimizer, trainer, trainer_params = experiment_config_parser(cfg, workers=workers, data_dir=data_dir)\n\n experiment_dir = os.path.join(results_dir, '_'.join(experiment_tags))\n manager = ExperimentManager(experiment_dir, model, optimizer)\n if start_fresh:\n logger.info('Starting fresh option enabled. 
Clearing all previous results...')\n manager.delete_dirs()\n manager.make_dirs()\n\n if use_cuda:\n manager.model = manager.model.cuda()\n import torch.backends.cudnn as cudnn\n cudnn.benchmark = True\n\n last_iter = manager.get_last_model_iteration()\n if last_iter > 0:\n logger.info('Continue experiment from iteration: {}'.format(last_iter))\n manager.load_train_state(last_iter)\n\n trainer_params.update(kwargs)\n\n trainer(manager, start_iter=last_iter, use_cuda=use_cuda, *args, **trainer_params)\n\n\ndef main(data_dir, results_dir):\n # setup logging\n memcnn.utils.log.setup(True)\n\n # specify defaults for arguments\n use_cuda = torch.cuda.is_available()\n workers = 16\n experiments_file = os.path.join(os.path.dirname(__file__), 'config', 'experiments.json')\n start_fresh = False\n\n # parse arguments\n parser = argparse.ArgumentParser(description='Run memcnn experiments.')\n parser.add_argument('experiment_tags', type=str, nargs='+',\n help='Experiment tags to run and combine from the experiment config file')\n parser.add_argument('--workers', dest='workers', type=int, default=workers,\n help='Number of workers for data loading (Default: {})'.format(workers))\n parser.add_argument('--results-dir', dest='results_dir', type=str, default=results_dir,\n help='Directory for storing results (Default: {})'.format(results_dir))\n parser.add_argument('--data-dir', dest='data_dir', type=str, default=data_dir,\n help='Directory for input data (Default: {})'.format(data_dir))\n parser.add_argument('--experiments-file', dest='experiments_file', type=str, default=experiments_file,\n help='Experiments file (Default: {})'.format(experiments_file))\n parser.add_argument('--fresh', dest='start_fresh', action='store_true', default=start_fresh,\n help='Start with fresh experiment, clears all previous results (Default: {})'\n .format(start_fresh))\n parser.add_argument('--no-cuda', dest='use_cuda', action='store_false', default=use_cuda,\n help='Always disables GPU use (Default: use when available)')\n args = parser.parse_args()\n\n if not use_cuda:\n logger.warning('CUDA is not available in the current configuration!!!')\n\n if not args.use_cuda:\n logger.warning('CUDA is disabled!!!')\n\n # run experiment given arguments\n run_experiment(\n args.experiment_tags,\n args.data_dir,\n args.results_dir,\n start_fresh=args.start_fresh,\n experiments_file=args.experiments_file,\n use_cuda=args.use_cuda, workers=args.workers)\n\n\nif __name__ == '__main__': # pragma: no cover\n config_fname = Config.get_filename()\n if not os.path.exists(config_fname) or not 'data_dir' in Config() or not 'results_dir' in Config():\n print('The configuration file was not set correctly.\\n')\n print('Please create a configuration file (json) at:\\n {}\\n'.format(config_fname))\n print('The configuration file should be formatted as follows:\\n\\n'\n '{\\n'\n ' \"data_dir\": \"/home/user/data\",\\n'\n ' \"results_dir\": \"/home/user/experiments\"\\n'\n '}\\n')\n print('data_dir : location for storing the input training datasets')\n print('results_dir : location for storing the experiment files during training')\n else:\n main(data_dir=Config()['data_dir'],\n results_dir=Config()['results_dir'])\n","repo_name":"silvandeleemput/memcnn","sub_path":"memcnn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"81"} +{"seq_id":"27347849520","text":"from spacy.matcher import DependencyMatcher\nfrom kg_detective.lib import 
merge\n\ndef search_out(doc, nlp):\n \"\"\"Search for copular verbs \n\n Args:\n doc (spacy.tokens.Doc): doc to be analyzed\n nlp (spacy.language.Language): context language\n\n Returns:\n list: list of spacy.tokens.Span\n \"\"\"\n\n result = []\n\n dep_matcher = DependencyMatcher(nlp.vocab)\n\n attr_dep = [\n {\n \"RIGHT_ID\": \"core_verb\",\n \"RIGHT_ATTRS\": {\"POS\": \"VERB\", \"LEMMA\": \"become\"}\n },\n {\n \"LEFT_ID\": \"core_verb\",\n \"REL_OP\": \">\",\n \"RIGHT_ID\": \"attr\",\n \"RIGHT_ATTRS\": {\"DEP\": \"attr\"}\n },\n ]\n acomp_dep = [\n {\n \"RIGHT_ID\": \"core_verb\",\n \"RIGHT_ATTRS\": {\"POS\": {\"IN\": [\"VERB\", \"AUX\"]}, \"LEMMA\": {\"IN\": [\"come\", \"look\", \"sound\", \"taste\", \"be\", \"smell\", \"go\", \"get\", \"feel\"]}}\n },\n {\n \"LEFT_ID\": \"core_verb\",\n \"REL_OP\": \">\",\n \"RIGHT_ID\": \"acomp\",\n \"RIGHT_ATTRS\": {\"DEP\": {\"IN\": [\"acomp\", \"advcl\"]}}\n }\n ]\n\n oprd_dep = [\n {\n \"RIGHT_ID\": \"core_verb\",\n \"RIGHT_ATTRS\": {\"POS\": {\"IN\": [\"VERB\"]}, \"LEMMA\": {\"IN\": [\"appear\"]}}\n },\n {\n \"LEFT_ID\": \"core_verb\",\n \"REL_OP\": \">\",\n \"RIGHT_ID\": \"oprd\",\n \"RIGHT_ATTRS\": {\"DEP\": \"oprd\"}\n }\n ]\n\n\n dep_patterns = [attr_dep, acomp_dep, oprd_dep]\n dep_matcher.add(\"verb_copular\", dep_patterns)\n matches = dep_matcher(doc)\n\n for _, (copular, copular_obj) in matches:\n if copular < copular_obj:\n span_text = doc[copular].text + \" \" + \" \".join([e.text for e in doc[copular_obj].subtree])\n result.append({\"text\": span_text})\n\n return result\n \n","repo_name":"qishe-nlp/kg-detective","sub_path":"kg_detective/en/rules/verb_copular.py","file_name":"verb_copular.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28605709659","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nimport tweepy\nimport csv\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nfrom tweepy.error import TweepError\n\nauth = tweepy.OAuthHandler(\"xxxxxxxxxxxxx\",\"xxxxxxxxxxxxx\")\nauth.set_access_token(\"xxxxxxxxxxxxxxx-xxxxxxxxxxxxxx\",\"xxxxxxxxxxxxxxxxx\")\napi = tweepy.API(auth,wait_on_rate_limit=True)\n\n# Create your views here.\ndef index(request):\n if request.method == 'POST':\n index.query = request.POST['searchquery']\n try:\n search_user = api.get_user(index.query)\n except:\n return render(request,'no-data.html')\n get_tweets = api.user_timeline(screen_name=index.query,count=10)\n followers=api.followers(screen_name=index.query,count=15)\n\n if(search_user.followers_count > 10):\n left_followers = search_user.followers_count-10\n else:\n left_followers=0\n if(search_user.statuses_count > 10):\n left_tweets = search_user.statuses_count-10 \n else:\n left_tweets=0\n return render(request,'data.html',{'f_count':search_user.followers_count,\n 'status_count':search_user.statuses_count,\n 'get_tweets':get_tweets,\n 'screen_name':search_user.name,\n 'description':search_user.description,\n 'followers':followers,\n 'username':index.query,\n 'left_tweets':left_tweets,\n 'left_followers':left_followers})\n else:\n return render(request,'index.html')\n \ndef export_csv(request):\n if request.method == \"POST\":\n user_email = request.POST.get('useremail',False)\n followers_count = request.POST.get('followers_count',False)\n\n with 
open('follower.csv','w',newline='',encoding='utf-8') as csvfile:\n wrt = csv.writer(csvfile)\n for i in tweepy.Cursor(api.followers, index.query).items(int(followers_count)):\n n = i.name\n wrt.writerow([n])\n \n email_user = 'xxxx@x.xcom'\n email_password = 'xxxxxxxxxxxxxx'\n email_send = user_email\n\n subject = 'Followers csv file'\n\n msg = MIMEMultipart()\n msg['From'] = email_user\n msg['To'] = email_send\n msg['Subject'] = subject\n\n body = 'Hi there, Here is your followers file. Thanks for choosing us'\n msg.attach(MIMEText(body,'plain'))\n\n filename='follower.csv'\n attachment = open(filename,'rb')\n\n part = MIMEBase('application','octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\"attachment; filename= \"+filename)\n\n msg.attach(part)\n text = msg.as_string()\n server = smtplib.SMTP('smtp.gmail.com',587)\n server.starttls()\n server.login(email_user,email_password)\n\n server.sendmail(email_user,email_send,text)\n server.quit()\n return redirect('')\n\n\n else:\n return redirect('/')\n\n\ndef export_csv_tweets(request):\n if request.method == \"POST\":\n user_email = request.POST.get('useremail',False)\n tweet_count = request.POST.get('tweet_count',False)\n\n with open('tweets.csv','w',newline='',encoding=\"utf-8\") as csvfile:\n wrt = csv.writer(csvfile)\n for i in tweepy.Cursor(api.user_timeline, index.query).items(int(tweet_count)):\n n = i.text\n wrt.writerow([n])\n\n email_user = 'xxxxxxxx@xxxxx'\n email_password = 'xxxxxxxx'\n email_send = user_email\n\n subject = 'Tweets csv file'\n\n msg = MIMEMultipart()\n msg['From'] = email_user\n msg['To'] = email_send\n msg['Subject'] = subject\n\n body = 'Hi there, Here is your file. Thanks for choosing us'\n msg.attach(MIMEText(body,'plain'))\n\n filename='tweets.csv'\n attachment = open(filename,'rb')\n\n part = MIMEBase('application','octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\"attachment; filename= \"+filename)\n\n msg.attach(part)\n text = msg.as_string()\n server = smtplib.SMTP('smtp.gmail.com',587)\n server.starttls()\n server.login(email_user,email_password)\n\n\n server.sendmail(email_user,email_send,text)\n server.quit()\n\n return HttpResponse('Your tweets file is sent to your email...')\n else:\n return redirect('/')\n \n\n","repo_name":"Hemant8555/twitter_bot","sub_path":"twibot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29722344796","text":"import os\n\nimport geopandas as gpd\nimport pandas as pd\nfrom shapely.geometry import Point\n\n# %%\n# give the folder path:\n\n# give the folder path:\nrunId = \"hamburg-v3.0-25pct-base\"\ntrips_info_folder = 'D:/ReallabHH/v3.0/25pct/hv3-25-7-1/'\n\n#output file path\noutputDir = trips_info_folder + 'analysis/'\noutputFile = outputDir + runId + '.distance_distribution_per_mode.csv'\n\nprint(\"reading output trips and person2homeCoord csv files\")\n#trips_ending = 'pct.output_trips.csv.gz'\ntrips_ending = '.output_trips.csv.gz'\n\nprint(\"will read from \" + trips_info_folder + runId + trips_ending)\ntrips = pd.read_csv(trips_info_folder + runId + trips_ending, sep=';')\n\n\n# put the file here which contains information about person and their home coordinate. 
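# ---- Illustrative sketch, not part of the dataset record above ----
# The two Django views above rebuild the same MIME-attachment email by hand.
# The reusable core is just this; addresses and credentials are placeholders.
import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def send_with_attachment(sender, password, recipient, subject, body, filename):
    msg = MIMEMultipart()
    msg["From"], msg["To"], msg["Subject"] = sender, recipient, subject
    msg.attach(MIMEText(body, "plain"))
    part = MIMEBase("application", "octet-stream")
    with open(filename, "rb") as f:  # close the file handle, unlike the views above
        part.set_payload(f.read())
    encoders.encode_base64(part)
    part.add_header("Content-Disposition", "attachment; filename=" + filename)
    msg.attach(part)
    with smtplib.SMTP("smtp.gmail.com", 587) as server:
        server.starttls()
        server.login(sender, password)
        server.sendmail(sender, recipient, msg.as_string())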
Download it from:\n# shared-svn/projects/matsim-hamburg/hamburg-v1/person2homeCoord.csv\n# generated by java class org.matsim.analysis.GeneratePersonHomeLocation\n\nperson_home_location = pd.read_csv('D:/svn/shared-svn/projects/matsim-hamburg/hamburg-v3/hamburg-v3.0-person2HomeLocation.tsv', sep=\"\\t\")\nprint(\"... finished\")\n\n# %%\n# ---------------------------- geo process\ntrips = trips.merge(right=person_home_location, how='left', left_on=['person'], right_on=['person'])\nperson_home_location = trips[['person', 'home_x', 'home_y']]\ntrips_start_location = trips[['person', 'start_x', 'start_y']]\ntrips_end_location = trips[['person', 'end_x', 'end_y']]\n# %%\n# put the shapefile of hamburg here, Download it from:\n# /shared-svn/projects/realLabHH/data/hamburg_shapeFile/hamburg_merge/hamburg.shp\nshapeFile = \"D:/svn/shared-svn/projects/realLabHH/data/hamburg_shapeFile/hamburg_merge/hamburg.shp\"\n\nprint(\"reading shape file\")\nhamburg_map = gpd.read_file(shapeFile)\nprint(\"... finished\")\n\n# %%\ndef geo_merge(df, name):\n points = df.apply(lambda row: Point(row[name + '_x'], row[name + '_y']), axis=1)\n points_geo = gpd.GeoDataFrame(df, geometry=points, crs=hamburg_map.crs)\n df_shape = gpd.sjoin(points_geo, hamburg_map, how='inner', op='within')\n df_shape = pd.DataFrame(df_shape)\n df_shape = df_shape[~df_shape.index.duplicated()]\n df_shape = pd.concat([trips, df_shape], axis=1)\n trips[name + '_area'] = df_shape['AreaType']\n\nprint(\"perform geo merge(s)\")\ngeo_merge(person_home_location, 'home')\n#geo_merge(trips_start_location, 'start')\n#geo_merge(trips_end_location, 'end')\nprint(\".... done\")\n\n# %%\n## distance distrbution\ndef get_trip_dis_bin(x):\n if x < 500:\n return \"0.5\"\n elif x < 1000:\n return \"1\"\n elif x < 2000:\n return \"2\"\n elif x < 5000:\n return \"5\"\n elif x < 10000:\n return \"10\"\n elif x < 20000:\n return \"20\"\n elif x < 50000:\n return \"50\"\n elif x < 100000:\n return \"100\"\n else:\n return \"100+\"\n\nprint(\"apply distance distribution\")\ntrips['dis_bins_rou'] = trips['traveled_distance'].apply(lambda x: get_trip_dis_bin(x))\ntrips['dis_bins_euc'] = trips['euclidean_distance'].apply(lambda x: get_trip_dis_bin(x))\n\n\n# %%\n\ndef get_trip_dis_dis(trips_dataframe):\n rou_distance_dist = trips_dataframe.groupby(['main_mode', 'dis_bins_rou'])['person'].count()\n rou_distance_dist = rou_distance_dist.unstack().fillna(0)\n rou_distance_dist = rou_distance_dist[['0.5', '1', '2', '5', '10', '20', '50', '100', '100+']]\n rou_distance_dist['dis_type'] = 'routed'\n\n euc_distance_dist = trips_dataframe.groupby(['main_mode', 'dis_bins_euc'])['person'].count()\n euc_distance_dist = euc_distance_dist.unstack().fillna(0)\n euc_distance_dist = euc_distance_dist[['0.5', '1', '2', '5', '10', '20', '50', '100', '100+']]\n euc_distance_dist['dis_type'] = 'euclidean'\n\n data = pd.concat([rou_distance_dist, euc_distance_dist])\n return data\n\n\n# %%\nout_dis_dist = get_trip_dis_dis(trips[trips['home_area'] == 0])\nout_dis_dist['home_area'] = 'outer'\nhvv_dis_dist = get_trip_dis_dis(trips[trips['home_area'] == 1])\nhvv_dis_dist['home_area'] = 'hvv-umland'\nhamburg_dis_dist = get_trip_dis_dis(trips[trips['home_area'] == 2])\nhamburg_dis_dist['home_area'] = 'city'\nmetro_dis_dist = get_trip_dis_dis(trips)\nmetro_dis_dist['home_area'] = 'metropolregion'\nprint(\"... 
done\")\n\nprint(\"write output to \" + outputFile)\nif not os.path.exists(outputDir):\n os.mkdir(trips_info_folder + 'analysis/')\npd.concat([metro_dis_dist, hamburg_dis_dist, hvv_dis_dist, out_dis_dist]).to_csv(\n outputFile, sep=\";\")\nprint(\"...done\")\nprint(\"FINISHED\")\n","repo_name":"matsim-scenarios/matsim-hamburg","sub_path":"src/main/python/analysis/trips_distance_distribution.py","file_name":"trips_distance_distribution.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"27344594620","text":"from Common.AndroidMessage import Android\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\nimport selenium.common.exceptions as E\nfrom selenium.webdriver.common.by import By\nimport time\nimport os\nimport allure\nfrom loguru import logger\nfrom selenium.webdriver.remote.webelement import WebElement\n\nlogger.add(os.path.abspath(os.path.join(os.path.dirname(__file__), f\"../reports/log/auto.log\")), encoding='utf-8')\n\n\nclass Base:\n \"\"\"\n 基本页,包含设备的所有所需信息(暂时只支持安卓)\n 用来连接页面元素和操作过程的类\n \"\"\"\n\n driver = None\n\n an = Android() # 初始化设备数据\n android_driver_caps = {'platformName': an.platformName,\n 'platformVersion': an.get_device_version,\n 'deviceName': an.get_device_name[0], # 第一个设备\n 'appPackage': an.get_app_name,\n 'appActivity': an.get_app_Activity,\n # 'autoGrantPermissions': True, # 获取默认权限\n # \"noReset\": True, # 不清空数据\n # \"automationName\": \"Uiautomator2\" # 使用Uiautomator2\n }\n \"\"\"\n ios_driver_caps = {\"platformName\": \"iOS\",\n \"platformVersion\": \"12.1\",\n \"bundleId\": \"com.pundix.wallet\",\n \"automationName\": \"XCUITest\",\n \"udid\": \"72c8074b6e518ba2c4a462a5bbe169f90c802f8c\",\n \"deviceName\": \"“PundiX051”的 iPhone\"\n }\n \"\"\"\n\n def __init__(self, driver):\n self.driver = driver\n\n def find_Element(self, el, mark='') -> WebElement:\n logger.info(f'{mark} 查找元素 {el}')\n try:\n return self.driver.find_element(*el)\n except E.NoSuchElementException as e:\n logger.exception('查找元素失败.', e)\n self.allure_save_img(el[1]) # 找不到元素,截图\n raise\n\n def find_Elements(self, el, mark=None) -> WebElement:\n logger.info(f'{mark} 查找元素(s) {el}')\n try:\n logger.info(f'els == {self.driver.find_elements(*el)}')\n return self.driver.find_elements(*el)\n except E.NoSuchElementException as e:\n logger.exception('查找元素(s)失败.', e)\n raise\n\n def click_element(self, el, mark=None):\n try:\n self.find_Element(el).click()\n logger.info(f'{mark} 点击元素 {el}')\n except E.ElementClickInterceptedException as e:\n logger.exception('点击元素失败.', e)\n\n def input_element(self, el, text, mark=None):\n try:\n self.find_Element(el).send_keys(text)\n logger.info(f'{mark} 赋值元素 {el}:{text}')\n except:\n logger.exception('元素赋值失败.')\n\n def clear_element(self, el, mark=None):\n try:\n self.find_Element(el).clear()\n logger.info(f'{mark} 清除元素值 {el}')\n except:\n logger.exception('元素值清理失败.')\n logger.warning('尝试更改清理元素方法: self.clear_Text')\n self.clear_text(el)\n\n def sys_back(self):\n \"\"\"点击系统返回键\"\"\"\n logger.info('点击系统返回键')\n self.driver.keyevent(4)\n\n def sys_home(self):\n \"\"\"点击系统home键\"\"\"\n logger.info('点击系统home键')\n self.driver.keyevent(3)\n\n def sys_keycode(self, code):\n \"\"\"点击系统按键\"\"\"\n logger.info(f'点击按键 {code}')\n self.driver.keyevent(code)\n\n def text_in_pagesource(self, text):\n \"\"\"检查文本是否在page_source里\"\"\"\n if text in self.driver.page_source:\n logger.info(f'找到 {text} 文本在页面元素里.')\n return True\n 
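# ---- Illustrative sketch, not part of the dataset record above ----
# trips_distance_distribution.py tags each point with the polygon it falls in
# through a spatial join. The same pattern with toy geometry; note that recent
# geopandas spells the keyword 'predicate=' where the script uses the older 'op='.
import geopandas as gpd
from shapely.geometry import Point, box

zones = gpd.GeoDataFrame(
    {"AreaType": [0, 1]},
    geometry=[box(0, 0, 5, 5), box(5, 0, 10, 5)],
    crs="EPSG:25832",  # placeholder CRS
)
points = gpd.GeoDataFrame(
    {"trip_id": [1, 2]},
    geometry=[Point(2, 2), Point(7, 1)],
    crs=zones.crs,
)
tagged = gpd.sjoin(points, zones, how="inner", predicate="within")
print(tagged[["trip_id", "AreaType"]])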
else:\n return False\n\n def get_size(self):\n \"\"\"获取屏幕分辨率大小\"\"\"\n size = self.driver.get_window_size()\n logger.info(f'屏幕宽度:{size[\"width\"]}, 长度:{size[\"height\"]}')\n return size['width'], size['height']\n\n def swipe_down(self, duration=500):\n \"\"\"\n 根据屏幕相对大小,向下滑动\n duration: 滑动时间间隔\n \"\"\"\n logger.info('向下滑动.')\n x, y = self.get_size()\n self.driver.swipe(x / 2, y / 4, x / 2, y * 3 / 4, duration)\n\n def swipe_up(self, duration=500):\n \"\"\"\n 根据屏幕相对大小,向上滑动\n duration: 滑动时间间隔\n \"\"\"\n logger.info('向上滑动.')\n x, y = self.get_size()\n self.driver.swipe(x / 2, y * 3 / 4, x / 2, y / 4, duration)\n\n def Swipe(self, x1, y1, x2, y2, duration=500):\n # 滑动\n x, y = self.get_size()\n self.driver.swipe(x * x1, y * y1, x * x2, y * y2, duration)\n\n def wait_element(self, duration, frequency, el) -> WebElement:\n \"\"\"等待元素出现\"\"\"\n try:\n ele = WebDriverWait(self.driver, timeout=duration, poll_frequency=frequency).until(\n expected_conditions.presence_of_element_located(el))\n logger.info(f'等待元素{el}出现...时长:{duration}s,间隔:{frequency}s')\n return ele\n except TimeoutError as e:\n logger.exception('查找元素超时.', e)\n\n def clear_text(self, el):\n \"\"\"第二种清楚元素的方法\"\"\"\n conn = self.find_Element(el)\n conn.click()\n self.driver.keyevent(123) # 光标追尾\n text_Length = len(str(conn.text))\n logger.info('触发清除元素func 2.')\n for i in range(0, text_Length):\n self.driver.keyevent(67) # 逐个删除已输入的内容\n\n def save_img(self, picname):\n \"\"\"截图并保存\"\"\"\n filename = picname + '.png'\n filepath = os.path.abspath(os.path.join(os.path.dirname(__file__), f\"../images/{filename}\"))\n logger.warning(f'截图路径:: {filepath}')\n self.driver.get_screenshot_as_file(filepath)\n return filepath\n\n def allure_save_img(self, name):\n \"\"\"allure截图\"\"\"\n with open(self.save_img(name), 'rb') as f:\n file = f.read()\n allure.attach(file, name, allure.attachment_type.PNG)\n\n def ios_swipe_up(self):\n \"\"\"iOS端向上滑动\"\"\"\n logger.info('向上滑动.')\n self.driver.execute_script('mobile: swipe', {'direction': 'up'})\n\n def ios_swipe_down(self):\n \"\"\"iOS端向下滑动\"\"\"\n logger.info('向下滑动')\n self.driver.execute_script('mobile: swipe', {'direction': 'down'})\n\n def is_toast_exist(self, text, duration=5, frequency=0.1):\n \"\"\"\n 定位toast提示语\n :param text: 提示语内容(全部)\n :param duration: 多少秒后超时,不再监控\n :param frequency: 监控间隔\n :return:\n \"\"\"\n logger.info(f'等待toast {text} 出现...时长:{duration}s,间隔:{frequency}s')\n try:\n toast_loc = (\"xpath\", \".//*[contains(@text,'%s')]\" % text)\n WebDriverWait(self.driver, duration, frequency).until(\n expected_conditions.presence_of_element_located(toast_loc))\n logger.info(f'找到toast {text}.')\n return True\n except E.NoSuchElementException as n:\n logger.exception('找不到该toast提示.', n)\n return False\n except E.TimeoutException as t:\n logger.exception('等待超时,找不到该toast提示.', t)\n return False\n\n\n # def switch_to_view(self, target='H5'):\n # \"\"\"\n # 切换app视窗 或 h5视窗\n # :target:目标视窗(app/H5),默认切换到H5\n # \"\"\"\n #\n # view_list = self.driver.contexts\n # logger.warning('当前页面的webview元素有:', view_list)\n # webview = [i for i in view_list if 'app_name' in i]\n # app = [a for a in view_list if 'APP' in a]\n #\n # if target == 'H5':\n # self.driver.execute(MobileCommand.SWITCH_TO_CONTEXT, {\"name\": webview[0]})\n # logger.warning('切换到H5 view.')\n # else:\n # self.driver.execute(MobileCommand.SWITCH_TO_CONTEXT, {\"name\": app[0]})\n # logger.warning('切换到app view.')\n # logger.warning(self.driver.current_context)\n # time.sleep(2)\n\n def authority(self):\n \"\"\"授予app系统权限\"\"\"\n logger.info('授权弹窗')\n 
try:\n time.sleep(1)\n if self.find_Element('Allow'): # 权限询问弹窗\n self.driver.switch_to.alert.accept() # 系统弹窗默认允许\n self.driver.switch_to.alert.accept() # 系统弹窗默认允许\n time.sleep(1)\n except:\n logger.warning('没有找到授权弹窗,跳过!')\n pass\n\n def get_text(self, el) -> str:\n \"\"\"获取元素text\"\"\"\n text = self.find_Element(el).text\n logger.info(f'找到元素的文本::{text}')\n return text\n\n def click_text(self, text):\n \"\"\"点击文本\"\"\"\n loc = (By.XPATH, f'//android.widget.TextView[@text=\"{text}\"]')\n self.click_element(loc)\n\n def element_display(self, el) -> bool:\n \"\"\"检查元素是否可视\"\"\"\n try:\n result = self.find_Element(el).is_displayed()\n logger.info(f'元素 {el} 是否可视: {result}')\n return result\n except:\n return False\n\n def element_enable(self, el) -> bool:\n \"\"\"检查元素是否可点击\"\"\"\n try:\n result = self.find_Element(el).is_enabled()\n logger.info(f'元素 {el} 是否可点击: {result}')\n return result\n except:\n return False\n\n def get_element_value(self, el, name='text'):\n # 返回元素的属性值\n return self.find_Element(el).get_attribute(name)\n\n def hide_keyboard(self):\n \"\"\"收起键盘\"\"\"\n try:\n self.driver.hide_keyboard()\n except:\n return False\n\n\nif __name__ == '__main__':\n print(Base(1).android_driver_caps)\n","repo_name":"qiusese/aton-tests","sub_path":"Page/basePage.py","file_name":"basePage.py","file_ext":"py","file_size_in_byte":10260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38401288253","text":"import csv\r\nimport json_lines\r\nimport matplotlib.pyplot as plt\r\nfrom utils import utils\r\nfrom pycorenlp import StanfordCoreNLP\r\nfrom features import AbuserDetectionFeatures as adf\r\nfrom features import ImageFeatures as imf\r\nfrom features import LinguisticAnalysisFeatures as laf\r\nfrom features import SentimentFeatures as sf\r\n\r\n\r\ndef main():\r\n # Creating label dictionary\r\n labels = utils.get_label_dict()\r\n with open('dataset/instances.jsonl', 'rb') as f:\r\n headers = False\r\n count = 0 # elements processed\r\n for post in json_lines.reader(f):\r\n count += 1\r\n print('Sample', count)\r\n # Reading post/article elements\r\n post_id = utils.post_id(post)\r\n post_title = utils.title(post)\r\n article_title = utils.article(post)\r\n # Extracting sample label\r\n post_label = labels[post_id]\r\n # Presence of image in a post\r\n has_image = imf.image_presence(post)\r\n # Number of characters\r\n len_chars_post_title, len_chars_article_title, len_chars_article_desc, len_chars_article_keywords = \\\r\n laf.get_no_of_characters_features(post)\r\n # Difference between number of characters\r\n diff_chars_post_title_article_title, diff_chars_post_title_article_desc, diff_chars_post_title_article_keywords, \\\r\n diff_chars_article_title_article_desc, diff_chars_article_title_article_keywords, diff_chars_article_desc_article_keywords = \\\r\n laf.get_diff_between_no_of_characters_features(post)\r\n # Number of characters ratio\r\n ratio_chars_post_title_article_title, ratio_chars_post_title_article_desc, ratio_chars_post_title_article_keywords, \\\r\n ratio_chars_article_title_article_desc, ratio_chars_article_title_article_keywords, ratio_chars_article_desc_article_keywords = \\\r\n laf.get_no_of_characters_ratio_features(post)\r\n # Number of Words\r\n len_words_post_title, len_words_article_title, len_words_article_desc, len_words_article_keywords = \\\r\n laf.get_no_of_characters_features(post)\r\n # Difference between number of words\r\n diff_words_post_title_article_title, diff_words_post_title_article_desc, 
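# ---- Illustrative sketch, not part of the dataset record above ----
# basePage.py wraps WebDriverWait in wait_element()/is_toast_exist(). The
# underlying explicit-wait pattern, stripped to its core (any selenium/appium
# driver works; the locator below is hypothetical):
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def wait_for(driver, locator, timeout=5, poll=0.1):
    """Return the element once it is present in the DOM, or None on timeout."""
    try:
        return WebDriverWait(driver, timeout, poll_frequency=poll).until(
            EC.presence_of_element_located(locator)
        )
    except TimeoutException:
        return None

# usage: wait_for(driver, (By.XPATH, "//android.widget.TextView[@text='OK']"))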
diff_words_post_title_article_keywords, \\\r\n diff_words_article_title_article_desc, diff_words_article_title_article_keywords, diff_words_article_desc_article_keywords = \\\r\n laf.get_diff_between_no_of_words_features(post)\r\n # Number of words ratio\r\n ratio_words_post_title_article_title, ratio_words_post_title_article_desc, ratio_words_post_title_article_keywords, \\\r\n ratio_words_article_title_article_desc, ratio_words_article_title_article_keywords, ratio_words_article_desc_article_keywords = \\\r\n laf.get_no_of_words_ratio_features(post)\r\n # Post creation hour\r\n post_creation_hour = adf.get_post_creation_hour(post)\r\n # Number of sings\r\n post_title_no_signs = adf.get_no_signs(post_title)\r\n # Number of hashtags\r\n post_title_no_hashtags = adf.get_no_hashtags(post_title)\r\n # Number of exclamations\r\n post_title_no_exclamations = adf.get_no_exclamations(post_title)\r\n article_title_no_exclamations = adf.get_no_exclamations(article_title)\r\n # Number of question marks\r\n post_title_no_questionmarks = adf.get_no_question_marks(post_title)\r\n article_title_no_questionmarks = adf.get_no_question_marks(article_title)\r\n # Number of abbreviations\r\n post_title_no_abbreviations = adf.get_no_abbreviations(post_title)\r\n article_title_no_abbreviations = adf.get_no_abbreviations(article_title)\r\n # Number of ellipses\r\n post_title_no_ellipses = adf.get_no_ellipses(post_title)\r\n article_title_no_ellipses = adf.get_no_ellipses(article_title)\r\n # Number of dots\r\n post_title_no_dots = adf.get_no_dots(post_title)\r\n article_title_no_dots = adf.get_no_dots(article_title)\r\n # Begins with interrogative\r\n post_title_begins_with_interrogative = adf.get_begins_with_interrogative(post_title)\r\n article_title_begins_with_interrogative = adf.get_begins_with_interrogative(article_title)\r\n # Begins with number\r\n post_title_begins_with_number = adf.get_begins_with_number(post_title)\r\n article_title_begins_with_number = adf.get_begins_with_number(article_title)\r\n # Contains determiners and possessives\r\n post_title_determiners, post_title_possessives = laf.get_det_poses(post_title)\r\n article_title_determiners, article_title_possessives = laf.get_det_poses(article_title)\r\n # Contains hyperbolic words\r\n try:\r\n nlp = StanfordCoreNLP('http://localhost:9000')\r\n post_title_hyperbolics, article_title_hyperbolics = sf.get_hyperbolic_words_feature(nlp, post)\r\n except:\r\n print(\"\\nServer is not up!\")\r\n # Contains common clickbait phXrases\r\n post_title_common_phr, article_title_common_phr = laf.get_common_clickbait_phrases_feature(post)\r\n # # Contains Internet slangs\r\n post_title_slang, article_title_slang = laf.get_slang_words_feature(post)\r\n # Sentiment polarity\r\n post_title_sentiment, article_title_sentiment = sf.get_sentiment_polarity_feature(post)\r\n # Writing line to file (could write them in batches to improve performance)\r\n feature_output = post_id + ',' + str(post_label) + ',' + str(has_image) + ',' + str(post_creation_hour) + ',' + str(post_title_begins_with_interrogative) \\\r\n + ',' + str(article_title_begins_with_interrogative) + ',' + str(post_title_begins_with_number) + ',' + str(article_title_begins_with_number) \\\r\n + ',' + str(post_title_determiners) + ',' + str(post_title_possessives) + ',' + str(article_title_determiners) + ',' + str(article_title_possessives) \\\r\n + ',' + str(post_title_hyperbolics) + ',' + str(article_title_hyperbolics) + ',' + str(post_title_common_phr) + ',' + str(article_title_common_phr) \\\r\n + 
',' + str(post_title_slang) + ',' + str(article_title_slang) + ',' + str(post_title_sentiment) + ',' + str(article_title_sentiment) \\\r\n + ',' + str(len_chars_post_title) + ',' + str(len_chars_article_title) + ',' + str(len_chars_article_desc) + ',' + str(len_chars_article_keywords) \\\r\n + ',' + str(diff_chars_post_title_article_title) + ',' + str(diff_chars_post_title_article_desc) + ',' + str(diff_chars_post_title_article_keywords) \\\r\n + ',' + str(diff_chars_article_title_article_desc) + ',' + str(diff_chars_article_title_article_keywords) + ',' + str(diff_chars_article_desc_article_keywords) \\\r\n + ',' + str(ratio_chars_post_title_article_title) + ',' + str(ratio_chars_post_title_article_desc) + ',' + str(ratio_chars_post_title_article_keywords) \\\r\n + ',' + str(ratio_chars_article_title_article_desc) + ',' + str(ratio_chars_article_title_article_keywords) + ',' + str(ratio_chars_article_desc_article_keywords) \\\r\n + ',' + str(len_words_post_title) + ',' + str(len_words_article_title) + ',' + str(len_words_article_desc) + ',' + str(len_words_article_keywords) \\\r\n + ',' + str(diff_words_post_title_article_title) + ',' + str(diff_words_post_title_article_desc) + ',' + str(diff_words_post_title_article_keywords) \\\r\n + ',' + str(diff_words_article_title_article_desc) + ',' + str(diff_words_article_title_article_keywords) + ',' + str(diff_words_article_desc_article_keywords) \\\r\n + ',' + str(ratio_words_post_title_article_title) + ',' + str(ratio_words_post_title_article_desc) + ',' + str(ratio_words_post_title_article_keywords) \\\r\n + ',' + str(ratio_words_article_title_article_desc) + ',' + str(ratio_words_article_title_article_keywords) + ',' + str(ratio_words_article_desc_article_keywords) \\\r\n + ',' + str(post_title_no_signs) + ',' + str(post_title_no_hashtags) + ',' + str(post_title_no_exclamations) + ',' + str(article_title_no_exclamations) \\\r\n + ',' + str(post_title_no_questionmarks) + ',' + str(article_title_no_questionmarks) + ',' + str(post_title_no_abbreviations) \\\r\n + ',' + str(article_title_no_abbreviations) + ',' + str(post_title_no_ellipses) + ',' + str(article_title_no_ellipses) + ',' + str(post_title_no_dots) \\\r\n + ',' + str(article_title_no_dots)\r\n\r\n # POS tags extraction\r\n counts_post_title_POS = laf.get_POS_counts(post_title)\r\n for key, value in counts_post_title_POS.items():\r\n feature_output += ',' + str(value)\r\n counts_article_title_POS = laf.get_POS_counts(article_title)\r\n for key, value in counts_article_title_POS.items():\r\n feature_output += ',' + str(value)\r\n # POS patterns extraction\r\n post_title_patterns_POS = laf.get_title_patterns(post_title)\r\n article_title_patterns_POS = laf.get_title_patterns(article_title)\r\n # Convert True/False to 0/1\r\n post_title_pattern_nnpv = int(post_title_patterns_POS[0] is True)\r\n post_title_pattern_nnpt = int(post_title_patterns_POS[1] is True)\r\n article_title_pattern_nnpv = int(article_title_patterns_POS[0] is True)\r\n article_title_pattern_nnpt = int(article_title_patterns_POS[1] is True)\r\n feature_output += ',' + str(post_title_pattern_nnpv) + ',' + str(post_title_pattern_nnpt)\r\n feature_output += ',' + str(article_title_pattern_nnpv) + ',' + str(article_title_pattern_nnpt)\r\n # N-gram extraction\r\n unigrams = laf.get_ngram_counts(post, 1, 6, 1000)\r\n for key, value in unigrams.items():\r\n feature_output += ',' + str(value)\r\n bigrams = laf.get_ngram_counts(post, 2, 6, 200)\r\n for key, value in bigrams.items():\r\n feature_output += ',' + str(value)\r\n 
trigrams = laf.get_ngram_counts(post, 3, 6, 100)\r\n for key, value in trigrams.items():\r\n feature_output += ',' + str(value)\r\n # If first sample, write the file headers first\r\n if not headers:\r\n feature_headers = 'Post_ID,Label,Has_Img,Post_Creation_Hour,Post_Title_Begins_With_Interrogative,' \\\r\n 'Article_Title_Begins_With_Interrogative,Post_Title_Begins_With_Number,' \\\r\n 'Article_Title_Begins_With_Number,Post_Title_Contains_Determiners,Post_Title_Contains_Possesives,Article_Title_Contains_Determiners,' \\\r\n 'Article_Title_Contains_Possesives,Post_Title_Contains_Hyperbolics,Article_Title_Contains_Hyperbolics,Post_Title_Contains_Common_Phrases,' \\\r\n 'Article_Title_Contains_Common_Phrases,Post_Title_Contains_Slang,Article_Title_Contains_Slang,Post_Title_Sentiment,Article_Title_Sentiment,' \\\r\n 'Chars_Post_Text,Chars_Article_Title,Chars_Article_Description,Chars_Article_Keywords,' \\\r\n 'Diff_Char_Post_Title_Article_Title,Diff_Char_Post_Title_Article_Descr,Diff_Char_Post_Title_Article_Keywords,' \\\r\n 'Diff_Char_Article_Title_Article_Descr,Diff_Char_Article_Title_Article_Keywords,Diff_Char_Article_Descr_Article_Keywords,' \\\r\n 'Ratio_Char_Post_Title_Article_Title,Ratio_Char_Post_Title_Article_Descr,Ratio_Char_Post_Title_Article_Keywords,' \\\r\n 'Ratio_Char_Article_Title_Article_Descr,Ratio_Char_Article_Title_Article_Keywords,Ratio_Char_Article_Descr_Article_Keywords,' \\\r\n 'Words_Post_Text,Words_Article_Title,Words_Article_Description,Words_Article_Keywords,Diff_Words_Post_Title_Article_Title,' \\\r\n 'Diff_Words_Post_Title_Article_Descr,Diff_Words_Post_Title_Article_Keywords,Diff_Words_Article_Title_Article_Descr,' \\\r\n 'Diff_Words_Article_Title_Article_Keywords,Diff_Words_Article_Descr_Article_Keywords,Ratio_Words_Post_Title_Article_Title,' \\\r\n 'Ratio_Words_Post_Title_Article_Descr,Ratio_Words_Post_Title_Article_Keywords,Ratio_Words_Article_Title_Article_Descr,' \\\r\n 'Ratio_Words_Article_Title_Article_Keywords,Ratio_Words_Article_Descr_Article_Keywords,Post_Title_No_@,Post_Title_No_#,' \\\r\n 'Post_Title_No_Exclam,Article_Title_No_Exclam,Post_Title_No_Question,Article_Title_No_Question,Post_Title_No_Abbrev,' \\\r\n 'Article_Title_No_Abbrev,Post_Title_No_Ellipses,Article_Title_No_Ellipses,Post_Title_No_Dots,Article_Title_No_Dots'\r\n for key, value in counts_post_title_POS.items():\r\n feature_headers += ',Post_Title_' + key\r\n for key, value in counts_article_title_POS.items():\r\n feature_headers += ',Article_Title_' + key\r\n feature_headers += ',Post_Title_NNPV,Post_Title_NNPT'\r\n feature_headers += ',Article_Title_NNPV,Article_Title_NNPT'\r\n for key, value in unigrams.items():\r\n feature_headers += ',' + key\r\n for key, value in bigrams.items():\r\n feature_headers += ',' + key\r\n for key, value in trigrams.items():\r\n feature_headers += ',' + key\r\n # Writing file headlines\r\n with open('dataset/features.csv', encoding='utf8', mode='w',\r\n newline='') as features_file:\r\n features_writer = csv.writer(features_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n features_writer.writerow([feature_headers])\r\n headers = True\r\n with open('dataset/features.csv', encoding='utf8', mode='a', newline='') as features_file:\r\n features_writer = csv.writer(features_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n features_writer.writerow([feature_output])\r\n\r\n\r\ndef plot_ngram_distribution():\r\n with open('dataset/NNP/3-gram_frequencies.csv', 'r', encoding='utf8') as csvfile:\r\n read_csv = 
csv.reader(csvfile, delimiter=',')\r\n headers = False\r\n ngram_counts = []\r\n for ngram in read_csv:\r\n if not headers:\r\n headers = True\r\n continue\r\n ngram_counts.append(int(ngram[1]))\r\n ngram_counts.sort(reverse=True)\r\n plt.hist(ngram_counts, bins=range(0, 6500, 3))\r\n plt.yscale('log')\r\n plt.xscale('log')\r\n plt.title('Trigram Distribution')\r\n plt.xlabel('Ngram Count')\r\n plt.ylabel('Bin Count')\r\n plt.show()\r\n plt.clf()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n #plot_ngram_distribution()\r\n main()\r\n\r\nimport pandas as pd\r\nfrom classification import NaiveBayes\r\nfrom classification import MaximumEntropy\r\nfrom classification import RandomForest\r\nfrom classification import XGBoost as xgb\r\n\r\nfiles = [\r\n # \"final_feature_vectors_20.csv\",\r\n # \"final_feature_vectors_40.csv\",\r\n # \"final_feature_vectors_60.csv\",\r\n # \"final_feature_vectors_80.csv\",\r\n # \"final_feature_vectors_120.csv\",\r\n \"final_feature_vectors_160.csv\",\r\n # \"final_feature_vectors_200.csv\",\r\n # \"features_no_ngrams.csv\",\r\n]\r\n\r\nme = MaximumEntropy.MaximumEntropy()\r\nnb = NaiveBayes.NaiveBayes()\r\nrf = RandomForest.RandomForest()\r\nxg = xgb.XGBoost()\r\n\r\nfor file in files:\r\n df = pd.read_csv(\"dataset/\" + file)\r\n X = df.loc[:, ~df.columns.isin(['Label', 'Post_ID'])].values\r\n y = df['Label'].values\r\n\r\n nb.train(X, y)\r\n # results = nb.cross_validation(X, y)\r\n # print(results)\r\n\r\n me.train(X, y)\r\n # print(svm.cross_validation(X, y))\r\n # svm.optimize_params(X, y)\r\n\r\n # rf = RandomForest.RandomForest()\r\n # rf.train(X, y)\r\n # print(rf.cross_validation(X, y))\r\n\r\n rf.train(X, y)\r\n\r\n xg.train(X, y)\r\n\r\n\r\ndf = pd.read_csv(\"dataset/leftout_test.csv\")\r\nX = df.loc[:, ~df.columns.isin(['Label', 'Post_ID'])].values\r\ny = df['Label'].values\r\n\r\nprint(nb.predict(X, y, True))\r\nprint(me.predict(X, y, True))\r\nprint(rf.predict(X, y, True))\r\nprint(xg.predict(X, y, True))\r\n","repo_name":"agamvrinos/NLP-Clickbait-detection","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3098903613","text":"import pickle\r\nimport zipfile\r\nimport sys, os\r\nimport time\r\nsys.path.append(os.path.abspath(__file__).replace('evaluation/collect_3DPW_results.py',''))\r\nsys.path.append(os.path.abspath(__file__).replace('lib/evaluation/collect_3DPW_results.py',''))\r\nfrom base import *\r\nnp.set_printoptions(precision=2, suppress=True)\r\n\r\nclass Submit(Base):\r\n def __init__(self):\r\n super(Submit, self).__init__()\r\n self.pw3d_path = os.path.join(args().dataset_rootdir, '3DPW')\r\n self.set_smpl_parent_tree()\r\n self._build_model_()\r\n self.collect_3DPW_layout()\r\n\r\n self.loader_val = self._create_single_data_loader(dataset='pw3d',train_flag=False,split=args().dataset_split, mode='normal') # val batch_sampler 105 batch_size=16 pictures=1669\r\n self.output_dir = args().output_dir\r\n print('Initialization finished!')\r\n # self.model_path = '/home/ssw/code/romp/trained_models/ROMP_HRNet32_V1.pkl'\r\n\r\n save_dir = os.path.join(self.output_dir, 'R_' + os.path.basename(self.model_path).replace('.pkl', ''))\r\n\r\n # time_stamp = time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime(int(round(time.time() * 1000)) / 1000))\r\n # save_dir = os.path.join(self.output_dir, os.path.basename(self.model_path).replace('.pkl','') + time_stamp)#time.strftime(\"results_%Y-%m-%d_%H:%M:%S\", 
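# ---- Illustrative sketch, not part of the dataset record above ----
# The clickbait main.py pre-joins every feature into one comma-separated
# string and calls writerow([row_string]), so csv.writer sees a single cell
# and quotes the whole row. Passing the values as a list keeps one value per
# column and lets the writer handle quoting and escaping:
import csv

header = ["Post_ID", "Label", "Has_Img"]
rows = [["abc123", 1, 0], ["def456", 0, 1]]

with open("features_demo.csv", "w", newline="", encoding="utf8") as f:
    writer = csv.writer(f)
    writer.writerow(header)  # one list element per column
    writer.writerows(rows)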
time.localtime())\r\n final_results_path = os.path.join(save_dir,'results.zip') # 'home/ssw/code/romp/output/R_ROMP_HRNet32_V1/results.zip'\r\n print('final results will be saved to ',final_results_path)\r\n if not os.path.exists(final_results_path):\r\n self.evaluation()\r\n self.pack_results(save_dir)\r\n else:\r\n print(final_results_path, 'already exists. Going direct to evaluation')\r\n self.run_official_evaluation(save_dir)\r\n\r\n def collect_3DPW_layout(self):\r\n self.layout = {}\r\n root_dir = os.path.join(self.pw3d_path,\"sequenceFiles/\") # '/home/ssw/code/dataset/3DPW/sequenceFiles/'\r\n for split in os.listdir(root_dir):\r\n for action in os.listdir(os.path.join(root_dir,split)):\r\n action_name = action.strip('.pkl') # 'downtown_arguing_00'\r\n label_path = os.path.join(root_dir,split,action) # '/home/ssw/code/dataset/3DPW/sequenceFiles/test/downtown_arguing_00.pkl'\r\n raw_labels = read_pickle(label_path) # 读取原来标注的信息\r\n frame_num = len(raw_labels['img_frame_ids'])\r\n subject_num = len(raw_labels['poses'])\r\n self.layout[action_name] = [split, subject_num, frame_num] # 这个行为数据 = 什么类型的数据集如train,test,val, 有几个人,帧的数量\r\n\r\n def set_smpl_parent_tree(self):\r\n parents = torch.Tensor([-1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16,17, 18, 19, 20, 21])\r\n self.sellect_joints = [0,1,2,4,5,16,17,18,19]\r\n self.parent_tree = []\r\n for idx, joint_idx in enumerate(self.sellect_joints):\r\n parent = []\r\n while joint_idx>-1:\r\n parent.append(joint_idx)\r\n joint_idx = int(parents[joint_idx])\r\n self.parent_tree.append(parent)\r\n\r\n @torch.no_grad()\r\n def evaluation(self):\r\n eval_model = nn.DataParallel(self.model.module).eval()\r\n MPJPE, PAMPJPE, PCK3D, MPJAE = [],[],[],[]\r\n self.results = {}\r\n self.results_save = {}\r\n \r\n start_time = time.time()\r\n for test_iter,meta_data in enumerate(self.loader_val):\r\n ds_org, imgpath_org = get_remove_keys(meta_data,keys=['data_set','imgpath'])\r\n meta_data['batch_ids'] = torch.arange(len(meta_data['params']))\r\n meta_data_org = meta_data.copy()\r\n if self.model_precision=='fp16':\r\n with autocast():\r\n outputs = eval_model(meta_data, **self.eval_cfg)\r\n else:\r\n outputs = eval_model(meta_data, **self.eval_cfg)\r\n\r\n outputs['meta_data']['data_set'], outputs['meta_data']['imgpath'] = reorganize_items([ds_org, imgpath_org], outputs['reorganize_idx'].cpu().numpy())\r\n meta_data = outputs['meta_data']\r\n params_pred = outputs['params']\r\n pose_pred = torch.cat([params_pred['global_orient'],params_pred['body_pose']],1).cpu()\r\n shape_pred = params_pred['betas'].cpu()\r\n # kp3d_smpl = outputs['joints_smpl24']\r\n kp3d_smpl = outputs['j3d']\r\n subject_ids = meta_data['subject_ids']\r\n imgpaths = meta_data['imgpath']\r\n \r\n kp3d_smpl, pose_pred = kp3d_smpl.cpu(), pose_pred.cpu()\r\n for idx,(imgpath, subject_id) in enumerate(zip(imgpaths, subject_ids)):\r\n imgpath = imgpath.replace(os.path.join(self.pw3d_path,'imageFiles/'),'')\r\n if imgpath not in self.results:\r\n self.results[imgpath] = {}\r\n self.results[imgpath][subject_id] = [pose_pred[idx], shape_pred[idx], kp3d_smpl[idx,:24]]\r\n if test_iter%60==0:\r\n print('Processing {}/{}'.format(test_iter, len(self.loader_val)))\r\n\r\n print('Runtime: {},per sample {}'.format(time.time()-start_time, (time.time()-start_time)/50534))\r\n\r\n def pack_results(self, save_dir):\r\n os.makedirs(save_dir, exist_ok=True)\r\n results = {}\r\n for split in ['train','validation','test']:\r\n os.makedirs(os.path.join(save_dir,split), exist_ok=True)\r\n 
results[split] = {}\r\n for action_name, [split, subject_num, frame_num] in self.layout.items():\r\n results[split][action_name] = [np.zeros((subject_num, frame_num, 24,3)), np.zeros((subject_num, frame_num, 82)), np.zeros((subject_num, frame_num, 9,3,3))]\r\n\r\n for imgpath in self.results:\r\n action_name, frame_id = imgpath.split('/')[0],int(imgpath.split('/')[1].replace('image_','').strip('.jpg'))\r\n for subject_id, [pose_pred, shape_pred, kp3d_smpl] in self.results[imgpath].items():\r\n split, subject_num, frame_num = self.layout[action_name] # ['validation', 1, 544] KeyError: 'downtown_bar_00' subject_num 多少个人\r\n assert frame_id= len(list1):\r\n return list1\r\n elif list1[k+1][1] == list1[k][1]:\r\n k +=1\r\n else:\r\n break\r\n return list1[0:k+1]\r\n\r\n\r\ndef leaderboard_cyc_seq(N:int, experimental_spec:list):\r\n parent_mass = max(experimental_spec)\r\n # every peptide will be stored with its information(sequence, score, total mass)\r\n leaderboard = [('', 0, 0,)]\r\n leaderpeptide = ('', 0, 0)\r\n while leaderboard:\r\n candidate_peptides = []\r\n #branching\r\n for peptide in leaderboard:\r\n seq, score_of_pep, mass_of_pep = peptide\r\n for aa in amino_acids:\r\n candidate_peptide = seq+aa\r\n sub_spec_of_cand_pep = subspectrum_generator(candidate_peptide)\r\n mass_of_cand_pep = max(sub_spec_of_cand_pep)\r\n candidate_peptide_info = (candidate_peptide,score(sub_spec_of_cand_pep,experimental_spec),\r\n mass_of_cand_pep)\r\n if candidate_peptide_info[2] == parent_mass:\r\n if candidate_peptide_info[1] > leaderpeptide[1]:\r\n leaderpeptide = candidate_peptide_info\r\n #bounding 1\r\n elif candidate_peptide_info[2] < parent_mass:\r\n candidate_peptides.append(candidate_peptide_info)\r\n #bounding 2\r\n leaderboard = trim(candidate_peptides, N)\r\n\r\n return leaderpeptide\r\n\r\ninput = open(\"tryme.txt\",\"r\") #this file should include the experimental spectrum of a peptide as integers\r\nSpectrum = [int(item) for item in input.read().split(\" \")]\r\nN = 1000 #how many peptides you want to keep during trimming.\r\n# Bigger numbers will take more time, smaller numbers might exclude a good candidate. 
\"Choose wisely\"\r\n\r\nif __name__ == \"__main__\":\r\n start = timeit.default_timer()\r\n results = leaderboard_cyc_seq(N,Spectrum)\r\n print('-'.join(map(str, results)))\r\n print('')\r\n stop = timeit.default_timer()\r\n print(stop - start)\r\n\r\n\r\n","repo_name":"aslicinar0/Coursera-Bioinformatics-Specialization","sub_path":"LeaderboardCyclopeptideSequencing.py","file_name":"LeaderboardCyclopeptideSequencing.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6350199018","text":"import torch\r\n\r\nimport numpy as np\r\nimport os\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport json\r\nimport torchvision.transforms as trans\r\nimport cv2\r\nimport imageio\r\n\r\ndef getTinyLegotensor(path):\r\n try:\r\n data=np.load(path) \r\n print(\"read file done\")\r\n except:\r\n print(\"read file error\") \r\n\r\n # %%\r\n images=data['images']\r\n poses=data['poses']\r\n focal=data['focal']\r\n\r\n # %%\r\n ImgTensor=torch.tensor(images)\r\n posesTensor=torch.tensor(poses)\r\n focalTensor=torch.tensor(focal)\r\n\r\n ImgTensor=ImgTensor[:100,:,:,:]\r\n posesTensor=posesTensor[:100,:,:]\r\n return ImgTensor,posesTensor,focalTensor\r\n\r\n\r\n## basepath ~/ \r\nfrom tqdm import tqdm\r\ndef getdatablender(basedir, half_res=8,skip=1,datatype='train'):\r\n print(\"reading data\")\r\n splits = [datatype]\r\n metas = {}\r\n for s in splits:\r\n with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:\r\n metas[s] = json.load(fp)\r\n\r\n all_imgs = []\r\n all_poses = []\r\n counts = [0]\r\n for s in splits:\r\n meta = metas[s]\r\n imgs = []\r\n poses = []\r\n \r\n for frame in tqdm(meta['frames'][::skip]):\r\n fname = os.path.join(basedir, frame['file_path'] + '.png')\r\n imgs.append(imageio.imread(fname))\r\n poses.append(np.array(frame['transform_matrix']))\r\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\r\n poses = np.array(poses).astype(np.float32)\r\n counts.append(counts[-1] + imgs.shape[0])\r\n all_imgs.append(imgs)\r\n all_poses.append(poses)\r\n \r\n # i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]\r\n \r\n imgs = np.concatenate(all_imgs, 0)\r\n poses = np.concatenate(all_poses, 0)\r\n \r\n H, W = imgs[0].shape[:2]\r\n camera_angle_x = float(meta['camera_angle_x'])\r\n focal = .5 * W / np.tan(.5 * camera_angle_x)\r\n \r\n # render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]],0)\r\n\r\n if half_res:\r\n\r\n H = H//half_res\r\n W = W//half_res\r\n focal = focal/float(half_res)\r\n imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))\r\n for i, img in enumerate(imgs):\r\n imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)\r\n imgs = imgs_half_res\r\n\r\n\r\n # imgs = imgs[..., :3]*imgs[..., -1:] + (1.-imgs[..., -1:])\r\n # else:\r\n\r\n # imgs = imgs[..., :3]*imgs[..., -1:] + (1.-imgs[..., -1:])\r\n imgs = imgs[...,:3]\r\n # poses[:,:3,1:3]*=-1. 
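# ---- Illustrative sketch, not part of the dataset record above ----
# The leaderboard sequencer calls score(sub_spec, experimental_spec), whose
# definition is not visible in this chunk. A common way such a shared-peaks
# score is written is a multiset intersection; this is an assumption about
# the missing helper, not the author's code:
from collections import Counter

def shared_peaks_score(theoretical, experimental):
    """Count masses present in both spectra, honoring multiplicity."""
    overlap = Counter(theoretical) & Counter(experimental)
    return sum(overlap.values())

print(shared_peaks_score([0, 113, 114, 227], [0, 113, 113, 227]))  # 3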
\r\n    print(\"read done!\")\r\n\r\n    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]],0)\r\n    \r\n    return torch.tensor(imgs), torch.tensor(poses), torch.tensor(focal)\r\n\r\ntrans_t = lambda t : torch.tensor([\r\n    [1,0,0,0],\r\n    [0,1,0,0],\r\n    [0,0,1,t],\r\n    [0,0,0,1],\r\n], dtype=torch.float32)\r\n\r\nrot_phi = lambda phi : torch.tensor([\r\n    [1,0,0,0],\r\n    [0,np.cos(phi),-np.sin(phi),0],\r\n    [0,np.sin(phi), np.cos(phi),0],\r\n    [0,0,0,1],\r\n], dtype=torch.float32)\r\n\r\nrot_theta = lambda th : torch.tensor([\r\n    [np.cos(th),0,-np.sin(th),0],\r\n    [0,1,0,0],\r\n    [np.sin(th),0, np.cos(th),0],\r\n    [0,0,0,1],\r\n], dtype=torch.float32)\r\n\r\ndef pose_spherical(theta, phi, radius):\r\n    c2w = trans_t(radius)\r\n    c2w = rot_phi(phi/180.*np.pi) @ c2w\r\n    c2w = rot_theta(theta/180.*np.pi) @ c2w\r\n    c2w = torch.tensor([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]],dtype=torch.float32) @ c2w\r\n    return c2w\r\n\r\n\r\n","repo_name":"pulangk97/nerfPytorch","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"37567078906","text":"import torchvision.models as models\nimport torch.nn as nn\n\n\nclass RestNet(nn.Module):\n    def __init__(self, pretrain):\n        super(RestNet, self).__init__()\n        resnet = models.resnet152(pretrained=pretrain)\n        # for param in resnet.parameters():\n        #     param.requires_grad = False\n        # resnet.conv1 = nn.Conv2d(3, 256, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        # resnet.bn1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        # resnet.layer1[0].conv1 = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n        # resnet.layer1[0].downsample[0] = nn.Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n        # resnet.layer1[0].bn1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        resnet.fc = nn.Linear(2048, 1)\n        self.resnet = resnet\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        x = self.resnet(x)\n        return self.sigmoid(x)\n","repo_name":"SungWeiTseng/training-crouse-CatsDogs","sub_path":"script/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3004086578","text":"import sqlite3\r\nimport os\r\n\"\"\"\r\n    Initializes an instance of the Pedido class with the provided data.\r\n\r\n    Arguments:\r\n    codigo_pedido (int): The order code.\r\n    codigo_cliente (str): The code of the client placing the order.\r\n    codigo_producto (str): The code of the requested product.\r\n    cantidad (int): The requested quantity of the product.\r\n    \"\"\"\r\n# Pedido class that handles the operations related to orders\r\nclass Pedido:\r\n    def __init__(self, codigo_pedido, codigo_cliente, codigo_producto, cantidad):\r\n        self.codigo_pedido = codigo_pedido\r\n        self.codigo_cliente = codigo_cliente\r\n        self.codigo_producto = codigo_producto\r\n        self.cantidad = cantidad\r\n# ManejoPedidos class for the operations related to handling clients and their orders\r\nclass ManejoPedidos:\r\n    def __init__(self):\r\n        self.pedidos = []\r\n        self.db = Database('mi_base_de_datos.db')\r\n        self.db.crear_tabla_pedidos()\r\n\r\n    def crear_pedido(self):\r\n        codigo_pedido = int(input(\"Ingrese el código del pedido: \"))\r\n        codigo_cliente = 
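# ---- Illustrative sketch, not part of the dataset record above ----
# model.py above swaps ResNet-152's final fc layer for a single-logit head
# (nn.Linear(2048, 1)). Written against fc.in_features, the same pattern works
# for any ResNet depth; resnet18 is used here only to keep the sketch light.
import torch
import torch.nn as nn
import torchvision.models as models

model = models.resnet18()  # default weights argument, works across torchvision versions
model.fc = nn.Linear(model.fc.in_features, 1)  # binary head, as in RestNet above

x = torch.randn(2, 3, 224, 224)
print(torch.sigmoid(model(x)).shape)  # torch.Size([2, 1])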
str(input(\"Ingrese el código del cliente: \"))\r\n        codigo_producto = str(input(\"Ingrese el código del producto: \"))\r\n        cantidad = int(input(\"Ingrese la cantidad: \"))\r\n\r\n        pedido = Pedido(codigo_pedido, codigo_cliente, codigo_producto, cantidad)\r\n        self.db.crear_pedido(codigo_pedido, codigo_cliente, codigo_producto, cantidad)\r\n        print(\"Pedido creado correctamente.\")\r\n        \r\n        # Generate the ticket contents\r\n        ticket_content = f\"\"\"TICKET DE PEDIDO\r\n        --------------------\r\n        Código del pedido: {codigo_pedido}\r\n        Código del cliente: {codigo_cliente}\r\n        Código del producto: {codigo_producto}\r\n        Cantidad: {cantidad}\r\n        --------------------\r\n        ¡Gracias por su pedido!\r\n\r\n        \"\"\"\r\n\r\n        # Path and file name for the ticket file\r\n        ticket_path = f\"tickets/pedido_{codigo_pedido}.txt\"\r\n\r\n        # Save the ticket to a text file\r\n        with open(ticket_path, \"a\") as file:\r\n            file.write(ticket_content)\r\n\r\n        print(\"Pedido creado correctamente.\")\r\n\r\n    def cancelar_pedido(self):\r\n        codigo_pedido = input(\"Ingrese el código del pedido que desea cancelar: \")\r\n        self.db.cancelar_pedido(codigo_pedido)\r\n        print(\"Pedido cancelado correctamente.\")\r\n# Database class that handles persistence and manipulation of the order data\r\nclass Database:\r\n    def __init__(self, nombre_archivo):\r\n        self.conexion = sqlite3.connect(nombre_archivo)\r\n        self.cursor = self.conexion.cursor()\r\n\r\n    def crear_tabla_pedidos(self):\r\n        self.cursor.execute(\"CREATE TABLE IF NOT EXISTS pedidos (codigo_pedido TEXT, codigo_cliente TEXT, codigo_producto TEXT, cantidad INTEGER)\")\r\n        self.conexion.commit()\r\n\r\n    def crear_pedido(self, codigo_pedido, codigo_cliente, codigo_producto, cantidad):\r\n        consulta = \"INSERT INTO pedidos VALUES (?, ?, ?, ?)\"\r\n        datos = (codigo_pedido, codigo_cliente, codigo_producto, cantidad)\r\n        self.cursor.execute(consulta, datos)\r\n        self.conexion.commit()\r\n\r\n    def cancelar_pedido(self, codigo_pedido):\r\n        consulta = \"DELETE FROM pedidos WHERE codigo_pedido = ?\"\r\n        datos = (codigo_pedido,)\r\n        self.cursor.execute(consulta, datos)\r\n        self.conexion.commit()\r\n    \r\n    def consultar_pedido(self, codigo_pedido):\r\n        consulta = \"SELECT * FROM pedidos WHERE codigo_pedido = ?\"\r\n        datos = (codigo_pedido,)\r\n        self.cursor.execute(consulta, datos)\r\n        pedido = self.cursor.fetchone()\r\n\r\n        if pedido:\r\n            datos_pedido = {\r\n                'codigo_pedido': pedido[0],\r\n                'codigo_cliente': pedido[1],\r\n                'codigo_producto': pedido[2],\r\n                'cantidad': pedido[3]\r\n            }\r\n            return datos_pedido\r\n        else:\r\n            return None\r\n\r\n    def cerrar_conexion(self):\r\n        self.conexion.close()","repo_name":"ccastrov/Happy-Burguer","sub_path":"pedido.py","file_name":"pedido.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3478532553","text":"from typing import Optional\nimport selectors, socket, traceback\n\nfrom client.messages import TMessage\nclass TClient:\n\t\"\"\"\n\tInitialize the network client with information about the server to contact and the client port to use!\n\t\"\"\"\n\tdef __init__(self, server_host, server_port, client_port):\n\t\tself.client_port = client_port\n\t\tself.server_addr = (server_host, server_port)\n\n\tdef create_request(self, content):\n\t\tprint(f\"TClient.create_request(...)\")\n\t\treturn dict(\n\t\t\ttype=\"text/json\",\n\t\t\tencoding=\"utf-8\",\n\t\t\tcontent=content,\n\t\t)\n\n\t\"\"\"\n\tSet up the selector to use and connect to the 
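# ---- Illustrative sketch, not part of the dataset record above ----
# pedido.py keeps one long-lived sqlite3 connection and commits by hand. The
# same insert/delete pair with the connection as a context manager, which
# commits on success and rolls back on an exception:
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE pedidos (codigo_pedido TEXT, cantidad INTEGER)")

with con:  # implicit commit/rollback
    con.execute("INSERT INTO pedidos VALUES (?, ?)", ("P-1", 3))

with con:
    con.execute("DELETE FROM pedidos WHERE codigo_pedido = ?", ("P-1",))

print(con.execute("SELECT COUNT(*) FROM pedidos").fetchone())  # (0,)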
server\n\t\"\"\"\n\tdef connect(self):\n\t\tself.sel = selectors.DefaultSelector()\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t# Avoid bind() exception: OSError: [Errno 48] Address already in use\n\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\tself.sock.bind((\"\", self.client_port))\n\t\tself.sock.setblocking(False)\n\t\tself.sock.connect_ex(self.server_addr)\n\n\t\"\"\"\n\tInitialize sending a message\n\t\"\"\"\n\tdef send(self, request):\n\t\tprint(\"TClient.send(...)\")\n\t\tself.connect()\n\t\tevents = selectors.EVENT_WRITE\n\t\tmessage = TMessage(self.sel, self.sock, self.server_addr, self.create_request(request))\n\t\tself.sel.register(self.sock, events, data=message)\n\t\twhile True:\n\t\t\tevents = self.sel.select(timeout=None)\n\t\t\tfor key, mask in events:\n\t\t\t\tmessage = key.data\n\t\t\t\ttry:\n\t\t\t\t\tmessage.process_events(mask)\n\t\t\t\t\t# print(f'{message.response!r}')\n\t\t\t\texcept Exception:\n\t\t\t\t\tprint(\n\t\t\t\t\t\tf\"Main: Error: Exception for {message.addr}:\\n\"\n\t\t\t\t\t\tf\"{traceback.format_exc()}\"\n\t\t\t\t\t)\n\t\t\t\t\tmessage.close()\n\t\t\t# Check for a socket being monitored to continue.\n\t\t\tif not self.sel.get_map():\n\t\t\t\tbreak\n\t\tself.sel.close()","repo_name":"johnaagelv/EndlessWorlds","sub_path":"client/communicators.py","file_name":"communicators.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16963252387","text":"acertos=0\nprint(\"SUPER JOGO ULTRA LEGAL DE PAR OU IMPAR\")\nimport random\nLista = [\"PAR\" , \"IMPAR\"]\nwhile True:\n escolhido = random.choice(Lista)\n esc= str(input(\"PAR OU IMPAR? >>\")).upper()\n if esc == escolhido:\n print(\"Acertou, continue jogando\")\n acertos+=1\n else:\n print(\"ERROUUUUUUU :(\")\n break\nprint(f\"Ao todo o jogador teve {acertos} acertos consecutivos\")\n\n","repo_name":"Miguelmorassuti/Python-Exercicios","sub_path":"Pacote-Dowload/EX 68 - Par ou Impar.py","file_name":"EX 68 - Par ou Impar.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26972137222","text":"from sqlalchemy import *\nfrom migrate import *\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import mapper, sessionmaker\n\npre_meta = MetaData()\npost_meta = MetaData()\n\npre_man = Table('man', pre_meta,\n Column('id', INTEGER, primary_key=True, nullable=False),\n Column('full_name', VARCHAR(length=4096), nullable=False),\n)\nclass PreMan(object):\n def __init__(self, full_name):\n self.full_name = full_name\n\n\npost_man = Table('man', post_meta,\n Column('id', Integer, primary_key=True, nullable=False),\n Column('first_name', String(length=4096), nullable=False),\n Column('second_name', String(length=4096), nullable=False),\n Column('last_name', String(length=4096), nullable=False),\n)\nclass PostMan(object):\n def __init__(self, f_name, s_name, l_name):\n self.first_name = f_name\n self.second_name = s_name\n self.last_name = l_name\n\nmapper(PreMan, pre_man)\nmapper(PostMan, post_man)\n\n\ndef upgrade(migrate_engine):\n # Upgrade operations go here. 
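# ---- Illustrative sketch, not part of the dataset record above ----
# communicators.py drives one non-blocking socket through
# selectors.DefaultSelector. The event-loop shape, runnable without a real
# server by standing in a connected socketpair:
import selectors
import socket

sel = selectors.DefaultSelector()
a, b = socket.socketpair()
a.setblocking(False)
b.setblocking(False)

b.send(b"ping")  # pretend the server side wrote something
sel.register(a, selectors.EVENT_READ, data="client-side")

for key, mask in sel.select(timeout=1):
    if mask & selectors.EVENT_READ:
        print(key.data, "got", key.fileobj.recv(1024))  # client-side got b'ping'

sel.unregister(a)
sel.close()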
Don't create your own engine; bind\n # migrate_engine to your metadata\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n\n Session = sessionmaker(bind=migrate_engine)\n session = Session()\n pre_man_record = session.query(PreMan).filter(PreMan.full_name.contains('Ivan')).one()\n session.close()\n\n pre_meta.tables['man'].columns['full_name'].drop()\n post_meta.tables['man'].columns['first_name'].create()\n post_meta.tables['man'].columns['last_name'].create()\n post_meta.tables['man'].columns['second_name'].create()\n\n f_name, s_name, l_name = pre_man_record.full_name.split(' ')\n post_man_record = PostMan(f_name, s_name, l_name)\n session = Session()\n session.add(post_man_record)\n session.commit()\n session.close()\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n Session = sessionmaker(bind=migrate_engine)\n\n session = Session()\n post_man_record = session.query(PostMan).filter(PostMan.first_name == 'Ivan').one()\n session.close()\n\n pre_meta.tables['man'].columns['full_name'].create()\n post_meta.tables['man'].columns['first_name'].drop()\n post_meta.tables['man'].columns['last_name'].drop()\n post_meta.tables['man'].columns['second_name'].drop()\n\n session = Session()\n pre_man_record = PreMan(\"{0} {1} {2}\".format(post_man_record.first_name, post_man_record.second_name, post_man_record.last_name))\n session.add(pre_man_record)\n session.commit()\n session.close()\n\n","repo_name":"HoneyDevoli/sqlalchemy-migrate-data","sub_path":"001_migration.py","file_name":"001_migration.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43800890115","text":"# This problem was asked by Facebook.\n# Given a number in Roman numeral format, convert it to decimal.\n# The values of Roman numerals are as follows:\n# {\n# 'M': 1000,\n# 'D': 500,\n# 'C': 100,\n# 'L': 50,\n# 'X': 10,\n# 'V': 5,\n# 'I': 1\n# }\n# In addition, note that the Roman numeral system uses subtractive notation for numbers such as IV and XL.\n# For the input XIV, for instance, you should return 14.\n####\nclass RomanInt:\n def __init__(self):\n units = ['', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX']\n tens = ['', 'X', 'XX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'LXXX', 'XC']\n hundreds = ['', 'C', 'CC', 'CCC', 'CD', 'D', 'DC', 'DCC', 'DCCC', 'CM']\n thousands = ['', 'M', 'MM', 'MMM']\n stored = {}\n\n for i in range(1, 4000):\n cur = ''\n cur += thousands[i//1000]\n cur += hundreds[(i%1000)//100]\n cur += tens[(i%100)//10]\n cur += units[i%10]\n stored[cur] = i\n self.roman_to_int = stored\n####\nri = RomanInt()\nprint(ri.roman_to_int['MMMCMXCIX'])\n","repo_name":"whoophee/DCP","sub_path":"216.py","file_name":"216.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"70145807306","text":"# Lonely Integer\n\n# There are N integers in an array A. All but one integer occur in pairs. 
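# ---- Illustrative sketch, not part of the dataset record above ----
# The Daily Coding Problem solution above pre-generates every numeral from 1
# to 3999 and looks the input up. The direct subtractive-notation parse is
# the usual alternative: a symbol smaller than its right neighbor is
# subtracted instead of added.
VALUES = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}

def roman_to_int(s):
    total = 0
    for i, ch in enumerate(s):
        v = VALUES[ch]
        if i + 1 < len(s) and v < VALUES[s[i + 1]]:
            total -= v  # e.g. the I in IV, the X in XL
        else:
            total += v
    return total

print(roman_to_int("XIV"))        # 14
print(roman_to_int("MMMCMXCIX"))  # 3999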
Your task is to find out the number that occurs only once.\n\n\ndef lonelyinteger(a):\n    answer = 0\n    x = len(a)\n    for i in range(x):\n        answer ^= a[i]\n    return answer\nif __name__ == '__main__':\n    a = input()  # element count, unused by the XOR fold\n    b = list(map(int, input().strip().split(\" \")))\n    print(lonelyinteger(b))\n","repo_name":"defaults/competitive-programming","sub_path":"hackerrank/algorithms/Warmup/lonely_integer.py","file_name":"lonely_integer.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5120301509","text":"import json\nimport time\nfrom azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential, EventData\nimport os\nimport math\nimport datetime\nfrom datetime import timedelta\nimport random\n\nSTART_DATETIME = datetime.datetime(year=2019, month=7, day=5, hour=7)\n\nNUM_VEHICLES = 1024 #Make this a multiple of NUM_PARTITIONS\nNUM_PARTITIONS = 32\n\nNUM_VEHICLES_PER_PARTITION = int(NUM_VEHICLES / NUM_PARTITIONS)\n\nSIZE_OF_EVENT_DATA_BYTES = 0.17 # in KB\n\nSIZE_LIMIT_OF_EVENT_DATA_KB = 1024 # in KB\n\nNUM_EVENTS_PER_BATCH = int(SIZE_LIMIT_OF_EVENT_DATA_KB/SIZE_OF_EVENT_DATA_BYTES) #calculated based on 1 MB limit on event data(Event hub)\n\nNUM_EVENTS_PER_VEHICLE_PER_BATCH = int(NUM_EVENTS_PER_BATCH / NUM_VEHICLES_PER_PARTITION)\n\nTIME_BETWEEN_EVENTS_SECS = 1 #in seconds\nDURATION_FOR_DATA_GENERATION_MINS = 60 # in minutes\nTOTAL_EVENTS_PER_VEHICLE = math.ceil((DURATION_FOR_DATA_GENERATION_MINS * 60)/TIME_BETWEEN_EVENTS_SECS)\nTOTAL_EVENTS_FOR_ALL_VEHICLES = TOTAL_EVENTS_PER_VEHICLE * NUM_VEHICLES\n\nEVENTS_PER_ITERATION_OVER_ALL_PARTITIONS = NUM_EVENTS_PER_BATCH * NUM_PARTITIONS\nTOTAL_ITERATIONS_OVER_ALL_PARTITIONS = round(TOTAL_EVENTS_FOR_ALL_VEHICLES/EVENTS_PER_ITERATION_OVER_ALL_PARTITIONS) #Ideally this could be math.ceil to err in side of caution\n\nOUT_OF_GEOFENCE_PROB = 0.1\nGEOJSON_FILE_PATHS =[\"location_data/points/point_1.geojson\", \"location_data/points/point_2.geojson\"]\nglobal_event_id = 0 #keeps track of current event #\nevent_producer_list = []\ngeojson_list = []\n\nglobal_current_batch_num = 0 #only for display\n\ndef main():\n    init_event_hub()\n    init_point_geojsons()\n    print(\"TOTAL_ITERATIONS_OVER_ALL_PARTITIONS: \", TOTAL_ITERATIONS_OVER_ALL_PARTITIONS)\n    print(\"Total batches: \", TOTAL_EVENTS_FOR_ALL_VEHICLES/NUM_EVENTS_PER_BATCH)\n    curr_time = START_DATETIME\n    for curr_global_iter in range(TOTAL_ITERATIONS_OVER_ALL_PARTITIONS):\n        print(\"Global iter#: \", curr_global_iter)\n        for curr_partition in range(NUM_PARTITIONS):\n            vehicle_id_list_for_partition = list(range(curr_partition*NUM_PARTITIONS,(curr_partition*NUM_PARTITIONS) + NUM_PARTITIONS)) #could be precomputed for efficiency :)\n            curr_time = handleSingleBatch(curr_partition, vehicle_id_list_for_partition, curr_time)\n\ndef handleSingleBatch(partition_id, vehicle_id_list, curr_time):\n    eventBatchData = []\n    global global_current_batch_num\n    global global_event_id\n    print(\"Processing batch# :\", global_current_batch_num)\n    for _ in range(NUM_EVENTS_PER_VEHICLE_PER_BATCH):\n        for curr_vehicle_id in vehicle_id_list:\n            curr_point = random.choices(population=geojson_list, weights=[1.0-OUT_OF_GEOFENCE_PROB, OUT_OF_GEOFENCE_PROB], k=1)[0]\n            curr_point[\"vehicle_id\"] = curr_vehicle_id\n            curr_point[\"point_id\"] = global_event_id\n            curr_point[\"event_date\"] = str(curr_time)\n            eventBatchData.append(EventData(json.dumps(curr_point)))\n            global_event_id += 1\n            curr_time += 
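# ---- Illustrative sketch, not part of the dataset record above ----
# The lonely-integer loop above is the classic XOR fold: pairs cancel because
# a ^ a == 0 and XOR is commutative, so only the unpaired value survives.
# The same fold in two lines:
from functools import reduce
from operator import xor

print(reduce(xor, [1, 2, 3, 2, 1]))  # 3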
timedelta(seconds=TIME_BETWEEN_EVENTS_SECS)\n    event_producer_list[partition_id].send(eventBatchData)\n    global_current_batch_num += 1\n    return curr_time\n\ndef init_event_hub():\n    with open('config.json', 'r') as json_file:\n        config = json.load(json_file)\n    client = EventHubClient(host=config[\"EH_HOST\"], event_hub_path=config[\"EH_NAME\"],\n                            credential=EventHubSharedKeyCredential(config[\"EVENT_HUB_SAS_POLICY\"],\n                                                                   config[\"EVENT_HUB_SAS_KEY\"]),\n                            network_tracing=False)\n\n    for i in range(NUM_PARTITIONS):\n        event_producer_list.append(client.create_producer(partition_id=str(i)))\n\ndef init_point_geojsons():\n    for file_path in GEOJSON_FILE_PATHS:\n        with open(file_path, 'r') as json_file:\n            geojson_list.append(json.load(json_file))\n\nif __name__ == '__main__':\n    main()","repo_name":"rsethur/SparkStreamingGeoFencing","sub_path":"3. Large Scale Geofencing with Spark Streaming/GenerateTestData.py","file_name":"GenerateTestData.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"42866004905","text":"y = []\r\nt = []\r\nfor _ in range(3):\r\n    a, b, c = input().split()\r\n    a = int(a)\r\n    b = int(b[-2:])\r\n    y.append(b)\r\n    t.append((a, c))\r\nry = sorted(y)\r\nrt = sorted(t, key=lambda x: x[0], reverse=True)\r\nprint(''.join(map(str, ry)))\r\nprint(''.join([p[1][0] for p in rt]))","repo_name":"iblug/Baekjoon","sub_path":"백준/Bronze/28114. 팀명 정하기/팀명 정하기.py","file_name":"팀명 정하기.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21435342499","text":"import re\n\nt_file = open('Finnish.html', encoding = 'UTF-8')\nlines = t_file.read()\nt_file.close()\n\nclearing = re.compile('<[/]?[a-z]*>')\nlines = re.sub(u'<.*?>', u'', lines, flags = re.U)\np1 = re.compile('ISO 639-3\\D', re.IGNORECASE)\np2 = re.compile('\\n[a-z]{3}', re.IGNORECASE)\nm1 = p1.search(lines)\nm2 = p2.search(lines[m1.end():len(lines)])\nprint(m1.group(), m2.group())\n","repo_name":"NALM98/Homework-Course-1","sub_path":"Wikipedia Cards/ISO_finder.py","file_name":"ISO_finder.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10040096400","text":"\"\"\"\nChristina Wang\t8/4/22\tCSCI-UA 2 - 002\nAssignment #6 Problem #1b\n\"\"\"\n##picking the word\n\nimport random\n\n#open file for reading\nwords = open('words.txt', 'r')\n\n#read all data as one long string\nalldata=words.read()\n\n#cut apart string based on \\n\"\nsplitdata= alldata.split(\"\\n\")\n\nprint(\"The Wordle is \", end=\"\")\n\n#pick a random word from list\nnum=random.randint(0, len(splitdata)-1)\nwordle=str.upper(splitdata[num])\nprint(wordle)\n\n#close file\nwords.close()\n\n##format\n\nprint (\" WORDLE \")\nprint(\"------------------------------\")\n\n\n##bad guess\nguess=input(\"Guess the word: \")\nwhile True:\n    if len(guess) !=5:\n        print(\"You must enter a 5 letter word\")\n        guess=input(\"Guess the word: \")\n        continue\n    elif not guess.isalpha():\n        print(\"You must enter a 5 letter word\")\n        guess=input(\"Guess the word: \")\n        continue\n    elif guess not in splitdata:\n        print(\"Invalid word\")\n        guess=input(\"Guess the word: \")\n        continue\n\n##good guess\n    else:\n        break\n\n#check guess\ncount = 0\nfor letter in guess:\n    if letter == str.lower(wordle[count]):\n        print(str.upper(letter), \"*\", sep=\"\", end=\" \")\n        count += 1\n    elif letter.lower() in 

wordle.lower(): \n        print(str.upper(letter), \"?\", sep=\"\", end=\" \")\n        count += 1\n    else:\n        print (str.upper(letter), end= \" \")\n        count += 1\n","repo_name":"crw333168/cs-002","sub_path":"assign6/guess the wordle once/WangChristina_assign6_partb.py","file_name":"WangChristina_assign6_partb.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794611465","text":"class Solution:\r\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\r\n        for i, num in enumerate(numbers):\r\n            j = self.bisect_left(numbers, target-num, i+1)\r\n            if j < len(numbers) and num + numbers[j] == target:\r\n                return [i+1, j+1]\r\n        \r\n    def bisect_left(self, arr, target, startIdx):\r\n        ans = -1\r\n        i = startIdx\r\n        j = len(arr)-1\r\n        while i <= j:\r\n            mid = i + (j-i) // 2\r\n            if arr[mid] < target:\r\n                i = mid+1\r\n            elif arr[mid] > target:\r\n                j = mid-1\r\n            else:\r\n                ans = mid\r\n                j = mid-1\r\n        return ans if ans != -1 else i","repo_name":"novayo/LeetCode","sub_path":"0167_Two_Sum_II_-_Input_array_is_sorted/try_3.py","file_name":"try_3.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"43449207667","text":"import pytz , datetime\r\nyear=int(input())\r\nmonth=int(input())\r\nday=int(input())\r\nhour=int(input())\r\nminute=int(input())\r\nuser_time=datetime.datetime(year,month,day,hour,minute)\r\ncairo_timezone=pytz.timezone('Africa/Cairo')\r\ncairo_time=pytz.utc.localize(user_time).astimezone(cairo_timezone)\r\n\r\nlondon_timezone=pytz.timezone('UTC')\r\nlondon_time=pytz.utc.localize(user_time).astimezone(london_timezone)\r\n\r\ndelhi_timezone=pytz.timezone('Asia/Kolkata')\r\ndelhi_time=pytz.utc.localize(user_time).astimezone(delhi_timezone)\r\n\r\nSydney_timezone=pytz.timezone('Australia/Sydney')\r\nsydney_time=pytz.utc.localize(user_time).astimezone(Sydney_timezone)\r\n\r\n\r\nprint(\"Cairo time is \",cairo_time.isoformat())\r\nprint(\"London time is \",london_time.isoformat())\r\nprint(\"Delhi time is \",delhi_time.isoformat())\r\nprint(\"Sydney time is \",sydney_time.isoformat())","repo_name":"Anjali4rl/MINIPROJECS","sub_path":"timezone_converter.py","file_name":"timezone_converter.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69963934025","text":"\"\"\"\n5311 Utils for Analysis and Charting \n\"\"\"\nimport pandas as pd\nfrom siuba import *\nfrom calitp_data_analysis.sql import *\n\nimport altair as alt\nfrom calitp_data_analysis import calitp_color_palette as cp, styleguide\nfrom IPython.display import display  # used by multi_charts below\n\n'''\nCharting Functions \n'''\n#Labels\ndef labeling(word):\n    # Add specific use cases where it's not just first letter capitalized\n    LABEL_DICT = { \"prepared_y\": \"Year\",\n              \"dist\": \"District\",\n              \"nunique\":\"Number of Unique\",\n              \"project_no\": \"Project Number\"}\n    \n    if (word == \"mpo\") or (word == \"rtpa\"):\n        word = word.upper()\n    elif word in LABEL_DICT.keys():\n        word = LABEL_DICT[word]\n    else:\n        #word = word.replace('n_', 'Number of ').title()\n        word = word.replace('unique_', \"Number of Unique \").title()\n        word = word.replace('_', ' ').title()\n    \n    return word\n\n# Bar Chart\ndef basic_bar_chart(df, x_col, y_col, colorcol):\n    \n    chart = (alt.Chart(df)\n             .mark_bar()\n             .encode(\n                 x=alt.X(x_col, title=labeling(x_col), sort=('-y')),\n                 y=alt.Y(y_col, title=labeling(y_col)),\n                 color = alt.Color(colorcol,\n                              

scale=alt.Scale(\n                                  range=cp.CALITP_CATEGORY_BRIGHT_COLORS),\n                              legend=alt.Legend(title=(labeling(colorcol)))\n                          ))\n             .properties( \n                       title=(f\"{labeling(x_col)} by {labeling(y_col)}\"))\n    )\n\n    chart=styleguide.preset_chart_config(chart)\n    chart.save(f\"./chart_outputs/bar_{x_col}_by_{y_col}.png\")\n    \n    return chart\n\n\n# Scatter Chart\ndef basic_scatter_chart(df, x_col, y_col, colorcol):\n    \n    chart = (alt.Chart(df)\n             .mark_circle(size=60)\n             .encode(\n                 x=alt.X(x_col, title=labeling(x_col)),\n                 y=alt.Y(y_col, title=labeling(y_col)),\n                 color = alt.Color(colorcol,\n                                  scale=alt.Scale(\n                                      range=cp.CALITP_CATEGORY_BRIGHT_COLORS),\n                                  legend=alt.Legend(title=(labeling(colorcol)))\n                              ))\n             .properties( \n                 title = (f\"{labeling(x_col)} by {labeling(y_col)}\"))\n    )\n\n    chart=styleguide.preset_chart_config(chart)\n    chart.save(f\"./chart_outputs/scatter_{x_col}_by_{y_col}.png\")\n    \n    return chart\n\n\n# Line Chart\ndef basic_line_chart(df, x_col, y_col, colorcol):\n    \n    chart = (alt.Chart(df)\n             .mark_line()\n             .encode(\n                 x=alt.X(x_col, title=labeling(x_col)),\n                 y=alt.Y(y_col, title=labeling(y_col)),\n                 color = alt.Color(colorcol,\n                                  scale=alt.Scale(\n                                      range=cp.CALITP_CATEGORY_BRIGHT_COLORS),\n                                  legend=alt.Legend(title=(labeling(colorcol)))\n                              ))\n    ).properties( \n        title=f\"{labeling(x_col)} by {labeling(y_col)}\")\n\n    chart=styleguide.preset_chart_config(chart)\n    chart.save(f\"./chart_outputs/line_{x_col}_by_{y_col}.png\")\n    \n    return chart\n\n#Display multiple charts at once based on a fixed X axis\n#and multiple y values in a list, returning the last chart\ndef multi_charts(df, x_axis_col:str, cols_of_int: list):\n    for i in cols_of_int:\n        df_i = df[[x_axis_col, i]]\n        bar_chart_i = basic_bar_chart(df_i, x_axis_col, i, x_axis_col)\n        display(bar_chart_i)\n    return bar_chart_i\n\n\n\n'''\n\nAggregating Functions\n\n'''\n#Aggregate by fleet size, GTFS, vehicle ages.\ndef aggregation_one(df, grouping_col):\n    #adding up the vehicles 9+ and 15+ \n    df['vehicles_older_than_9']= df['_10_12'] + df['_13_15'] + df['_16_20'] + df['_21_25'] + df['_26_30'] + df['_31_60'] + df['_60plus']\n    df['vehicles_older_than_15']= df['_16_20'] + df['_21_25'] + df['_26_30'] + df['_31_60'] + df['_60plus']\n    #rename 0-9\n    df = df.rename(columns={'_0_9':'vehicles_0_to_9'}) \n    #pivot \n    df = df.groupby([grouping_col]).agg({'vehicles_older_than_9':'sum', 'vehicles_older_than_15':'sum', 'vehicles_0_to_9': 'sum'}) \n    #dividing the different bins by the total across all agencies\n    df['vehicles_percent_older_than_9'] = (df['vehicles_older_than_9']/sum(df['vehicles_older_than_9']))*100\n    df['vehicles_percent_older_than_15'] = (df['vehicles_older_than_15']/sum(df['vehicles_older_than_15']))*100\n    df['vehicles_percent_0_to_9'] = (df['vehicles_0_to_9']/sum(df['vehicles_0_to_9']))*100\n    #reset index\n    df = df.reset_index()\n    return df \n\n'''\nOther\n'''\n#Clean up titles on a dataframe\ndef cols_cleanup(df):\n    df.columns = (df.columns\n                  .str.replace('[_]', ' ')\n                  .str.title()\n                  .str.strip()\n                 )\n    return df","repo_name":"cal-itp/data-analyses","sub_path":"5310_5311/5311/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"74147447305","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os, os.path\nimport random\nimport sqlite3\nimport string\nimport json\nimport cherrypy\n\nDB_STRING = 

os.path.join(os.path.abspath(os.getcwd()),\"../data_collector/disney.sqlite\")\n\nDEFAULT_ATTRACTIONS=\"4,5,12,14,23,26,35,36,43,44,49,52,53\"\n\n__all__ = [\"DisneyWaitTimeGraph\"]\n\n\ndef dict_factory(cursor, row):\n    \"\"\"Convert a DB row into a dict\"\"\"\n    d = {}\n    for idx, col in enumerate(cursor.description):\n        d[col[0]] = row[idx]\n    return d\n\n\nclass DisneyWaitTimeGraph(object):\n    @cherrypy.expose\n    def index(self, daysPrevious=7):\n        \"\"\"Serve the index.html page\"\"\"\n        cherrypy.log(str(cherrypy.request.app.config))\n        return open(os.path.join(cherrypy.request.app.config['/']['tools.staticdir.root'],'public/html/index.html'))\n\n\n    @cherrypy.expose\n    @cherrypy.tools.accept(media='text/plain')\n    @cherrypy.tools.json_out()\n    def waittime(self, daysPrevious=7, attractions=DEFAULT_ATTRACTIONS, parkAverage=False):\n        \"\"\"waittime web service\"\"\"\n\n        #cherrypy.log(\"daysPrevious = \"+str(daysPrevious))\n        #cherrypy.log(\"attractions = \"+str(attractions))\n        if (not str(daysPrevious).isdigit()):\n            raise cherrypy.HTTPError(403)\n        if (not parkAverage and len(attractions) == 0):\n            raise cherrypy.HTTPError(403)\n\n        sql = \"\"\n\n        if not parkAverage:\n            sql = \" \".join(('select datetime, attraction_name, wait from v_wait_time',\n                   'where attraction_id IN ('+str(attractions)+') ',\n                   'and datetime > date(\"now\"'+(',\"-'+daysPrevious+' days\")' if not daysPrevious=='0' else ')'),\n                   'order by attraction_id, datetime;'))\n        else:\n            sql = \" \".join(('select datetime, park_name as name, avg(wait) as average from v_wait_time',\n                   'where wait <> 0 ',\n                   'and datetime > date(\"now\"'+(', \"-'+daysPrevious+' days\")' if not daysPrevious=='0' else ')'),\n                   'group by datetime, park_id',\n                   'order by park_id, datetime;'))\n\n        with sqlite3.connect(DB_STRING) as c:\n            c.row_factory = dict_factory\n            cur = c.cursor()\n            cur.execute(sql)\n            return cur.fetchall()\n\n","repo_name":"yasny/tdl-wait-time","sub_path":"site/webapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7648328537","text":"import boto3\nimport base64\n\nclass S3Client(object):\n    def __init__(self, config):\n        self._config = config\n        self.connect()\n\n    def connect(self):\n        self._s3 = boto3.resource('s3')\n        self._s3_client = boto3.client('s3')\n\n    def save64(self, filename, base64_file):\n        obj = self._s3.Object(self._config.s3_bucket_name, filename)\n        obj.put(Body=base64.b64decode(base64_file))\n        # note: LocationConstraint is None for buckets in us-east-1\n        location = self._s3_client.get_bucket_location(Bucket=self._config.s3_bucket_name)['LocationConstraint']\n        object_url = \"https://%s.s3.%s.amazonaws.com/%s\" % (self._config.s3_bucket_name, location, filename)\n        return object_url","repo_name":"MynorXico/react-crud","sub_path":"backend/dynamodb/build/lib/dynamodb/s3_client.py","file_name":"s3_client.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9767120490","text":"# Assignment\n# File: \n# Date: 12/7/22\n# By: Jacob Freidline\n# Section: 005\n# Team: 259\n# \n# ELECTRONIC SIGNATURE\n# Jacob Freidline\n#\n# The electronic signature above indicates the script\n# submitted for evaluation is my individual work, and I\n# have a general understanding of all aspects of its\n# development and execution.\n#\n# A BRIEF DESCRIPTION OF WHAT THE SCRIPT OR FUNCTION DOES\n# This script is a header template that will be used for \n# all your python files the rest of the semester.\n\nimport random\nx = []\nN = int(input('Select the number of 

times two dice are rolled: '))\nwhile N <= 0:\n    print('Error: Insert a positive number')\n    N = int(input('Select the number of times two dice are rolled: '))\nfor k in range(N):\n    Dice1 = random.randint(1,6)\n    Dice2 = random.randint(1,6)\n    S = Dice1+Dice2\n    x.append(S)\nfor k in range(N):\n    print(x[k])\n","repo_name":"Ert850/Code-Trial","sub_path":"JF/TASK/RandomList.py","file_name":"RandomList.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39488018492","text":"import os\nfrom os import path\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport time\nimport urllib.request\n\ndef CheckImgFolder():\n    if path.exists(\"Images\"):\n        pass\n    else:\n        mkdir_Images = \"./Images\" \n        os.makedirs(mkdir_Images, exist_ok=True)\n\ndef Get_Url(search):\n    url = f\"https://www.google.com/search?q={search}&source=lnms&tbm=isch\"\n    return url \n\ndef DownPage(driver):\n    SCROLL_PAUSE_TIME = 1\n\n    # Get scroll height\n    last_height = driver.execute_script(\"return document.body.scrollHeight\") # get the page height (JavaScript)\n\n    while True:\n        # Scroll down to bottom\n        driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\") \n        time.sleep(SCROLL_PAUSE_TIME) # wait while the page loads\n\n        new_height = driver.execute_script(\"return document.body.scrollHeight\")\n        if new_height == last_height: # when scrolled to the bottom with nothing left to load\n            try:\n                driver.find_element(By.CSS_SELECTOR, \".mye4qd\").click() # click the \"show more results\" button\n            except:\n                break\n        last_height = new_height\n\n\ndef Save_Img(driver, images):\n    img_count = 1\n    for img in images:\n        try:\n            img.click()\n            time.sleep(2)\n            wait = WebDriverWait(driver, 2)\n            img_path = \"\"\"//*[@id=\"Sva75c\"]/div[2]/div/div[2]/div[2]/div[2]/c-wiz/div/div/div/div[3]/div[1]/a/img[1]\"\"\"\n            imgUrl = driver.find_element(By.XPATH, img_path).get_attribute(\"src\")\n            urllib.request.urlretrieve(imgUrl, f\"Images/Img_{img_count}.jpg\")\n            img_count += 1\n        \n        except Exception as e:\n            print('error', img_count)\n\n    driver.close()\n    print(f'Downloaded {img_count} images.')\n\ndef main():\n    CheckImgFolder()\n    url = Get_Url(input('Enter an image to search for: '))\n\n    driver = webdriver.Chrome(\"chromedriver\")\n    driver.implicitly_wait(3)\n    driver.get(url)\n\n    DownPage(driver)\n\n    images = driver.find_elements(By.CSS_SELECTOR, \".rg_i.Q4LuWd\")\n    Save_Img(driver, images)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"soft0725/AI-Study","sub_path":"Crawling/Crawling.py","file_name":"Crawling.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9412772258","text":"import cv2\r\nfrom math import sqrt\r\nimport traceback\r\nimport time\r\n\r\nclass TestTracke():\r\n    def __init__(self):\r\n        self.log_level = 1\r\n        self.tracker_type='CSRT'\r\n        self.tracker=self.get_traccker(self.tracker_type)\r\n        self.trec_on=False\r\n        self.global_x=0\r\n        self.global_y=0\r\n        #self.cap = cv2.VideoCapture('baran.mp4')\r\n        self.cap = cv2.VideoCapture(0)\r\n        ##########################################\r\n        self.frame_rate=int(self.cap.get(5))\r\n\r\n\r\n\r\n\r\n        ###########################################\r\n        self.log_patch = \"log.txt\"\r\n        self.log_file = open(self.log_patch, 'w', encoding=\"utf-8\")\r\n        self.size_G=64\r\n        self.size_A=10\r\n        self.y_c=0\r\n        self.x_c=0\r\n        self.experemental_size=256\r\n\r\n        self.tracker_types = ['BOOSTING', 

'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\r\n\r\n        self.all_in_=0\r\n\r\n\r\n    def get_traccker(self,tracker_type):\r\n        if tracker_type == 'BOOSTING':\r\n            tracker_class = cv2.TrackerBoosting_create()\r\n        elif tracker_type == 'MIL':\r\n            tracker_class = cv2.TrackerMIL_create()\r\n        elif tracker_type == 'KCF':\r\n            tracker_class = cv2.TrackerKCF_create()\r\n        elif tracker_type == 'TLD':\r\n            tracker_class = cv2.TrackerTLD_create()\r\n        elif tracker_type == 'GOTURN':\r\n            tracker_class = cv2.TrackerGOTURN_create()\r\n        elif tracker_type == 'MEDIANFLOW':\r\n            tracker_class = cv2.TrackerMedianFlow_create()\r\n        elif tracker_type == 'MOSSE':\r\n            tracker_class = cv2.TrackerMOSSE_create()\r\n        elif tracker_type == \"CSRT\":\r\n            tracker_class = cv2.TrackerCSRT_create()\r\n        return tracker_class\r\n\r\n\r\n\r\n\r\n    def gstreamer_pipeline(self,\r\n            capture_width=1920,\r\n            capture_height=1080,\r\n            display_width=1280,\r\n            display_height=720,\r\n            framerate=30,\r\n            flip_method=2,\r\n    ):\r\n        return (\r\n            \"nvarguscamerasrc ! \"\r\n            \"video/x-raw(memory:NVMM), \"\r\n            \"width=(int)%d, height=(int)%d, \"\r\n            \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\r\n            \"nvvidconv flip-method=%d ! \"\r\n            \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\r\n            \"videoconvert ! \"\r\n            \"video/x-raw, format=(string)BGR ! appsink\"\r\n            % (\r\n                capture_width,\r\n                capture_height,\r\n                framerate,\r\n                flip_method,\r\n                display_width,\r\n                display_height,\r\n            )\r\n        )\r\n\r\n\r\n    def xywh2xy(self,box):\r\n        x_c=box[0]+box[2]/2\r\n        y_c=box[1]+box[3]/2\r\n        return int(x_c),int(y_c)\r\n\r\n    def drawBox(self,img, bbox):\r\n        x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])\r\n        cv2.rectangle(img, (x, y), ((x + w), (y + h)), (0, 0, 255), 2, 2)\r\n\r\n        x_black=x-int(self.size_A)\r\n        y_black=y-int(self.size_A)\r\n\r\n\r\n        cv2.rectangle(img, (x_black, y_black), ((x + w+self.size_A), (y + h+self.size_A)), (0, 0, 0), 1, 1)\r\n        cv2.putText(img, \"Tracking\", (x_black, y_black-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)\r\n\r\n\r\n\r\n\r\n        cv2.putText(img, \"Tracking\", (100, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\r\n\r\n        str_out = 'P(' + str(x) + ',' + str(y) + ')'\r\n        cv2.putText(self.img, str_out, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.3,\r\n                    (255, 255, 0), 1)\r\n        str_bottom='S(' + str(w) + ',' + str(h) + ')'\r\n        cv2.putText(self.img, str_bottom, (x, y+ h+ 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3,\r\n                    (255, 255, 0), 1)\r\n        #if x>img.shape[1] and y>img.shape[0]:\r\n        #print(x+w,y+h)\r\n        return img\r\n\r\n    def nothing(self,*arg):\r\n        pass\r\n\r\n\r\n    def set_tracker(self,val):\r\n        self.tracker_type = self.tracker_types[val]\r\n\r\n\r\n    def all_in(self,val):\r\n        self.all_in_ = val\r\n\r\n\r\n\r\n\r\n    def set_size_G(self,val):\r\n        self.size_G = max(val, 10)\r\n    def set_size_A(self,val):\r\n        self.size_A = max(val, 10)\r\n\r\n    def draw_box(self,event, x, y, flags, param):\r\n\r\n        if event == cv2.EVENT_MOUSEMOVE:\r\n            self.global_x = x\r\n            self.global_y = y\r\n        if event == cv2.EVENT_LBUTTONDOWN:\r\n            bbox = (self.global_x -int(self.size_G/2), self.global_y - int(self.size_G/2), int(self.size_G), int(self.size_G))\r\n            self.trec_on = True\r\n\r\n            ################# EXPERIMENT ####################\r\n            if not self.all_in_:\r\n                self.tracker.init(self.img, bbox)\r\n            else:\r\n                crop_img=self.crop_image(self.img)\r\n                crop_bbox=self.crop_bbox(bbox)\r\n                print(crop_img.shape)\r\n                self.tracker.init(crop_img, crop_bbox)\r\n\r\n        if event == cv2.EVENT_RBUTTONDOWN:\r\n            self.trec_on = False\r\n            self.tracker = 

self.get_traccker(self.tracker_type)\r\n        if event == cv2.EVENT_MOUSEWHEEL:\r\n            if flags>0:\r\n                if self.size_G<256:\r\n                    self.size_G+=4\r\n            else:\r\n                if self.size_G>10:\r\n                    self.size_G-=4\r\n\r\n\r\n\r\n    def crop_image(self,img):\r\n        crop_image=img[self.global_y-int(self.experemental_size/2):self.global_y+int(self.experemental_size/2), \\\r\n                   self.global_x-int(self.experemental_size/2):self.global_x+int(self.experemental_size/2)]\r\n        return crop_image\r\n\r\n    def crop_bbox(self,bbox):\r\n        x=int(self.experemental_size/2)\r\n        y=int(self.experemental_size/2)\r\n        w,h=int(bbox[2]), int(bbox[3])\r\n        return (x,y,w,h)\r\n\r\n\r\n    def crop_image_final(self,img):\r\n        if self.y_c!=0 and self.x_c!=0:\r\n            crop_image = img[self.y_c - int(self.experemental_size / 2):self.y_c + int(\r\n                self.experemental_size / 2),\r\n                         self.x_c - int(self.experemental_size / 2):self.x_c + int(\r\n                             self.experemental_size / 2)]\r\n        else:\r\n            crop_image = img[self.global_y - int(self.experemental_size / 2):self.global_y + int(\r\n                self.experemental_size / 2),\r\n                         self.global_x - int(self.experemental_size / 2):self.global_x + int(\r\n                             self.experemental_size / 2)]\r\n        return crop_image\r\n\r\n    def main(self):\r\n        prev=0\r\n        def back(*args):\r\n            pass\r\n        cv2.namedWindow(\"settings\") # create the settings window\r\n        cv2.createTrackbar('size_G', 'settings', 64, 256, self.set_size_G)\r\n        cv2.createTrackbar('size_A', 'settings', 10, 128, self.set_size_A)\r\n        cv2.createTrackbar('TRACKER_TYPE', 'settings', 7, 7, self.set_tracker)\r\n        cv2.createTrackbar('ALL_IN', 'settings', 0, 1, self.all_in)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n        self.log_patch = \"log.txt\"\r\n        self.log_file = open(self.log_patch, 'a+', encoding=\"utf-8\")\r\n\r\n        try:\r\n\r\n            success, frame = self.cap.read()\r\n\r\n            print(self.cap.isOpened())\r\n\r\n            width = int(self.cap.get(3))\r\n            height = int(self.cap.get(4))\r\n            frame_fps = int(self.cap.get(5))\r\n            new_patch_video = 'new_video.avi'\r\n            vw = cv2.VideoWriter(new_patch_video, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), frame_fps,\r\n                                 (width, height))\r\n\r\n            window_name = 'Tracking'\r\n            cv2.namedWindow(window_name)\r\n            cv2.setMouseCallback(window_name, self.draw_box)\r\n\r\n            while self.cap.isOpened():\r\n\r\n                #self.size_G = cv2.getTrackbarPos('size_G', 'settings')\r\n\r\n\r\n\r\n\r\n\r\n                timer = cv2.getTickCount()\r\n\r\n                time_elapsed = time.time() - prev\r\n\r\n\r\n                if time_elapsed > 1. 

/ self.frame_rate:\r\n success, self.img = self.cap.read()\r\n prev = time.time()\r\n if success:\r\n\r\n #crop_img=self.crop_image_final(self.img)\r\n #succes, bbox = self.tracker.update(crop_img)\r\n\r\n if not self.all_in_:\r\n succes, bbox = self.tracker.update(self.img)\r\n\r\n if succes:\r\n self.img =self.drawBox(self.img, bbox)\r\n self.x_c,self.y_c=self.xywh2xy(bbox)\r\n self.img[self.y_c,self.x_c]=[255,255,255]\r\n else:\r\n cv2.putText(self.img, \"Lost\", (100, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n else:\r\n crop_img=self.crop_image_final(self.img)\r\n succes, bbox = self.tracker.update(crop_img)\r\n if succes:\r\n self.img = self.drawBox(crop_img, bbox)\r\n #self.x_c, self.y_c = self.xywh2xy(bbox)\r\n #self.img[self.y_c, self.x_c] = [255, 255, 255]\r\n else:\r\n cv2.putText(self.img, \"Lost\", (100, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n\r\n\r\n\r\n cv2.rectangle(self.img, (15, 15), (200, 120), (255, 0, 255), 2)\r\n cv2.putText(self.img, \"Fps:\", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2);\r\n cv2.putText(self.img, \"Status:\", (20, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2);\r\n cv2.putText(self.img, \"Tracker:\", (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2);\r\n cv2.putText(self.img, self.tracker_type, (120, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2);\r\n\r\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);\r\n if fps > 60:\r\n myColor = (20, 230, 20)\r\n elif fps > 20:\r\n myColor = (230, 20, 20)\r\n else:\r\n myColor = (20, 20, 230)\r\n cv2.putText(self.img, str(int(fps)), (75, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, myColor, 2);\r\n #vw.write(img)\r\n\r\n if self.global_x != 0 and self.global_y != 0:\r\n cv2.rectangle(self.img, (self.global_x - int(self.size_G/2), self.global_y - int(self.size_G/2)), (self.global_x + int(self.size_G/2), self.global_y + int(self.size_G/2)), (255, 255, 255),\r\n 1)\r\n\r\n\r\n cv2.imshow(window_name, self.img)\r\n if cv2.waitKey(1) & 0xff == ord('q'):\r\n break\r\n # if 0==cv2.EVENT_MOUSEMOVE:\r\n # print(1111)\r\n\r\n\r\n else:\r\n break\r\n else:\r\n pass\r\n except Exception as e:\r\n print(traceback.format_exc(), file=self.log_file)\r\n return {\"status\": \"Error\", \"message\": str(e)}\r\n finally:\r\n self.log_file.close()\r\n\r\n\r\n return {\"status\": \"OK\"}\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test_tracker=TestTracke()\r\n test_tracker.main()\r\n\r\n\r\n\r\n","repo_name":"MsWik/tracker","sub_path":"class_tracker.py","file_name":"class_tracker.py","file_ext":"py","file_size_in_byte":11710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30784184723","text":"from odoo.addons.queue_job.tests.common import JobMixin\nfrom odoo.addons.shopinvader.tests.common import ProductCommonCase\n\n\nclass ActionServerCase(ProductCommonCase, JobMixin):\n def test_action_server_on_product_template(self):\n job_counter = self.job_counter()\n # we take the number of variant linked => the number of created jobs\n bindings = self.env[\"shopinvader.product\"].search([], limit=4)\n variant_length = len(bindings.mapped(\"shopinvader_variant_ids\"))\n action = self.env.ref(\n \"shopinvader_search_engine.action_recompute_shopinvader_product_on_template\"\n )\n action_context = action.with_context(\n active_model=\"product.template\",\n active_ids=bindings.mapped(\"record_id\").ids,\n )\n action_context.run()\n job = job_counter.search_created()\n 
self.assertEqual(job_counter.count_created(), 1)\n self.assertEqual(\n job.display_name,\n f\"Batch task of {variant_length} for recomputing shopinvader.variant json\",\n )\n\n def test_action_server_on_product_category(self):\n self.backend.bind_all_category()\n job_counter = self.job_counter()\n bindings = self.env[\"shopinvader.category\"].search([], limit=4)\n action = self.env.ref(\n \"shopinvader_search_engine.action_recompute_shopinvader_category\"\n )\n action_context = action.with_context(\n active_model=\"product.category\",\n active_ids=bindings.mapped(\"record_id\").ids,\n )\n action_context.run()\n job = job_counter.search_created()\n self.assertEqual(job_counter.count_created(), 1)\n self.assertEqual(\n job.display_name,\n \"Batch task of 4 for recomputing shopinvader.category json\",\n )\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_search_engine/tests/test_action_server.py","file_name":"test_action_server.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"32794303365","text":"from bokeh.plotting import figure\nfrom bokeh.models import LogColorMapper, ColumnDataSource, HoverTool, LinearColorMapper, ColorBar, Panel\nfrom bokeh.models.widgets import Select, Slider, Tabs, Select\nfrom bokeh.layouts import column, row, WidgetBox\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.application import Application\nfrom bokeh.transform import factor_cmap\n\ndef map_bar_tab(source):\n \n def create_data(source, year):\n df = source[source[\"year\"] == year]\n df = df.dropna()\n assert len(df) > 0, \"No data for this disease and year combination\"\n \n data = dict(\n state_name = df[\"state_name\"],\n x = df['lons'].values.tolist(),\n y = df['lats'].values.tolist(),\n incidence_per_capita = df[\"avg_incidence_per_week\"],\n total_cases = df[\"total_cases_per_year\"],\n avg_cases = df[\"avg_cases_per_week\"],\n year = df[\"year\"]\n )\n \n return ColumnDataSource(data)\n \n def cases_bar_plot(src):\n states = src.data[\"state_name\"]\n bar = figure(plot_width=550, plot_height=350, \n title=\"Total cases of Measles in the United States\",\n x_range=states, toolbar_location=None, tools=\"\", y_range = (0, 110000))\n bar.xgrid.grid_line_color = None\n bar.xaxis.axis_label = \"US States\"\n bar.xaxis.major_label_orientation = 1.2\n bar.yaxis.axis_label = \"Total measle cases\"\n\n bar.vbar(x='state_name', top='total_cases', width=1, source=src,\n line_color=\"white\", fill_color=\"#3d84f7\", \n hover_line_color=\"black\")\n bar.add_tools(HoverTool(tooltips=[(\"Average incidence per capita per week\", \"@incidence_per_capita\")]))\n\n return bar\n \n def incidence_bar_plot(src):\n states = src.data[\"state_name\"]\n bar = figure(plot_width=550, plot_height=350, \n title=\"Average weekly incidence of Measles in the United States\",\n x_range=states, toolbar_location=None, tools=\"\", y_range = (0, 62))\n bar.xgrid.grid_line_color = None\n bar.xaxis.axis_label = \"US States\"\n bar.xaxis.major_label_orientation = 1.2\n bar.yaxis.axis_label = \"Average weekly incidence per capita\"\n\n bar.vbar(x='state_name', top='incidence_per_capita', width=1, source=src,\n line_color=\"white\", fill_color=\"#3d84f7\", \n hover_line_color=\"black\")\n bar.add_tools(HoverTool(tooltips=[(\"Total Measles cases\", \"@total_cases\")]))\n\n return bar\n \n def build_map(src):\n\n TOOLS = \"pan,wheel_zoom,reset,hover,save\"\n colors = [\"#A7D49B\", \"#92AC86\", 
\"#696047\", \"#55251D\", \"#5A1807\"]\n        color_mapper = LinearColorMapper(palette=colors, low=src.data[\"incidence_per_capita\"].min(), high=src.data[\"incidence_per_capita\"].max())\n        p = figure(\n            title=\"US States\", tools=TOOLS,\n            x_axis_location=None, y_axis_location=None,\n            tooltips=[\n                (\"Name\", \"@state_name\"), (\"Average incidences per capita per week\", \"@incidence_per_capita{1.11}\"), \n                (\"Average # of cases per week\", \"@avg_cases{1.11}\"), (\"Total cases in year\", \"@total_cases{1.11}\")\n            ], plot_width=850, plot_height=650)\n        p.grid.grid_line_color = None\n        p.hover.point_policy = \"follow_mouse\"\n        p.patches('x', 'y', source=src, hover_line_color=\"black\",\n                  fill_color={'field': 'incidence_per_capita', 'transform': color_mapper},\n                  fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\n        return p\n    \n    def update_map(attr, old, new):\n        chosen_year = choose_year.value\n        new_data = create_data(source, chosen_year)\n        src.data.update(new_data.data)\n    \n    #Define Widgets\n    choose_year = Slider(start=1928, end=2002, value=1928, step = 1, title = \"Year\")\n    choose_year.on_change('value', update_map)\n    \n    #Select starting data\n    src = create_data(source, 1928)\n    \n    #Init plot and set layout\n    controls = WidgetBox(choose_year)\n    m = build_map(src)\n    b_cases = cases_bar_plot(src)\n    b_incidence = incidence_bar_plot(src)\n    layout = row(column(controls, m), column(b_cases, b_incidence))\n    tab = Panel(child = layout, title = \"Measles Map\")\n    return tab ","repo_name":"burtonrj/measles_map","sub_path":"dashboard_application/scripts/map_bar.py","file_name":"map_bar.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12137131624","text":"# Gravity calculator\n# Calculates the gravitational force between two objects in space\n\nG = 6.674 * (10 ** -11)  # Gravitational constant\n\n\n# Ask for the objects' masses and the distance between them\nm1 = float(input(\"Enter the mass of the first object (in kg): \"))\nm2 = float(input(\"Enter the mass of the second object (in kg): \"))\nr = float(input(\"Enter the distance between the objects (in meters): \"))\n\n# Compute the gravitational force\nF = G * m1 * m2 / r ** 2\n\n# Print the result\nprint(\"Gravitational force between the objects: \", F, \"N\")\n","repo_name":"fmajd6936/gravity_calculator.py","sub_path":"gravity_calculator.py","file_name":"gravity_calculator.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73840450824","text":"# -*- coding:utf-8 -*-\n__author__ = 'yangjian'\n\"\"\"\n\n\"\"\"\n\nfrom deeptables.datasets import dsutils\nfrom deeptables.models import deeptable\nfrom deeptables.utils import consts\nfrom hypernets.tabular import get_tool_box\n\n\nclass Test_DeepTable_CV:\n\n    @staticmethod\n    def load_data():\n        print(\"Loading datasets...\")\n        df = dsutils.load_adult().head(1000)\n\n        return df\n\n    def setup_class(self):\n        self.X = self.load_data()\n        self.y = self.X.pop(14)\n\n        conf = deeptable.ModelConfig(metrics=['AUC'],\n                                     apply_gbm_features=False,\n                                     auto_categorize=False,\n                                     auto_discrete=False)\n        self.dt = deeptable.DeepTable(config=conf)\n\n        self.X_train, \\\n        self.X_eval, \\\n        self.y_train, \\\n        self.y_test = get_tool_box(self.X).train_test_split(self.X, self.y, test_size=0.2, random_state=42)\n        self.oof_proba, self.eval_proba, self.test_proba = self.dt.fit_cross_validation(self.X_train,\n                                                                                        self.y_train,\n                                                                                        self.X_eval,\n                                                                                        num_folds=3,\n                                                                                        epochs=1,\n 

n_jobs=1)\n\n    def teardown_class(self):\n        print(\"Class teardown.\")\n\n    def test_evaluate(self):\n        result = self.dt.evaluate(self.X_eval, self.y_test)\n        print(\"show result: \")\n        print(result)\n        print(str(result))\n        assert result['AUC'] > 0\n\n    def test_best_model(self):\n        model = self.dt.best_model\n        # print(model.summary())\n        assert model\n\n    def test_oof_proba(self):\n        oof_predict = self.dt.proba2predict(self.oof_proba)\n        assert oof_predict.shape == (1000,)\n\n    def test_test_proba(self):\n        test_predict = self.dt.proba2predict(self.eval_proba)\n        assert test_predict.shape == (200,)\n\n    def test_predict_proba_all_model_avg(self):\n        proba = self.dt.predict_proba(self.X_eval, model_selector=consts.MODEL_SELECTOR_ALL)\n        assert proba.shape == (200, 1)\n\n    def test_predict_proba_all_model(self):\n        proba_all = self.dt.predict_proba_all(self.X_eval)\n        assert len(proba_all) == 3\n        assert proba_all['dnn_nets-kfold-1'].shape == (200, 1)\n\n    def test_predict_proba(self):\n        proba = self.dt.predict_proba(self.X_eval)\n        assert proba.shape == (200, 1)\n\n    def test_proba2predict(self):\n        proba = self.dt.predict_proba(self.X_eval)\n        preds = self.dt.predict(self.X_eval)\n        preds2 = self.dt.proba2predict(proba)\n        assert proba.shape == (200, 1)\n        assert all(preds == preds2)\n        assert preds2.shape == (200,)\n\n    def test_get_model(self):\n        best = self.dt.get_model(model_selector=consts.MODEL_SELECTOR_BEST)\n        current = self.dt.get_model(model_selector=consts.MODEL_SELECTOR_CURRENT)\n        byname = self.dt.get_model(model_selector='dnn_nets-kfold-1')\n        assert best\n        assert current\n        assert byname\n\n    def test_save_load(self):\n        import time\n        from deeptables.utils import fs\n\n        filepath = f'{type(self).__name__}_{time.strftime(\"%Y%m%d%H%M%S\")}'\n        self.dt.save(filepath)\n        assert fs.exists(f'{filepath}/dt.pkl')\n        assert fs.exists(f'{filepath}/dnn_nets-kfold-1.h5')\n        assert fs.exists(f'{filepath}/dnn_nets-kfold-2.h5')\n        assert fs.exists(f'{filepath}/dnn_nets-kfold-3.h5')\n        newdt = deeptable.DeepTable.load(filepath)\n        preds = newdt.predict(self.X_eval)\n        assert preds.shape == (200,)\n\n\nif __name__ == \"__main__\":\n    pass\n","repo_name":"DataCanvasIO/DeepTables","sub_path":"deeptables/tests/models/deeptable_cv_test.py","file_name":"deeptable_cv_test.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":607,"dataset":"github-code","pt":"81"} +{"seq_id":"12621449985","text":"\nBOT_TOKEN = 'TOKEN'\n\nNEWS_LINK = 'link'\nSALES_LINK = 'link'\nSPARE_PARTS_LINK = 'link'\nEVENTS_LINK = 'link'\nADDRESS_LINK = 'link'\nTECH_LINK = 'link'\n\nWEBHOOK_HOST = 'IP-address of your server'\nWEBHOOK_PORT = 443  # 443, 80, 88 or 8443\nWEBHOOK_LISTEN = 'IP-address of your server'  # or 0.0.0.0\n\nWEBHOOK_SSL_CERT = 'path/to/webhook_cert.pem'  # it's recommended to place it in the 'data' folder\nWEBHOOK_SSL_PRIVATE = 'path/to/webhook_pkey.pem'  # it's recommended to place it in the 'data' folder\n\nWEBHOOK_URL_BASE = f'https://{WEBHOOK_HOST}:{WEBHOOK_PORT}'\nWEBHOOK_URL_PATH = f'/webhook/{BOT_TOKEN}'\n","repo_name":"evlachs/atvarmor_bot","sub_path":"conf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14850542523","text":"from typing import Any, Dict\n\nimport torch\n\nfrom transformers_framework.callbacks.save_additional_data import SaveDataCallback\nfrom transformers_framework.interfaces.logging import (\n    LOSS,\n    TOKEN_DETECTION_ACCURACY,\n    TOKEN_DETECTION_F1,\n 

TOKEN_DETECTION_LOSS,\n    TOKEN_DETECTION_PERPLEXITY,\n)\nfrom transformers_framework.pipelines.random_token_detection.base import RandomTokenDetectionPipeline\nfrom transformers_framework.processing.postprocessors import clustered_random_token_detection_processor\nfrom transformers_framework.utilities.arguments import FlexibleArgumentParser\nfrom transformers_framework.utilities.readers import read_clusters\n\n\nclass ClusterRandomTokenDetectionPipeline(RandomTokenDetectionPipeline):\n    r\"\"\"\n    A model that uses RTS loss where the probability of swapping each token is weighted\n    by the experience of previous similar switches.\n    \"\"\"\n\n    def __init__(self, hyperparameters):\n        super().__init__(hyperparameters)\n\n        self.register_buffer(\n            'token_to_cluster_map', read_clusters(self.hyperparameters.clusters_filename), persistent=True,\n        )  # token -> clusters\n        number_of_clusters = self.token_to_cluster_map.max() + 1\n\n        self.register_buffer(\n            'counts', torch.ones(number_of_clusters, number_of_clusters, dtype=torch.int64), persistent=True,\n        )  # clusters -> clusters\n\n        self.update_references()\n\n    def configure_callbacks(self):\n        return SaveDataCallback(self.hyperparameters, 'counts')\n\n    def update_references(self):\n        self.token_to_cluster_map_numpy = self.token_to_cluster_map.cpu().detach().numpy()\n        self.counts_numpy = self.counts.cpu().detach().numpy()\n\n    def update_count_vector(\n        self,\n        originals: torch.Tensor = None,\n        tampereds: torch.Tensor = None,\n        predictions: torch.Tensor = None,\n        labels: torch.Tensor = None,\n        attention_mask: torch.Tensor = None,\n    ):\n        r\"\"\"\n        Update the vector of counts based on new predictions.\n\n        Args:\n            originals:\n                original ids of shape (batch_size, max_sequence_len)\n            tampereds:\n                modified ids of shape (batch_size, max_sequence_len)\n            attention_mask:\n                attention mask to update only on relevant positions of shape (batch_size, max_sequence_len)\n            predictions:\n                predictions for each modified ids of shape (batch_size, max_sequence_len)\n            labels:\n                gold labels of rts of shape (batch_size, max_sequence_len)\n\n        Example:\n            >>> attention_mask = torch.tensor([1, 1, 1, 1, 1, 1])\n            >>> originals = torch.tensor([2, 3, 56, 1, 2, 23])\n            >>> tampereds = torch.tensor([2, 33, 76, 1, 2, 28])\n            >>> predictions = torch.tensor([0, 1, 0, 1, 1, 0])\n            >>> labels = torch.tensor([0, 1, 1, 0, 0, 0])\n            >>> updates = (predictions != labels) * 2 - 1\n            torch.tensor([-1, -1, 1, 1, 1, -1])\n        \"\"\"\n\n        indexes = (attention_mask == 1) if attention_mask is not None else torch.full_like(originals, fill_value=True)\n        if self.hyperparameters.update_only_on_predictions:\n            indexes = indexes & (originals != tampereds)  # select positions where something changed\n\n        originals = originals[indexes]\n        predictions = predictions[indexes]\n        tampereds = tampereds[indexes]\n        labels = labels[indexes]\n\n        originals_clusters = self.token_to_cluster_map[originals]\n        tampereds_clusters = self.token_to_cluster_map[tampereds]\n\n        updates_matrix = torch.zeros_like(self.counts)\n        updates_matrix[originals_clusters, tampereds_clusters] += (predictions != labels) * 2 - 1\n\n        # gather changes from other processes\n        updates_matrix = self.all_gather(updates_matrix).sum(dim=0)\n\n        # works in distributed because it is registered as buffer\n        self.counts += updates_matrix\n\n        self.update_references()\n\n    def training_step(self, batch, *args):\n        r\"\"\" Here you compute and return the training loss and some additional metrics for e.g.\n        the progress bar or logger.\n        \"\"\"\n\n        original_input_ids = 

batch.pop('original_input_ids')\n step_output = self.step(batch)\n\n self.update_count_vector(\n originals=original_input_ids,\n tampereds=batch['input_ids'],\n predictions=step_output.token_detection_predictions,\n labels=step_output.token_detection_labels,\n attention_mask=batch.get('attention_mask', None),\n )\n\n train_acc = self.train_acc(step_output.token_detection_predictions, step_output.token_detection_labels)\n train_f1 = self.train_f1(step_output.token_detection_predictions, step_output.token_detection_labels)\n train_ppl = self.train_ppl(step_output.token_detection_logits.float(), step_output.token_detection_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(TOKEN_DETECTION_LOSS, step_output.token_detection_loss)\n self.log(TOKEN_DETECTION_ACCURACY, train_acc)\n self.log(TOKEN_DETECTION_F1, train_f1)\n self.log(TOKEN_DETECTION_PERPLEXITY, train_ppl)\n\n return step_output.loss\n\n def validation_step(self, batch, *args):\n batch.pop('original_input_ids')\n return super().validation_step(batch, *args)\n\n def test_step(self, batch, *args):\n batch.pop('original_input_ids')\n return super().test_step(batch, *args)\n\n def postprocess(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n r\"\"\" Process single samples to add denoising objective. \"\"\"\n return clustered_random_token_detection_processor(\n sample=sample,\n input_columns=self.hyperparameters.input_columns,\n probability=self.hyperparameters.probability,\n tokenizer=self.tokenizer,\n max_sequence_length=self.hyperparameters.max_sequence_length,\n whole_word_detection=self.hyperparameters.whole_word_detection,\n token_to_cluster_map=self.token_to_cluster_map_numpy,\n counts=self.counts_numpy,\n beta=self.hyperparameters.beta,\n list_forbitten_replacements=self.tokenizer.all_special_ids,\n )\n\n @classmethod\n def add_argparse_args(self, parser: FlexibleArgumentParser):\n super().add_argparse_args(parser)\n parser.add_argument('--beta', default=2.0, required=False, type=float)\n parser.add_argument('--update_only_on_predictions', action=\"store_true\")\n parser.add_argument('--clusters_filename', type=str, required=True)\n SaveDataCallback.add_argparse_args(parser) # add arguments from callback\n","repo_name":"lucadiliello/transformers-framework","sub_path":"transformers_framework/pipelines/cluster_random_token_detection/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"36359483931","text":"class Solution:\n def search(self, nums: List[int], target: int) -> int:\n # nums is already in ascending order\n\n low = 0\n high = len(nums) - 1\n\n while low < high:\n mid = (low + high) // 2\n if target == nums[mid]:\n return mid\n elif target < nums[mid]:\n high = mid - 1\n elif target > nums[mid]:\n low = mid + 1\n \n if low == high and nums[low] == target:\n return low\n else:\n return -1\n\n# this solution has O(logn) complexity","repo_name":"sustvero/leetcowode","sub_path":"binarySearch/binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12298453527","text":"def compare_lists(l1, l2):\n if len(l1) != len(l2):\n return False\n \n hash_map = {}\n for num in l1:\n num_squared = num ** 2\n hash_map[num_squared] = 1 if num_squared not in hash_map else hash_map[num_squared] + 1\n\n for num in l2:\n if num not in hash_map or hash_map[num] < 1:\n return 
False\n        hash_map[num] -= 1\n    return True\n\nprint('-----compare_lists-----')\nprint(compare_lists([1, 2, 3], [1, 9]))\nprint(compare_lists([1, 2, 3], [4, 1, 9]))\nprint(compare_lists([1, 2, 1], [4, 1, 4]))\nprint(compare_lists([1, 2, 3, 2, 5], [9, 1, 4, 4, 11]))\n    \ndef check_anagram(s1, s2):\n    if len(s1) != len(s2):\n        return False\n    \n    hash_map = {}\n    for char in s1:\n        hash_map[char] = (0 if char not in hash_map else hash_map[char]) + 1\n    \n    for char in s2:\n        if char not in hash_map or hash_map[char] < 1:\n            return False\n        hash_map[char] -= 1\n    return True\n\nprint('------check_anagram------')\nprint(check_anagram('', ''))\nprint(check_anagram('rat', 'car'))\nprint(check_anagram('qwerty', 'qtwyre'))\nprint(check_anagram('anagram', 'nagaram'))","repo_name":"La-BeTe/dsa","sub_path":"udemy-dsa-solutions/frequency_counter.py","file_name":"frequency_counter.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39725201255","text":"# -*- coding:utf-8 -*-\nfrom collections import OrderedDict\nimport csv\nimport json\n\nf_read = open('C:\\\\Users\\\\user\\\\Desktop\\\\songdolib_200225.csv', 'r', encoding='utf-8')\nf_read_csv = csv.reader(f_read)\n\nbooklist = [line for line in f_read_csv]\n\ndef get_booklist(i):\n    id_num = booklist[i][0].strip()\n    regi_num = booklist[i][1].strip()\n    library_code = booklist[i][2].strip().split(' ')[0]\n    category = booklist[i][2].strip().split(' ')[-1].split('-')[0]\n    call_num = booklist[i][2].strip().split(' ')[-1]\n    title = booklist[i][3].strip()\n    author = booklist[i][4].strip()\n    publisher = booklist[i][5].strip()\n    year_of_pub = booklist[i][6].strip()\n    price = booklist[i][7].strip()\n    ISBN = booklist[i][8].strip()\n    checked_out = booklist[i][9].strip()\n\n    dictionary1 = OrderedDict()\n    dictionary1['id'] = id_num\n    dictionary1['regi_num'] = regi_num\n    dictionary1['library_code'] = library_code\n    dictionary1['category'] = category\n    dictionary1['call_num'] = call_num\n    dictionary1['title'] = title\n    dictionary1['author'] = author\n    dictionary1['publisher'] = publisher\n    dictionary1['year_of_pub'] = year_of_pub\n    dictionary1['price'] = price\n    dictionary1['ISBN'] = ISBN\n    dictionary1['checked_out'] = checked_out\n\n    return dictionary1\n\n\ndef get_booklist_by_dictionary():\n    booklist_dictionary = [get_booklist(i) for i in range(0, len(booklist))]\n    return booklist_dictionary\n\nprint(json.dumps(get_booklist_by_dictionary(), ensure_ascii=False, indent='\\t'))\n\nwith open('C:\\\\Users\\\\user\\\\Desktop\\\\songdolib_200225.json', 'w', encoding='utf-8') as make_file:\n    json.dump(get_booklist_by_dictionary(), make_file, ensure_ascii=False, indent='\\t')\n\nf_read.close()\n","repo_name":"marcwoo94/Shelf_Organizer_1st_Iteration","sub_path":"data_manipulation.py","file_name":"data_manipulation.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25530367159","text":"from flask import Flask\nfrom .api.routes import api\nfrom .site.routes import site\nfrom .admin.routes import admin\nfrom .models import db\n\n\ndef create_app():\n    app = Flask(__name__)\n\n    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////home/chus/Documentos/Portafolio/flask/hola/filestorage.db'\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n    app.config['IMAGE_UPLOADS'] = '/home/chus/Documentos/Portafolio/flask/hola/myapp/static/images'\n    \n\n    app.register_blueprint(api)\n 

app.register_blueprint(site)\n    app.register_blueprint(admin)\n    db.init_app(app)\n    with app.app_context():\n        db.create_all()\n    return app\n","repo_name":"JesusMAA/blog_chus","sub_path":"myapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20945498964","text":"def filter_datum(fields, redaction, message, separator):\n    \"\"\"\n    fields: a list of strings representing all fields to obfuscate\n    redaction: a string representing by what the field will be obfuscated\n    message: a string representing the log line\n    separator: a string representing the character separating all fields in the log line (message)\n    \"\"\"\n    # splitting the log message into individual fields using the separator\n    log_fields = message.split(separator)\n\n    # obfuscating the specified fields\n    for i, field in enumerate(log_fields):\n        field_parts = field.split('=')\n        if len(field_parts) == 2 and field_parts[0] in fields:\n            log_fields[i] = '{}={}'.format(field_parts[0], redaction)\n        else:\n            log_fields[i] = field\n\n    # joining the obfuscated fields back into a log message using separator\n    obfuscated_message = separator.join(log_fields)\n\n    # returning the obfuscated log message\n    return obfuscated_message\n\n","repo_name":"Erickadikah/alx-backend-user-data","sub_path":"0x00-personal_data/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18900474884","text":"\"\"\"\nA complex number can be represented as a string on the form \"real+imaginaryi\" where:\n\nreal is the real part and is an integer in the range [-100, 100].\nimaginary is the imaginary part and is an integer in the range [-100, 100].\ni^2 == -1.\nGiven two complex numbers num1 and num2 as strings, return a string of the complex number that represents their multiplications.\n\n\n\nExample 1:\n\nInput: num1 = \"1+1i\", num2 = \"1+1i\"\nOutput: \"0+2i\"\nExplanation: (1 + i) * (1 + i) = 1 + i^2 + 2 * i = 2i, and you need convert it to the form of 0+2i.\nExample 2:\n\nInput: num1 = \"1+-1i\", num2 = \"1+-1i\"\nOutput: \"0+-2i\"\nExplanation: (1 - i) * (1 - i) = 1 + i^2 - 2 * i = -2i, and you need convert it to the form of 0+-2i.\n\n\nConstraints:\n\nnum1 and num2 are valid complex numbers.\n\"\"\"\n\nclass Solution:\n    def complexNumberMultiply(self, num1: str, num2: str) -> str:\n        def getRealImaginary(num: str):\n            complexN = num.split(\"+\")\n            real = int(complexN[0])\n            imag = int(complexN[1][:-1])\n            return [real, imag]\n\n        real1, imag1 = getRealImaginary(num1)\n        real2, imag2 = getRealImaginary(num2)\n        return str(real1*real2-imag1*imag2) + \"+\" + str(real1*imag2+real2*imag1) + \"i\"\n\n","repo_name":"yangmingxuan/pythonalgorithms","sub_path":"string/ComplexNumberMultiplication.py","file_name":"ComplexNumberMultiplication.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75013360583","text":"import sys, os\nimport os.path\nfrom os import path\nimport json\nimport argparse\nimport cutils as utils\nimport image_slicer\nfrom PIL import Image\nimport uuid\n\ndef crop(out_path, input, height, width, k, page, area):\n    im = Image.open(input)\n    imgwidth, imgheight = im.size\n    for i in range(0,imgheight,height):\n        for j in range(0,imgwidth,width):\n            box = (j, i, j+width, 

i+height)\n a = im.crop(box)\n try:\n o = a.crop(area)\n o.save(os.path.join(out_path,\"PNG\",\"%s\" % page,\"IMG-%s.png\" % k))\n except:\n pass\n k +=1\n\n\ndef slice_img(slice_img_path, out_dir, image_number, id_list, cols=2, rows=2, tile_size=1024, ext=\"png\"):\n\n lookup = {\n 'png':'PNG',\n 'jpg':\"JPEG\"\n }\n\n pil_image = utils.open_image(slice_img_path)\n print(pil_image.size)\n\n page = 0\n index_id = 0\n for r_val in range(0, rows):\n for c_val in range(0, cols):\n\n # get the id\n id_index_offset = image_number*rows*cols+index_id\n id_uuid = id_list[id_index_offset]\n\n\n print('{} row:{} col:{} id:{}'.format(slice_img_path, r_val, c_val, id_uuid))\n\n # Setting the points for cropped image \n left = c_val * tile_size\n top = r_val * tile_size\n right = (c_val+1) * tile_size\n bottom = (r_val+1) * tile_size\n box = (left, top, right, bottom)\n # print(box)\n \n # Cropped image of above dimension \n # (It will not change orginal image) \n im1 = pil_image.crop(box)\n out_img = '{}/{}.{}'.format(out_dir, id_uuid, ext) \n im1.save(out_img)\n\n index_id+=1\n\n\n\ndef generate_ids(count=1000):\n ids = []\n for i in range(0,count):\n ids.append(str(uuid.uuid4()))\n return ids\n\n\n\n# ====================================== \ndef main(argv):\n print(\"starting batch img processing. \")\n\n # Read in command-line parameters\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-i\", \"--in\", action=\"store\", required=True, dest=\"indir\", help=\"image input directory\")\n\n parser.add_argument(\"-o\", \"--out\", action=\"store\", required=True, dest=\"out\", help=\"image output directory\")\n\n parser.add_argument(\"-c\", \"--cols\", action=\"store\", required=True, dest=\"cols\", help=\"column slices\")\n\n parser.add_argument(\"-r\", \"--rows\", action=\"store\", required=True, dest=\"rows\", help=\"row slices\")\n\n parser.add_argument(\"-s\", \"--size\", action=\"store\", required=True, dest=\"size\", help=\"tile size\")\n\n parser.add_argument(\"-e\", \"--ext\", action=\"store\", required=False, default='png', dest=\"ext\", help=\"image type\")\n\n\n args = parser.parse_args()\n\n try:\n os.makedirs(args.out)\n except OSError:\n pass\n\n\n id_list=[]\n # look to see if there is an id list in the output dir\n ids_file = '{}/ids.json'.format(args.out)\n if path.exists(ids_file) :\n # load id_list as json file\n with open(ids_file) as f:\n id_list = json.load(f)\n \n else:\n # create id_list\n id_list = generate_ids(count=300)\n\n with open(ids_file, 'w') as json_file:\n json.dump(id_list, json_file)\n\n print(args.indir)\n all_files = utils.find_files(args.indir, pattern=\"*.{}\".format(args.ext))\n print(all_files)\n image_number = 0\n for file_path in all_files:\n slice_img(file_path, args.out, image_number, id_list, int(args.cols), int(args.rows), tile_size=int(args.size))\n image_number+=1\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"claytantor/img-proc","sub_path":"imgslice.py","file_name":"imgslice.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25883478166","text":"from telethon import TelegramClient, sync\nimport requests\nimport json\nimport time\nfrom telethon.sessions import StringSession, string\nfrom telethon.tl.functions.account import UpdateUsernameRequest\n\nimport random\nimport mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"telegramdata.cxh7xkivqcfs.us-east-1.rds.amazonaws.com\",\n user=\"vrushang\",\n 
passwd=\"root091098\",\n    database=\"telegramdata\"\n)\nmycursor = mydb.cursor()\nsql = \"INSERT INTO promoters (id, username, phone,session) VALUES (%s,%s,%s,%s)\"\n\nget_bal_url = \"http://smspva.com/priemnik.php?metod=get_balance&service=opt29&apikey=PmwsDB2zVcVJNMWC4QeuaWYB84ZhKT\"\nreq_num_url = \"http://smspva.com/priemnik.php?metod=get_number&service=opt29&apikey=PmwsDB2zVcVJNMWC4QeuaWYB84ZhKT&country=RU\"\nget_sms_url = \"http://smspva.com/priemnik.php?metod=get_sms&service=opt29&apikey=PmwsDB2zVcVJNMWC4QeuaWYB84ZhKT&country=RU&id={}\"\n\nresp_bal = requests.get(get_bal_url)\ncurr_bal = json.loads(resp_bal.text)\nprint(\"Your Current Balance Is : \" + curr_bal['balance'])\n\nresp_num = requests.get(req_num_url)\ncurr_num = json.loads(resp_num.text)\nprint(\"Your Current Number Is : \" + str(curr_num['CountryCode']) + str(\n    curr_num['number']) + \" id found : \" + str(curr_num['id']))\ntime.sleep(20)\nidnum = curr_num['id']\nprint(idnum)\n\n\ndef get_code(idnum):\n    curr_sms = requests.get(get_sms_url.format(idnum))\n    curr_sms = json.loads(curr_sms.text)\n    curr_sms_text = curr_sms['sms']\n    if curr_sms_text == \"null\":\n        return get_code(idnum)\n    else:\n        return curr_sms_text\n\n\napi_id = \"162650\"\napi_hash = \"1851642d6022571a418fbf25b4eda34e\"\nname = str(curr_num['number'])\nphone_number = str(curr_num['CountryCode'] + curr_num['number'])\nprint(phone_number)\n\nclient = TelegramClient(\"./sessions/bulk/session_{}\".format(phone_number), api_id, api_hash)\nclient.connect()\n\nif not client.is_user_authorized():\n    client.send_code_request(phone_number, force_sms=True)\n    time.sleep(30)\n    code = get_code(idnum)\n    name = \"TradingWiz\" + str(random.randint(1, 500))\n    client.sign_up(\n        code=code,\n        first_name=name,\n        last_name=name,\n    )\n    me = client.sign_in(phone_number, code)\n    string = StringSession.save(client.session)\n    client(UpdateUsernameRequest(name))\n\n    myself = client.get_me()\n    id = myself.id\n    username = myself.username\n    phone = myself.phone\n    session = str(string)\n    val = (id, username, phone, session)\n    mycursor.execute(sql, val)\n    mydb.commit()\n\nprint(curr_num['number'])\n","repo_name":"vrushangdev/telegrambot","sub_path":"make_account.py","file_name":"make_account.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41644976339","text":"#!/usr/bin/env python\nimport rospy\nimport threading\nfrom std_msgs.msg import Float64\n# from mavros_msgs.msg import MotorSetpoint\nfrom mavros_msgs.srv import CommandBool\nfrom numpy import sign\n\n\nclass depthControllerNode():\n    def __init__(self):\n        rospy.init_node(\"depthController\")\n\n        self.arm_vehicle()\n\n        self.data_lock = threading.RLock()\n\n        self.roll_pub = rospy.Publisher(\"roll\",\n                                        Float64,\n                                        queue_size=1)\n        self.pitch_pub = rospy.Publisher(\"pitch\",\n                                         Float64,\n                                         queue_size=1)\n        self.yaw_pub = rospy.Publisher(\"yaw\",\n                                       Float64,\n                                       queue_size=1)\n        self.thrust_pub = rospy.Publisher(\"thrust\",\n                                          Float64,\n                                          queue_size=1)\n        self.vertical_thrust_pub = rospy.Publisher(\"vertical_thrust\",\n                                                   Float64,\n                                                   queue_size=1)\n        self.lateral_thrust_pub = rospy.Publisher(\"lateral_thrust\",\n                                                  Float64,\n                                                  queue_size=1)\n        self.roll = 0.0\n        self.pitch = 0.0\n        self.yaw = 0.0\n        self.thrust = 0.0\n        self.vertical_thrust = 0.0\n        self.lateral_thrust = 0.0\n\n        self.setpoint_sub = rospy.Subscriber(\"depth_setpoint\",\n                                             Float64,\n                                             self.on_setpoint,\n                                             queue_size=1)\n        # self.setpointMotor_sub = rospy.Subscriber(\"mavros/setpoint_motor/setpoint\",\n        # 

MotorSetpoint,\n # self.on_setpoint_,\n # queue_size=1)\n\n self.depth_sub = rospy.Subscriber(\"depth\", Float64,\n self.depth_callback,\n queue_size=1)\n\n rospy.Timer(rospy.Duration(secs=5), self.control)\n\n \"\"\" # Parameter Theory -> not working\n self.Kp_krit = 2.32\n self.T_krit = 2.8\n self.p_gain = 0.6 * self.Kp_krit\n self.i_gain = self.p_gain / (0.5 * self.T_krit)\n self.d_gain = 0.125 * self.p_gain * self.T_krit \"\"\"\n # Parameter static\n self.static_p_gain = 0.4\n self.static_i_gain = 0.5\n self.static_d_gain = 0.1\n self.static_vorsteuerung = -0.05\n # Dynamic Parameter\n self.dynamic_p_gain = 0.8\n self.dynamic_i_gain = 0.2\n self.dynamic_d_gain = 0.4\n self.dynamic_vorsteuerung_up = 0.05\n self.dynamic_vorsteuerung_down = -0.1\n\n # Reconfigure Options via subscriber\n self.kp_dyn_sub = rospy.Subscriber(\"kp_dyn\", Float64,\n self.kp_dyn_callback, queue_size=1)\n self.ki_dyn_sub = rospy.Subscriber(\"ki_dyn\", Float64,\n self.ki_dyn_callback, queue_size=1)\n self.kd_dyn_sub = rospy.Subscriber(\"kd_dyn\", Float64,\n self.kd_dyn_callback, queue_size=1)\n # Reconfigure Options via subscriber\n self.kp_sta_sub = rospy.Subscriber(\"kp_sta\", Float64,\n self.kp_sta_callback, queue_size=1)\n self.ki_sta_sub = rospy.Subscriber(\"ki_sta\", Float64,\n self.ki_sta_callback, queue_size=1)\n self.kd_sta_sub = rospy.Subscriber(\"kd_sta\", Float64,\n self.kd_sta_callback, queue_size=1)\n\n self.i_buf = [0.0] * 10\n self.setpoint_buf = [0.0] * 5\n self.depth_setpoint = -0.5\n self.depth = self.depth_setpoint\n self.depth_old = self.depth\n self.depth_buf = [self.depth]\n self.depth_buffer_len = 5\n self.sensor_time = rospy.get_time()\n\n def publish(self):\n msg_roll = Float64()\n msg_roll.data = self.roll\n self.roll_pub.publish(msg_roll)\n\n msg_pitch = Float64()\n msg_pitch.data = self.pitch\n self.pitch_pub.publish(msg_pitch)\n\n msg_yaw = Float64()\n msg_yaw.data = self.yaw\n self.yaw_pub.publish(msg_yaw)\n\n msg_thrust = Float64()\n msg_thrust.data = self.thrust\n self.thrust_pub.publish(msg_thrust)\n\n msg_vertical_thrust = Float64()\n msg_vertical_thrust.data = self.vertical_thrust\n self.vertical_thrust_pub.publish(msg_vertical_thrust)\n\n msg_lateral_thrust = Float64()\n msg_lateral_thrust.data = self.lateral_thrust\n self.lateral_thrust_pub.publish(msg_lateral_thrust)\n\n def on_setpoint(self, msg):\n with self.data_lock:\n if not self.isRegion(msg.data):\n self.depth_setpoint = msg.data\n self.setpoint_buf.append(self.depth_setpoint)\n self.setpoint_buf.pop(0)\n self.control()\n\n def kp_dyn_callback(self, msg):\n with self.data_lock:\n self.dynamic_p_gain = msg.data\n\n def ki_dyn_callback(self, msg):\n with self.data_lock:\n self.dynamic_i_gain = msg.data\n\n def kd_dyn_callback(self, msg):\n with self.data_lock:\n self.dynamic_d_gain = msg.data\n\n def kp_sta_callback(self, msg):\n with self.data_lock:\n self.static_p_gain = msg.data\n\n def ki_sta_callback(self, msg):\n with self.data_lock:\n self.static_i_gain = msg.data\n\n def kd_sta_callback(self, msg):\n with self.data_lock:\n self.static_d_gain = msg.data\n\n def depth_callback(self, msg):\n with self.data_lock:\n self.depth = msg.data\n self.sensor_time = rospy.get_time()\n self.control()\n\n # might be redundant if only self.run is used\n def control(self, *args):\n if rospy.get_time() - self.sensor_time > 5:\n rospy.logwarn(\"Sensor Timeout\")\n self.vertical_thrust = 0\n self.publish()\n return\n\n if self.isStatic():\n self.act_p_gain = self.static_p_gain\n self.act_i_gain = self.static_i_gain\n 
self.act_d_gain = self.static_d_gain\n            self.act_vorsteuerung = self.static_vorsteuerung\n        else:\n            self.act_p_gain = self.dynamic_p_gain\n            self.act_i_gain = self.dynamic_i_gain\n            self.act_d_gain = self.dynamic_d_gain\n            if self.depth_setpoint > self.depth:\n                self.act_vorsteuerung = self.dynamic_vorsteuerung_up\n            else:\n                self.act_vorsteuerung = self.dynamic_vorsteuerung_down\n\n        self.i_buf.append(self.depth_setpoint - self.depth)\n        self.i_buf.pop(0)\n        self.vertical_thrust = \\\n            self.act_p_gain * (self.depth_setpoint - self.depth) +\\\n            self.act_i_gain * sum(self.i_buf) / len(self.i_buf) +\\\n            self.act_d_gain * (self.i_buf[-4] - self.i_buf[-1])\\\n            + self.act_vorsteuerung\n        # self.depth_old = self.depth_setpoint - self.depth\n\n        if abs(self.vertical_thrust) > 1:\n            self.vertical_thrust = sign(self.vertical_thrust)\n\n        if self.isRegion(self.depth) == 1:\n            self.vertical_thrust = -0.1\n        elif self.isRegion(self.depth) == -1:\n            self.vertical_thrust = 0.1\n\n        self.publish()\n\n    def isRegion(self, setpoint):\n        if setpoint > -0.1:\n            return 1\n        elif setpoint < -0.8:\n            return -1\n        else:\n            return 0\n\n    def isStatic(self):\n        # depth error unchanged over the last samples -> vehicle is holding depth\n        if self.i_buf[-4] == self.i_buf[-1]:\n            return True\n        else:\n            return False\n\n    def arm_vehicle(self):\n        # wait until the arming service becomes available\n        rospy.wait_for_service(\"mavros/cmd/arming\")\n        # connect to the service\n        arm = rospy.ServiceProxy(\"mavros/cmd/arming\", CommandBool)\n        # call the service to arm the vehicle until the service call was successful\n        while not arm(True).success:\n            rospy.logwarn(\"Could not arm vehicle. Keep trying.\")\n            rospy.sleep(1.0)\n        rospy.loginfo(\"Armed successfully.\")\n\n    def run(self):\n        rospy.spin()\n\n\ndef main():\n    node = depthControllerNode()\n    node.run()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"CarlGiest/FormulasAndVehicles","sub_path":"catkin_ws/src/depth_controller/nodes/depth_controller.py","file_name":"depth_controller.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"23372833743","text":"import os\nimport random\nimport socket\nimport string\nimport sys\n\nfrom utils import push_data, pull_data, pull_file, push_file\n\nmax_port = 65535\nmin_port = 1023\n\nclients_dic = {}\n\n\ndef add_to_dic(client_id, num):\n    if client_id not in clients_dic:\n        clients_dic[client_id] = {num: []}\n    elif num not in clients_dic[client_id]:\n        clients_dic[client_id][num] = []\n\n\ndef insert_updates(client_id, num, src_path, event, dst_path):\n    for key in list(clients_dic[client_id].keys()):\n        if str(key) != str(num):\n            if dst_path == '':\n                clients_dic[client_id][key].append([event, src_path])\n            else:\n                clients_dic[client_id][key].append([event, src_path, dst_path])\n\n\ndef delete_path(src_path):\n    for root, dirs, files in os.walk(src_path, topdown=False):\n        for name in files:\n            os.remove(os.path.join(root, name))\n        for name in dirs:\n            os.rmdir(os.path.join(root, name))\n    if os.path.isdir(src_path):\n        os.rmdir(src_path)\n    else:\n        try:\n            os.remove(src_path)\n        except:\n            pass\n\n\ndef delete_updates(client_id, num, event, src_path, dst_path):\n    updates_list = clients_dic[client_id][num]\n    if dst_path == '':\n        wanted_update = [event, src_path]\n    else:\n        wanted_update = [event, src_path, dst_path]\n    for update in updates_list:\n        if update == wanted_update:\n            clients_dic[client_id][num].remove(wanted_update)\n\n\ndef init_clients_folder():\n    os.mkdir(os.path.abspath('Clients'))\n    return os.path.abspath('Clients')\n\n\ndef 
create_new_client_id(num):\n client_id = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(128))\n os.mkdir(os.path.join(os.path.abspath('Clients'), client_id))\n print(client_id)\n add_to_dic(client_id, num)\n return client_id\n\n\ndef create_socket(path):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', PORT))\n s.listen()\n while True:\n client_socket, client_address = s.accept()\n with client_socket, client_socket.makefile('rb') as file:\n data = file.readline().strip().decode()\n\n if data == 'add my folder':\n num = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(128))\n client_socket.sendall(num.encode() + b'\\n')\n client_id = create_new_client_id(num)\n client_socket.sendall(client_id.encode() + b'\\n')\n folder_path = path + '/' + str(client_id)\n pull_data(folder_path, client_socket, file)\n\n elif data == 'update me':\n client_id = file.readline().strip().decode()\n client_number = file.readline().strip().decode()\n length = len(clients_dic[client_id][client_number])\n if length == 0:\n client_socket.sendall(str(-1).encode() + b'\\n')\n client_socket.close()\n continue\n client_socket.sendall((str(length)).encode() + b'\\n')\n temp_list = clients_dic[client_id][client_number]\n for i in range(0, length):\n event = temp_list[i][0]\n client_socket.sendall(event.encode() + b'\\n')\n src_path = temp_list[i][1]\n client_socket.sendall(src_path.encode() + b'\\n')\n if len(temp_list[i]) == 2:\n dst_path = ''\n else:\n dst_path = temp_list[i][2]\n client_socket.sendall(dst_path.encode() + b'\\n')\n if event == 'created':\n full_path = str(os.path.abspath('Clients') + '/' + str(client_id) + src_path)\n if os.path.isdir(full_path):\n client_socket.sendall('new folder'.encode() + b'\\n')\n else:\n client_socket.sendall('new file'.encode() + b'\\n')\n push_file(full_path, client_socket)\n\n if event == 'moved':\n full_path = str(os.path.abspath('Clients') + '/' + str(client_id) + dst_path)\n if os.path.isdir(full_path):\n client_socket.sendall('new folder'.encode() + b'\\n')\n else:\n client_socket.sendall('new file'.encode() + b'\\n')\n push_file(full_path, client_socket)\n\n clients_dic[client_id][client_number].clear()\n temp_list.clear()\n\n elif data == 'created':\n flag = file.readline().strip().decode()\n if flag == 'False':\n client_number = file.readline().strip().decode()\n relative_path = file.readline().strip().decode()\n new = file.readline().strip().decode()\n if new == 'new folder':\n try:\n os.makedirs(os.path.abspath('Clients') + '/' + str(client_id) + relative_path)\n except:\n pass\n elif new == 'new file':\n pull_file(os.path.abspath('Clients') + '/' + str(client_id) + os.path.dirname(relative_path),\n client_socket, file)\n\n insert_updates(client_id, client_number, relative_path, data, '')\n\n elif data == 'deleted':\n flag = file.readline().strip().decode()\n if flag == 'False':\n client_number = file.readline().strip().decode()\n src_path = file.readline().strip().decode()\n insert_updates(client_id, client_number, src_path, data, '')\n delete_path(os.path.abspath('Clients') + '/' + str(client_id) + src_path)\n\n elif data == 'moved':\n flag = file.readline().strip().decode()\n if flag == 'False':\n client_number = file.readline().strip().decode()\n src_path = file.readline().strip().decode()\n relative_path = file.readline().strip().decode()\n insert_updates(client_id, client_number, src_path, data, relative_path)\n delete_path(os.path.abspath('Clients') + '/' + str(client_id) + src_path)\n 
new = file.readline().strip().decode()\n if new == 'new folder':\n try:\n os.makedirs(os.path.abspath('Clients') + '/' + str(client_id) + relative_path)\n except:\n pass\n elif new == 'new file':\n pull_file(os.path.abspath('Clients') + '/' + str(client_id) + os.path.dirname(relative_path),\n client_socket, file)\n\n else:\n num = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(128))\n add_to_dic(client_id, num)\n client_socket.sendall(num.encode() + b'\\n')\n push_data(os.path.abspath('Clients') + '/' + data, client_socket)\n\n client_socket.close()\n\n\n# check if port is valid\ndef check_port(port):\n if not min_port <= port <= max_port:\n sys.exit()\n\n\n# main method\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n sys.exit()\n try:\n PORT = int(sys.argv[1])\n check_port(PORT)\n path = init_clients_folder()\n create_socket(path)\n except ValueError:\n sys.exit()\n","repo_name":"yuvalalroy/File_Sharing_System","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35154748686","text":"#initialising\r\nimport pygame\r\nimport math\r\nimport random\r\nimport sys\r\nfrom pygame import mixer\r\npygame.init()\r\n\r\n#colour\r\nRED=(255,0,0)\r\nBLUE=(0,0,255)\r\nYELLOW=(255,255,0)\r\nWHITE=(255,255,255)\r\nBACKGROUND_COLOR=(0,0,0)\r\n\r\n#screen\r\nWIDTH=800\r\nHEIGHT=600\r\nscreen=pygame.display.set_mode((WIDTH,HEIGHT))\r\n\r\n# Caption and Icon\r\npygame.display.set_caption(\"COVID-19\")\r\nicon = pygame.image.load('Covid1.png')\r\npygame.display.set_icon(icon)\r\n\r\n#font\r\nfont_style = pygame.font.SysFont(\"Times\", 70)\r\nmyFont = pygame.font.SysFont(\"comicsansms\", 25)\r\nfont = pygame.font.Font('freesansbold.ttf', 32)\r\nover_font = pygame.font.Font('freesansbold.ttf', 64)\r\ninfo_font=pygame.font.Font('freesansbold.ttf',16)\r\ncom=pygame.font.SysFont(\"comicsansms\",25)\r\n#Clock\r\nclock = pygame.time.Clock()\r\n\r\n#Sound\r\nmixer.music.load(\"background.wav\")\r\nmixer.music.play(-1)\r\n\r\n# Score\r\nscore=0\r\nscore_value = 0\r\n\r\n#background\r\nbg_6 = pygame.image.load('bg6.jpg')\r\nbg_1=pygame.image.load('bg1.jpg')\r\nbg_2=pygame.image.load('bg2.jpg')\r\nbg_3=pygame.image.load('bg3.jpg')\r\nbg_4=pygame.image.load('bg4.png')\r\nbg_5=pygame.image.load('bg5.jpg')\r\nbg_7=pygame.image.load('bg7.jpg')\r\n\r\n#player_game1\r\nplayer_size=50\r\nplayer_image=pygame.image.load('player.png')\r\nplayer_pos=[400,500]\r\n\r\n# Player_game2\r\nplayerImg = pygame.image.load('doctor.png')\r\nplayerX = 370\r\nplayerY = 480\r\nplayerX_change = 0\r\n\r\n#enemy_game1\r\nenemy_image=pygame.image.load('covid.png')\r\nenemy_pos=[random.randint(0,750),0]\r\nenemy_list=[enemy_pos]\r\nenemy_size=50\r\n\r\n# Enemy_game2\r\nenemyImg = []\r\nenemyX = []\r\nenemyY = []\r\nenemyX_change = []\r\nenemyY_change = []\r\nnum_of_enemies = 6\r\n\r\nfor i in range(num_of_enemies):\r\n enemyImg.append(pygame.image.load('covid.png'))\r\n enemyX.append(random.randint(0, 736))\r\n enemyY.append(random.randint(50, 150))\r\n enemyX_change.append(4)\r\n enemyY_change.append(40)\r\n\r\n#pills\r\npillImg = pygame.image.load('pill.png')\r\npillX = 0\r\npillY = 480\r\npillX_change = 0\r\npillY_change = 10\r\npill_state = \"ready\"\r\n\r\n#others\r\nSPEED=10\r\ntextX = 10\r\ntestY = 10\r\n\r\n#game_1 functions\r\n\r\ndef notice_1(msg,colour):\r\n no_1=font_style.render(msg,True,colour)\r\n screen.blit(no_1,[250,200])\r\ndef 
notice_2(msg,colour):\r\n    no_2=com.render(msg,True,colour)\r\n    screen.blit(no_2,[350,350])\r\ndef notice_3(msg,colour):\r\n    no_3=myFont.render(msg,True,colour)\r\n    screen.blit(no_3,[150,500])\r\ndef message(msg, color):\r\n    mesg = myFont.render(msg, True, color)\r\n    screen.blit(mesg, [300,300])\r\ndef notice_4(msg,colour):\r\n    no_4=font_style.render(msg,True,colour)\r\n    screen.blit(no_4,[50,100])\r\ndef notice_5(msg,colour):\r\n    no_5=info_font.render(msg,True,colour)\r\n    screen.blit(no_5,[300,300])\r\ndef notice_6(msg,colour):\r\n    no_6=com.render(msg,True,colour)\r\n    screen.blit(no_6,[300,200])\r\ndef notice_7(msg,colour):\r\n    no_7=info_font.render(msg,True,colour)\r\n    screen.blit(no_7,[50,500])\r\n\r\ndef set_level(score, SPEED):\r\n    if score < 20:\r\n        SPEED = 5\r\n    elif score < 40:\r\n        SPEED = 8\r\n    elif score < 60:\r\n        SPEED = 12\r\n    else:\r\n        SPEED = 15\r\n    return SPEED\r\ndef drop_enemies(enemy_list):\r\n    delay = random.random()\r\n    if len(enemy_list) < 10 and delay < 0.1:\r\n        x_pos = random.randint(0,WIDTH-enemy_size)\r\n        y_pos = 0\r\n        enemy_list.append([x_pos, y_pos])\r\n\r\ndef draw_enemies(enemy_list):\r\n    for enemy_pos in enemy_list:\r\n        screen.blit(enemy_image,(enemy_pos[0],enemy_pos[1]))\r\n\r\n\r\ndef update_enemy_positions(enemy_list, score, speed):\r\n    # take the current level speed as a parameter; the module-level SPEED would\r\n    # otherwise always stay at 10 and set_level() would have no visible effect\r\n    for idx, enemy_pos in enumerate(enemy_list):\r\n        if enemy_pos[1] >= 0 and enemy_pos[1] < HEIGHT:\r\n            enemy_pos[1] += speed\r\n        else:\r\n            enemy_list.pop(idx)\r\n            score += 1\r\n    return score\r\n\r\ndef collision_check(enemy_list, player_pos):\r\n    for enemy_pos in enemy_list:\r\n        if detect_collision(enemy_pos, player_pos):\r\n            return True\r\n    return False\r\n\r\ndef detect_collision(player_pos, enemy_pos):\r\n    p_x = player_pos[0]\r\n    p_y = player_pos[1]\r\n\r\n    e_x = enemy_pos[0]\r\n    e_y = enemy_pos[1]\r\n\r\n    if (e_x >= p_x and e_x < (p_x + player_size)) or (p_x >= e_x and p_x < (e_x+enemy_size)):\r\n        if (e_y >= p_y and e_y < (p_y + player_size)) or (p_y >= e_y and p_y < (e_y+enemy_size)):\r\n            return True\r\n    return False\r\n\r\n#game_1 loop\r\nintro=True\r\nwhile intro:\r\n    screen.fill((0,0,0))\r\n    screen.blit(bg_1,(0,0))\r\n    notice_1(\"COVID-19\",RED)\r\n    notice_2(\"Press ENTER\",WHITE)\r\n\r\n    pygame.display.update()\r\n    for event in pygame.event.get():\r\n        if event.type==pygame.KEYDOWN:\r\n            if event.key==pygame.K_RETURN:\r\n                intro=False\r\n                break\r\nins=True\r\nwhile ins:\r\n    screen.blit(bg_6,(0,0))\r\n    notice_4(\"LEVEL 1: ESCAPE\",WHITE)\r\n    notice_5(\"*Score 200 to unlock next level\",YELLOW)\r\n    notice_6(\"Press ENTER\",RED)\r\n    notice_7(\"HINT:Use arrow keys to navigate\",YELLOW)\r\n\r\n    pygame.display.update()\r\n    for event in pygame.event.get():\r\n        if event.type==pygame.KEYDOWN:\r\n            if event.key==pygame.K_RETURN:\r\n                ins=False\r\n                break\r\n\r\n\r\ndef game_loop():\r\n    game_over=False\r\n    game_close=False\r\n\r\n    player_pos=[400,500]\r\n    x=player_pos[0]\r\n    y=player_pos[1]\r\n    score=0\r\n    SPEED=10\r\n\r\n    while not game_over:\r\n\r\n\r\n\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                sys.exit()\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_LEFT:\r\n                    x -= player_size\r\n                elif event.key == pygame.K_RIGHT:\r\n                    x += player_size\r\n                player_pos = [x,y]\r\n                if x<=0:\r\n                    x=0\r\n                if x>=736:\r\n                    x=736\r\n\r\n\r\n        screen.fill(BACKGROUND_COLOR)\r\n        screen.blit(bg_2,(0,0))\r\n        drop_enemies(enemy_list)\r\n        score = update_enemy_positions(enemy_list, score, SPEED)\r\n        if score>=200:\r\n            game_over=True\r\n            break\r\n\r\n        SPEED = set_level(score, SPEED)\r\n        text1 = \"Score:\" + str(score)\r\n        label = 
myFont.render(text1, 1, BLUE)\r\n screen.blit(label, (WIDTH-200, HEIGHT-40))\r\n\r\n if collision_check(enemy_list,player_pos):\r\n game_close=True\r\n while game_close==True:\r\n screen.blit(bg_1,(0,0))\r\n message(\"You Lost! Press Esc-Quit\", WHITE)\r\n notice_4(\"Better Luck Next Time...\",YELLOW)\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n game_over = True\r\n game_close=False\r\n pygame.quit()\r\n quit()\r\n\r\n\r\n draw_enemies(enemy_list)\r\n screen.blit(player_image,(player_pos[0],player_pos[1]))\r\n clock.tick(30)\r\n pygame.display.update()\r\n\r\ngame_loop()\r\n\r\n#game_2 functions\r\n\r\ndef info_1(x,y):\r\n info=font_style.render(\"Lvl2: Kill them all\",True,(RED))\r\n screen.blit(info,(x,y))\r\ndef info_2(x,y):\r\n info=myFont.render(\"PRESS ENTER\",True,(RED))\r\n screen.blit(info,(x,y))\r\ndef info_3(x,y):\r\n info=myFont.render(\"HINT:use SPACEBAR to shoot\",True,(YELLOW))\r\n screen.blit(info,(x,y))\r\ndef info_4(x,y):\r\n info=info_font.render(\"#Stay Home\",True,(YELLOW))\r\n screen.blit(info,(x,y))\r\ndef info_5(x,y):\r\n info=info_font.render(\"#Stay Safe\",True,(YELLOW))\r\n screen.blit(info,(x,y))\r\n\r\ndef info_6(x,y):\r\n info=font_style.render(\"Congratulations!!!!\",True,(YELLOW))\r\n screen.blit(info,(x,y))\r\ndef info_7(x,y):\r\n info=myFont.render(\"PRESS ENTER\",True,(RED))\r\n screen.blit(info,(x,y))\r\n\r\ndef info_8(x,y):\r\n info=myFont.render(\"You unlocked next level\",True,(RED))\r\n screen.blit(info,(x,y))\r\ndef info_9(x,y):\r\n info=myFont.render(\"Press Esc to quit\",True,(WHITE))\r\n screen.blit(info,(x,y))\r\ndef info_10(x,y):\r\n info=font_style.render(\"YOU WON\",True,(WHITE))\r\n screen.blit(info,(x,y))\r\ndef info_11(x,y):\r\n info=com.render(\"*score 500 to win the game\",True,(WHITE))\r\n screen.blit(info,(x,y))\r\n\r\ndef show_score(x, y):\r\n score = font.render(\"Score : \" + str(score_value), True, (BLUE))\r\n screen.blit(score, (x, y))\r\n\r\n\r\ndef game_over_text():\r\n over_text = over_font.render(\"GAME OVER\", True, (255, 255, 255))\r\n screen.blit(over_text, (200, 250))\r\n\r\ndef player(x, y):\r\n screen.blit(playerImg, (x, y))\r\n\r\ndef enemy(x, y, i):\r\n screen.blit(enemyImg[i], (x, y))\r\n\r\ndef fire_pill(x, y):\r\n global pill_state\r\n pill_state = \"fire\"\r\n screen.blit(pillImg, (x + 16, y + 10))\r\n\r\ndef isCollision(enemyX, enemyY, pillX, pillY):\r\n distance = math.sqrt(math.pow(enemyX - pillX, 2) + (math.pow(enemyY - pillY, 2)))\r\n if distance < 27:\r\n return True\r\n else:\r\n return False\r\n\r\n#game_2 loop\r\ncongrat=True\r\nwhile congrat:\r\n screen.blit(bg_3,(0,0))\r\n info_8(250,400)\r\n info_6(200,200)\r\n info_7(300,300)\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type==pygame.KEYDOWN:\r\n if event.key==pygame.K_RETURN:\r\n congrat=False\r\n break\r\nnote=True\r\nwhile note:\r\n screen.blit(bg_4,(0,0))\r\n info_1(120,160)\r\n info_2(300,350)\r\n info_3(50,500)\r\n info_11(50,550)\r\n\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type==pygame.KEYDOWN:\r\n if event.key==pygame.K_RETURN:\r\n note=False\r\n break\r\n\r\nrunning = True\r\nquit=False\r\nwin=False\r\nwhile running:\r\n screen.fill((0, 0, 0))\r\n screen.blit(bg_5, (0, 0))\r\n\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n playerX_change = -5\r\n if 
event.key == pygame.K_RIGHT:\r\n                playerX_change = 5\r\n            if event.key == pygame.K_SPACE:\r\n                if pill_state == \"ready\":  # compare strings with ==, not 'is'\r\n                    pillSound = mixer.Sound(\"pill.wav\")\r\n                    pillSound.play()\r\n                    pillX = playerX\r\n                    fire_pill(pillX, pillY)\r\n\r\n        if event.type == pygame.KEYUP:\r\n            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n                playerX_change = 0\r\n    playerX += playerX_change\r\n    if playerX <= 0:\r\n        playerX = 0\r\n    elif playerX >= 736:\r\n        playerX = 736\r\n    for i in range(num_of_enemies):\r\n        if enemyY[i] > 440:\r\n            for j in range(num_of_enemies):\r\n                enemyY[j] = 2000\r\n            quit=True\r\n            while quit:\r\n                screen.blit(bg_1,(0,0))\r\n                info_4(300,400)\r\n                info_5(400,400)\r\n                game_over_text()\r\n                info_9(250,350)\r\n                pygame.display.update()\r\n                for event in pygame.event.get():\r\n                    if event.type==pygame.KEYDOWN:\r\n                        if event.key==pygame.K_ESCAPE:\r\n                            quit=False\r\n                            running=False\r\n                            pygame.quit()\r\n                            sys.exit()  # 'quit' is shadowed by the flag above, so use sys.exit()\r\n\r\n\r\n        enemyX[i] += enemyX_change[i]\r\n        if enemyX[i] <= 0:\r\n            enemyX_change[i] = 2\r\n            enemyY[i] += enemyY_change[i]\r\n        elif enemyX[i] >= 736:\r\n            enemyX_change[i] = -2\r\n            enemyY[i] += enemyY_change[i]\r\n\r\n\r\n        collision = isCollision(enemyX[i], enemyY[i], pillX, pillY)\r\n        if collision:\r\n            explosionSound = mixer.Sound(\"explosion.wav\")\r\n            explosionSound.play()\r\n            pillY = 480\r\n            pill_state = \"ready\"\r\n            score_value += 1\r\n            enemyX[i] = random.randint(0, 736)\r\n            enemyY[i] = random.randint(50, 150)\r\n\r\n        enemy(enemyX[i], enemyY[i], i)\r\n\r\n    if pillY <= 0:\r\n        pillY = 480\r\n        pill_state = \"ready\"\r\n\r\n    if pill_state == \"fire\":\r\n        fire_pill(pillX, pillY)\r\n        pillY -= pillY_change\r\n\r\n    player(playerX, playerY)\r\n    show_score(textX, testY)\r\n    pygame.display.update()\r\n    if score_value>=500:\r\n        win=True\r\n        while win:\r\n            screen.blit(bg_7,(0,0))\r\n            info_4(300,400)\r\n            info_5(400,400)\r\n            info_10(250,300)\r\n            pygame.display.update()\r\n            for event in pygame.event.get():\r\n                if event.type==pygame.QUIT:\r\n                    win=False\r\n                    running=False\r\npygame.quit()\r\nsys.exit()\r\n","repo_name":"jefrin-solomon/covid-19","sub_path":"ori.py","file_name":"ori.py","file_ext":"py","file_size_in_byte":12584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"86306811807","text":"import pytest\nfrom pactman import Like, SomethingLike\nfrom pactman.mock.matchers import Matcher, Term\n\n\ndef test_is_something_like():\n    assert SomethingLike is Like\n\n\ndef test_valid_types():\n    types = [None, list(), dict(), 1, 1.0, \"string\", \"unicode\", Matcher()]\n\n    for t in types:\n        SomethingLike(t)\n\n\ndef test_invalid_types():\n    with pytest.raises(AssertionError) as e:\n        SomethingLike(set())\n\n    assert \"matcher must be one of \" in str(e.value)\n\n\ndef test_basic_type():\n    assert SomethingLike(123).ruby_protocol() == {\n        \"json_class\": \"Pact::SomethingLike\",\n        \"contents\": 123,\n    }\n\n\ndef test_complex_type():\n    assert SomethingLike({\"name\": Term(\".+\", \"admin\")}).ruby_protocol() == {\n        \"json_class\": \"Pact::SomethingLike\",\n        \"contents\": {\n            \"name\": {\n                \"json_class\": \"Pact::Term\",\n                \"data\": {\n                    \"matcher\": {\"json_class\": \"Regexp\", \"s\": \".+\", \"o\": 0},\n                    \"generate\": \"admin\",\n                },\n            }\n        },\n    }\n","repo_name":"reecetech/pactman","sub_path":"pactman/test/mock_matchers/test_like.py","file_name":"test_like.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"81"}
{"seq_id":"42158204041","text":"from datetime import 
datetime\nfrom pathlib import Path\n\nimport click\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom PIL.Image import fromarray\nfrom imagehash import dhash\nfrom scipy.io import loadmat\nfrom skimage.transform import SimilarityTransform\nfrom tqdm.contrib import tenumerate\n\nfrom preprocessing.calculate_landmarks_from_directory import CALCULATED_COORDS\nfrom preprocessing.face_detector import FaceDetector\n\n\ndef calc_age(taken, dob):\n    birth = datetime.fromordinal(max(int(dob) - 366, 1))\n\n    # assume the photo was taken in the middle of the year\n    if birth.month < 7:\n        return taken - birth.year\n    else:\n        return taken - birth.year - 1\n\n\ndef get_meta(mat_path, db):\n    meta = loadmat(mat_path)\n    full_path = meta[db][0, 0][\"full_path\"][0]\n    dob = meta[db][0, 0][\"dob\"][0]  # Matlab serial date number\n    gender = meta[db][0, 0][\"gender\"][0]\n    photo_taken = meta[db][0, 0][\"photo_taken\"][0]  # year\n    face_score = meta[db][0, 0][\"face_score\"][0]\n    second_face_score = meta[db][0, 0][\"second_face_score\"][0]\n    age = [calc_age(photo_taken[i], dob[i]) for i in range(len(dob))]\n    return full_path, dob, gender, photo_taken, face_score, second_face_score, age\n\n\ndef load_data(mat_path):\n    d = loadmat(mat_path)\n    return d[\"image\"], d[\"gender\"][0], d[\"age\"][0], d[\"db\"][0], d[\"img_size\"][0, 0], d[\"min_score\"][0, 0]\n\n\n@click.command()\n@click.option('--path', type=str, help='Path to IMDB dataset directory')\n@click.option('--quantity', type=int, default=None,\n              help='Number of images, to process only part of data when you would like to test script')\ndef align(path, quantity):\n    output_directory: Path = Path(path + '_aligned')\n    output_image_directory: Path = output_directory / 'images'\n    output_image_directory.mkdir(parents=True, exist_ok=True)\n\n    face_detector = FaceDetector(weights='models/retina_face_weights/Resnet50_Final.pth')\n    meta = get_meta('data/imdb-wiki/wiki_crop/wiki.mat', 'wiki')\n    # cap the progress-bar total when only part of the data is processed\n    iteration_number: int = len(meta[0]) if quantity is None else min(len(meta[0]), quantity)\n    result_data = list()\n    for i, (full_path, dob, gender, photo_taken, face_score, second_face_score, age) in \\\n            tenumerate(zip(*meta), total=iteration_number):\n        if quantity is not None and i >= quantity:\n            break\n\n        if face_score < 1:\n            continue\n\n        if (not np.isnan(second_face_score)) and second_face_score > 0.0:\n            continue\n\n        if not 0 <= age <= 100:\n            continue\n\n        if np.isnan(gender):\n            continue\n\n        try:\n            complete_image_path = Path(path) / str(full_path[0])\n            img_raw = cv2.imread(str(complete_image_path), cv2.IMREAD_COLOR)\n\n            dets, landmarks = face_detector.predict(img_raw)\n            similarity_transform = SimilarityTransform()\n            similarity_transform.estimate(landmarks.reshape((5, 2)), CALCULATED_COORDS.reshape((5, 2)))\n            aligned_image = cv2.warpAffine(img_raw, similarity_transform.params[:2, :], (256, 256),\n                                           borderMode=cv2.BORDER_REFLECT)\n            hash = dhash(fromarray(img_raw))\n            new_path = output_image_directory / f'{str(hash)}.jpg'\n\n            result_data.append([complete_image_path, str(new_path), hash, int(gender), int(age)])\n            cv2.imwrite(str(new_path), aligned_image)\n        except Exception as e:\n            print(f'Error {str(e)}')\n\n    df = pd.DataFrame(result_data, columns=['base_path', 'aligned_path', 'hash', 'gender', 'age'])\n    df = df.set_index('hash')\n\n    df.to_csv(str(output_directory / 'all_data.csv'))\n\n\nif __name__ == '__main__':\n    
align()\n","repo_name":"plutasnyy/FaceOversampling","sub_path":"preprocessing/align_imdb.py","file_name":"align_imdb.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"38720814357","text":"\"\"\"\nThis module provides a terminal assistant using OpenAI's GPT-4 model.\n\"\"\"\n\nimport time\nimport os\nimport subprocess\nfrom configparser import ConfigParser\nimport sys\nimport speech_recognition as sr\nimport requests\nfrom pydub import AudioSegment\nfrom pydub.playback import play\n\nimport openai\nfrom gtts import gTTS\n\n# Redirect stderr to /dev/null (subprocess.DEVNULL is an int flag, not a writable file object)\nsys.stderr = open(os.devnull, 'w')\n\nconfig = ConfigParser()\nCONFIG_NAME = 'ta_auth.ini'\n\n\ndef create_config():\n    \"\"\"\n    Function to create a configuration file.\n    \"\"\"\n    openai_key = input(\"OpenAI API key: \")\n    googleapi_api_key = input(\"GoogleAPI key: \")\n    googleapi_search_engine_id = input(\"GoogleAPI search engine ID: \")\n\n    config['AUTH'] = {\n        'openai': openai_key,\n        'googleapi_key': googleapi_api_key,\n        'googleapi_search_id': googleapi_search_engine_id\n    }\n\n    with open(CONFIG_NAME, 'w', encoding='utf-8') as config_file:\n        config.write(config_file)\n\n\ndef check_for_config():\n    \"\"\"\n    Function to check for a configuration file.\n    \"\"\"\n    if os.path.exists(CONFIG_NAME):\n        config.read(CONFIG_NAME)\n        return\n\n    create_config()\n\n\ncheck_for_config()\n\nopenai.api_key = config['AUTH']['openai']\nAPI_KEY = config['AUTH']['googleapi_key']\nSEARCH_ENGINE_ID = config['AUTH']['googleapi_search_id']\nENDPOINT = \"https://www.googleapis.com/customsearch/v1\"\n\n\ndef ask_gpt(prompt, model=\"gpt-4\", tokens=2500):\n    \"\"\"\n    Function to interact with the GPT model.\n    \"\"\"\n    response = openai.ChatCompletion.create(\n        model=model,\n        messages=[\n            {\"role\": \"system\", \"content\": \"I am your helpful assistant\"},\n            {\"role\": \"user\", \"content\": prompt}\n        ],\n        max_tokens=tokens,\n        n=1,\n        stop=None,\n        temperature=0.7,\n    )\n    return response.choices[0].message['content']\n\n\ndef generate_speech(text):\n    \"\"\"\n    Function to generate speech from text using gTTS.\n    \"\"\"\n    gtts = gTTS(text=text, lang=\"en-au\")\n    gtts.save(\"output.mp3\")\n\n\ndef recognize_speech():\n    \"\"\"\n    Function to recognize speech using Google Speech Recognition.\n    \"\"\"\n    recognizer = sr.Recognizer()\n\n    with sr.Microphone() as source:\n        print(\"Speak:\")\n        audio = recognizer.listen(source)\n\n    try:\n        print(\"Recognizing...\")\n        text = recognizer.recognize_google(audio)\n        print(\"You:\", text)\n        return text\n    except sr.UnknownValueError:\n        print(\"Could not understand audio.\")\n        return \"\"\n    except sr.RequestError as error:\n        print(f\"Error: {error}\")\n        return \"\"\n\n\ndef perform_google_search(query):\n    \"\"\"\n    Function to perform a Google search using the Custom Search JSON API.\n    \"\"\"\n    params = {\n        'key': API_KEY,\n        'cx': SEARCH_ENGINE_ID,\n        'q': query\n    }\n\n    response = requests.get(ENDPOINT, params=params, timeout=5)\n    search_results = response.json()\n\n    if 'items' in search_results:\n        results = search_results['items']\n        for result in results:\n            print(result['title'])\n            print(result['link'])\n            print(result['snippet'])\n            print()\n    else:\n        print(\"No results found.\")\n\n\ndef play_audio():\n    \"\"\"\n    Function to play the speech audio.\n    \"\"\"\n    sound = AudioSegment.from_mp3(\"output.mp3\")\n    play(sound)\n\n\ndef chatbot():\n    \"\"\"\n    Main chatbot loop.\n    \"\"\"\n    username = input(\"Enter your username: \")\n    print(f\"Hi {username}! 
(Type '!search' to query Google Search, \\\n          type your message and press 'Enter' to chat, \\\n          press 'Enter' on an empty line to respond with voice input, type 'quit' to exit)\")\n    role = \"I am your helpful assistant. \\\n        I try hard to give new and interesting replies. \\\n        I'm also funny, witty, charming, and a great programmer. \"\n\n    while True:\n        user_input = input(\"You: \")\n        if user_input.lower() == \"quit\":\n            break\n        if user_input.startswith('!search'):\n            query = user_input[8:]\n            perform_google_search(query)\n            continue\n        if user_input.strip() == \"\":\n            user_input = recognize_speech()\n\n        prompt = f\"User: {user_input}\\\\n{role}\\\\n\"\n        response = ask_gpt(prompt)\n\n        # Generate speech from the chatbot's response\n        generate_speech(response)\n\n        # Play the speech audio\n        play_audio()\n\n        print(f\"{username}: {response}\")\n\n        time.sleep(3)\n\n\n# Execute the chatbot\nif __name__ == \"__main__\":\n    chatbot()\n","repo_name":"webmaster-exit-1/terminal-assistant","sub_path":"terminal_assistant.py","file_name":"terminal_assistant.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
{"seq_id":"29316646986","text":"# Vowels and consonants\n# The program receives a single line of Russian letters as input. Write a program that determines the\n# number of vowel and consonant letters.\n\n# put your python code here\nvowel = \"ауоыиэяюёе\"\nconsonant = \"бвгджзйклмнпрстфхцчшщ\"\ncounter_v = 0\ncounter_c = 0\ns = input()\nfor c in s:\n    if c.lower() in vowel:\n        counter_v += 1\n    if c.lower() in consonant:\n        counter_c += 1\nprint(f\"The number of vowel letters is {counter_v}\\\\nThe number of consonant letters is {counter_c}\")\n","repo_name":"Olmeor/Generation_Python_-_a_course_for_beginners","sub_path":"9 Строковый тип данных/9.1.14 Гласные и согласные.py","file_name":"9.1.14 Гласные и согласные.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7411040951","text":"from pytorch_grad_cam import GradCAM, HiResCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad\nfrom pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget\nfrom pytorch_grad_cam.utils.image import show_cam_on_image\nfrom torchvision.models import resnet50\nfrom torchvision import transforms\nimport torch\nfrom PIL import Image\nimport cv2\nimport numpy as np\nfrom pytorch_grad_cam import GuidedBackpropReLUModel\nfrom pytorch_grad_cam.utils.image import show_cam_on_image, \\\n    deprocess_image, \\\n    preprocess_image\nfrom pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget\nimport argparse\nfrom config import cfg\nimport torchreid\nfrom model import make_model\nfrom torchvision.transforms import functional as TF\n\nfrom vit_grad_rollout import VITAttentionGradRollout\nimport timm\n\nfrom vit_rollout import VITAttentionRollout\n\ndef show_mask_on_image(img, mask):\n    img = np.float32(img) / 155\n    heatmap = cv2.applyColorMap(np.uint8(200 * mask), cv2.COLORMAP_JET)\n    heatmap = np.float32(heatmap) / 255\n    cam = heatmap + np.float32(img)\n    cam = cam / np.max(cam)\n    return np.uint8(255 * cam)\n\ndef show_mask(mask):\n    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)\n    #heatmap = heatmap / np.max(heatmap)\n    return np.uint8(heatmap)\n\n\ndef vit_cam2(opt, image_path):\n\n\n    # print('using VIT to extract features')\n    discard_ratio = 0.8\n    category_index = None\n\n    device = \"cpu\"\n    model = make_model(cfg, 
num_class=1041, camera_num=15, view_num =15)\n    model.load_param(cfg.TEST.WEIGHT)\n    model.to(device)\n    model.eval()\n\n\n    rgb_img = cv2.imread(image_path, 1)\n    #osize = rgb_img.shape[:2]\n    #print(osize)\n    \n    rgb_img = cv2.resize(rgb_img, (128, 256))\n    #rgb_img = np.float32(rgb_img) / 255\n    \n    rgb_tf = TF.to_tensor(rgb_img)\n\n    print('rgb_tf:' ,rgb_tf.shape)\n\n\n    input_tensor = TF.normalize(rgb_tf, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    input_tensor = input_tensor.unsqueeze(0)\n    print(input_tensor.shape)\n    \n    if opt.grad:\n        print(\"Doing Gradient Attention Rollout\")\n        grad_rollout = VITAttentionGradRollout(model, discard_ratio= discard_ratio)\n        mask = grad_rollout(input_tensor, category_index)\n    else:\n        attention_rollout = VITAttentionRollout(model, head_fusion=\"mean\", \n            discard_ratio= discard_ratio)\n        mask = attention_rollout(input_tensor)\n\n    \n    \n    mask = cv2.resize(mask, (128, 256))\n\n    #print('image:', rgb_tf.shape) \n\n    #rgb_tf = rgb_tf.permute(1,2,0)\n    #print('image permute:', rgb_tf.shape) \n\n    # np_img = np.array(rgb_tf)\n    np_img = rgb_img\n    print(np_img.shape)\n    mask_save = show_mask(mask)\n    mask = show_mask_on_image(np_img, mask)\n    \n    #cv2.imwrite(\"./vis_result/transreid_mask2.png\", mask_save)\n    cv2.imwrite(opt.o, mask)\n\n\n\ndef reshape_transform(tensor, height=14, width=14):\n    result = tensor[:, 1:, :].reshape(tensor.size(0),\n                                      height, width, tensor.size(2))\n\n    # Bring the channels to the first dimension,\n    # like in CNNs.\n    result = result.transpose(2, 3).transpose(1, 2)\n    return result\n\ndef vit_cam(opt, image_path):\n    img = cv2.imread(image_path, 1)\n    osize = img.shape[:2]\n    img = np.float32(img) / 255\n    print('size:', osize)\n    rgb_img = cv2.resize(img, (224, 224))\n    input_tensor = preprocess_image(rgb_img, mean=[0.5, 0.5, 0.5],\n                                    std=[0.5, 0.5, 0.5])\n\n\n    # If None, returns the map for the highest scoring category.\n    # Otherwise, targets the requested category.\n\n\n    targets = None  # or [ClassifierOutputTarget(770)] for a specific category\n    model = torch.hub.load('facebookresearch/deit:main',\n                           'deit_base_patch16_224', pretrained=True)\n    model.eval()\n\n    target_layers = [model.blocks[-1].norm1]\n    \n    # AblationCAM and ScoreCAM have batched implementations.\n    # You can override the internal batch size for faster computation.\n    cam = GradCAMPlusPlus(model=model,\n                          target_layers=target_layers,\n                          use_cuda=False,\n                          reshape_transform=reshape_transform)\n    \n    grayscale_cam = cam(input_tensor=input_tensor,\n                        targets=targets,\n                        eigen_smooth=False,\n                        aug_smooth=False)\n    \n    grayscale_cam = grayscale_cam[0, :]\n    \n    grayscale_cam = cv2.resize(grayscale_cam, (osize[1], osize[0]))\n\n    cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True)\n    cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)\n\n    cv2.imwrite(\"./cmresult_Vit2.png\", cam_image)\n\n\n\n\n\n\n\ndef OSnet_cam(image_path, opt):\n    #resnet on imageNet\n    #model = resnet50(pretrained=True)\n    # OSnet for reid\n    print('using OSnet to extract features')\n    # model = torchreid.models.build_model(\n    #     name=\"resnet50\",\n    #     num_classes= 0,\n    #     loss=\"softmax\",\n    #     pretrained=True\n    # )\n    model = torchreid.models.build_model('osnet_x1_0', 1)\n    torchreid.utils.load_pretrained_weights(model, '../weights/osnet_x1_0_MS_D_C.pth')\n    model = model.to('cpu')\n    print(model)\n    model.eval()\n    #print(model)\n    target_layers = [model.conv5]\n    #resnet\n    rgb_img = cv2.imread(image_path, 1)[:, :, ::-1]\n    rgb_img = np.float32(rgb_img) / 255\n    input_tensor = preprocess_image(rgb_img,\n                                    mean=[0.485, 0.456, 0.406],\n                                    std=[0.229, 0.224, 0.225])# Create an input 
tensor image for your model..\n \n cam = GradCAMPlusPlus(model=model, target_layers=target_layers, use_cuda=False)\n targets = None #shirt\n grayscale_cam = cam(input_tensor=input_tensor, targets=targets,eigen_smooth=False,\n aug_smooth=False)\n # In this example grayscale_cam has only one image in the batch:\n grayscale_cam = grayscale_cam[0, :]\n cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)\n cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(opt.o, cam_image)\n\n\n\nif __name__ == '__main__':\n\n\n image_path = './fc2.png'\n\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-cnn', default=False, help='use cnn')\n parser.add_argument('-vit', default=False, help='use transformer')\n parser.add_argument('-grad', default=False, help='grad rollout')\n parser.add_argument(\n \"--o\", default=\"\", help=\"output name\", type=str)\n parser.add_argument(\n \"--c\", default=\"\", help=\"path to config file\", type=str)\n \n opt = parser.parse_args()\n\n\n if opt.c != \"\":\n cfg.merge_from_file(opt.c)\n cfg.freeze()\n\n if opt.cnn:\n OSnet_cam(image_path, opt)\n\n if opt.vit:\n vit_cam2(opt, image_path)\n\n","repo_name":"JamesLiao714/SSUTracker","sub_path":"TransReID/attmap.py","file_name":"attmap.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"36820466715","text":"from django.http import HttpResponse\nfrom django.test import TestCase\nfrom tellmeastory.models import User , Node\n\nCOOKIE_NAME: str = \"StoryUserLoggedIn\"\nUSERNAME: str = \"SomeUser\"\n\n\ndef insert_registered_user(username: str , password: str , dname: str) -> User:\n return User.objects.create(\n username=username ,\n password=password ,\n display_name=dname\n )\n\n\ndef insert_story_node(post_id: str , node_title: str , node_content: str , node_author: User) -> Node:\n return Node.objects.create(\n post_id=post_id ,\n node_title=node_title ,\n node_content=node_content ,\n node_author=node_author\n\n )\n\n\nclass CrateAndViewStories(TestCase):\n\n def test_story_page_view_invalid_information(self) -> None:\n user_obj = insert_registered_user(\"user1\" , \"pwisthepw\" , \"My Display Name\")\n insert_story_node(\"skjfiw-fjwood-wkkdld\" , \"My Story Title\" , \"This is the content of my story\" , user_obj)\n\n res: HttpResponse = self.client.get(\"/post/INVALIDPOST/\")\n\n # Make sure that the page loaded\n self.assertEqual(res.status_code , 200)\n # Make sure that an invalid post message is received\n self.assertContains(res , \"You have tried to access a story page that does not exist.\")\n\n return\n\n def test_story_page_view_valid_information(self) -> None:\n post_id = \"skjfiw-fjwood-wkkdld\"\n user_obj = insert_registered_user(\"user1\" , \"pwisthepw\" , \"My Display Name\")\n story_node = insert_story_node(post_id , \"My Story Title\" , \"This is the content of my story\" , user_obj)\n\n res: HttpResponse = self.client.get(\"/post/\" + post_id + \"/\")\n\n # Make sure that the page loaded\n self.assertEqual(res.status_code , 200)\n\n # Make sure that the post title is displayed\n self.assertContains(res , \"My Story Title\")\n\n # Make sure that the post content is displayed\n self.assertContains(res , \"This is the content of my story\")\n\n # Make Sure Reactions are Shown\n self.assertContains(res , \"0\")\n\n UserGet = User.objects.get(id=user_obj.id)\n # Add thumbs down Reaction\n story_node.add_reaction(\"thumbsdown\" , UserGet)\n\n res: HttpResponse = 
self.client.get(\"/post/\" + post_id + \"/\")\n\n        # Check to make sure the thumbs down has displayed 1 node\n        self.assertContains(res , \"1\")\n\n        # Check to make sure the actual count is 1\n        self.assertEqual(story_node.num_reactions_of_emoji(\"thumbsdown\") , 1)\n\n        return\n","repo_name":"Shah-Kush/TellMeAStory4","sub_path":"story/tellmeastory/tests/test_storyview.py","file_name":"test_storyview.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"3654200396","text":"# sum of natural numbers up to num\nnum = 25\n\nif num < 0:\n    print(\"enter a positive number\")\nelse:\n    sum = 0\n    # use while loop to iterate until zero\n    while(num > 0):\n        print(\"Current num value is {0} & sum value is {1}: \".format(num, sum))\n        sum += num\n        num -= 1\n    print(\"The sum is\",sum)\n    ","repo_name":"JaySimaria/python-programs","sub_path":"Natural Numbers.py","file_name":"Natural Numbers.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"1646978808","text":"import dis\nimport os\nimport discord\nfrom discord import app_commands\nfrom discord.ext import commands\nfrom discord.utils import get\nfrom datetime import datetime\nimport json\n# from keep_alive import keep_alive #Uncomment\nfrom utils import *\nfrom asyncio import run as AsyncRun\n\n# Dev Only\nfrom dotenv import load_dotenv\nload_dotenv() \n\ntoken = os.environ['TOKEN']\n\n#-----------------------------------------Customize for Unique Implementations-----------------------------------------------#\nconfig=config_dict\nbotPrefix=config['botPrefix']\nspecialServers=[832994908973170769]\nmasterGuild=config['masterGuild']\nupdateChannel=config['updateChannel']\nreaction_emojis=[\n    \"\\\\U0001F512\",\n    \"\\\\U0001F513\",\n    \"\\\\U0001F441\",\n    \"\\\\u23ea\",\n    \"\\\\u25c0\",\n    \"\\\\u25b6\",\n    \"\\\\u23e9\",\n    \"\\\\U00002705\",\n    \"\\\\U0001f7e5\",\n    \"\\\\U0001f7e6\",\n    \"\\\\U0001f7e7\",\n    \"\\\\U0001f7e8\",\n    \"\\\\U0001f7e9\",\n    \"\\\\U0001f7ea\",\n    \"\\\\U0001f7eb\",\n    \"\\\\U00002b1b\",\n    \"\\\\U00002b1c\"\n    ]  # no trailing comma: it would turn this list into a 1-tuple and break the membership checks below\nmonitorName='private bot monitor'\n# client = commands.Bot(command_prefix=config['botPrefix']+\" \",intents=discord.Intents().all(),case_insensitive=True, description=description, owner_id=710430416045080656)\n#-------------------------------------------Important Customizable Functions-------------------------------------------------#\n\nasync def load_cogs(client):\n    await client.load_extension('extensions.tester')\n    await client.load_extension('extensions.tmdb_cog')\n    await client.load_extension('extensions.fun')\n    await client.load_extension('extensions.general')\n    await client.load_extension('extensions.dicti_cog')\n    # await client.load_extension('extensions.tourney')\n    # await client.load_extension('extensions.rxnrole')\n    await client.load_extension('extensions.games')\n    print(\"Loaded Cogs\")\n\nasync def unload_cogs(client):\n    await client.unload_extension('extensions.tester')\n    await client.unload_extension('extensions.tmdb_cog')\n    await client.unload_extension('extensions.fun')\n    await client.unload_extension('extensions.general')\n    await client.unload_extension('extensions.dicti_cog')\n    # await client.unload_extension('extensions.tourney')\n    # await client.unload_extension('extensions.rxnrole')\n    await client.unload_extension('extensions.games')\n    print(\"Unloaded Cogs\")\n\nasync def reload_cogs(client):\n    await client.reload_extension('extensions.tester')\n    
await client.reload_extension('extensions.tmdb_cog')\n await client.reload_extension('extensions.fun')\n await client.reload_extension('extensions.general')\n await client.reload_extension('extensions.dicti_cog')\n # await client.reload_extension('extensions.tourney')\n # await client.reload_extension('extensions.rxnrole')\n await client.reload_extension('extensions.games')\n print(\"Reloaded Cogs\")\n\n# client=commands.Bot(command_prefix=config['botPrefix']+' ', description=config['description'], intents=discord.Intents.all(), case_insensitive=True,owner_id=config['owner_id'])\n\nclass MyClient(commands.Bot):\n def __init__(self, config) -> None:\n myIntents=discord.Intents.all()\n super().__init__(command_prefix=config['botPrefix']+' ', description=config['description'], intents=myIntents, case_insensitive=True,owner_id=config['owner_id'])\n \n async def setup_hook(self):\n await load_cogs(self)\n comms=await self.tree.sync()\n print(f'Synced {len(comms)} app_commands.')\n\nclient=MyClient(config)\n\ndef is_me(interaction: discord.Interaction) -> bool:\n return interaction.user.id == 710430416045080656\n\nasync def send_on_pvt_channel_creation(channel):\n await channel.send(\"Hello There!\")\n title=\"Welcome to your Private Text Channel\"\n description=\"Once your friends have joined the channel you may be able to lock it and chat without interruptions.\\n\\u200b\"\n colour=discord.Colour.green()\n fields = [\n field(\n \"To Lock the Channel click on :lock:\",\n \"**To Unlock the Channel click on :unlock:**\\n**To Toggle the Channel visibility click on :eye:**\"\n ),\n field(\n \"To invite someone into your channel\",\n f\"Type: `{botPrefix} pvtinvite @username`\"\n ),\n field(\n \"To kick an unwanted person from the channel\",\n f\"Type: `{botPrefix} votekick @username`\"\n ),\n field(\n \"Lock Status\",\n \"Unlocked\",\n True\n ),\n field(\n \"Channel Visibility\",\n \"Visible\",\n True\n ),\n field(\n 'Use `pb commands` to see full list of commands.',\n \"\\u200b\"\n )\n ]\n view=ChannelControls()\n message=await send_embed(channel,title,description,colour,fields=fields,footer=botPrefix.upper()+\"PVT\"+str(channel.id)[-5:],view=view)\n return message\n\n#-------------------------------------------------------General Commands-----------------------------------------------------#\n#inspire command\n@client.hybrid_command(name='inspire',help='Returns an inspirational quote from the interwebs.')\nasync def inspire(ctx):\n quote=get_quote()\n await ctx.send(\"`\"+quote+\"`\")\n\n\n#hello command\n@client.command(help=\"Says Hello.\")\nasync def hello(ctx):\n await ctx.send(\"Hello There!\")\n\n#----------------------------------------------------Developer Command-------------------------------------------------------#\n#refresh command\n@client.hybrid_command(name='refresh',help=\"Reloads all Cogs. 
Dev-only Command\",hidden=True)\n@app_commands.check(is_me)\nasync def refresh(ctx):\n    if ctx.author.id==710430416045080656:\n        await reload_cogs(client)\n        await ctx.send(\"All cogs reloaded.\")\n    else:\n        await ctx.send(\"You do not have sufficient permissions to invoke this command at the moment.\")\n\n@refresh.error\nasync def is_me_error(ctx,error):\n    if isinstance(error,commands.errors.CheckFailure):\n        response_embed=await send_embed(ctx.channel,\"Error\",\"You do not have sufficient permissions to invoke this command at the moment.\",discord.Color.red(),isInteractionResponse=True)\n        await ctx.interaction.response.send_message(embed=response_embed)\n\n#sync command\n@client.tree.command(name='sync',description=\"Sync all app_commands. Dev-only Command\")\n@app_commands.check(is_me)\nasync def _sync(interaction:discord.Interaction):\n    comms=await client.tree.sync()\n    print(f\"Synced {len(comms)} app_commands.\")\n    await interaction.response.send_message(content=f\"{len(comms)} app commands synced with all guilds.\")\n\n@_sync.error\nasync def sync_error(interaction,error):\n    if isinstance(error,app_commands.errors.CheckFailure):\n        response_embed=await send_embed(interaction.channel,\"Error\",\"You do not have sufficient permissions to invoke this command at the moment.\",discord.Color.red(),isInteractionResponse=True)\n        await interaction.response.send_message(embed=response_embed)\n    else:\n        raise\n\n\n#------------------------------------------------Private Channel Commands----------------------------------------------------#\n#votekick command\n@client.command(help=\"Initialises vote to Kick member of Private Voice Channel.\")\nasync def votekick(ctx, member: discord.Member):\n    #error if person not connected to a channel, handle errors\n    if ctx.author.voice.channel==member.voice.channel:\n        fields=[\n            field(\n                f\"To Kick {member.name}:\",\n                f\"Click on :white_check_mark: to kick `{member.name}` from the private channel.\\\\n `{member.name}` will only be kicked if majority votes to.\"\n            ),\n            field(\n                f\"To Keep {member.name}:\",\n                \"Click on :negative_squared_cross_mark:\"\n            )\n        ]\n        msg= await send_embed(ctx.message.channel,\"Vote to Kick\",f'`{member.name}` will be kicked from the private channel if majority votes.',discord.Color.red(),fields=fields,footer=botPrefix.upper()+\"VKE\"+str(ctx.message.channel.id)[-5:])\n        await msg.add_reaction('\\\\U00002705')\n        await msg.add_reaction('\\\\U0000274E')\n    else:\n        await ctx.send(f\"{member.name} doesn't appear to be connected to your private Voice Channel\")\n\n\n#pvtinvite to pvt channel command\n@client.hybrid_command(help=\"Invites mentioned user to private voice channel.\")\nasync def pvtinvite(ctx,member: discord.Member):\n    if ctx.message.channel.name.endswith('-channel'):\n        for roles in ctx.author.roles:\n            if roles.name.endswith('channel member'):\n                role=roles\n        await member.add_roles(role)\n        try:\n            await member.send(f\"`{ctx.author.name}` has invited you to join a private channel: {ctx.message.channel.mention} on the server: `{ctx.message.guild.name}`.\\\\nYou should also probably join the Voice Channel with the same name. \\\\nCheers!\")\n            await ctx.send(f\"`{member.name}` has been invited to join this private channel.\")\n        except:\n            await ctx.send(f\"`{member.name}` has their DM's closed. 
Nevertheless, I have given them access to your channel.\")\n\n\n#------------------------------------------------------Event Listeners-------------------------------------------------------#\n@client.event\nasync def on_ready():\n    print('We are logged in as {0.user}'.format(client))\n    # TODO: Uncomment for Prod\n    # channel=client.get_channel(updateChannel)\n    # await channel.purge(limit=3,check=is_me)\n    # response_time,avg,status=get_bot_status()\n    # if status==2:\n    #     print(\"UptimeRobot reports the bot to be UP.\")\n    # else:\n    #     print(\"UptimeRobot does not give an accurate report.\")\n    # await send_embed(channel,\"Status\",f\"**{config['botName']} is now online and operational.**\",discord.Color.green(),timestamp=datetime.now(),footer='clear',fields=[field(\"Discord API\",f\"{round(client.latency * 1000)} ms\",True),field(\"Round-Trip Response\",f\"{response_time}\"+(\" ms\" if response_time!='Unavailable' else '')+f\"\\\\nAverage: {avg}\"+(\" ms\" if avg!='Unavailable' else ''),True)])\n    # await send_embed(channel,\"\",\"It can take upto **4** minutes to confirm that the bot is down.\",footer='If you encounter an undetected downtime head over to #bug-report.')\n\n#Reaction Listeners\n@client.event\nasync def on_raw_reaction_add(payload):\n    if payload.member.bot!=0:\n        return\n    if not payload.emoji.name in reaction_emojis:\n        return\n    channel=payload.member.guild.get_channel(payload.channel_id)\n    message=await channel.fetch_message(payload.message_id)\n    embed=message.embeds[0]\n    if not embed.footer:\n        return\n    id=embed.footer.text[0:10]\n    if not id.lower().startswith(botPrefix.lower()):\n        return\n    if not id[-5:]==str(payload.channel_id)[-5:]:\n        print(\"Rxn: Channel Error\",id[-5:],str(payload.channel_id)[-5:])\n        return\n    if id[2:].startswith(\"RR\"):\n        #ReactionRole\n        role_name=None  # stays None if no embed field matches the reacted emoji\n        for em_pxy in embed.fields:\n            if em_pxy.name.startswith(payload.emoji.name):\n                role_name=em_pxy.name[len(payload.emoji.name)+3:]\n                break\n        if role_name:\n            role=None  # stays None if the guild has no role with that name\n            for r in payload.member.guild.roles:\n                if r.name==role_name:\n                    role=r\n                    break\n            if role:\n                try:\n                    await payload.member.add_roles(role,reason=f\"Reacted on {config['botName']}'s Reaction Role post with id: {id}\")\n                except:\n                    print(\"RR: Role Assign Failed due to:\")\n                    await channel.send(f\"{payload.member.mention}, I could not give you the {role.mention} role.\")\n                    raise\n            else:\n                print(f\"RR: Couldn't find role with rolename: {role_name} on guild: {payload.member.guild.name}\")\n        else:\n            print(\"RR: Couldn't find rolename in RR embed.\")\n        return\n    elif id[2:].startswith(\"PVT\"):\n        #lock pvt room\n        if payload.emoji.name=='\\\\U0001F512':\n            # channel= client.get_channel(payload.channel_id)\n            guild=payload.member.guild\n            if channel.name.endswith('-channel'):\n                for role in payload.member.roles:\n                    if role.name.endswith('channel member'):\n                        vChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+'\\\\'s Channel')\n                        role=role\n                        break\n                #voicechannel\n                permsMember = vChannel.overwrites_for(role)\n                permsMember.connect=True\n                await vChannel.set_permissions(role, overwrite=permsMember)\n                permsEveryone = vChannel.overwrites_for(guild.default_role)\n                permsEveryone.connect=False\n                await vChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n                msg=await channel.fetch_message(payload.message_id)\n                #txtchannel\n                # txtChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+\"-channel\")\n                txtChannel=channel\n                permsMember = txtChannel.overwrites_for(role)\n                permsMember.send_messages=True\n                await txtChannel.set_permissions(role, overwrite=permsMember)\n                
permsEveryone = txtChannel.overwrites_for(guild.default_role)\n permsEveryone.send_messages=False\n await txtChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #updation\n # msg=await channel.fetch_message(payload.message_id)\n await update_lock_status(vChannel,message,guild)\n await message.remove_reaction(payload.emoji.name,payload.member)\n \n\n #Unlock Pvt Room\n if payload.emoji.name=='\\U0001F513':\n guild=payload.member.guild\n # channel= client.get_channel(payload.channel_id)\n if '-channel' in channel.name:\n for role in payload.member.roles:\n if 'channel member' in role.name:\n vChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+'\\'s Channel')\n role=role\n break\n #vChannel\n permsMember = vChannel.overwrites_for(role)\n permsMember.connect=None\n await vChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = vChannel.overwrites_for(guild.default_role)\n permsEveryone.connect=None\n await vChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #txtchannel\n # txtChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+\"-channel\")\n txtChannel=channel\n permsMember = txtChannel.overwrites_for(role)\n permsMember.send_messages=None\n await txtChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = txtChannel.overwrites_for(guild.default_role)\n permsEveryone.send_messages=None\n await txtChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #updation\n # msg=await channel.fetch_message(payload.message_id)\n await update_lock_status(vChannel,message,guild)\n await message.remove_reaction(payload.emoji.name,payload.member)\n\n\n #Toggle Visibility\n if payload.emoji.name=='\\U0001F441':\n guild=payload.member.guild\n # channel= client.get_channel(payload.channel_id)\n if '-channel' in channel.name:\n for role in payload.member.roles:\n if 'channel member' in role.name:\n vChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+'\\'s Channel')\n role=role\n break\n curr=check_vis_perms(vChannel,guild)\n if curr:\n permsMember = vChannel.overwrites_for(role)\n permsMember.view_channel=True\n await vChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = vChannel.overwrites_for(guild.default_role)\n permsEveryone.view_channel=False\n await vChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #txtchannel\n # txtChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+\"-channel\")\n txtChannel=channel\n permsMember = txtChannel.overwrites_for(role)\n permsMember.view_channel=True\n await txtChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = txtChannel.overwrites_for(guild.default_role)\n permsEveryone.view_channel=False\n await txtChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #updation\n # msg=await channel.fetch_message(payload.message_id)\n await update_visibility_status(vChannel,message,guild)\n await message.remove_reaction(payload.emoji.name,payload.member)\n else:\n permsMember = vChannel.overwrites_for(role)\n permsMember.view_channel=True\n await vChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = vChannel.overwrites_for(guild.default_role)\n permsEveryone.view_channel=None\n await vChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #txtchannel\n # txtChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+\"-channel\")\n txtChannel=channel\n permsMember = txtChannel.overwrites_for(role)\n 
permsMember.view_channel=True\n await txtChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = txtChannel.overwrites_for(guild.default_role)\n permsEveryone.view_channel=None\n await txtChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n #updation\n # msg=await channel.fetch_message(payload.message_id)\n await update_visibility_status(vChannel,message,guild)\n await message.remove_reaction(payload.emoji.name,payload.member)\n elif id[2:].startswith(\"AVC\"):\n #Carousel control\n buttons=['\\u23ea','\\u25c0','\\u25b6','\\u23e9']\n if payload.emoji.name in buttons:\n #is Carousel\n if payload.guild_id in specialServers:\n #is AV or Test Server\n # channel = client.get_channel(payload.channel_id)\n # msg = await channel.fetch_message(payload.message_id)\n #print(msg)\n # embed=msg.embeds[0]\n if embed != None:\n if embed.description.startswith('**Movie Synopsis**'):\n #is a Movie catalog\n index=int(embed.title[0])\n cat_size=get_catalog_size()\n #print(f\"Initial Given Index= {index}\")\n if index==1:\n if embed.title[1].isdigit():\n index=(index*10)+int(embed.title[1])\n #print(f'Given Index= {index}')\n elif index==0:\n #print(\"Index is 0.\")\n return\n index=index-1\n #print(f\"Normalised Index= {index}\")\n prev_index=index\n if payload.emoji.name==buttons[0]:\n index=0\n #print(f\"SeekStart, index={index}\")\n elif payload.emoji.name==buttons[1]:\n if index != 0:\n index=index-1\n #print(f\"Prev, index={index}\")\n elif payload.emoji.name==buttons[2]:\n if index != (cat_size-1):\n index=index+1\n #print(f\"Next, index={index}\")\n else:\n index=(cat_size-1)\n #print(f\"SeekEnd, index={index}\")\n #print(index)\n if index!=prev_index:\n title,description,image_url,link,fields,author,thumbnail_url=data_catalog(index)\n embed=await send_embed(channel,title,description,image_url=image_url,fields=[field(fields[0],fields[1])],send=False,author=author,thumbnail_url=thumbnail_url,footer=botPrefix.upper()+\"AVC\"+str(channel.id)[-5:]+'Use the Arrow icons below to navigate through the catalog.')\n await message.edit(embed=embed)\n #print(\"Display Updated\")\n await message.remove_reaction(payload.emoji.name,payload.member)\n else:\n print(\"Nothing to Update\")\n await message.remove_reaction(payload.emoji.name,payload.member)\n elif id[2:].startswith(\"VKE\"):\n #vote to kick in pvt room\n if payload.emoji.name=='\\U00002705':\n # channel= client.get_channel(payload.channel_id)\n if channel.name.endswith('-channel'):\n #print(channel.name)\n #print(payload.member.roles)\n for role in payload.member.roles:\n #print(role.name)\n if role.name.endswith('channel member'):\n guild=payload.member.guild\n vChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+'\\'s Channel')\n role=role\n #print(role.name)\n break\n #print(role)\n # msg = await channel.fetch_message(payload.message_id)\n #opp_reaction=get(msg.reactions, emoji='\\U0000274E')\n #opp_reactors=await opp_reaction.users().flatten()\n #if payload.member.name in opp_reactors:\n # await msg.remove_reaction(opp_reaction.emoji,payload.member)\n reaction = get(message.reactions, emoji='\\U00002705')\n if reaction and reaction.count > (round(len(payload.member.voice.channel.members)/2)):\n #print(reaction.count)\n # embed=msg.embeds[0]\n #superset=client.get_all_members()\n targetName=embed.description[1:-60]\n #for person in superset:\n # if person.name==target:\n # target=person\n # break\n print(targetName)\n target=guild.get_member_named(targetName)\n if target==payload.member:\n await 
channel.send(f\"Why kick yourself {payload.member.mention}?\")\n else:\n await target.move_to(None)\n #print(\"Target Disconnected.\")\n #print (role.id)\n await target.remove_roles(role)\n #print(\"Target's Role Removed.\")\n await message.delete()\n await channel.send(f\"{target.name} has been kicked.\")\n\n@client.event\nasync def on_raw_reaction_remove(payload):\n# tGuild=client.get_guild(payload.guild_id)\n# tMember=tGuild.get_member(payload.user_id)\n# if tMember.bot==0:\n# if payload.emoji.name=='\\U0001F441':\n# #guild=client.get_guild(payload.guild_id)\n# channel= client.get_channel(payload.channel_id)\n# if '-channel' in channel.name:\n# for role in tMember.roles:\n# if 'channel member' in role.name:\n# vChannel=get_channel_by_name(tGuild,channel_name=str(role.name)[0:-15]+'\\'s Channel')\n# role=role\n# break\n# permsMember = vChannel.overwrites_for(role)\n# permsMember.view_channel=True\n# await vChannel.set_permissions(role, overwrite=permsMember)\n# permsEveryone = vChannel.overwrites_for(tGuild.default_role)\n# permsEveryone.view_channel=None\n# await vChannel.set_permissions(tGuild.default_role, overwrite=permsEveryone)\n# msg=await channel.fetch_message(payload.message_id)\n# await update_visibility_status(vChannel,msg,tGuild)\n# #await msg.remove_reaction(payload.emoji.name,payload.member)\n\n# if payload.emoji.name=='\\U0001F512':\n# channel= client.get_channel(payload.channel_id)\n# if '-channel' in channel.name:\n# for role in tMember.roles:\n# if 'channel member' in role.name:\n# vChannel=get_channel_by_name(client.get_guild(payload.guild_id),channel_name=str(role.name)[0:-15]+'\\'s Channel')\n# role=role\n# break\n# permsMember = vChannel.overwrites_for(role)\n# permsMember.connect=None\n# await vChannel.set_permissions(role, overwrite=permsMember)\n# permsEveryone = vChannel.overwrites_for(client.get_guild(payload.guild_id).default_role)\n# permsEveryone.connect=None\n# await vChannel.set_permissions(client.get_guild(payload.guild_id).default_role, overwrite=permsEveryone)\n pass\n\nclass ChannelControls(discord.ui.View):\n def __init__(self):\n super().__init__()\n\n @discord.ui.button(label='Lock',emoji='🔒')\n async def lock_channel(self, interaction:discord.Interaction,button: discord.ui.Button):\n guild=interaction.guild\n txtChannel=interaction.channel\n for role in interaction.user.roles:\n if role.name.endswith('channel member'):\n vChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+'\\'s Channel')\n role=role\n break\n # Voice Channel\n permsMember = vChannel.overwrites_for(role)\n permsMember.connect=True\n await vChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = vChannel.overwrites_for(guild.default_role)\n permsEveryone.connect=False\n await vChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n # Text Channel\n permsMember = txtChannel.overwrites_for(role)\n permsMember.send_messages=True\n await txtChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = txtChannel.overwrites_for(guild.default_role)\n permsEveryone.send_messages=False\n await txtChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n button.disabled=True\n\n @discord.ui.button(label='Unlock',emoji='🔓')\n async def unlock_channel(self, interaction:discord.Interaction,button: discord.ui.Button):\n guild=interaction.guild\n txtChannel=interaction.channel\n for role in interaction.user.roles:\n if role.name.endswith('channel member'):\n 
vChannel=get_channel_by_name(guild,channel_name=str(role.name)[0:-15]+'\\'s Channel')\n role=role\n break\n # Voice Channel\n permsMember = vChannel.overwrites_for(role)\n permsMember.connect=None\n await vChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = vChannel.overwrites_for(guild.default_role)\n permsEveryone.connect=None\n await vChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n # Text Channel\n permsMember = txtChannel.overwrites_for(role)\n permsMember.send_messages=None\n await txtChannel.set_permissions(role, overwrite=permsMember)\n permsEveryone = txtChannel.overwrites_for(guild.default_role)\n permsEveryone.send_messages=None\n await txtChannel.set_permissions(guild.default_role, overwrite=permsEveryone)\n button.disabled=True\n\n#Voice State Listeners\n@client.event\nasync def on_voice_state_update(member, before, after):\n if member.bot:\n return\n #join channel\n if not before.channel:\n #print(f'{member.name} joined {after.channel.name}')\n pass\n\n #left channel\n if before.channel and not after.channel:\n #print(f'{member.name} left {before.channel.name}')\n pass\n\n #switch channel\n if before.channel and after.channel:\n if before.channel.id != after.channel.id:\n #print(f'{member.name} switched channels and is now in {after.channel.name}.')\n pass\n elif member.voice.self_mute:\n #print(f'{member.name} muted self.')\n pass\n elif member.voice.self_deaf:\n #print(f'{member.name} deafened self.')\n pass\n elif member.voice.self_stream:\n #print(f'{member.name} started streaming.')\n pass\n else:\n print(\"Something else happened.\")\n\n #creating channels\n if after.channel is not None:\n if after.channel.name == config['templateChannel']:\n #print(\"Transfer\")\n guild=after.channel.guild\n check=await if_owner(guild,member)\n if not check:\n pvt_text_channel, pvt_text_channel_guild = await create_text_channel(guild,f'{member.name}-channel'.lower(),category_name=\"Private Channels\")\n channel = await create_voice_channel(guild,f'{member.name}\\'s Channel'.lower(),category_name=\"Private Channels\",user_limit=None)\n if channel is not None:\n await member.move_to(channel)\n message=await send_on_pvt_channel_creation(pvt_text_channel)\n newRole=await pvt_text_channel_guild.create_role(\n name=f'{member.name} channel member'.lower(),\n hoist=False,\n mentionable=False,\n reason='Created Pvt Channel'\n )\n await member.add_roles(newRole)\n await message.add_reaction('\\U0001F512')\n await message.add_reaction('\\U0001F513')\n await message.add_reaction('\\U0001F441')\n await message.pin()\n if guild.id in specialServers:\n await pvt_text_channel.send(\"`Watch F.R.I.E.N.D.S: The Reunion as a part of AV Club's SNL here:`\")\n title,description,image_url,link,fields=initial_catalog()\n thumbnail_url='https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-19/s150x150/30590396_163613307799664_9089326030237204480_n.jpg?tp=1&_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_ohc=GppUZ8OOmQYAX8ArS8i&edm=ABfd0MgBAAAA&ccb=7-4&oh=b9f0d6377eae12d363e92b4ffd4a127c&oe=60B4CDD2&_nc_sid=7bff83'\n author='AV Club\\'s SNL'\n msg=await send_embed(pvt_text_channel,title,description,image_url=image_url,fields=[field(fields[0],fields[1])],thumbnail_url=thumbnail_url,author=author,footer=botPrefix.upper()+\"AVC\"+str(pvt_text_channel.id)[-5:]+' For assistance use pb support or contact a server admin with @Admin.')\n #buttons=['\\u23ea','\\u25c0','\\u25b6','\\u23e9']\n #for button in buttons:\n # await msg.add_reaction(button)\n else:\n #print(\"Channel Exists, 
Moved.\")\n await member.move_to(get_channel_by_name(guild,member.name+'\\'s channel'))\n\n elif after.channel.category.id == get_category_by_name(after.channel.guild,\"Private Channels\").id:\n roleName=after.channel.name[0:-10]+' channel member'\n print(roleName)\n pvtRole=discord.utils.get(after.channel.guild.roles, name=roleName)\n print(pvtRole)\n await member.add_roles(pvtRole)\n\n #deleteting pvt channels once empty\n if before.channel is not None:\n guild=before.channel.guild\n if before.channel.category.id == get_category_by_name(guild,\"Private Channels\").id:\n roleName=str(before.channel.name)[0:-10]+\" channel member\"\n print(roleName+\" to remove from user for leaving.\")\n pvtRole=get(guild.roles, name=roleName)\n print(pvtRole)\n await member.remove_roles(pvtRole)\n if len(before.channel.members) == 0:\n txtchannelName=str(before.channel.name)[0:-10]+\"-channel\"\n #Delete VC\n await before.channel.delete()\n #Delete TC\n channel = get_channel_by_name(guild,txtchannelName)\n await channel.delete()\n await pvtRole.delete()\n\n@client.event\nasync def on_command_error(ctx,error):\n if isinstance(error,commands.MissingRequiredArgument):\n comm=ctx.command.name\n f=open('Commands.json','r')\n comm_list=json.load(f)\n f.close()\n for i in comm_list:\n if comm_list[i]['identifier']==comm:\n syntax=comm_list[i]['syntax']\n break\n await send_embed(ctx.channel,\"\",'One or more required inputs weren\\'t provided.',discord.Colour.red(),fields=[field('The correct syntax is:',f\"{syntax}\")],footer='clear')\n elif isinstance(error,commands.DisabledCommand):\n await send_embed(ctx.channel,'',f\"`{botPrefix} {ctx.command.name}` is temporarily disabled.\",discord.Colour.red(),footer='clear')\n elif isinstance(error,commands.MemberNotFound):\n await send_embed(ctx.channel,'',f\"User: `{error.argument}` was not found in this server.\",discord.Colour.red(),footer='clear')\n elif isinstance(error,commands.RoleNotFound):\n await send_embed(ctx.channel,'',f\"Role: `{error.argument}` was not found in this server.\",discord.Colour.red(),footer='clear')\n elif isinstance(error,commands.MissingRole):\n await send_embed(ctx.channel,'',f\"Role: `{error.missing_role}` is required to invoke that command.\",discord.Colour.red(),footer='clear')\n elif isinstance(error,commands.CommandNotFound):\n pass\n elif isinstance(error,commands.errors.CheckFailure):\n pass\n else:\n await send_embed(ctx.channel,\"Error\",f\"An unexpected error occurred. 
Use `{botPrefix} commands` to verify your syntax.\",discord.Color.red(),footer=f'Alternatively, try again later; if the issue persists use {botPrefix} support to report the issue.')\n raise error\n\n# keep_alive() # TODO: Uncomment for Prod\nclient.run(token)","repo_name":"abhinavgeethan/Private-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":31609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26159345737","text":"import matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmpl.rcParams['legend.fontsize'] = 10\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n#defino el parametro\ntheta = np.linspace(-4 * np.pi, 4 * np.pi, 100)\nvu = np.linspace(-4, 4, 100)\nz = np.linspace(-2, 2, 100)\nr = z**2 + 1\n#especifico la \"trayectoria\" x(r,theta),y(r, theta),z o w(vu)\nx = r * np.sin(theta)\ny = r * np.cos(theta)\nw = vu - 2\nax.plot(x, y, z, label='curva paramétrica 1')\nax.plot(x, w, z, label='curva paramétrica 2')\nax.legend()\n\nplt.show()\n","repo_name":"Sekilloda/astrofisica-2020-b","sub_path":"python scripts/parametrico.py","file_name":"parametrico.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71072050505","text":"# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# .T la ma tran chuyen vi, chuyen ma tran hang sang ma tran cot\n# height (cm)\nX = np.array([[147, 150, 153, 158, 163, 165, 168, 170, 173, 175, 178, 180, 183]]).T\n# weight (kg)\ny = np.array([[ 49, 50, 51, 54, 58, 59, 60, 62, 63, 64, 66, 67, 68]]).T\n# Visualize data\n#Pyplot là một module của Matplotlib cung cấp các hàm đơn giản để thêm các thành phần plot như lines, images, text, v.v. vào các axes trong figure.\nplt.plot(X, y, 'ro')\n#Axis: Chúng là dòng số giống như các đối tượng và đảm nhiệm việc tạo các giới hạn biểu đồ.\nplt.axis([140, 190, 45, 75])\nplt.xlabel('Height (cm)')\nplt.ylabel('Weight (kg)')\nplt.show()\n\n#(cân nặng) = w_1*(chiều cao) + w_0\n# Building Xbar\n#Hàm np.ones() cho phép chúng ta khởi tạo một mảng có kích thước tùy chỉnh và các phần tử trong mảng sẽ chỉ mang giá trị là số 1.\none = np.ones((X.shape[0], 1))\n#Hàm concatenate () là một hàm từ gói NumPy. Về cơ bản, hàm này kết hợp các mảng NumPy với nhau.\n#Hàm này về cơ bản được sử dụng để nối hai hoặc nhiều mảng có cùng hình dạng dọc theo một trục được chỉ định.\nXbar = np.concatenate((one, X), axis = 1)\n\n# Calculating weights of the fitting line\n#dot tich vo huong\nA = np.dot(Xbar.T, Xbar)\nb = np.dot(Xbar.T, y)\nw = np.dot(np.linalg.pinv(A), b)\nprint('w = ', w)\n# Preparing the fitting line\nw_0 = w[0][0]\nw_1 = w[1][0]\n#Hàm np. linspace() cũng là một hàm được sử dụng để tạo ra một mảng từ các dãy số được chỉ định trước. 
Hàm này sẽ tạo ra một mảng Numpy thông qua một dãy số\n# và các phần tử trong mảng sẽ được cách đều sao cho phù hợp với ví trị bắt đầu và vị trí kết thúc khoảng.\nx0 = np.linspace(145, 185, 2)\ny0 = w_0 + w_1*x0\n\n# Drawing the fitting line\nplt.plot(X.T, y.T, 'ro') # data\nplt.plot(x0, y0) # the fitting line\nplt.axis([140, 190, 45, 75])\nplt.xlabel('Height (cm)')\nplt.ylabel('Weight (kg)')\nplt.show()\n\n## run code thay du lieu train nam kha sat duong thang du doan\ny1 = w_1*155 + w_0\ny2 = w_1*160 + w_0\n\nprint( u'Predict weight of person with height 155 cm: %.2f (kg), real number: 52 (kg)' %(y1) )\nprint( u'Predict weight of person with height 160 cm: %.2f (kg), real number: 56 (kg)' %(y2) )\nfrom sklearn import datasets, linear_model\n\n# fit the model by Linear Regression\nregr = linear_model.LinearRegression(fit_intercept=False) # fit_intercept = False for calculating the bias\nregr.fit(Xbar, y)\n\n# Compare two results\nprint( 'Solution found by scikit-learn : ', regr.coef_ )\nprint( 'Solution found by (5): ', w.T)\n ##run code thay k mac loi nao trong cach tim nghiem","repo_name":"Clapboiz/Scientific-research","sub_path":"Marchine learning/Code_py/linear_regression_weight+height.py","file_name":"linear_regression_weight+height.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70786275465","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#*************************************************************************\n# > File Name: Bed2Amplicon.py\n# > Author: xlzh\n# > Mail: xiaolongzhang2015@163.com \n# > Created Time: 2021年06月08日 星期二 10时12分15秒\n#*************************************************************************\n\nimport sys\nimport os.path\n\n\nclass Fasta(object):\n def __init__(self, fa_file):\n self.fa_file = fa_file\n self.__fa_fp = self.__fa_init()\n self.__fai_dict = self.__fai_load()\n\n def __fa_init(self):\n ''' Func: open the fasta handle\n '''\n if not os.path.exists(self.fa_file):\n sys.stderr.write('Err: No such file: %s\\n' %(self.fa_file))\n sys.exit(-1)\n\n return open(self.fa_file, 'r')\n\n def __fai_load(self):\n ''' Func: load the index of fasta file\n '''\n if not os.path.exists(self.fa_file + '.fai'):\n sys.stderr.write('Err: No such index file: %s\\n' %(self.fa_file+'.fai'))\n sys.exit(-1)\n\n fai_dict = {} # {'chr1':(offset, b_len, l_len), 'chr2':(), ...}\n\n fai_fp = open(self.fa_file+'.fai', 'r')\n for line in fai_fp:\n l = line.split()\n fai_dict[l[0]] = (int(l[2]), int(l[3]), int(l[4]))\n\n return fai_dict\n\n def __read_base(self):\n ''' Func: read 1-base each time\n '''\n base = self.__fa_fp.read(1)\n if base == '': return 0\n\n return base\n\n def get_seq(self, chrom, start, end):\n ''' Func: get the fasta sequence from start to end\n '''\n seq = []\n\n if (chrom not in self.__fai_dict) or (end < start):\n sys.stderr.write('Warning: Invaild input: %s:%d-%d\\n' %(chrom,start,end))\n return ''\n\n idx = self.__fai_dict[chrom]\n d_offset = idx[0] + (start-1) + (start-1)/idx[1]*(idx[2]-idx[1])\n\n self.__fa_fp.seek(d_offset) # put the file handle to the start position\n ch, seq_len = 1, end-start+1\n\n while (ch and seq_len):\n ch = self.__read_base()\n if ch not in ['\\r', '\\n', 0]: seq.append(ch); seq_len -= 1\n\n return ''.join(seq)\n\n\ndef get_reverse_complement(primer_seq):\n ''' Func: get the reverse complement sequence\n '''\n new_seq_list = []\n Table = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}\n\n for s in 
primer_seq[::-1]:\n new_seq_list.append(Table[s])\n\n return ''.join(new_seq_list)\n\n\ndef show_error_message():\n sys.stderr.write(\"Error: invalid bed file format!\\n\")\n sys.stderr.write(\"bed file must be have 4 column and the 4-th column must has a marker of 'R' or 'F'\\n\")\n sys.stderr.write(\"to indicate reverse and forward primer\\n\")\n sys.stderr.write(\"eg. NC_045512.2\\t33\\t56\\tAmplicon1_F\\n\")\n sys.stderr.write(\" NC_045512.2\\t88\\t102\\tAmplicon1_R\\n\")\n sys.exit(-1)\n\n\ndef read_bed_file(bed_file, fasta_obj):\n ''' Func: read input bed file\n bed_dict = {\n 'amplicon1': [(forward record), (reverse record)], \n 'amplicon2': [(chr1, 3, 26), (chr1, 88, 102)]\n ...\n }\n Note: the last field must has a marker of 'R' or 'F' to indicate reverst and forward strand\n position is 0-based\n '''\n bed_dict = {}\n bed_fp = open(bed_file, \"r\")\n\n for line in bed_fp:\n llist = line.rstrip().split()\n if len(llist) < 4: show_error_message()\n\n amplicon = llist[3][:-1]\n strand_marker = llist[3][-1]\n if strand_marker not in ['F', 'R']: show_error_message()\n\n primer_seq = fasta_obj.get_seq(llist[0], int(llist[1])+1, int(llist[2])+1)\n if amplicon not in bed_dict: bed_dict[amplicon] = [None, None]\n\n if strand_marker == 'F': # forward strand\n bed_dict[amplicon][0] = (primer_seq, int(llist[2]))\n else: # reverse strand and has a marker of 'R'\n bed_dict[amplicon][1] = (get_reverse_complement(primer_seq), int(llist[1]))\n\n return bed_dict\n\n\ndef write_amplicon_file(amplicon_file, bed_dict):\n ''' Func: write the amplicon file out\n '''\n amplicon_fp = open(amplicon_file, 'w')\n amplicon_fp.write(\"#ForwardPrimer\\tReversePrimer\\tInsertLength\\tAdditionInfo\\n\")\n\n for amplicon in bed_dict:\n bed_tuple = bed_dict[amplicon]\n if not bed_tuple[0] or not bed_tuple[1]: \n continue # didn't find mathed bed record with different strand marker 'R' or 'F'\n\n insert_len = bed_tuple[1][1] - bed_tuple[0][1] + 1\n amplicon_fp.write(\"%s\\t%s\\t%d\\t%s\\n\" % (bed_tuple[0][0], bed_tuple[1][0], insert_len, amplicon))\n\n amplicon_fp.close()\n\n\ndef main():\n args = sys.argv\n\n if len(args) != 4:\n sys.stderr.write(\"usage: python Bed2Amplicon.py \\n\")\n sys.exit(-1)\n\n refer_file = args[1] # reference genome (.fasta)\n bed_file = args[2] # bed file with 0-based reference position\n amplicon_file = args[3] # converted amplicon primer file\n\n # refer_file = \"/Users/xlzh/Downloads/PTrimmer/Data/NC_045512.2.fas\"\n # bed_file = \"/Users/xlzh/Downloads/PTrimmer/Data/sarscov2_v2_primers_swift.bed\"\n # out_file = \"/Users/xlzh/Downloads/PTrimmer/Data/amplicon.txt\"\n\n fasta_obj = Fasta(refer_file)\n bed_dict = read_bed_file(bed_file, fasta_obj)\n write_amplicon_file(amplicon_file, bed_dict)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"DMU-lilab/pTrimmer","sub_path":"Test/Bed2Amplicon.py","file_name":"Bed2Amplicon.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"23552609311","text":"\"\"\"\n# File : main.py\n# Time : 2022/1/10 3:42 下午\n# Author : Qi\n# Description:\n\"\"\"\nfrom data import data_process, convert, train_test_spl\nfrom transformers import XLNetForSequenceClassification\nfrom transformers import AdamW, AutoTokenizer\nfrom model import eval, predict\nimport torch\n\n\nbatch_size = 32\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ncheckpoint = 'xlnet-base-cased'\ntokenizer = 
AutoTokenizer.from_pretrained(checkpoint)\nsentences, labels, classes, num_classes = data_process('train.csv')\ninput_ids, token_type_ids, attention_mask, labels = convert(checkpoint, sentences, labels)\ntrain_dataloader, validation_dataloader = train_test_spl(input_ids, token_type_ids, attention_mask, labels, batch_size)\n\n\nmodel = XLNetForSequenceClassification.from_pretrained('xlnet-base-cased', num_labels=num_classes).to(device)\n\n\nparam_optimizer = list(model.named_parameters())\nno_decay = ['bias', 'gamma', 'beta']\noptimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}]\n\noptimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)\n\nfor _ in range(2):\n for i, batch in enumerate(train_dataloader):\n batch = tuple(t.to(device) for t in batch)\n loss = model(batch[0], token_type_ids=batch[1], attention_mask=batch[2], labels=batch[3])[0]\n print(loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if i % 10 == 0:\n eval(model, validation_dataloader)\n\npredict(classes)\n\n\n\n","repo_name":"qitianyuu/nlp_RE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7132515747","text":"import time\nfrom db import cur, conn\n\n\ndef select_stock():\n select_ticker = input(\"Select ticker: \").upper()\n cur.execute(\n \"\"\"\n SELECT * FROM stocks\n WHERE ticker = %s\n \"\"\",\n [select_ticker],\n )\n selected_stocks = cur.fetchall()\n\n if not selected_stocks:\n print(\"Ticker does not exist!\")\n time.sleep(1)\n return\n\n for stock in selected_stocks:\n menu_width = 70\n print(\"=\" * menu_width)\n print(\n f\"\"\"\n Ticker: {stock[1]},\n Full Name: {stock[2]},\n Shares: {stock[3]},\n Price per Share: {stock[4]},\n Current Price: {stock[6]},\n Total Cost: {stock[5]},\n Current Cost: {stock[6] * stock[3]},\n Difference: {stock[8]}\n\"\"\"\n )\n print(\"=\" * menu_width)\n conn.commit()\n input(\"Press Enter to continue...\")\n","repo_name":"AleksKostadinov/stocks_portfolio","sub_path":"files/select_ticker.py","file_name":"select_ticker.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3231520612","text":"for i in range(25):\n print(\"\")\n\n\nimport openai\nimport os\n\nopenai.api_key_path = \"key.txt\"\n\nprompt = input(\"Input: \")\n\nresponse = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=f\"Assistant acts like a super funny mexican guy. Assistant speaks english. He mimics all of those classic mexican accents though. 
He's very nice and amusing to talk to.\\n\\nHuman: {prompt}\\nAssistant:\",\n    temperature=0.7,\n    max_tokens=100,\n    top_p=1,\n    frequency_penalty=0,\n    presence_penalty=0,\n    stop=[\"Human:\", \"\\n\"],\n    stream=True\n)\n\nprint(response)\n\nentireResponse = \"\"\n\nfor token in response:\n    os.system('cls' if os.name == 'nt' else 'clear')\n    entireResponse = f\"{entireResponse}{token.choices[0].text}\"\n    print(entireResponse)","repo_name":"FelixCodesTech/OpenAI-API-Answer-Streaming","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"30955976022","text":"from .realtor_authenticated_apitestcase import RealtorAuthenticatedAPITestCase\n\nfrom rest_framework import status\n\nfrom model_mommy import mommy\n\n\nclass RealtorProfileTests(RealtorAuthenticatedAPITestCase):\n    def test_get_realtor_profile(self):\n        res = self.client.get('/api/realtor/profile/')\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data['uid'], self.realtor.uid)\n\n    def test_put_realtor_profile(self):\n        profile = self.client.get('/api/realtor/profile/').data\n        profile['user']['email'] = 'realtorput@realtor.com'\n        res = self.client.put('/api/realtor/profile/', data=profile, format='json')\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data['user']['email'], 'realtorput@realtor.com')\n\n    def test_patch_realtor_profile(self):\n        profile = self.client.get('/api/realtor/profile/').data\n        profile['user']['email'] = 'realtorpatch@realtor.com'\n        res = self.client.patch('/api/realtor/profile/', data=profile, format='json')\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data['user']['email'], 'realtorpatch@realtor.com')","repo_name":"SiriusWhi/HomeCaptain_SAAS","sub_path":"homecaptain/apps/realtor/tests/realtor_profile_tests.py","file_name":"realtor_profile_tests.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10598346720","text":"#!/usr/local/bin/python3.7\nfrom sqlalchemy.orm import sessionmaker\nfrom models import QuoteDB, db_connect, create_table\n\ndef find_quotes (list_of_quotes):\n    for quote in session.query(QuoteDB.quote).filter(QuoteDB.author == author_name):\n        list_of_quotes.append(quote)\n    if ( len(list_of_quotes) == 0):\n        print (\"Not found\")\n\n\nauthor_name = input(\"Enter author full name, whose quotes you want to receive : \")\n\nengine = db_connect()\ncreate_table(engine)\nSession = sessionmaker(bind=engine)\nsession = Session()\nlist_of_quotes = []\nfind_quotes (list_of_quotes)\nprint(list_of_quotes)\n\n\nsession.close()\n","repo_name":"KochetovNicolai/Python_822","sub_path":"Mishan1k/scrapy_spider/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71899906824","text":"import cv2\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom segment_anything import sam_model_registry, SamPredictor\nfrom torchvision import transforms\n\n\ndef show_points(coords, labels, ax, marker_size=375):\n    pos_points = coords[labels == 1]\n    neg_points = coords[labels == 0]\n    ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='.', s=marker_size, edgecolor='white',\n               linewidth=1.25)\n    ax.scatter(neg_points[:, 0], neg_points[:, 1], 
color='red', marker='.', s=marker_size, edgecolor='white',\n linewidth=1.25)\n\n\ndef show_mask(mask, ax, random_color=False):\n if random_color:\n color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n else:\n color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6])\n h, w = mask.shape[-2:]\n mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n ax.imshow(mask_image)\n\n\ndef show_box(box, ax):\n x0, y0 = box[0], box[1]\n w, h = box[2] - box[0], box[3] - box[1]\n ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2))\n\n\ndef save_seg(img, save_path, mask, input_point=None, input_label=None, box=None):\n \"\"\"\n :param img: ndarray\n :param save_path:\n :param mask:\n :param input_point:\n :param input_label:\n :param box:\n :return:\n \"\"\"\n plt.figure(figsize=(10, 10))\n plt.imshow(img)\n show_points(input_point, input_label, plt.gca())\n show_mask(mask, plt.gca())\n if input_point is not None:\n show_points(input_point, input_label, plt.gca())\n if box is not None:\n show_box(box, plt.gca())\n plt.axis('off')\n plt.savefig(save_path, bbox_inches=\"tight\", pad_inches=0)\n\n\ndef save_mask(masks, save_path):\n \"\"\"\n :param masks:\n :param save_path:\n :return:\n \"\"\"\n mask = masks[0]\n mask = mask.astype(np.uint8) * 255\n mask_img = Image.fromarray(mask)\n mask_img.save(save_path)\n\n # for mask in masks:\n # h, w = mask.shape[0], mask.shape[1]\n # mask_img = Image.new('L', (w, h), 0) # 灰度图,所有mask图像都是单通道的灰度图\n # for i in range(h):\n # for j in range(w):\n # if mask[i][j]:\n # mask_img.putpixel((j, i), 255)\n # else:\n # mask_img.putpixel((j, i), 0)\n # mask_img.save(save_path)\n\n\ndef save_expand(masks, save_path, dilate_factor=15):\n \"\"\"\n :param masks: ndarray (HxW)\n :param save_path:\n :param dilate_factor:\n :return:\n \"\"\"\n mask = masks[0].astype(np.uint8) * 255\n dilate_mask = cv2.dilate(\n mask,\n np.ones((dilate_factor, dilate_factor), np.uint8),\n iterations=1\n )\n dilate_img = Image.fromarray(dilate_mask)\n dilate_img.save(save_path)\n\n # img = mask_img\n # img_tensor = transforms.ToTensor()(img)\n # dilation_kernel = torch.ones(kernel_num, kernel_num)\n # padding_num = (kernel_num - 1) // 2\n # dilated_img_tensor = torch.nn.functional.conv2d(\n # torch.nn.functional.pad(img_tensor.unsqueeze(0), (padding_num,) * 4),\n # dilation_kernel.unsqueeze(0).unsqueeze(0),\n # ).squeeze(0)\n # dilated_img = transforms.ToPILImage()(dilated_img_tensor.cpu())\n # dilated_img = binarize_image(dilated_img, threshold_pixel)\n # dilated_img.save(save_path)\n\n\nsam_checkpoint = \"./model/SAM/sam_vit_h_4b8939.pth\"\nmodel_type = \"vit_h\"\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nsam = sam_model_registry[model_type](checkpoint=sam_checkpoint)\nsam.to(device=device)\npredictor = SamPredictor(sam)\n\n\ndef inference(img_path, input_points, input_labels, input_box=None):\n \"\"\"\n :param img_path:\n :param input_points: ndarray\n :param input_labels: ndarray\n :param input_box: ndarray\n :return:\n \"\"\"\n prefix, _ = img_path.rsplit(\".\", 1)\n seg_path = prefix + \"_seg.png\"\n mask_path = prefix + \"_mask.png\"\n expand_path = prefix + \"_expand.png\"\n\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n predictor.set_image(img)\n masks, scores, _ = predictor.predict(\n point_coords=input_points,\n point_labels=input_labels,\n box=input_box,\n multimask_output=False,\n )\n\n # 1. 存储seg image\n save_seg(img, seg_path, masks, input_points, input_labels, input_box)\n # 2. 
存储mask image\n save_mask(masks, mask_path)\n # 3. 存储expand image\n save_expand(masks, expand_path)\n","repo_name":"Amazingldl/VisualBox","sub_path":"SAM/SAMFunc.py","file_name":"SAMFunc.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"9267840261","text":"class HouseItem:\n def __init__(self, name, area):\n self.name = name\n self.area = area\n\n def __str__(self):\n return '[%s] 占地 %.2f' % (self.name, self.area)\n\n\nclass House:\n def __init__(self, house_type, area):\n self.house_type = house_type\n self.area = area\n\n # 剩余面积\n self.free_area = area\n\n # 家具名称列表\n self.item_list = []\n\n def __str__(self):\n # Python能够自动将一对括号内部的代码连接在一起\n return ('户型:%s\\n总面积: %.2f[剩余: %.2f]\\n家具: %s'\n % (self.house_type, self.area,\n self.free_area, self.item_list))\n\n def add_item(self, item):\n print('要添加 %s' % item)\n # 1.判断家具的面积\n if item.area > self.free_area:\n print(\"%s 面积太大了,无法添加\" % item.name)\n return\n # 2.将家具的名称添加到列表中\n self.item_list.append(item.name)\n\n # 3.计算剩余面积\n self.free_area -= item.area\n\n\n# 1.创建家具\nbed = HouseItem('席梦思', 30)\nchest = HouseItem('衣柜', 20)\ntable = HouseItem('餐桌', 20)\n\n# 创建房子对象\nmy_house = House('两室一厅', 60)\nmy_house.add_item(bed)\nmy_house.add_item(chest)\nmy_house.add_item(table)\n\nprint(my_house)\n\n'''\n小结\n 主程序只负责创建 房子 对象和 家具 对象\n 让 房子 对象调用 add_item 方法 将家具添加到房子中\n 面积计算 剩余面积 家具列表等处理都被 封装 到 房子类的内部\n'''","repo_name":"MH-Blog/Python","sub_path":"面向对象/01-面向对象基础/11-摆放家具.py","file_name":"11-摆放家具.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"zh","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"9470892394","text":"t = int(input())\nfor _ in range(t):\n\tn = int(input())\n\ts = input()\n\n\tans = [0]*n\n\tfor i in range(n):\n\t\td, k = s[0:i+1].count('D'), s[0:i+1].count('K')\n\t\tfor j in range(0, n, i+1):\n\t\t\tif j+i >= n:\n\t\t\t\tbreak\n\n\t\t\tdd, kk = s[j:j+i+1].count('D'), s[j:j+i+1].count('K')\n\t\t\tif d*kk == dd*k:\n\t\t\t\tans[j+i] = max(ans[j+i], (j+i+1)//(i+1))\n\t\t\telse:\n\t\t\t\tbreak\n\n\tprint(*ans, end=' \\n')","repo_name":"qazz625/Competitive-Programming-Codes","sub_path":"Codeforces/1536/brute.py","file_name":"brute.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23310154050","text":"import logging\nimport time\n\nimport requests\nfrom celery import shared_task\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef take_photo():\n import picamera\n\n logger.info('Starting photo capture')\n with picamera.PiCamera() as camera:\n camera.resolution = (1440, 1080)\n camera.rotation = 270\n camera.start_preview()\n time.sleep(1)\n camera.capture('foo.jpg')\n camera.stop_preview()\n logger.info('Saved image to disk, starting upload')\n files = {'file': open('foo.jpg', 'rb')}\n response = requests.post(\"http://booth.lkng.me/api/photos/\", files=files, data={'is_active': True})\n\n if response.status_code == 200 or response.status_code == 201:\n logger.info('Upload complete')\n else:\n logger.error(\n 'Could not upload image (status code from server: {}'.format(response.status_code),\n extra={'response': response}\n )\n","repo_name":"relekang/photobooth","sub_path":"photobooth/gallery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"30967984187","text":"import requests\n\n#https://api.openweathermap.org/data/2.5/weather?lat=43.77&lon=-79.23&units=metric&appid=ed5b8dbb31740d7d0921491b323ed14b\n\nurl = 'https://api.openweathermap.org/data/2.5/weather?lat=43.7764&lon=-79.2318&units=metric&appid=ed5b8dbb31740d7d0921491b323ed14b'\n \nres = requests.get(url)\ndata = res.json()\nweather = data['weather'][0]['main']\ndescription = data['weather'][0]['description']\ntemp = str(data['main']['temp'])\ntemp_feels_like = str(data['main']['feels_like'])\npressure = str(data['main']['pressure'])\nwind_speed = str(data['wind']['speed'])\nhumidity = str(data['main']['humidity'])\n","repo_name":"Aran0713/Friday-1.0","sub_path":"FRIDAY 1.0/Definitions/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26002838848","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n#from userprofiles.views import LoginView\nfrom userprofiles.views import ListView, DetailView, DeleteView, UpdateView, CreateView\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'examen2.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n# url(r'^login/', LoginView.as_view(), name='login'),\n url(r'^New/', CreateView.as_view(), name='create'),\n url(r'^detail(?P\\d+)$',DetailView.as_view(),name='detail'),\n url(r'^list/$',ListView.as_view(),name='list'),\n url(r'^update(?P\\d+)$',UpdateView.as_view(),name='update'),\n url(r'^delete(?P\\d+)$',DeleteView.as_view(),name='delete'),\n url(r'^$','userprofiles.views.listar'),\n #url(r'^logout$', 'app.views.cerrar'),\n)\n","repo_name":"lizceth/examen2","sub_path":"examen2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34647137615","text":"# A VECTOR/POINT CLASS DEMO\nfrom math import sqrt\nimport numpy as np\nimport numbers\n\n\nclass MetaVector:\n def __init__(self, x=0.0, y=0.0, z=0.0):\n self.x = x\n self.y = y\n self.z = z\n\n def __repr__(self):\n x = self.x\n y = self.y\n z = self.z\n name = self.__class__.__name__\n return f\"<{name}:x={x},y={y},z={z}>\"\n\n def asarray(self):\n return np.array([self.x, self.y, self.z])\n\n\nclass Vector(MetaVector):\n \"\"\"\n A vector class.\n \"\"\"\n\n def norm(self):\n \"\"\"\n Returns the norm of the vector.\n \"\"\"\n x = self.x\n y = self.y\n z = self.z\n return sqrt(x ** 2 + y ** 2 + z ** 2)\n\n def normalize(self):\n x = self.x\n y = self.y\n z = self.z\n n = self.norm()\n out = Vector(x / n, y / n, z / n)\n return out\n\n def cross(self, other):\n a = self.asarray()\n oa = other.asarray()\n return Vector(*np.cross(a, oa))\n\n def scalarmul(self, other):\n return Vector(*(self.asarray() * other))\n\n def __mul__(self, other):\n if isinstance(other, Vector):\n return self.cross(other)\n elif isinstance(other, numbers.Number):\n return self.scalarmul(other)\n\n __rmul__ = scalarmul\n\n\nclass Point(MetaVector):\n def __sub__(self, other):\n a = self.asarray()\n oa = other.asarray()\n return Vector(*(a - oa))\n\n\nu = Vector(x=3)\nv = Vector(y=5)\nA = Point()\nB = 
Point(z=-5)\n","repo_name":"lcharleux/2022_2-3_USMB_scientific_Python_Tutorial_02","sub_path":"day1/vector_class.py","file_name":"vector_class.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72534068746","text":"import time\nfrom tkinter.filedialog import asksaveasfilename\n\n\ndef save_image_to_file(screenshot):\n ticks = str(time.time()).replace('.', '')[:13]\n default_filename = f\"{ticks}_screenshot.png\"\n save_path = asksaveasfilename(defaultextension='', filetypes=[(\"All Files\", \"*.*\")], initialfile=default_filename)\n\n if save_path:\n screenshot.save(save_path)","repo_name":"Joll-d/scrinJ","sub_path":"handlers/file_save_handler.py","file_name":"file_save_handler.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26632457555","text":"# 606. 根据二叉树创建字符串\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def tree2str(self, t):\n \"\"\"\n :type t: TreeNode\n :rtype: str\n \"\"\"\n if not t:\n return \"\"\n res = \"\"\n left = self.tree2str(t.left)\n right = self.tree2str(t.right)\n if left or right:\n res += \"(%s)\" % left\n if right:\n res += \"(%s)\" % right\n return str(t.val) + res\n","repo_name":"Teingi/python-test","sub_path":"leetcode_606.py","file_name":"leetcode_606.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"81"} +{"seq_id":"12684019215","text":"from PyQt5.QtCore import QPointF, QRectF, Qt\nfrom PyQt5.QtGui import QBrush, QColor, QPen, QPolygonF\nfrom PyQt5.QtWidgets import QToolTip\nfrom client.item_depth_manager import WosItemDepthManager\nfrom client.scene_item.battlefield_item import WosBattlefieldItem\nfrom client.ship_info import ShipInfo\nfrom client.wos import ItemType\nimport cCommonGame\n\n\nclass WosBattleShipItem(WosBattlefieldItem):\n def __init__(self, field_info, drag_boundary, ship_id=0, length=5, is_sunken=False):\n WosBattlefieldItem.__init__(self, field_info)\n self.drag_boundary = drag_boundary\n\n self.ship_info = ShipInfo(ship_id, cCommonGame.Position(0, 0))\n self.ship_info.size = length\n self.ship_info.is_sunken = is_sunken\n self.ship_info.rotated.connect(self.ship_rotated)\n self.ship_info.moved.connect(self.ship_moved)\n\n self.set_type(ItemType.SHIP)\n WosItemDepthManager().set_depth(self)\n\n self.start_pos = QPointF()\n self.end_pos = QPointF()\n self.head = QPolygonF()\n self.tail = QPolygonF()\n self.body = QPolygonF()\n self.update_body()\n\n self.brushes = dict()\n self.brushes[ShipInfo.Type.FRIENDLY] = QBrush(QColor(0, 200, 0, 255))\n self.brushes[ShipInfo.Type.HOSTILE] = QBrush(QColor(200, 0, 0, 255))\n self.brushes[ShipInfo.Type.CIVILIAN] = QBrush(QColor(200, 200, 200, 255))\n self.brushes[ShipInfo.Type.UNKNOWN] = QBrush(QColor(200, 200, 0, 255))\n self.brushes[ShipInfo.Type.BLACK] = QBrush(QColor(255, 255, 255, 255))\n self.brushes[ShipInfo.Type.SHADOW] = QBrush(QColor(0, 0, 0, 0))\n self.pens = dict()\n self.pens[ShipInfo.Type.FRIENDLY] = QPen(QColor(0, 0, 0, 255))\n self.pens[ShipInfo.Type.HOSTILE] = QPen(QColor(0, 0, 0, 255))\n self.pens[ShipInfo.Type.CIVILIAN] = QPen(QColor(0, 0, 0, 255))\n self.pens[ShipInfo.Type.UNKNOWN] = QPen(QColor(0, 0, 0, 255))\n self.pens[ShipInfo.Type.BLACK] = QPen(QColor(0, 
0, 0, 255))\n self.pens[ShipInfo.Type.SHADOW] = QPen(QColor(0, 255, 0, 255), 2, Qt.DashLine)\n self.pen = QPen(QColor(0, 0, 0, 255))\n\n def boundingRect(self):\n return QRectF(self.start_pos, self.end_pos)\n\n def clone(self):\n ship_item = WosBattleShipItem(self.field_info, self.ship_info.ship_id, self.ship_info.size,\n self.ship_info.is_sunken)\n ship_item.set_grid_position(self.ship_info.position.x, self.ship_info.position.y)\n ship_item.set_heading(self.ship_info.heading)\n ship_item.set_is_draggable(self.is_draggable)\n return ship_item\n\n def draw_cross(self, painter):\n painter.setPen(QPen(QColor(255, 0, 0, 255), 3))\n rect = self.boundingRect()\n painter.drawLine(rect.topLeft(), rect.bottomRight())\n painter.drawLine(rect.topRight(), rect.bottomLeft())\n\n def get_ship_info(self):\n return self.ship_info\n\n def hoverEnterEvent(self, event):\n self.pens[self.ship_info.type].setWidth(2)\n self.show_tool_tip(event)\n self.update()\n\n def hoverLeaveEvent(self, event):\n self.pens[self.ship_info.type].setWidth(1)\n QToolTip.hideText()\n self.update()\n\n def hoverMoveEvent(self, event):\n self.show_tool_tip(event)\n\n def make_shadow(self, ship, actions):\n ship.set_grid_position(self.ship_info.position.x, self.ship_info.position.y)\n ship.set_heading(self.ship_info.heading)\n ship.set_size(self.ship_info.size)\n ship.set_ship_type(ShipInfo.Type.SHADOW)\n ship.set_type(ItemType.ANNOTATION)\n ship.set_is_hoverable(False)\n ship.set_is_sunken(self.ship_info.is_sunken)\n for action in actions:\n if action == cCommonGame.Action.FWD:\n ship.ship_info.move_forward()\n elif action == cCommonGame.Action.CW:\n ship.ship_info.turn_clockwise()\n elif action == cCommonGame.Action.CCW:\n ship.ship_info.turn_counter_clockwise()\n\n def mouseReleaseEvent(self, event):\n if self.drag_delta is not None:\n self.drag_delta = None\n self.snap_to_grid(self.pos().x(), self.pos().y())\n\n if event.button() == Qt.RightButton and self.is_draggable:\n self.rotate_ship()\n\n def paint(self, painter, style, widget=None):\n painter.setBrush(self.brushes[self.ship_info.type])\n painter.setPen(self.pens[self.ship_info.type])\n painter.drawPolygon(self.body)\n if self.ship_info.is_sunken:\n self.draw_cross(painter)\n\n def point_is_within(self, x, y):\n old_x = self.ship_info.position.x\n old_y = self.ship_info.position.y\n self.set_grid_position(x, y)\n positions = self.ship_info.get_placement()\n x1 = self.drag_boundary.min_x\n x2 = self.drag_boundary.max_x\n y1 = self.drag_boundary.min_y\n y2 = self.drag_boundary.max_y\n is_within = True\n for position in positions:\n if not (x1 <= position[0] < x2 and y1 <= position[1] < y2):\n is_within = False\n break\n self.set_grid_position(old_x, old_y)\n return is_within\n\n def rotate_ship(self):\n self.ship_info.turn_clockwise()\n\n def ship_rotated(self, heading):\n self.setRotation(heading)\n\n def ship_moved(self, x, y):\n self.setPos(self.map_grid_to_position(x, y))\n\n def set_grid_position(self, x, y):\n self.ship_info.set_position(x, y)\n\n def set_heading(self, heading):\n self.ship_info.set_heading(heading)\n\n def set_is_sunken(self, is_sunken):\n self.ship_info.is_sunken = is_sunken\n\n def set_ship_type(self, t):\n self.ship_info.set_type(t)\n\n def set_size(self, length):\n self.ship_info.size = length\n self.update_body()\n\n def show_tool_tip(self, event):\n tool_tip = ''\n if self.ship_info.type is ShipInfo.Type.FRIENDLY:\n tool_tip = \"Id: %s\" % self.ship_info.ship_id\n else:\n tool_tip = ''\n if self.ship_info.type is ShipInfo.Type.CIVILIAN:\n 
tool_tip = \"Civilian ship\"\n elif self.ship_info.type is ShipInfo.Type.HOSTILE:\n tool_tip = \"Hostile ship\"\n elif self.ship_info.type is ShipInfo.Type.UNKNOWN or self.ship_info.type is ShipInfo.Type.BLACK:\n tool_tip = \"Unidentified ship\"\n if self.ship_info.is_sunken:\n tool_tip += ' - Destroyed'\n tool_tip += \" (%s, %s)\" % (self.ship_info.position.x, self.ship_info.position.y)\n QToolTip.showText(event.screenPos(), tool_tip)\n\n def snap_to_grid(self, pos_x, pos_y):\n p = self.map_position_to_grid(pos_x, pos_y)\n if self.drag_boundary is None or self.point_is_within(p.x(), p.y()):\n self.set_grid_position(p.x(), p.y())\n else:\n self.set_grid_position(self.ship_info.position.x, self.ship_info.position.y)\n\n def update_body(self):\n self.start_pos = QPointF(self.field_info.size.x() * 0.2, self.field_info.size.y() * 0.2)\n self.end_pos = QPointF(self.field_info.size.x() * 0.8,\n self.ship_info.size * self.field_info.size.y() - self.start_pos.y())\n head_end_y = self.field_info.size.y() * 0.8\n\n # Center of origin at the middle of ship body (floor function)\n self.start_pos -= QPointF(0, self.field_info.size.y() * self.ship_info.get_y_center())\n self.end_pos -= QPointF(0, self.field_info.size.y() * self.ship_info.get_y_center())\n head_end_y -= self.field_info.size.y() * self.ship_info.get_y_center()\n\n self.head = QPolygonF(\n [QPointF(self.start_pos.x(), head_end_y),\n QPointF((self.end_pos.x() + self.start_pos.x()) / 2, self.start_pos.y()),\n QPointF(self.end_pos.x(), head_end_y)])\n\n self.tail = QPolygonF(\n [QPointF(self.start_pos.x(), head_end_y),\n QPointF(self.start_pos.x(), self.end_pos.y()),\n QPointF(self.end_pos.x(), self.end_pos.y()),\n QPointF(self.end_pos.x(), head_end_y)])\n\n self.body = QPolygonF(self.head + self.tail)\n","repo_name":"Ssaga/wos2019","sub_path":"client/scene_item/battleship_item.py","file_name":"battleship_item.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34260853703","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 28 18:30:01 2021\n\n@author: denesh\n\"\"\"\nimport numpy as np\nimport time\nimport cv2 as cv\nfrom sklearn.cluster import KMeans\nfrom matplotlib import pyplot as plt\n\n\ndef plot_comp_efforts(images):\n num_images= len(images)\n\n ## Vary features\n features = [100,300,500,700,900] #Here edge_thresh = 10\n \n time_orb_feat = np.zeros((5,num_images))\n time_kmeans_feat = np.zeros((5,num_images))\n \n for n in range(len(features)):\n for i in range(num_images):\n img = images[i] \n \n \n \n # your code here \n start = time.time()\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=features[n],edgeThreshold=10)\n # find the keypoints with ORB\n kp = orb.detect(img,None)\n time_orb_feat[n,i] = (time.time() - start)\n \n feat = []\n for point in kp:\n feat.append(point.pt)\n X = np.array(feat)\n start = time.time()\n \n kmeans = KMeans(n_clusters=4, random_state=0).fit(X)\n \n center = kmeans.cluster_centers_\n time_kmeans_feat[n,i] = (time.time() - start)\n \n \n ## Vary edgeThreshold\n edgethresh = [5,10,40,80] #Here nfeautures = 700\n \n time_orb_edge = np.zeros((4,num_images))\n time_kmeans_edge = np.zeros((4,num_images))\n \n for e in range(len(edgethresh)):\n for i in range(num_images):\n img = images[i] \n \n \n \n # your code here \n start = time.time()\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=700,edgeThreshold=edgethresh[e])\n # find the keypoints with ORB\n 
kp = orb.detect(img,None)\n time_orb_edge[e,i] = (time.time() - start)\n \n feat = []\n for point in kp:\n feat.append(point.pt)\n X = np.array(feat)\n start = time.time()\n \n kmeans = KMeans(n_clusters=4, random_state=0).fit(X)\n \n center = kmeans.cluster_centers_\n time_kmeans_edge[e,i] = (time.time() - start)\n \n \n \n \n x = features\n y = np.mean(time_orb_feat+time_kmeans_feat, axis=1) #Total time\n yerr = np.std(time_orb_feat+time_kmeans_feat, axis=1)\n \n fig, ax = plt.subplots()\n \n ax.errorbar(x, y, yerr=yerr,fmt='-o')\n \n ax.set_xlabel('nFeatures')\n ax.set_ylabel('Time (s)')\n ax.set_title('Wallclock time vs nfeatures for all images ')\n plt.show()\n \n x = edgethresh\n y = np.mean(time_orb_edge+time_kmeans_edge, axis=1) #Total time\n yerr = np.std(time_orb_edge+time_kmeans_edge, axis=1)\n \n fig, ax = plt.subplots()\n \n ax.errorbar(x, y, yerr=yerr,fmt='-o')\n \n ax.set_xlabel('edgeThreshold (pixels)')\n ax.set_ylabel('Time (s)')\n ax.set_title('Wallclock time vs edgeThreshold for all images')\n plt.show()\n","repo_name":"Denesh1998/MAV_Individual_Asg","sub_path":"plot_comp_efforts.py","file_name":"plot_comp_efforts.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75179295945","text":"import os\n\nimport numpy as np\n\nfrom nums.numpy import BlockArray\n\n\n# pylint: disable=import-outside-toplevel\n\n\ndef test_explicit_init():\n import nums\n import nums.core.application_manager as am\n\n nums.init()\n assert am.is_initialized()\n am.destroy()\n\n\ndef test_array_copy(nps_app_inst):\n import nums.numpy as nps\n assert nps_app_inst is not None\n\n ba = nps.arange(10)\n ba2 = nps.array(ba, copy=True)\n assert ba is not ba2\n\n\ndef test_loadtxt(nps_app_inst):\n import nums.numpy as nps\n assert nps_app_inst is not None\n\n seed = 1337\n rs = np.random.RandomState(seed)\n\n fname = \"test_text.out\"\n data = rs.random_sample(99).reshape(33, 3)\n\n np.savetxt(fname=fname, X=data)\n da: BlockArray = nps.loadtxt(fname)\n assert np.allclose(da.get(), data)\n\n os.remove(fname)\n assert not os.path.exists(fname)\n\n\ndef test_where(nps_app_inst):\n import nums.numpy as nps\n\n assert nps_app_inst is not None\n\n shapes = [\n (),\n (10**6,),\n (10**6, 1),\n (10**5, 10)\n ]\n for shape in shapes:\n arr: BlockArray = nps.random.rand(*shape)\n if len(shape) == 1:\n arr = arr.reshape(block_shape=(arr.shape[0] // 12,))\n elif len(shape) == 2:\n arr = arr.reshape(block_shape=(arr.shape[0] // 12,\n arr.shape[1]))\n results: tuple = nps.where(arr < 0.5)\n np_results = np.where(arr.get() < 0.5)\n for i in range(len(np_results)):\n assert np.allclose(np_results[i], results[i].get())\n results: tuple = nps.where(arr >= 0.5)\n np_results = np.where(arr.get() >= 0.5)\n for i in range(len(np_results)):\n assert np.allclose(np_results[i], results[i].get())\n\n\ndef test_reshape(nps_app_inst):\n import nums.numpy as nps\n assert nps_app_inst is not None\n ba = nps.arange(2*3*4).reshape((2, 3, 4), block_shape=(2, 3, 4))\n assert nps.allclose(ba.reshape(shape=(6, 4), block_shape=(6, 4)),\n nps.reshape(ba, shape=(6, 4)))\n\n\nif __name__ == \"__main__\":\n from nums.core import application_manager\n from nums.core import settings\n settings.system_name = \"serial\"\n nps_app_inst = application_manager.instance()\n test_where(nps_app_inst)\n # test_loadtxt(nps_app_inst)\n 
test_reshape(nps_app_inst)\n","repo_name":"liuhanyao98/nums_gpu_draft","sub_path":"tests/numpy/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35185984881","text":"#\n# @lc app=leetcode.cn id=1023 lang=python3\n#\n# [1023] 驼峰式匹配\n#\n\n# @lc code=start\nfrom typing import List\nclass Solution:\n def camelMatch(self, queries: List[str], pattern: str) -> List[bool]:\n ret = []\n for q in queries:\n i, j = 0, 0\n while i < len(q) and j < len(pattern):\n if q[i] == pattern[j]: i, j = i + 1, j + 1\n elif q[i].isupper(): break\n else: i += 1\n if any(c.isupper() for c in q[i:]): ret.append(False)\n elif j == len(pattern): ret.append(True)\n else: ret.append(False)\n return ret\n# @lc code=end\n\nprint(Solution().camelMatch(queries = [\"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"], pattern = \"FB\"))\nprint(Solution().camelMatch(queries = [\"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"], pattern = \"FoBa\"))\nprint(Solution().camelMatch(queries = [\"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"], pattern = \"FoBaT\"))","repo_name":"HellOwhatAs/Leetcode","sub_path":"1023.驼峰式匹配.py","file_name":"1023.驼峰式匹配.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19944748940","text":"import numpy as np\r\nimport random\r\nfrom tensorflow import keras as tf\r\nfrom q import shape_saver\r\nf=shape_saver()\r\n#(tr_mat,tr_lable),(ts_mat,ts_lable)=tf.datasets.cifar10.load_data()\r\nclass_name={\"circle\":0,\"tri\":1,\"square\":2}\r\ntr_mat,tr_lable,ts_mat,ts_lable=[],[],[],[]\r\n\r\ndef rotate(vector, theta, rotation_around=None) -> np.ndarray:\r\n vector = np.array(vector)\r\n if vector.ndim == 1:\r\n vector = vector[np.newaxis, :]\r\n if rotation_around is not None:\r\n vector = vector - rotation_around\r\n vector = vector.T\r\n theta = np.radians(theta)\r\n rotation_matrix = np.array([\r\n [np.cos(theta), -np.sin(theta)],\r\n [np.sin(theta), np.cos(theta)]])\r\n output: np.ndarray = (rotation_matrix @ vector).T\r\n if rotation_around is not None:\r\n output = output + rotation_around\r\n return output.squeeze()\r\ndef shape_muls(POINTS):\r\n muls=[]\r\n for i in range(30):\r\n rnd=random.randint(1,89)\r\n if i==0:\r\n rnd=0\r\n POINTS2=[]\r\n for j in POINTS:\r\n #print(j)\r\n x, y = rotate((j[0] - 250, j[1] - 250), rnd)\r\n POINTS2.append(np.array([(int(x + 250), int(y + 250))]))\r\n muls.append(POINTS2)\r\n muls2=[]\r\n for i in muls:\r\n po=shape_to_mat(i)\r\n muls2.append(po)\r\n muls2.append(np.rot90(po,1))\r\n muls2.append(np.rot90(po, 2))\r\n muls2.append(np.rot90(po, 3))\r\n po=np.flip(po, 0)\r\n muls2.append(po)\r\n muls2.append(np.rot90(po, 1))\r\n muls2.append(np.rot90(po, 2))\r\n muls2.append(np.rot90(po, 3))\r\n return muls2\r\n#class_name=[\"circle\",\"tri\"]\r\ndef shape_to_mat(POINTS):\r\n def crop(line):\r\n miny, minx = 501, 501\r\n maxx, maxy = 0, 0\r\n #print(line[0])\r\n for j in line:\r\n i=j[0]\r\n if i[0] > maxx:\r\n maxx = i[0]\r\n elif i[0] < minx:\r\n minx = i[0]\r\n if i[1] > maxy:\r\n maxy = i[1]\r\n elif i[1] < miny:\r\n miny = i[1]\r\n return (minx, miny, maxx - minx, maxy - miny)\r\n minx, miny, maxx, maxy = crop(POINTS)\r\n #print(minx, miny, maxx, maxy)\r\n #print(POINTS[0][0])\r\n POINTS2=[]\r\n for P in POINTS:\r\n POO2=[(int(P[0][0] - minx), 
int(P[0][1] - miny))]\r\n POINTS2.append(POO2)\r\n spread=[[0 for i in range(28)] for j in range(28)]\r\n #print(\"po2\",POINTS2[0])\r\n try:\r\n for i in POINTS2:\r\n spread[int(28 * (i[0][0] - 1) / maxx)][int(28 * (i[0][1] - 1) / maxy)] =spread[int(28 * (i[0][0] - 1) / maxx)][int(28 * (i[0][1] - 1) / maxy)]+ 1\r\n except IndexError:\r\n print(spread)\r\n #for i in POINTS2[0]:\r\n #spread[int(28 * (i[0] - 1) / maxx)][int(28 * (i[1] - 1) / maxy)] += 1\r\n\r\n max=spread[0][0]\r\n min=spread[0][0]\r\n for i in spread:\r\n for j in i:\r\n if j > max:\r\n max = j\r\n elif j < min:\r\n min = j\r\n def standarise(n):\r\n return (n - min) / max\r\n\r\n for i in range(28):\r\n for j in range(28):\r\n spread[i][j] = standarise(spread[i][j])\r\n return spread\r\n#class_name=[\"circle\",\"tri\"]\r\nshuffle_g = []\r\nfor i in class_name:\r\n print(i)\r\n shps=f.get_wh(\"id\",\"arr\",\"shape='\"+i+\"'\")\r\n l=0\r\n for j in shps:\r\n print(l)\r\n l+=1\r\n #print(j[0][1])\r\n spraeds=shape_muls(j[0])\r\n for q in spraeds:\r\n shuffle_g.append([q, class_name[i]])\r\nnp.random.shuffle(shuffle_g)\r\ntr_mat = np.asarray([i[0] for i in shuffle_g])\r\ntr_lable = np.asarray([i[1] for i in shuffle_g])\r\n\r\n#print(tr_mat[:20])\r\nprint(len(tr_mat),len(tr_lable))\r\ntr_mat = np.asarray(tr_mat)\r\ntr_lable = np.asarray(tr_lable)\r\n#ts_lable = np.where(ts_lable == 'circle', 0, 1).astype(int)\r\n\r\nts_mat = tr_mat[:2000]\r\nts_lable = tr_lable[:2000]\r\n# class_name=[\"circle\", \"tri\"]\r\n\r\nmodel = tf.models.Sequential([\r\n tf.layers.Conv2D(28, (3,3), activation=\"relu\", input_shape=(28,28,1)),\r\n tf.layers.MaxPooling2D((2,2)),\r\n tf.layers.Conv2D(64, (3,3), activation=\"relu\"),\r\n tf.layers.MaxPooling2D((2,2)),\r\n tf.layers.Conv2D(64, (3,3), activation=\"relu\"),\r\n tf.layers.Flatten(),\r\n tf.layers.Dense(64, activation=\"relu\"),\r\n tf.layers.Dense(3, activation=\"softmax\")\r\n])\r\n\r\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\r\n\r\nmodel.fit(tr_mat, tr_lable, epochs=10, validation_split=0.2)\r\n\r\nloss,accuracy=model.evaluate(ts_mat,ts_lable)\r\n\r\nprint(f\"loss {loss}\")\r\nprint(f\"accuracy {accuracy}\")\r\nmodel.save('shasaver.model')","repo_name":"sylvi1e/cyber_proj_my","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10371396624","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2022/2/9 13:32\n# @Author : strawsyz\n# @File : extractor.py\n# @desc:\nimport torch\n\nfrom pytorch_i3d import InceptionI3d\n\n\ndef get_extractor(model_name: str):\n if model_name == \"resnet152\":\n return get_resnet152()\n elif model_name == \"i3d\":\n return get_i3d_model()\n\n\n# tensorflow\ndef get_resnet152():\n import keras\n from tensorflow.keras.models import Model # pip install tensorflow (==2.3.0)\n base_model = keras.applications.resnet.ResNet152(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000)\n\n # define model with output after polling layer (dim=2048)\n model = Model(base_model.input,\n outputs=[base_model.get_layer(\"avg_pool\").output])\n model.trainable = False\n return model\n\n\n# torch\ndef get_i3d_model(model_path=r\"C:\\(lab\\OtherProjects\\pytorch-i3d-master\\models\\rgb_imagenet.pt\"):\n import platform\n if platform.system() == \"Linux\":\n model_path = r\"/workspace/datasets/rgb_imagenet.pt\"\n\n i3d = 
InceptionI3d(400, in_channels=3)\n    # i3d.replace_logits(157)\n    i3d.load_state_dict(torch.load(model_path))\n    i3d.cuda()\n    return i3d\n\n\n# def get_resnet152():\n#     # a pile of imports omitted here\n#     import torchvision.models as models\n#\n#     from resnet import resnet152 as caffe_resnet\n#     # import resnet\n#     # image loading and preprocessing omitted; img below is the already-preprocessed image\n#\n#     model = caffe_resnet.resnet152(pretrained=True)\n#     del model.fc\n#     model.fc = lambda x: x\n#     model = model.cuda()\n#\n#     feat = model(img)\n\n\nif __name__ == '__main__':\n    # import torchvision.models as models\n    #\n    # from resnet import resnet152 as caffe_resnet\n\n    # import resnet\n    # image loading and preprocessing omitted; img below is the already-preprocessed image\n\n    # model = caffe_resnet.resnet152(pretrained=True)\n    # del model.fc\n    # model.fc = lambda x: x\n    # model = model.cuda()\n    # print(model)\n\n    import torchvision\n\n    model = torchvision.models.resnet18(pretrained=True)\n\n    del model.fc\n    model.fc = lambda x: x\n    model = model.cuda()\n    img = torch.ones(3, 10, 3, 224, 224).cuda()\n    img = img.reshape((-1, 3, 224, 224))\n    feat = model(img)\n    feat = feat.reshape((3, 10, -1))\n    print(feat.shape)\n    # feat = model(img)\n","repo_name":"strawsyz/straw","sub_path":"EasyDeep/feature_extract/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"39041266154","text":"import io\n\nfrom tqdm.auto import tqdm as _origin_tqdm\n\n__all__ = ['tqdm']\n\n\ndef tqdm(*args, silent: bool = False, **kwargs):\n    \"\"\"\n    An enhanced version of tqdm (progress bar) with an option to silence the output.\n\n    This function modifies the behavior of tqdm to allow silencing the progress bar.\n\n    :param args: Positional arguments to be passed to tqdm.\n    :param silent: If True, the progress bar content will not be displayed.\n    :type silent: bool\n    :param kwargs: Additional keyword arguments to be passed to tqdm.\n    :return: tqdm progress bar.\n    :rtype: tqdm.std.tqdm\n    \"\"\"\n    if silent:\n        # A deliberately unclosed throwaway buffer: the bar keeps writing to it\n        # on every update after this function returns, so it must stay open\n        # (the previous context-manager version closed it on return).\n        kwargs['file'] = io.StringIO()\n\n    return _origin_tqdm(*args, **kwargs)\n","repo_name":"deepghs/sdeval","sub_path":"sdeval/utils/tqdm_.py","file_name":"tqdm_.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"3899515041","text":"'''\n@mainpage Rat15S Compiler\n\n@section intro_sec Introduction\nThis will become a Rat15S compiler. Currently working on Lexical Analyzer.\n@author Reza Nikoopour\n@author Eric Roe\n'''\nimport sys\n\ndef main(source_file, output_file):\n    # file paths are accepted but not yet used while the lexer is under construction\n    tokens = Lexer()\n    \nif __name__ == '__main__':\n    sys.path.append('Lib')\n    from lexicalanalyzer import Lexer\n    main(sys.argv[1], sys.argv[2])\n","repo_name":"rnikoopour/Rat15SCompiler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"43075507578","text":"import time\nimport subprocess\n\n\n#-------------------------------------------------------------------------------\n# API\n#-------------------------------------------------------------------------------\n\nclass ConcurrentCommandRunner:\n    '''Run subprocess commands in parallel.'''\n\n    def __init__(self, commands, concurrency=None, sleep_seconds=0.5):\n        '''\n        Parameters\n        ----------\n        commands: iterable\n            The commands to execute.\n        concurrency: int or None\n            Maximum number of commands to run at any given time. 
If None,\n            all commands are started simultaneously.\n        sleep_seconds: float\n            Seconds to sleep in-between checking the status of commands.\n        '''\n\n        self.commands = commands\n        self.commands_iter = iter(commands)\n        self.sleep_seconds = sleep_seconds\n\n        self.concurrency = concurrency if concurrency else len(commands)\n\n        self.processes = {}\n        self.finished_processes = {}\n        self.finished = False\n\n    def run(self):\n        '''\n        Run until all processes are completed. Returns the sum of the exit-codes\n        of all commands.\n        '''\n        if not self.commands:\n            return\n\n        for _ in range(self.concurrency):\n            self.start_next_cmd()\n\n        while not self.finished or self.processes:\n            self.tick()\n\n        return self.returncode()\n\n    def returncode(self):\n        '''The sum of the exit-codes of all commands.'''\n        return sum(proc.returncode for proc in self.finished_processes)\n\n    def tick(self):\n        proc_to_remove = []\n        for proc in self.processes:\n            if proc.poll() is not None:\n                proc_to_remove.append(proc)\n\n        # We're done with these processes - don't check their status again.\n        for proc in proc_to_remove:\n            self.command_finished(proc, self.processes[proc])\n            self.finished_processes[proc] = self.processes[proc]\n            del self.processes[proc]\n\n        # Start as many processes as have finished.\n        for _ in range(len(proc_to_remove)):\n            self.start_next_cmd()\n\n        time.sleep(self.sleep_seconds)\n\n    def start_next_cmd(self):\n        try:\n            cmd = next(self.commands_iter)\n        except StopIteration:\n            self.finished = True\n            return\n\n        proc = self.start_command(cmd)\n        self.processes[proc] = cmd\n\n    def start_command(self, cmd):\n        '''\n        Start a command - must return a subprocess.Popen object.\n        The single 'cmd' argument is an element of self.commands.\n        '''\n        return subprocess.Popen(cmd, shell=isinstance(cmd, str))\n\n    def command_finished(self, proc, cmd):\n        '''\n        Ran when a command has finished. Receives the subprocess.Popen\n        object and the corresponding command element from which it was\n        created.\n        '''\n        pass\n\n\n#-------------------------------------------------------------------------------\n# Functional interface\n#-------------------------------------------------------------------------------\n\ndef run(commands, concurrency=None, sleep_seconds=0.5, start_command=None):\n    '''\n    Run subprocess commands in parallel and yield the results\n\n    Parameters\n    ----------\n    commands: iterable\n        The commands to execute.\n    concurrency: int or None\n        Maximum number of commands to run at any given time. If None,\n        all commands are started simultaneously.\n    sleep_seconds: float\n        Seconds to sleep in-between checking the status of commands.\n    start_command: callable\n        Function used to start commands. 
Must return a subprocess.Popen object.\n\n    Yields\n    ------\n    (subprocess.Popen, cmd)\n        Yields the completed subprocess.Popen object and the command element\n        from which it was created.\n    '''\n    import queue\n    import threading\n\n    result_queue = queue.Queue()\n\n    def command_finished(proc, cmd):\n        result_queue.put((proc, cmd))\n\n    def run_commands():\n        run_with_callback(commands, concurrency, sleep_seconds, command_finished, start_command)\n        result_queue.put(None)\n\n    t1 = threading.Thread(target=run_commands, name=threading._newname('ConcurrentCommandRunner-%d'))\n    t1.start()\n\n    for proc, cmd in iter(result_queue.get, None):\n        yield proc, cmd\n\n\ndef run_with_callback(commands, concurrency=None, sleep_seconds=0.5, command_finished=None, start_command=None):\n    '''\n    Run subprocess commands in parallel and pass the results of finished commands to a callback\n\n    Parameters\n    ----------\n    commands: iterable\n        The commands to execute.\n    concurrency: int or None\n        Maximum number of commands to run at any given time. If None,\n        all commands are started simultaneously.\n    sleep_seconds: float\n        Seconds to sleep in-between checking the status of commands.\n    command_finished: callable or None\n        Function to call when a process finished. Receives the finished subprocess.Popen\n        object and the command object from which it was created.\n    start_command: callable or None\n        Function used to start commands. Must return a subprocess.Popen object.\n\n    Returns\n    -------\n    int\n        The sum of the exit-codes of all commands.\n    '''\n    runner = ConcurrentCommandRunner(commands, concurrency, sleep_seconds)\n    if start_command:\n        runner.start_command = start_command\n    if command_finished:\n        runner.command_finished = command_finished\n    return runner.run()\n\n\n#-------------------------------------------------------------------------------\n\n__all__ = ['ConcurrentCommandRunner', 'run', 'run_with_callback']\n","repo_name":"gvalkov/python-commandpool","sub_path":"commandpool.py","file_name":"commandpool.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"41695230824","text":"#!/usr/bin/python3\n\"\"\"\nGiven an integer array, you need to find one continuous subarray that if you\nonly sort this subarray in ascending order, then the whole array will be sorted\nin ascending order, too.\n\nYou need to find the shortest such subarray and output its length.\n\nExample 1:\nInput: [2, 6, 4, 8, 10, 9, 15]\nOutput: 5\nExplanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the\nwhole array sorted in ascending order.\n\nNote:\nThe length of the input array is in range [1, 10,000].\nThe input array may contain duplicates, so ascending order here means <=.\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def findUnsortedSubarray(self, nums: List[int]) -> int:\n        \"\"\"\n        Sorted at both ends\n        Then search for the two ends by nums[i+1] > nums[i] on the left side\n        (right side similar)\n\n        Problem: may over-include, consider 1 2 5 9 4 6 ...\n        need to shrink from 1 2 5 9 to 1 2 according to min value\n\n        nums[lo - 1] <= min && max <= nums[hi + 1]\n        \"\"\"\n        n = len(nums)\n        lo, hi = 0, n - 1\n        while lo < hi and nums[lo] <= nums[lo + 1]:\n            lo += 1\n\n        while lo < hi and nums[hi - 1] <= nums[hi]:\n            hi -= 1\n\n        if hi <= lo:\n            return 0\n\n        mini = float('inf')\n        maxa = -float('inf')\n        for i in range(lo, hi + 1):\n            mini = min(mini, nums[i])\n            maxa = max(maxa, nums[i])\n\n        while lo - 1 >= 0 and nums[lo - 1] > mini:\n            lo -= 1\n        
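# worked example from the docstring, [1, 2, 5, 9, 4, 6]: the scans above leave the\n        # window [9, 4] with mini=4 and maxa=9, so lo backs up past the 5 (5 > 4) and\n        # the loop below pulls hi past the 6 (6 < 9), giving [5, 9, 4, 6] of length 4\n        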
while hi + 1 < n and nums[hi + 1] < maxa:\n            hi += 1\n\n        return hi - lo + 1\n\n    def findUnsortedSubarray_sort(self, nums: List[int]) -> int:\n        \"\"\"\n        Brute force sort and compare O(n lgn)\n        \"\"\"\n        expected = list(sorted(nums))\n        i = 0\n        while i < len(nums) and nums[i] == expected[i]:\n            i += 1\n\n        j = len(nums) - 1\n        while j >= i and nums[j] == expected[j]:\n            j -= 1\n\n        return j - i + 1\n\n\nif __name__ == \"__main__\":\n    assert Solution().findUnsortedSubarray([2, 1]) == 2\n    assert Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]) == 5\n","repo_name":"algorhythms/LeetCode","sub_path":"581 Shortest Unsorted Continuous Subarray.py","file_name":"581 Shortest Unsorted Continuous Subarray.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"}
+{"seq_id":"14926241811","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nclass UserInfo(AbstractUser): #user info; extends the built-in user table because we add some fields of our own while still using the built-in authentication\n    nid = models.AutoField(primary_key=True)\n    nickname = models.CharField(verbose_name='昵称', max_length=32)\n    telephone = models.CharField(max_length=11, null=True, unique=True)\n    avatar = models.FileField(upload_to = 'avatars/',default=\"/avatars/default.png\") #avatar image\n    create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)\n    blog = models.OneToOneField(to='Blog', to_field='nid',null=True) #each user has exactly one blog\n    def __str__(self):\n        return self.username\n\nclass Blog(models.Model): #blog info\n    nid = models.AutoField(primary_key=True)\n    title = models.CharField(verbose_name='个人博客标题', max_length=64)\n    site = models.CharField(verbose_name='个人博客后缀', max_length=32, unique=True)\n    theme = models.CharField(verbose_name='博客主题', max_length=32)\n    def __str__(self):\n        return self.title\n\nclass Category(models.Model): #per-blogger article category table\n    nid = models.AutoField(primary_key=True)\n    title = models.CharField(verbose_name='分类标题', max_length=32)\n    blog = models.ForeignKey(verbose_name='所属博客', to='Blog', to_field='nid') #one blog has many categories\n    def __str__(self):\n        return self.title\n\nclass Tag(models.Model):\n    nid = models.AutoField(primary_key=True)\n    title = models.CharField(verbose_name='标签名称', max_length=32)\n    blog = models.ForeignKey(verbose_name='所属博客', to='Blog', to_field='nid') ##one blog has many tags\n    def __str__(self):\n        return self.title\n\nclass Article(models.Model):\n    nid = models.AutoField(primary_key=True)\n    title = models.CharField(max_length=50, verbose_name='文章标题')\n    desc = models.CharField(max_length=255, verbose_name='文章描述')\n    comment_count= models.IntegerField(default=0)\n    up_count = models.IntegerField(default=0) #upvotes\n    down_count = models.IntegerField(default=0) #downvotes\n    create_time = models.DateTimeField(verbose_name='创建时间')\n    homeCategory = models.ForeignKey(to='Category', to_field='nid', null=True) #one category holds many articles\n    #siteDetaiCategory = models.ForeignKey(to='SiteCategory', to_field='nid', null=True)\n    user = models.ForeignKey(verbose_name='作者', to='UserInfo', to_field='nid') #one author can write many articles\n    tags = models.ManyToManyField( #tags and articles are many-to-many; this form stops Django from creating the junction table itself\n        to=\"Tag\",\n        through='Article2Tag',\n        through_fields=('article', 'tag'),\n    )\n    def __str__(self):\n        return self.title\n\nclass ArticleDetail(models.Model): #article detail table; the article body is split out into its own table\n    nid = models.AutoField(primary_key=True)\n    content = models.TextField()\n    article = models.OneToOneField(to='Article', to_field='nid')\n\nclass Comment(models.Model): #comment table\n    nid = models.AutoField(primary_key=True)\n    article = models.ForeignKey(verbose_name='评论文章', 
to='Article', to_field='nid') #one article has many comments\n    user = models.ForeignKey(verbose_name='评论者', to='UserInfo', to_field='nid') #many users can comment on one article\n    content = models.CharField(verbose_name='评论内容', max_length=255)\n    create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)\n    parent_comment = models.ForeignKey('self', null=True) #distinguishes a comment on the article from a reply to a comment; 'self' points at this table's own primary key\n    def __str__(self):\n        return self.content\n\nclass ArticleUpDown(models.Model): #upvote table\n    nid = models.AutoField(primary_key=True)\n    user = models.ForeignKey('UserInfo', null=True) #one user can upvote many articles\n    article = models.ForeignKey(\"Article\", null=True) #one article can receive many upvotes\n    is_up=models.BooleanField(default=True) #flags whether this vote is an upvote\n    class Meta:\n        unique_together = [ #composite unique constraint\n            ('article', 'user'),\n        ]\n\nclass Article2Tag(models.Model):\n    nid = models.AutoField(primary_key=True)\n    article = models.ForeignKey(verbose_name='文章', to=\"Article\", to_field='nid')\n    tag = models.ForeignKey(verbose_name='标签', to=\"Tag\", to_field='nid')\n    class Meta:\n        unique_together = [\n            ('article', 'tag'),\n        ]\n    def __str__(self):\n        v=self.article.title+\"----\"+self.tag.title\n        return v\n\n","repo_name":"mowangmo/blog","sub_path":"app01/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20988592769","text":"import os\nimport uuid\nfrom Common import SQLiteHelper\n\nclass DBService():\n\n    def __init__(self):\n        self.initDBConnect()\n        self.initTable()\n\n    def initDBConnect(self):\n        self.db = SQLiteHelper.SQLiteHelper(os.path.abspath(\n            os.path.dirname(__file__))+\"/DB/DB.db\")\n        pass\n\n    def initTable(self):\n        self.db.execute(\"CREATE TABLE IF NOT EXISTS T_TWITTER_FOLLOWERS_HISTORY(\\\n            USER_NAME VARCHAR(500),\\\n            FOLLOWERS_COUNT VARCHAR(500),\\\n            FOLLOWERS_COUNT_TEXT VARCHAR(500),\\\n            FOLLOWERS_COUNT_CHANGE VARCHAR(500),\\\n            IMP_DATE VARCHAR(10) DEFAULT (date('now')),\\\n            IMP_TIME VARCHAR(19) DEFAULT (datetime('now','localtime'))\\\n            )\")\n        pass\n\n    def addTwitterFollowers(self, username,followersCount,followersCountText,followersCountChange):\n        self.db.execute(\"INSERT INTO T_TWITTER_FOLLOWERS_HISTORY(USER_NAME,FOLLOWERS_COUNT,FOLLOWERS_COUNT_TEXT,FOLLOWERS_COUNT_CHANGE) VALUES(?,?,?,?)\",(username,followersCount,followersCountText,followersCountChange))\n\n    def getFollowers(self):\n        result=self.db.query(\"SELECT * FROM T_TWITTER_FOLLOWERS_HISTORY ORDER BY IMP_TIME DESC LIMIT 1\")\n        res={}\n        for row in result:\n            res= {\n                'USER_NAME':row[0],\n                'FOLLOWERS_COUNT':row[1],\n                'FOLLOWERS_COUNT_TEXT':row[2],\n                'FOLLOWERS_COUNT_CHANGE':row[3],\n                'IMP_DATE':row[4],\n                'IMP_TIME':row[5],\n            }\n        return res\n\n    def getFollowersCountChange(self,baseDate):\n        result=self.db.query(\"SELECT SUM(FOLLOWERS_COUNT_CHANGE) FROM T_TWITTER_FOLLOWERS_HISTORY WHERE IMP_DATE=?\",(baseDate,))\n        countChange=0\n        for row in result:\n            countChange=row[0]\n        return countChange","repo_name":"creatorMao/twitter-followers-ios-widget","sub_path":"DBService.py","file_name":"DBService.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"13609580968","text":"'''\n\nGiven an integer n, count and return the number of zeros that are present in the given integer using recursion.\nInput Format :\nInteger n\nOutput Format :\nNo. 
of 0s\nSample Input :\n10204\nSample Output\n2\n\n'''\n\n\n\n\ndef countzeros(n):\n\n if n<0:\n n*=-1\n\n if n<10:\n if n==0:\n return 1\n return 0\n\n smallOutput=countzeros(n//10)\n\n if n%10==0:\n smallOutput+=1\n return smallOutput\n\n\n \nn=int(input())\n\nprint(countzeros(n))\n","repo_name":"ipiyushbhoi/Data-Structures-and-Algorithm-Problems","sub_path":"basic_recursion/count_zeroes.py","file_name":"count_zeroes.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37936695495","text":"from aiogram import types, Dispatcher\n\nfrom bot.db.file import get_top_5\nfrom bot.db.user import check_user\nfrom bot.keyboards.content_types_kb import content_types_kb\nfrom bot.keyboards.top_download_kb import top_download_kb\nfrom bot.templates.message import top_cmd_icons\nfrom bot.utils.extractor_id import get_id, regex_youtube\nfrom bot.utils.extractor_thumb import get_thumb\nfrom bot.utils.extractor_title import get_title\n\n\nasync def help_cmd(message: types.Message):\n await message.delete()\n await check_user(message)\n await message.answer(f\"🎉 Hey {message.from_user.full_name}!\\n\\n\"\n f\"I'm Youtube Grasper Bot 🤖!\\n\"\n f\"I can help you to grab videos from Youtube.\\n\"\n f\"Send me a link to a video and I will send you a link to download it.\")\n\n\nasync def input_url(message: types.Message):\n print(message.text)\n id_data = get_id(message.text)\n print(id_data)\n if id_data is None:\n await message.reply('🗿 Invalid URL, try again.')\n return\n await message.delete()\n await message.answer_photo(photo=get_thumb(id_data['id'], id_data['type']),\n caption=f\"🎸 {get_title(id_data['id'], id_data['type'])} 📽️\\n\\n\"\n f\"🤖 What do you need to download?\",\n reply_markup=content_types_kb(id_data['id'], id_data['type']))\n\n\nasync def get_top_videos(message: types.Message):\n top = await get_top_5()\n text_msg = '🏆 Top 5 downloads:\\n\\n' + '\\n\\n'.join([f'{top_cmd_icons.get(index)} {index}. 
{item.title} {item.quality} | Downloaded {item.dl_count} times 🚀' for index, item in enumerate(top, start=1)])\n await message.answer(text=text_msg, reply_markup=top_download_kb(top))\n\n\ndef register_msg(dp: Dispatcher):\n dp.register_message_handler(help_cmd, commands=['start', 'help', 'info', 'старт', 'помощь', 'инфо'])\n dp.register_message_handler(get_top_videos, commands=['top', 'топ'])\n dp.register_message_handler(input_url, regexp=regex_youtube)\n","repo_name":"disasstor/Youtube-Grasper-Bot","sub_path":"bot/handlers/message_handlers.py","file_name":"message_handlers.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10471714790","text":"from astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\nimport cv2\n\nimage = open(\"imageDownload-11-irsawebops8---1.png\")\n\n\nimage = cv2.imread(\"imageDownload-11-irsawebops8---1.png\")\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nmean = np.mean(gray)\n#\n# # Open Image file, extract data and header\n# hdulist = fits.open('WISE-Band-4.fits')\n#\n# hdu = hdulist[0] # Holds image data\n# imagedata = hdu.data # Extract the image data\n# hduheader = hdulist[0] # Header that contains image details\n# mean = np.mean(imagedata)\n#\n# print(hduheader)\n#\nnp.set_printoptions(threshold=np.inf)\nshowstart = 'yes'\nif showstart == 'yes':\n plt.figure() # Original, unaltered image:\n plt.title(\"Hubble ST Full Image\")\n plt.imshow(gray, cmap='binary', vmin=0, vmax=mean*10, origin={'lower', 'lower'})\n plt.colorbar()\n plt.show()\n\n\n\n\n\n","repo_name":"BobbyHemming/bcg-image-analysis","sub_path":"IRemission.py","file_name":"IRemission.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72148451145","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 10:55:52 2019\n\n@author: jarkko\n\nSTUPID UDP GAME SERVER -- version 0.1\n\n\"\"\"\n\n##from _thread import start_new_thread\nimport threading as Thread\nimport queue as Queue\nimport socket\nfrom bitstring import BitArray\nfrom time import sleep\nfrom random import randrange\nimport sys\n\nSTANDARD_HOST_PORT = ((\"localhost\", 7000))\nprint_lock = Thread.Lock()\n\nclass PacketCounter:\n def __init__(self):\n self.receivedPackages = 0\n self.sentPackages = 0\n self.sentBytes = 0\n self.receivedBytes = 0\n \n def printAll(self):\n print(\"\\nReceived: \")\n print(\"Packages:\\t\", self.receivedPackages)\n print(\"Bytes:\\t\\t\", self.receivedBytes)\n print(\"\\nSent:\\t\")\n print(\"Packages:\\t\", self.sentPackages)\n print(\"Bytes:\\t\\t\", self.sentBytes)\n\nplayers = []\ntaskQueue = Queue.Queue(maxsize=0)\npacketCounter = PacketCounter()\n\nclass Player:\n \n def __init__(self, x, y, color, number):\n self.x = x\n self.y = y\n self.color = color\n self.number = number\n\nclass DataResponser:\n \n def __init__(self, data, sukka, addr):\n self.sukka = sukka\n self.addr = addr\n self.bitArray = toBits(data)\n ## PLAYER DATA\n self.type = self.analyzetype()\n self.number = self.analyzenumber()\n self.x = self.analyzeX()\n self.y = self.analyzeY()\n\n self.analyzeANDresponseToClient()\n\n ### Analyze and response method:\n def analyzeANDresponseToClient(self):\n if self.type == 0:\n \n newPlayerNumber = createPlayerSlot(100 + randrange(100),30 + randrange(400),1 + randrange(5))\n \n ## New 
player creation and data ship to client.\n if (newPlayerNumber != None and newPlayerNumber<8):\n \n print (\"Setting a new player ({addr}), player number {number}\".format(addr=self.addr, number = newPlayerNumber))\n '''\n NEW PLAYER NUMBER REQUEST\n \n [FRAMETYPE = \"00\"][SUCCESS=1 (1 bit)][PLAYER NUMBER (3 bits)][PLAYER COLOR (3 bits)]\n [START COORDINATES X (10 bits)][START COORDINATES Y (10 bits)]\n '''\n typesuccessbit = BitArray(bin=\"001\")\n \n typesuccessbit.append(defaultPlayerBit(players[newPlayerNumber].x,players[newPlayerNumber].y, players[newPlayerNumber].color, newPlayerNumber))\n \n packetCounter.sentBytes += sendData(typesuccessbit, self.addr, self.sukka)\n packetCounter.sentPackages += 1\n \n ## Too many players. Sending error flag to client. \n else:\n \n print (\"New player request from {addr}. Cannot create new player. Server is full! (MAX 8 PLAYERS)\".format(addr = self.addr))\n a = BitArray(bin=\"00000000\")\n packetCounter.sentBytes += sendData(a, self.addr, self.sukka)\n packetCounter.sentPackages += 1\n \n ###print(\"User cannot be added: \", self.addr)\n \n \n ## Client request for all player data\n if self.type == 1:\n \n '''\n REQUEST FOR ALL PLAYER DATA!\n \n server response:\n [FRAMETYPE = \"01\"] ((( NOT ACTIVE FEATURE :: [PLAYER AMOUNT (3 bits)] )))\n [PLAYER NUMBER (3 bits)][PLAYER COLOR (3 bits)][X (10 bits)][Y (10 bits)]\n '''\n \n typesuccessbit = BitArray(bin=\"01\") ## Type = 01 , Player Amount = 111 = 7 = all player data is sent \n allPlayersBit = BitArray()\n \n for i in range(8):\n playerbit = defaultPlayerBit(players[i].x, players[i].y, players[i].color, players[i].number)\n allPlayersBit.append(playerbit)\n \n typesuccessbit.append(allPlayersBit)\n \n packetCounter.sentBytes += sendData(typesuccessbit, self.addr, self.sukka)\n packetCounter.sentPackages += 1\n \n \n ## Client sets player coordinates.\n if self.type == 2:\n \n players[self.number].x = self.x\n players[self.number].y = self.y\n \n '''\n CLIENT REQUEST FOR SETTING COORDINATES\n \n Client frame:\n [FRAMETYPE = \"02\"][PLAYER NUMBER (3 bits)][X (10 bits)][Y (10 bits)]\n '''\n \n ## Respond with players playing.\n '''\n [FRAMETYPE = \"01\"][PLAYER AMOUNT (3 bits)] \n -- if amount is 3 players -- \n [PLAYER NUMBER (3 bits)][PLAYER COLOR (3 bits)][X (10 bits)][Y (10 bits)]\n [PLAYER NUMBER (3 bits)][PLAYER COLOR (3 bits)][X (10 bits)][Y (10 bits)]\n [PLAYER NUMBER (3 bits)][PLAYER COLOR (3 bits)][X (10 bits)][Y (10 bits)]\n \n Total bits --> 5 + (26 * 3)\n '''\n amountOfActivePlayers = 0\n \n type_allplaying = BitArray(bin=\"10\")\n allPlayersBit = BitArray()\n \n \n \n for i in range(8):\n ## ... don't send data of the one who requested the data ... \n if players[i].number != None and players[i].number != self.number:\n playerbit = defaultPlayerBit(players[i].x, players[i].y, players[i].color, players[i].number)\n allPlayersBit.append(playerbit)\n amountOfActivePlayers +=1\n \n type_allplaying.append(BitArray('uint:3={value}'.format(value=amountOfActivePlayers)))\n type_allplaying.append(allPlayersBit)\n packetCounter.sentBytes += sendData(type_allplaying, self.addr, self.sukka)\n packetCounter.sentPackages += 1\n \n \n ## Client removes own player data (quits...)\n if self.type == 3:\n \n print(\"Player {number} leaves the game. 
Goodbye!\".format(number = self.number))\n\n players[self.number].number = None\n players[self.number].x = None\n players[self.number].y = None\n players[self.number].color = None\n \n packetCounter.sentBytes += sendData(BitArray(bin=\"11\"), self.addr, self.sukka)\n packetCounter.sentPackages += 1\n\n \n ### These methods parse response data bit to decimal form. \n def analyzenumber(self):\n if self.type == 2 or self.type == 3:\n numberbyte = self.bitArray[2:5]\n return (binToInt(numberbyte))\n else:\n return None \n \n def analyzeX(self):\n if self.type == 2:\n xbyte = self.bitArray[5:15]\n return (binToInt(xbyte)) \n else:\n return None\n \n def analyzeY(self):\n if self.type == 2:\n ybyte = self.bitArray[15:25]\n return (binToInt(ybyte)) \n else:\n return None\n \n def analyzetype(self):\n typebyte = self.bitArray[0:2]\n return (binToInt(typebyte))\n ## End of bit-parsers. \n\ndef defaultPlayerBit(x,y,color, number): ## Default player data to protocol bit format.\n \n if number != None:\n playerbit = BitArray('uint:3={value}'.format(value=number))\n xbit = BitArray('uint:10={value}'.format(value=x))\n ybit = BitArray('uint:10={value}'.format(value=y))\n \n if color != None:\n colorbit = BitArray('uint:3={value}'.format(value=color))\n else:\n ## If no color, value bits are \"000\".\n colorbit = BitArray('uint:3=0')\n playerbit.append(colorbit)\n \n playerbit.append(xbit)\n playerbit.append(ybit)\n \n else:\n playerbit = BitArray('uint:26=0')\n\n return playerbit\n \ndef createPlayerSlot(x, y, color):\n\n for i in range(8):\n if players[i].number == None:\n players[i].x = x\n players[i].y = y\n players[i].color = color\n players[i].number = i\n return i\n return None\n \ndef toBits(bytearraydata): ## Received byte array to bit array.\n\n bitArray = BitArray()\n for databyte in bytearraydata:\n bitByte = BitArray('uint:8={value}'.format(value=databyte))\n bitArray.append(bitByte)\n return bitArray \n \n \ndef binToInt(binArray):\n \n counter = len(binArray)-1\n retVal = 0\n \n for bin in binArray:\n if bin == True:\n retVal += 2**counter\n counter -= 1\n return retVal\n\n## Changes BitArray to ByteArray. If there are bits missing from last byte, last bits will be zeros. \n\ndef toBytes(bitArray): ## Bit array to byte array.\n\n data = bitArray\n allBytes = bytearray(b'')\n while data:\n dataSlice = BitArray()\n \n if len(data)>7:\n dataSlice = data[:8]\n del data[:8]\n \n else:\n dataSlice = data\n binaryString = \"0b\"\n for i in range(8-len(data)):\n binaryString += '0'\n dataSlice.append(binaryString)\n data = None\n \n ##print ([binToInt(dataSlice)])\n \n allBytes.extend([binToInt(dataSlice)])\n \n return allBytes\n\n \ndef threaded_data_analyzer(soppa,data):\n \n try:\n bitArray = BitArray()\n for databyte in data:\n bitByte = BitArray('uint:8={value}'.format(value=databyte))\n bitArray.append(bitByte)\n print(databyte)\n \n ##bitStream = BitStream(data)\n ##print (bitStream.pos)\n print(bitArray.bin)\n print_lock.release()\n except Exception as ex:\n print (ex)\n \ndef responseTaskWorker(sukka, taskQueue, stopTask):\n while True:\n queuefillingup=0\n queuelen = taskQueue.qsize()\n if queuelen > 50:\n queuefillingup+=1\n if queuefillingup > 100:\n print (\"Server is slowing down... 
Tasks in analyze/response queue: \", queuelen)\n queuefillingup=0\n \n if not taskQueue.empty():\n data = taskQueue.get()\n ##print (data)\n DataResponser(data[\"data\"], sukka, data[\"addr\"])\n taskQueue.task_done()\n else:\n sleep(0.01)\n if stopTask.is_set():\n print(\"Response task runner shutting down...\")\n break\n return\n \ndef readUDPStream(soppa, worker):\n \n (data,addr) = soppa.recvfrom(64)\n ##print('Connection from: ', addr[0])\n # Start a new thread and return its identifier \n ##print(data)\n taskQueue.put({\"data\": data, \"addr\": addr})\n if len(data) == 1:\n print (\"data length 0\")\n packetCounter.receivedBytes += len(data)\n packetCounter.receivedPackages += 1\n \n #print_lock.acquire()\n #start_new_thread(threaded_data_analyzer, (soppa, data,)) \n\n \ndef init_worker(soppa, stopTask):\n \n worker = Thread.Thread(target=responseTaskWorker, args=(soppa,taskQueue,stopTask))\n worker.setDaemon(True)\n worker.start()\n return worker\n \ndef init_server(hostport):\n \n for i in range(8):\n players.append(Player(None, None, None, None))\n \n try:\n soppa = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n soppa.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n soppa.bind(hostport)\n \n except socket.error as er:\n print (\"Server failed to start! Error: {error}\".format(error = er))\n return\n \n return soppa\n\ndef sendData(bits, host, sukka):\n #print (len(bytessit))\n bits.append(BitArray(bin=\"1\"))\n bytessit = toBytes(bits)\n sendBytes = sukka.sendto(bytessit, host)\n return sendBytes\n\n\ndef setting_HostAndPort():\n arg_count = len(sys.argv)\n if arg_count == 3:\n host = sys.argv[1]\n try:\n port = int(sys.argv[2])\n return (host, port)\n except ValueError: \n print (\"Port number {port} not valid.\".format(port = sys.argv[2]))\n return None\n else:\n print (\"Using default host:port (localhost:7000). Use python game_server.py to specify host and port.\")\n return STANDARD_HOST_PORT\n \n\n \ndef main():\n print (\"Stupid UDP Game Server, version 0.1\")\n HOST_PORT = setting_HostAndPort()\n if HOST_PORT == None:\n return\n stopTask = Thread.Event() # This is used to stop the response worker thread when server closes. \n soppa = init_server(HOST_PORT)\n worker = init_worker(soppa, stopTask)\n \n if soppa:\n print(\"UDP Game server started at {host}!\".format(host = HOST_PORT))\n else:\n print(\"Server cannot start. Try another host/port. Usage: python game_server.py \")\n return\n \n try:\n while True: \n readUDPStream(soppa, worker)\n \n except KeyboardInterrupt:\n print(\"\\nServer is closing...\")\n except socket.error as er:\n print (\"Server failed while running. Server shutting down! 
Error: {error}\".format(error = er))\n return\n \n stopTask.set()\n worker.join()\n print(\"Server is shut!\")\n soppa.close()\n \n sleep(0.5)\n \n packetCounter.printAll()\n\n return\n \nif __name__ == \"__main__\":\n main()\n\n ","repo_name":"jurbbo/udp-game-server-and-client-example","sub_path":"game_server.py","file_name":"game_server.py","file_ext":"py","file_size_in_byte":13473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1031103390","text":"import collections \n\ndef solution(N, road, K):\n dist = [0,0]+[float('INF')]*(N-1)\n graph = collections.defaultdict(list)\n for n1,n2,t in road:\n graph[n1].append((n2,t))\n graph[n2].append((n1,t)) \n queue = collections.deque([1])\n while queue:\n n= queue.popleft()\n for wn,wt in graph[n]:\n if dist[wn] > dist[n]+wt:\n dist[wn] = dist[n]+wt\n queue.append(wn)\n return len([d for d in dist if d <= K])-1\n","repo_name":"sawol/algorithm","sub_path":"programmers.co.kr/배달.py","file_name":"배달.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39308832297","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 26 23:09:56 2017\n\n@author: shwetank\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn import metrics\nfrom IPython.display import Image\nfrom sklearn import tree\nimport pydotplus \nfrom sklearn.metrics.pairwise import cosine_similarity as cs\nfrom sklearn.metrics import jaccard_similarity_score as js\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix\nimport warnings\nimport random\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\nprint(\"Reading Adjacency Matrix\")\nA = pd.read_csv(\"adjMat.csv\",header=None);\n\nno_edges = A.sum().sum()\nno_nodes = A.shape[1]\n\n###################### Basic Feature Definitions ##########################\nprint(\"Let Us Start Basic Feature Definition of IB and OB and read exp(A)\")\nA = A.values \nexpA = pd.read_csv(\"exp_mat.csv\",header = None)\nexpA = expA.values\n#expA_n = expA/(expA.max().max())\n#expA_n = expA_n.round(5)\nIB_exp = np.sum(expA,axis = 0) ###columnwise Inboundnes\nOB_exp = np.sum(expA,axis = 1) ###rowwise Outboundness\nIB_adj = np.sum(A,axis = 0)\nOB_adj = np.sum(A,axis = 1)\n\ncntrl = [] \nfor i in range(no_nodes): \n cntrl.append(expA[i][i])\n\nprint(\"Start appending features\")\n\nfeat_train = []\nlab_train = []\nfor i in range(no_nodes):\n for j in range(no_nodes):\n lab_train.append(A[i][j])\n temp = []\n #node features\n \n temp.append(IB_exp[i])\n temp.append(IB_exp[j])\n temp.append(OB_exp[i])\n temp.append(OB_exp[j])\n temp.append(cntrl[i])\n \n temp.append(cntrl[j])\n #edge features\n \n temp.append(expA[i][j])\n \n temp.append(IB_adj[i])\n temp.append(IB_adj[j])\n temp.append(OB_adj[i])\n temp.append(OB_adj[j])\n foo = cs(A[i],A[j])\n temp.append(foo[0][0])\n temp.append(js(A[i],A[j]))\n \n feat_train.append(temp)\n\nprint(\"Calculating training and testing feature\")\nedge_size = 2430\nper = 0.8 \nfeat_ones = []\nfeat_zeros = []\nlab_ones = []\nlab_zeros = []\ncount = 0\nfor i in range(no_nodes):\n for j in range(no_nodes):\n if i==j:\n continue\n if lab_train[300*i+j]==1:\n feat_ones.append(feat_train[300*i+j])\n lab_ones.append(1)\n else:\n if count None:\n \"\"\"\n Set a custom function for logging to the user interface.\n\n Args:\n ui_log_func (Callable[[str], Any]): A custom function for logging messages\n to the 
user interface.\n log_formatter (logging.Formatter, optional): The log formatter to use for\n UI logging. If not provided, the default formatter will be used.\n\n Returns:\n None\n \"\"\"\n self.ui_log_funcs.append(ui_log_func)\n if not log_formatter:\n log_formatter = self.log_formatter\n CustomLoggerManager().create_ui_log_func_handler(ui_log_func, log_formatter)\n\n\nclass CustomLoggerManager:\n \"\"\"\n A manager for creating and configuring custom loggers.\n\n This class follows the singleton pattern to ensure that only one instance of\n a logger manager is created.\n\n Args:\n default_config (DefaultConfig, optional): The default logging configuration.\n If not provided, a default configuration is used.\n\n Attributes:\n logger (logging.Logger): The custom logger instance configured by this manager.\n\n Usage:\n To access the custom logger, use the log() function:\n log().info(\"some info\")\n log().error(\"some error\")\n with log().insert_proc_id(proc_id):\n log().info(\"here should be proc id\")\n with log().insert_func_name():\n log().info(\"here also should be proc id and a func name\")\n \"\"\"\n\n _instance = None\n\n def __new__(cls, default_config: DefaultConfig = None):\n \"\"\"Create a new instance of CustomLoggerManager or return\n the existing instance if one exists.\"\"\"\n if cls._instance is None:\n cls._instance = super(CustomLoggerManager, cls).__new__(cls)\n cls._instance.init_logger(default_config)\n return cls._instance\n\n # pylint: disable=W0201\n def init_logger(self, default_config: DefaultConfig = None) -> None:\n \"\"\"\n Initialize the custom logger instance based on the provided or\n default configuration.\n\n Args:\n default_config (DefaultConfig, optional): The default logging configuration.\n If not provided, a default configuration is used.\n \"\"\"\n if default_config is None:\n default_config = DefaultConfig()\n self.logger = self.init_logging(default_config)\n\n @staticmethod\n def get_custome_logger(logger_name=\"forgelog\") -> CustomLogger:\n \"\"\"\n Get a custom logger instance with the specified name.\n\n Args:\n logger_name (str, optional): The name of the logger. 
Defaults to 'forgelog'.\n\n Returns:\n logging.Logger: A custom logger instance.\n\n \"\"\"\n # Check if the logger with the specified name already exists\n if logger_name in logging.Logger.manager.loggerDict:\n return logging.getLogger(logger_name)\n logger = CustomLogger(logger_name)\n logging.Logger.manager.loggerDict[logger_name] = logger\n return logger\n\n @staticmethod\n def create_filehandler(\n log_dir: str,\n log_level: Any,\n log_formatter: logging.Formatter,\n file_postfix: str,\n ) -> logging.FileHandler:\n \"\"\"\n Create a file handler for logging.\n\n Args:\n log_dir (str): The directory to store log files.\n Defaults to \"data/log\".\n log_level (int): The log level for the handler.\n log_formatter (logging.Formatter): The log message formatter.\n file_postfix (str): The postfix for log files.\n\n Returns:\n logging.FileHandler: A file handler for logging.\n\n \"\"\"\n os.makedirs(log_dir, exist_ok=True)\n\n # Get the current month and year\n current_month = datetime.datetime.now().strftime(\"%m\")\n current_year = datetime.datetime.now().strftime(\"%Y\")\n\n # Create the log file name using the current month and year\n log_file_name = f\"{current_month}.{current_year}.{file_postfix}\"\n # Create the full path to the log file\n log_file_path = os.path.join(log_dir, log_file_name)\n\n # Configure the logging module\n\n file_handler = logging.FileHandler(log_file_path, mode=\"a\")\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(log_level)\n\n return file_handler\n\n @classmethod\n def init_logging(cls, default_config: DefaultConfig) -> CustomLogger:\n \"\"\"\n Initialize the logging configuration based on the provided or\n default configuration.\n\n Args:\n default_config (DefaultConfig): The default logging configuration.\n\n Returns:\n logging.Logger: The custom logger instance.\n\n \"\"\"\n os.makedirs(default_config.log_dir, exist_ok=True)\n\n file_handler_info = cls.create_filehandler(\n default_config.log_dir,\n logging.INFO,\n default_config.log_formatter,\n default_config.info_file_postfix,\n )\n file_handler_debug = cls.create_filehandler(\n default_config.log_dir,\n logging.DEBUG,\n default_config.log_formatter,\n default_config.debug_file_postfix,\n )\n\n # file_handler_info = create_filehandler\n # Get the root logger and add the handlers\n # logger = logging.getLogger()\n logger = cls.get_custome_logger()\n logger.handlers = []\n # Each handlers could have own log level, because this func\n # could be called multiple times\n logger.addHandler(file_handler_info)\n logger.addHandler(file_handler_debug)\n\n # Create a console handler\n if default_config.is_print_in_con:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(default_config.log_formatter)\n console_handler.setLevel(logging.INFO)\n logger.addHandler(console_handler)\n # A file handler for saving logs to the file\n if default_config.ui_log_funcs:\n map(\n cls.create_ui_log_func_handler,\n (default_config.ui_log_funcs, default_config.log_formatter),\n )\n return logger\n\n def create_ui_log_func_handler(\n self,\n ui_log_func: Callable[[str], Any],\n log_formatter: logging.Formatter,\n ) -> None:\n \"\"\"\n Create a log handler for logging to the user interface using a custom\n log function.\n\n Args:\n ui_log_func (Callable[[str], Any]): A custom function for logging\n messages to the user interface.\n log_formatter (logging.Formatter): The log formatter to be used\n for UI logging.\n\n Returns:\n None\n \"\"\"\n ui_handler = UILogHandler(ui_log_func)\n 
ui_handler.setFormatter(log_formatter)\n        ui_handler.setLevel(logging.INFO)\n        self.logger.addHandler(ui_handler)\n\n\ndef log() -> CustomLogger:\n    \"\"\"Emitting the log message.\"\"\"\n    logger_instance = CustomLoggerManager()\n    return logger_instance.logger\n","repo_name":"izharus/log_wizard","sub_path":"src/log_wizard/logger_manager.py","file_name":"logger_manager.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41035263458","text":"import re, sys, os, fnmatch, logging\n\n\n#Patents collections currently failing: #2, #17, #46, #98\n'''\nInput: Null\nOutput: Takes each .txt file from patent directories for every\n\tpatent, and prints each abstract* to its own .txt file.\n\t* Line 0 of the output file is the author, Line 1 is the title, and Line 2 is the abstract \n\tThese will be used for training doc2vec, as well as comparing each abstract \n\tto the primary patent in question \n'''\n#init logging\nlogging.basicConfig(level=logging.DEBUG,\n                    format='%(asctime)s %(levelname)-8s %(message)s',\n                    datefmt='%a, %d %b %Y %H:%M:%S',\n                    filename='.pypatent.log')\n\ndef retrieve_text_files():\n\tlogging.info(\"* Beginning retrieve_text_files(). Abstracts --> txt files ... \")\n\tall_patents_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'TEXT_Files')) \n\tpatent_dirs = os.listdir(all_patents_dir)\n\n\tfor pd in patent_dirs: #E.g. #2 US200500blahblah-Description\n\t\t#Check to see if 'WOS Literature search' dir exists \n\t\tpd_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'TEXT_Files', pd))\n\t\tdir_exists = os.path.isdir(pd_path)\n\t\tif dir_exists is True:\n\t\t\tlogging.info(\"Directory \" + str(pd)+ \" exists!\")\n\t\tif dir_exists is False:\n\t\t\tlogging.info(\"Missing a directory\")\n\t\t\t#If it doesn't exist, just look in the patent directory (pd)\n\n\t\t#Init empty lists to append info to \n\t\tauthors = []\n\t\ttitles = []\n\t\tabstracts = []\n\t\t\n\t\ttraindir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'train')) #/PyPatent/train\n\t\t\n\t\tpatent_association = pd[:5] #grab first 5 chars to get patent number\n\t\tpatent_number = re.sub('[^\\d]','', patent_association) #just keep number\n\t\tlogging.info(\"* Patent \"+ str(patent_number))\n\n\t\ttry:\n\t\t\tsearch_docs = os.listdir(pd_path)\n\t\t\tfor txtfile in search_docs:\n\t\t\t\t#the files for #128 are incorrect but files starting with just 128 are fine\n\t\t\t\t# #46 and #17 have been fixed \n\t\t\t\tif txtfile.startswith(\"#128\"):\n\t\t\t\t\t'''\n\t\t\t\t\tMissing ---------- dividers between search records:\n\t\t\t\t\t#17 Records Search parts 1-10\n\t\t\t\t\t#46 Records Search \n\t\t\t\t\t'''\n\t\t\t\t\tlogging.info(\"SKIPPING: \" + str(txtfile))\n\t\t\t\t\tlogging.info(\"it's not formatted correctly...\")\n\t\t\t\t\tpass\n\t\t\t\t\n\t\t\t\telif fnmatch.fnmatch(txtfile, '*.txt'): #this ignores _DS_Store files \n\t\t\t\t\tf = os.path.abspath(os.path.join(pd_path, txtfile))\n\t\t\t\t\t#print(f)\n\t\t\t\t\tlogging.info(\"reading file: \" + str(f))\n\t\t\t\t\tfulltext = open(f, 'r', encoding=\"ISO-8859-1\")\n\t\t\t\t\treadtext = fulltext.read()\n\t\t\t\t\trecordtext = re.split('\\_+', readtext) #split on \"______\" type things\n\n\t\t\t\t\t#print(str(len(recordtext)) + \" RECORDS IN \" + str(f))\n\n\t\t\t\t\tfor record in recordtext:\n\t\t\t\t\t\tif (len(record)) < 100: #if it's too short, it's probably empty\n\t\t\t\t\t\t\tlogging.info(\"Empty record. 
Skip.\")\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#initialize empty lists to check that the record has author, title, abstract\n\t\t\t\t\t\t\tcurrent_author = []\n\t\t\t\t\t\t\tcurrent_title = []\n\t\t\t\t\t\t\tcurrent_abstract = [] \n\t\t\t\t\t\t\trunning_abstract = [] #for multi-line abstracts \n\n\t\t\t\t\t\t\ttext_lines = record.splitlines()\n\n\t\t\t\t\t\t\tfor i, line in enumerate(text_lines):\n\t\t\t\t\t\t\t\t#print(str(i)+\": \" + str(line[:20]))\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#Get Author\n\t\t\t\t\t\t\t\tif re.match(\"^(\\t*By\\:)\", line):\n\t\t\t\t\t\t\t\t\tcurrent_author.append(line)\n\t\t\t\t\t\t\t\t\tlogging.info(line)\n\t\t\t\t\t\t\t\telif re.match(\"^(\\t*Author\\(s\\))\", line):\n\t\t\t\t\t\t\t\t\tcurrent_author.append(line)\n\t\t\t\t\t\t\t\t\tlogging.info(line)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#Get Title\n\t\t\t\t\t\t\t\tif re.match(\"^(\\t*Title\\:)\", line):\n\t\t\t\t\t\t\t\t\tcurrent_title.append(line)\n\t\t\t\t\t\t\t\t\tlogging.info(line)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#Get Abstract\n\t\t\t\t\t\t\t\tif re.match(\"^(\\t*Abstract\\:)\", line):\n\t\t\t\t\t\t\t\t\trunning_abstract.append(line)\n\t\t\t\t\t\t\t\t\t#print(line)\n\t\t\t\t\t\t\t\t\tlogging.info(line)\n\t\t\t\t\t\t\t\t\t#TODO: get the next line\n\n\t\t\t\t\t\t\t\t#if the previous line was Abstract:\n\t\t\t\t\t\t\t\tif re.match(\"^(\\t*Abstract\\:)\",text_lines[i-1]):\n\t\t\t\t\t\t\t\t\t#and if this current line is longer than 100\n\t\t\t\t\t\t\t\t\tif len(line) >= 50:\n\t\t\t\t\t\t\t\t\t\tif not re.match(\"^\\t*(Title\\:|Source|By|Author|Conference)\", line):\n\t\t\t\t\t\t\t\t\t\t\t#print(line)\n\t\t\t\t\t\t\t\t\t\t\trunning_abstract.append(line)\n\n\t\t\t\t\t\t\t\t#if there was \"Abstract:\" header two lines ago, \n\t\t\t\t\t\t\t\t# it may still be part of the abstract \n\t\t\t\t\t\t\t\tif re.match(\"^(\\t*Abstract\\:)\",text_lines[i-2]):\n\t\t\t\t\t\t\t\t\t#and if this current line is longer than 100\n\t\t\t\t\t\t\t\t\tif len(line) >= 50:\n\t\t\t\t\t\t\t\t\t\tif not re.match(\"^\\t*(Title\\:|Source|By|Author|Conference)\", line):\n\t\t\t\t\t\t\t\t\t\t\t#print(line)\n\t\t\t\t\t\t\t\t\t\t\trunning_abstract.append(line)\n\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif line == text_lines[-1]: #if its the last line\n\t\t\t\t\t\t\t\t\t#print(len(running_abstract))\n\t\t\t\t\t\t\t\t\tif len(running_abstract) >= 1:\n\t\t\t\t\t\t\t\t\t\tjoined_text = (\" \").join(running_abstract)\n\t\t\t\t\t\t\t\t\t\t#print(joined_text)\n\t\t\t\t\t\t\t\t\t\tcurrent_abstract.append(joined_text)\n\n\t\t\t\t\t\t\t#Make sure this abstract has Author, Title, Abstract\n\t\t\t\t\t\t\tif (len(current_author)) == 0: #no author\n\t\t\t\t\t\t\t\tempty_author = 'Null Author'\n\t\t\t\t\t\t\t\tlogging.info(\"Null Author\")\n\t\t\t\t\t\t\t\tauthors.append(empty_author)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tauthors.append(current_author[0])\n\n\t\t\t\t\t\t\tif (len(current_title)) == 0: #no title\n\t\t\t\t\t\t\t\tempty_title = 'Null Title'\n\t\t\t\t\t\t\t\tlogging.info(\"Null Title\")\n\t\t\t\t\t\t\t\ttitles.append(empty_title)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ttitles.append(current_title[0])\n\n\t\t\t\t\t\t\tif (len(current_abstract)) == 0: #no abstract\n\t\t\t\t\t\t\t\tempty_abstract = 'Null Abstract'\n\t\t\t\t\t\t\t\tlogging.info(\"Null Abstract\")\n\t\t\t\t\t\t\t\tabstracts.append(empty_abstract)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tabstracts.append(current_abstract[0])\n\n\t\t\t#NB: this is on the level of 1 patent (multiple lit search files )\n\t\t\t# logging.info(\"* Done extracting abstracts from this file... 
\")\n\t\t\t# logging.info(str(len(authors)) + \" authors\")\n\t\t\t# logging.info(str(len(titles)) + \" titles\")\n\t\t\t# print(str(len(abstracts)) + \" abstracts\")\n\n\t\t\t'''\n\t\t\tThe only lit searches showing up with repeats are Patents 2, 17, 46\n\t\t\t17 and 46 were excluded for bad formatting\n\t\t\t2 ... will need to investivate why nothing is retrieved here\n\t\t\t'''\n\t\t\t# unique_authors = list(set(authors))\n\t\t\t# unique_titles = list(set(titles))\n\t\t\t# unique_abstracts = list(set(abstracts))\n\n\t\t\t# print(\"* KEEPING ONLY UNIQUE ABSTRACTS ... \")\n\t\t\t# print(str(len(unique_authors)) + \" unique authors\")\n\t\t\t# print(str(len(unique_titles)) + \" unique titles\")\n\t\t\t# print(str(len(unique_abstracts)) + \" unique abstracts\")\n\t\t\t# print(\"#\" * 20)\n\n\t\t\t#Print abstracts to 'train' folder\n\t\t\t#only grab the first thing from abstracts where there might be duplicates?\n\n\t\t\tprint_info = list(zip(authors, titles, abstracts))\n\t\t\tlogging.info(\"* printing abstracts to txt\")\n\t\t\tfor i in range(0, len(print_info)):\n\t\t\t\tabstract_name = str(patent_number) + '_' +str(i) + '.txt'\n\t\t\t\tcompleteName = os.path.join(traindir, abstract_name)\n\t\t\t\tlogging.info(completeName)\n\t\t\t\tsys.stdout = open(completeName, \"w\")\n\t\t\t\tprint(print_info[i][0]) #line 0 will be author\n\t\t\t\tprint('\\n')\n\t\t\t\tprint(print_info[i][1]) #line 1 will be title\n\t\t\t\tprint('\\n')\n\t\t\t\tprint(print_info[i][2]) #line 2 will be abstract\n\t\t\t\tprint('\\n')\n\n\t\texcept Exception as e: #probably a .DS_Store file\n\t\t\tlogging.info(e)\n\n\nretrieve_text_files()\n\n","repo_name":"hclent/PyPatent","sub_path":"readabstracttxt.py","file_name":"readabstracttxt.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"243361100","text":"#Openpyxl imports\nfrom openpyxl import load_workbook, Workbook, cell\nfrom openpyxl.styles import Color, PatternFill, Font, Border\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.writer.excel import save_virtual_workbook\n\n\n#Database imports\nfrom django.db import transaction\nfrom prof.models import FileSpace, RunSpace, TransactionData, PnLData\nfrom django.core.files.base import ContentFile\n\n#Global constants import\nfrom prof.settings import Transactions_settings_dict, PnL_settings_dict, Key_settings_dict\n\n#Other imports\nfrom datetime import date, timedelta, datetime\nimport pandas as pd, numpy as np\n\n\ndef ifHeaderReturnIndex(row, desiredColumns):\n '''This function checks if a particular excel row is the header row. It does so by simply checking that each of the\n column names in the desired header row appear in the row once, and exactly once. If it is a header row, the function\n returns the index of the header columms as a tuple. If not, it retruns the python 'None' object. The arguments are:\n 1. An openpyxl worksheet\n 2. A tuple of strings - the column headers'''\n rowtuple=tuple(cell.value for cell in row)\n i=True\n \n for ColumnName in desiredColumns:\n if rowtuple.count(ColumnName) != 1: i=False\n if i==False:\n return None\n else:\n a=list()\n for i in desiredColumns:\n a.append(rowtuple.index(i))\n return tuple(a)\n \n \ndef isRowValid(row,index):\n '''Determines if the row is a valid data row. 
For the moment, simply checks if the first column indicated by selectedIndex is empty.'''\n    return bool(row[index[0]].value)\n\ndef isRowAggregate(row,index):\n    '''This function determines if a row is an aggregate row. For the moment, it will simply look at the bold rows.'''\n    return row[index[0]].font.b\n\n@transaction.atomic\ndef ProcessTransactionData(fileObject, run, Transactions_settings_dict = Transactions_settings_dict,foreignKeyFileObject=None):\n    ''' This function takes the file object, the name of the sheet in which the data is stored, and loads the transaction data into the Django model TransactionData.'''\n    sheetName = Transactions_settings_dict['TransactionSheetName']\n    desiredColumns = Transactions_settings_dict['desiredColumns']\n    if not(foreignKeyFileObject): foreignKeyFileObject = fileObject\n    wb=load_workbook(fileObject.File, read_only=True, data_only=True)\n    ws=wb.get_sheet_by_name(sheetName)\n    selectedIndex=None\n    for row in ws.rows:\n        if selectedIndex:\n            if isRowValid(row,selectedIndex):\n                newrow=[row[i].value for i in selectedIndex]\n                dictionary=dict(zip(outputColumnNames,newrow))\n                dictionary['run']=run\n                dictionary['SourceFile']=foreignKeyFileObject\n                TransactionData(**dictionary).save()\n        else:\n            if ifHeaderReturnIndex(row,desiredColumns):\n                selectedIndex=ifHeaderReturnIndex(row, desiredColumns)\n                outputColumnNames=[Transactions_settings_dict['column_names_to_model_dict'][row[i].value] for i in selectedIndex]\n\n\n\n\n\n\ndef revCells(row):\n    '''This is formatting for a row that carries revenue.'''\n    for cell in row:\n        cell.fill=PatternFill(start_color='d2fedb', end_color='d2fedb', fill_type='solid')\n\ndef costCells(row):\n    '''This is formatting for a row that carries costs.'''\n    for cell in row:\n        cell.fill=PatternFill(start_color='ffe8e8', end_color='ffe8e8', fill_type='solid')\n\ndef headerCells(row):\n    '''This is formatting for a row that has column names.'''\n    for cell in row:\n        cell.fill=PatternFill(start_color='ffff00', end_color='ffff00', fill_type='solid')\n        cell.font=Font(bold=True)\n\ndef makeBold(row):\n    '''This function makes a row bold'''\n    for cell in row:\n        cell.font=Font(bold=True)\n\n\ndef fixWidth(ws):\n    '''\n    This function takes an openpyxl worksheet as an input and returns an openpyxl worksheet after setting the column\n    width for columns in the sheet at the maximum of the length of the string in any of the cells in the column.\n    '''\n    column_widths = []\n    for row in ws:\n        for i, cell in enumerate(row):\n            if len(column_widths) > i:\n                if len(str(cell.value))+2 > column_widths[i]:\n                    column_widths[i] = len(str(cell.value))+2\n            else:\n                column_widths += [len(str(cell.value))+2]\n\n    for i, column_width in enumerate(column_widths):\n        ws.column_dimensions[get_column_letter(i+1)].width = column_width\n    \ndef isRevAggregate(row,index,revenueMarkers=PnL_settings_dict['revenueMarkers']):\n    '''\n    This function simply determines if a row in the PnL is a \"Net Revenue\" row, mostly used for styling purposes.\n    Presently, it simply looks at the text in the first column and checks if any of the markers are present.\n    '''\n    mark=False\n    for marker in revenueMarkers:\n        if marker in row[index[0]].value: mark=True\n    return mark\n    \ndef applyPnLStyling(ws,pnl_column_names,revenueMarkers):\n    headerfound=False\n    Revenue=True\n    for row in ws.rows:\n        if headerfound and Revenue:\n            revCells(row)\n            if isRevAggregate(row,selectedIndex,revenueMarkers): Revenue = False\n        elif headerfound and not(Revenue):\n            costCells(row)\n        elif ifHeaderReturnIndex(row,pnl_column_names):\n            
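# the first matching header row is highlighted here and fixes selectedIndex,\n            # which the revenue/cost branches above use for every later row; rows seen\n            # before the header fall through to makeBold in the else branch below\n            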
headerCells(row)\n selectedIndex=ifHeaderReturnIndex(row, pnl_column_names)\n headerfound=True\n else:\n makeBold(row)\n \n\ndef generateSamplePnLBUSheet(run, ws, BU, cunstructor_dict = PnL_settings_dict):\n '''\n This function generates a sample PnL based on the transaction data. It basically accepts the run, and the BU as\n an argument and then determines the period in which tansactions were present. It then creates a sample based\n on this determined period.'''\n \n #Determine the periods in which transacitons are present for the business unit, and create column names\n if BU == cunstructor_dict['aggregate_sheet_name']:\n period_start_date=min(x.TransactionDate for x in TransactionData.objects.filter(run=run))\n period_end_date=max(x.TransactionDate for x in TransactionData.objects.filter(run=run))\n else:\n period_start_date= run.RunPeriodStart # min(x.TransactionDate for x in TransactionData.objects.filter(run=run, BusinessUnit = BU))\n period_end_date= run.RunPeriodEnd # max(x.TransactionDate for x in TransactionData.objects.filter(run=run, BusinessUnit = BU))\n date_extent=[period_start_date+timedelta(i) for i in range((period_end_date-period_start_date).days+1)]\n date_set=set(datetime.strftime(i,'%b-%Y') for i in date_extent)\n period_column_names = sorted(date_set, key=lambda day: datetime.strptime(day, \"%b-%Y\"))\n \n #Create a preamble\n preamble_rows=[['Business Unit', 'Please duplicate this sheet for each business unit'],['Period Start Date',period_start_date ],['Period End Date',period_end_date ],[]]\n\n #Create the list of column names\n leading_column_names=cunstructor_dict['leading_column_names']\n trailing_column_names=[cunstructor_dict['total_column_name']]\n pnl_column_names=leading_column_names+period_column_names+trailing_column_names \n \n #Create sample body\n sample_body = [list(i) for i in zip(cunstructor_dict['sample_line_descriptions'],cunstructor_dict['sample_line_codes'])]\n \n #Append preamble to worksheet\n for row in preamble_rows:\n ws.append(row)\n\n #Append header\n ws.append(pnl_column_names)\n\n #Append body, marking the aggregate rows as bold\n for i, rowvals in [i for i in zip(cunstructor_dict['sample_line_aggregate_status'],sample_body)]:\n row=[]\n for v in rowvals:\n newcell = cell.Cell(ws,column='A', row=1,value=v)\n row.append(newcell)\n if i==1: makeBold(row)\n ws.append(row)\n\n #Fix column width for readability\n fixWidth(ws)\n\n #Apply cost and revenue coloring\n applyPnLStyling(ws,pnl_column_names,cunstructor_dict['revenueMarkers'])\n \n return(ws)\n \n \ndef generateSamplePnL(run, cunstructor_dict = PnL_settings_dict):\n wb=Workbook()\n wb.remove_sheet(wb.active)\n #BUs = set(x.BusinessUnit for x in TransactionData.objects.filter(run=run))\n #BUs.add(cunstructor_dict['aggregate_sheet_name'])\n BUs = [cunstructor_dict['aggregate_sheet_name']] #well, we decided to create a sample just for the whole thing in the end.\n for BU in BUs:\n ws=wb.create_sheet(BU)\n generateSamplePnLBUSheet(run, ws, BU, cunstructor_dict)\n return(ContentFile(save_virtual_workbook(wb)))\n \n \n \ndef updateRunPeriod(run):\n run.RunPeriodStart = min(x.TransactionDate for x in TransactionData.objects.filter(run=run))\n run.RunPeriodEnd = max(x.TransactionDate for x in TransactionData.objects.filter(run=run))\n run.save()\n \n\ndef returnPnLPeriodColumnNames(run):\n date_extent=[run.RunPeriodStart+timedelta(i) for i in range((run.RunPeriodEnd-run.RunPeriodStart).days+1)]\n date_set=set(datetime.strftime(i,'%b-%Y') for i in date_extent)\n return sorted(date_set, 
key=lambda day: datetime.strptime(day, \"%b-%Y\"))\n    \n    \n    \n    \n    \ndef processPnLSheet_to_pd(ws, run, PnL_settings_dict=PnL_settings_dict):\n    '''This function processes a PnL file sheet. It takes an openpyxl worksheet (ws), and then returns a pandas dataframe.\n    The arguments are:\n    1. ws: Name of the worksheet to process\n    2. PnL_settings_dict: The dictionary in prof/settings'''\n    desiredColumns = PnL_settings_dict['leading_column_names'] + returnPnLPeriodColumnNames(run) + [PnL_settings_dict['total_column_name']]\n    revenueSwitch=True\n    selectedIndex=None\n    for row in ws.iter_rows():\n        if selectedIndex:\n            if isRowValid(row,selectedIndex):\n                newrow=[row[i].value for i in selectedIndex]\n                newrow.append(isRowAggregate(row,selectedIndex))\n                newrow.append(revenueSwitch)\n                if isRevAggregate(row,selectedIndex): revenueSwitch = False\n                df=df.append(pd.DataFrame([newrow], columns=outputColumnNames))\n        elif ifHeaderReturnIndex(row,desiredColumns):\n            selectedIndex=ifHeaderReturnIndex(row, desiredColumns)\n            outputColumnNames=[row[i].value for i in selectedIndex]\n            outputColumnNames.append(PnL_settings_dict['revenue_or_cost_column_name'])\n            outputColumnNames.append(PnL_settings_dict['aggregate_line_flag_column_name'])\n            df=pd.DataFrame(columns=outputColumnNames)\n    return(df)\n\n@transaction.atomic\ndef processPnLFile(fileObject, run, PnL_settings_dict=PnL_settings_dict, foreignKeyFileObject=None):\n    '''This function processes a PnL file sheet. It takes a PnL file object, and feeds the data to the Django model\n    named PnLData.\n    The arguments are:\n    1. fileObject: The PnL file object (an object of FileSpace model)\n    2. run: The associated run, an object of the RunSpace model\n    3. PnL_settings_dict: The dictionary in prof/settings\n    4. foreignKeyFileObject: The file object which should be marked as the foreign key if not the same as fileObject'''\n    if not(foreignKeyFileObject): foreignKeyFileObject = fileObject\n    wb=load_workbook(fileObject.File, read_only=True, data_only=True)\n    periodColumns = returnPnLPeriodColumnNames(run)\n    desiredColumns = PnL_settings_dict['leading_column_names'] + returnPnLPeriodColumnNames(run) + [PnL_settings_dict['total_column_name']]\n    columnsForModel = PnL_settings_dict['leading_column_names'] +[PnL_settings_dict['revenue_or_cost_column_name']] + [PnL_settings_dict['aggregate_line_flag_column_name']]\n    fullPnL=pd.concat([processPnLSheet_to_pd(ws, run, PnL_settings_dict).set_index(PnL_settings_dict['leading_column_names']) for ws in wb.worksheets], keys=wb.get_sheet_names(),names=[PnL_settings_dict['BU_column_name']]).reset_index().replace(np.nan,0)\n    fullPnL = pd.melt(fullPnL,id_vars=columnsForModel, value_vars= \n                      periodColumns,\n                      var_name = PnL_settings_dict['period_column_name'], value_name=PnL_settings_dict['amount_column_name'])\n    fullPnL.columns = [PnL_settings_dict['column_names_to_model_dict'][i] for i in fullPnL.columns]\n    list_of_dicts = [row.to_dict() for index,row in fullPnL.iterrows()]\n    for row in list_of_dicts:\n        row['run'] = run\n        row['SourceFile'] = foreignKeyFileObject\n        PnLData(**row).save()\n    \n    \ndef createQuantityKey(run, Transactions_settings_dict= Transactions_settings_dict, Key_settings_dict=Key_settings_dict):\n    wb=Workbook()\n    wb.remove_sheet(wb.active)\n    ws = wb.create_sheet('Quantity')\n    Tr=pd.DataFrame.from_records(TransactionData.objects.filter(run=run).values())\n    Tr['Month'] = Tr['TransactionDate'].apply(lambda x: datetime.strftime(x,'%b-%Y'))\n    key_mont_column_names_sorted = sorted(set(Tr['Month']), key=lambda day: datetime.strptime(day, 
\"%b-%Y\"))\n QKey=Tr[['BusinessUnit','ProductNumber','Month', 'Quantity']].groupby( ['BusinessUnit','ProductNumber','Month']).sum().round().unstack()\n QKey.columns=QKey.columns.droplevel(0)\n QKey.columns.name=None\n #QKey[QKey!=np.nan]=None\n #QKey['Trailing Twelve Months'] = np.nan\n QKeyAgg=Tr[['BusinessUnit','ProductNumber', 'Quantity']].groupby( ['BusinessUnit','ProductNumber']).sum().round()\n QKeyAgg.rename(columns={'Quantity': Key_settings_dict['last_col_name']},inplace=True)\n QKey=pd.concat([QKey,QKeyAgg],axis=1)\n QKey=QKey.reset_index().fillna(0)\n QKey = QKey[['BusinessUnit','ProductNumber']+key_mont_column_names_sorted+[Key_settings_dict['last_col_name']]]\n row=[]\n for v in QKey.columns:\n newcell = cell.Cell(ws,column='A', row=1,value=v)\n newcell.font=Font(bold=True)\n row.append(newcell)\n ws.append(row)\n for index, row in QKey.iterrows():\n ws.append(list(row))\n row = ws.row_dimensions[1]\n row.font = Font(bold=True)\n fixWidth(ws)\n return(ContentFile(save_virtual_workbook(wb)))","repo_name":"gitkwr/profmodel","sub_path":"prof/ProcessingFunctions.py","file_name":"ProcessingFunctions.py","file_ext":"py","file_size_in_byte":14073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40946687309","text":"from abc import ABC, abstractmethod\nimport os\nimport pathlib\nfrom typing import Union\n\nfrom pyschism.server import ServerConfig, SlurmConfig\n\n\nclass Makefile(ABC):\n\n def __init__(self, server_config: ServerConfig = None, hotstart=None):\n if server_config is None:\n server_config = ServerConfig()\n self.server_config = server_config\n self.hotstart = hotstart\n\n def write(self, path: Union[str, os.PathLike], overwrite: bool = False):\n path = pathlib.Path(path)\n if path.exists() and overwrite is not True:\n raise IOError(\n f\"File {str(path)} exists and overwrite is not True.\")\n with open(path, 'w') as f:\n f.write(str(self))\n\n @property\n @abstractmethod\n def run(self):\n \"\"\"Makefile run target.\"\"\"\n\n @property\n def tail(self):\n return \"\"\"\ntail:\n tail -f outputs/mirror.out outputs/fatal.error\n\"\"\"\n\n @property\n def symlinks(self):\n return r\"\"\"\nsymlinks:\n @set -e;\\\n if [ ! 
-z $${SYMLINK_OUTPUTS_DIR} ];\\\n then \\\n ln -sf $${SYMLINK_OUTPUTS_DIR} $${ROOT_DIR}outputs;\\\n else \\\n mkdir -p $${ROOT_DIR}outputs;\\\n fi;\\\n touch outputs/mirror.out outputs/fatal.error\n\"\"\"\n\n\nclass DefaultMakefile(Makefile):\n\n def __str__(self):\n f = [\n \"# Makefile driver generated by PySCHISM.\",\n r\"MAKEFILE_PATH:=$(abspath $(lastword $(MAKEFILE_LIST)))\",\n r\"ROOT_DIR:=$(dir $(MAKEFILE_PATH))\",\n str(self.server_config),\n self.default,\n self.symlinks,\n self.run,\n self.tail,\n ]\n return \"\\n\".join([line.replace(\" \", \"\\t\") for line in f])\n\n @property\n def default(self):\n return r\"\"\"\ndefault: symlinks\n\"\"\"\n\n @property\n def run(self):\n f = [\n '',\n 'run:',\n ' @set -e;\\\\',\n ]\n\n if self.hotstart is not None:\n f.extend([\n f' pushd {self.hotstart.path.parent};\\\\',\n f' {self.hotstart.binary} -i {self.hotstart.iteration};\\\\',\n ' popd;\\\\',\n f' mv {self.hotstart.path} ./hotstart.nc;\\\\',\n ])\n\n return '\\n'.join([line.replace(\" \", \"\\t\") for line in f]) + r\"\"\"\n rm -rf outputs/mirror.out outputs/fatal.error;\\\n touch outputs/mirror.out outputs/fatal.error;\\\n eval 'tail -f outputs/mirror.out outputs/fatal.error &';\\\n tail_pid=$${!};\\\n ${MPI_LAUNCHER} ${NPROC} ${SCHISM_BINARY};\\\n err_code=$${?};\\\n kill \"$${tail_pid}\";\\\n exit $${err_code}\n\"\"\"\n\n\nclass SlurmMakefile(Makefile):\n\n def __str__(self):\n f = [\n \"# Makefile driver generated by PySCHISM.\",\n r\"MAKEFILE_PATH:=$(abspath $(lastword $(MAKEFILE_LIST)))\",\n r\"ROOT_DIR:=$(dir $(MAKEFILE_PATH))\",\n str(self.server_config),\n self.default,\n self.symlinks,\n self.slurm,\n self.run,\n self.tail,\n ]\n return \"\\n\".join([line.replace(\" \", \"\\t\") for line in f])\n\n @property\n def default(self):\n return \"\"\"\ndefault: slurm\n\"\"\"\n\n @property\n def slurm(self):\n return r\"\"\"\nslurm: symlinks\n @set -e;\\\n printf \"#!/bin/bash --login\\n\" > ${SLURM_JOB_FILE};\\\n printf \"#SBATCH -D .\\n\" >> ${SLURM_JOB_FILE};\\\n if [ ! -z \"${SLURM_ACCOUNT}\" ];\\\n then \\\n printf \"#SBATCH -A ${SLURM_ACCOUNT}\\n\" >> ${SLURM_JOB_FILE};\\\n fi;\\\n if [ ! -z \"${SLURM_MAIL_USER}\" ];\\\n then \\\n printf \"#SBATCH --mail-user=${SLURM_MAIL_USER}\\n\" >> ${SLURM_JOB_FILE};\\\n printf \"#SBATCH --mail-type=${SLURM_MAIL_TYPE:-all}\\n\" >> ${SLURM_JOB_FILE};\\\n fi;\\\n printf \"#SBATCH --output=${SLURM_LOG_FILE}\\n\" >> ${SLURM_JOB_FILE};\\\n printf \"#SBATCH -n ${SLURM_NTASKS}\\n\" >> ${SLURM_JOB_FILE};\\\n if [ ! -z \"${SLURM_WALLTIME}\" ];\\\n then \\\n printf \"#SBATCH --time=${SLURM_WALLTIME}\\n\" >> ${SLURM_JOB_FILE};\\\n fi;\\\n if [ ! 
-z \"${SLURM_PARTITION}\" ] ;\\\n then \\\n printf \"#SBATCH --partition=${SLURM_PARTITION}\\n\" >> ${SLURM_JOB_FILE};\\\n fi;\\\n printf \"\\nset -e\\n\" >> ${SLURM_JOB_FILE};\\\n printf \"${MPI_LAUNCHER} ${SCHISM_BINARY}\" >> ${SLURM_JOB_FILE}\n\"\"\"\n\n @property\n def run(self):\n f1 = [\n '',\n 'run:',\n ' @set -e;\\\\',\n ]\n\n if self.hotstart is not None:\n f1.extend([\n f' pushd {self.hotstart.path.parent};\\\\',\n f' {self.hotstart.binary} -i {self.hotstart.iteration};\\\\',\n ' popd;\\\\',\n f' mv {self.hotstart.path} ./hotstart.nc;\\\\',\n ])\n\n return '\\n'.join([line.replace(\" \", \"\\t\") for line in f1]) + r\"\"\"\n touch ${SLURM_LOG_FILE};\\\n eval 'tail -f ${SLURM_LOG_FILE} outputs/mirror.out outputs/fatal.error &';\\\n tail_pid=$${!};\\\n job_id=$$(sbatch ${SLURM_JOB_FILE});\\\n printf \"$${job_id}\\n\";\\\n job_id=$$(echo $${job_id} | awk '{print $$NF}');\\\n ctrl_c() { \\\n scancel \"$${job_id}\";\\\n };\\\n while [ $$(squeue -j $${job_id} | wc -l) -eq 2 ];\\\n do \\\n trap ctrl_c SIGINT;\\\n done;\\\n kill \"$${tail_pid}\"\n\"\"\"\n\n\nclass MakefileDriver:\n\n def __new__(cls, server_config: ServerConfig = None, hotstart=None):\n if isinstance(server_config, SlurmConfig):\n return SlurmMakefile(server_config, hotstart=hotstart)\n return DefaultMakefile(server_config, hotstart=hotstart)\n","repo_name":"TrellixVulnTeam/pyschism-sciclone-tests_5BBQ","sub_path":"pyschism/pyschism/makefile.py","file_name":"makefile.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17633780788","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .models import List, Item\n\ndef entryView(request):\n\n context={\n\n }\n return render(request, 'Organiapp/entry.html', context)\n\n\ndef indexView(request):\n if request.method == 'POST':\n typped = request.POST.get('add_task')\n delete = request.POST.get('delete_task')\n\n if typped != '':\n new_entry = List(\n name=typped,\n )\n new_entry.save()\n \n if delete != '' and typped == '':\n task = List.objects.filter(id=delete)\n task.delete()\n\n lists = List.objects.all()\n context = {\n 'lists': lists,\n }\n return render(request, 'Organiapp/index.html', context)\n\n\n\ndef detailView(request, list_id):\n if request.method == 'POST':\n typped = request.POST.get('add_task')\n valid = request.POST.get('valid_task')\n delete = request.POST.get('delete_task')\n\n if typped != '':\n new_entry = Item(\n name=typped,\n list_host_id=list_id,\n )\n new_entry.save()\n \n # valid and delete are the same: it delete the item in database.\n # in next version, a new database takes all the valid entry as archive\n\n if valid != '' and typped == '':\n task = Item.objects.filter(id=valid)\n task.delete()\n\n if delete != '' and typped == '':\n task = Item.objects.filter(id=delete)\n task.delete()\n\n print(request.POST)\n items = Item.objects.filter(list_host_id=list_id)\n listCurrent = List.objects.filter(id=list_id).first()\n context = {\n 'listCurrent': listCurrent,\n 'items': items,\n }\n return render(request, 'Organiapp/detail.html', context)\n\n","repo_name":"romainledru/OrganiApp-deploy","sub_path":"Organi/Organiapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9640421404","text":"import markovify\n\n# Load file\ntext = open(\"../text/new_keisuke_honda.txt\", \"r\").read()\n\n# Build 
model\ntext_model = markovify.NewlineText(text, state_size=3, well_formed=False)\n\n# Make Dictionary as Json_format\nwith open('../text/learned_data.json', 'w') as f:\n f.write(text_model.to_json())","repo_name":"IchiroKobayashi/ichi-flask-challenge","sub_path":"api/src/service/make_dictionary.py","file_name":"make_dictionary.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38588732660","text":"#!/usr/bin/env python\n\n#\n# Author: Ezequiel Alfie \n#\n# demonstrating simultaneous use of 3D textures and surfaces\n#\n#\n#\n# needs CUDA 4.x and pycuda with v4 launch interface\n# (later than commit dd12c742c6ea35cd06ce25fd17abf21c01cd6ff7 Apr 21, 2012)\n#\n\nfrom __future__ import division\nimport numpy as np\nimport pycuda.driver as drv\nfrom pycuda.compiler import SourceModule\n\nimport pycuda.autoinit\nimport numpy.testing\n\n\n\ndef array_format_to_dtype(af):\n if af == drv.array_format.UNSIGNED_INT8:\n return np.uint8\n elif af == drv.array_format.UNSIGNED_INT16:\n return np.uint16\n elif af == drv.array_format.UNSIGNED_INT32:\n return np.uint32\n elif af == drv.array_format.SIGNED_INT8:\n return np.int8\n elif af == drv.array_format.SIGNED_INT16:\n return np.int16\n elif af == drv.array_format.SIGNED_INT32:\n return np.int32\n elif af == drv.array_format.FLOAT:\n return np.float32\n else:\n raise TypeError(\n \"cannot convert array_format '%s' to a numpy dtype\"\n % array_format)\n\n#\n# numpy3d_to_array\n# this function was\n# taken from pycuda mailing list (striped for C ordering only)\n#\ndef numpy3d_to_array(np_array, allow_surface_bind=True):\n\n import pycuda.autoinit\n\n d, h, w = np_array.shape\n\n descr = drv.ArrayDescriptor3D()\n descr.width = w\n descr.height = h\n descr.depth = d\n descr.format = drv.dtype_to_array_format(np_array.dtype)\n descr.num_channels = 1\n descr.flags = 0\n\n if allow_surface_bind:\n descr.flags = drv.array3d_flags.SURFACE_LDST\n\n device_array = drv.Array(descr)\n\n copy = drv.Memcpy3D()\n copy.set_src_host(np_array)\n copy.set_dst_array(device_array)\n copy.width_in_bytes = copy.src_pitch = np_array.strides[1]\n copy.src_height = copy.height = h\n copy.depth = d\n\n copy()\n\n return device_array\n\n\ndef array_to_numpy3d(cuda_array):\n\n import pycuda.autoinit\n\n descriptor = cuda_array.get_descriptor_3d()\n\n w = descriptor.width\n h = descriptor.height\n d = descriptor.depth\n\n shape = d, h, w\n\n dtype = array_format_to_dtype(descriptor.format)\n\n numpy_array=np.zeros(shape, dtype)\n\n copy = drv.Memcpy3D()\n copy.set_src_array(cuda_array)\n copy.set_dst_host(numpy_array)\n\n itemsize = numpy_array.dtype.itemsize\n\n copy.width_in_bytes = copy.dst_pitch = w*itemsize\n copy.dst_height = copy.height = h\n copy.depth = d\n\n copy()\n\n return numpy_array\n\n\nsrc_module=r'''\n#include \n#include \n#include \n\ntexture tex_in;\nsurface surf_out;\n\n__global__ void test_3d_surf(int32_t Nz, int32_t Ny, int32_t Nx)\n{\n\n int x = blockDim.x * blockIdx.x + threadIdx.x;\n int y = blockDim.y * blockIdx.y + threadIdx.y;\n int z = blockDim.z * blockIdx.z + threadIdx.z;\n\n if (x < Nx && y < Ny && z < Nz) {\n float value = tex3D(tex_in, (float) x, (float) y, float (z));\n\n surf3Dwrite((float) value, surf_out, sizeof(float) * x, y, z, cudaBoundaryModeZero);\n }\n\n}\n'''\n\nmod=SourceModule(src_module, cache_dir=False, keep=False)\n\nkernel=mod.get_function(\"test_3d_surf\")\narg_types = (np.int32, np.int32, 
np.int32)\n\ntex_in=mod.get_texref('tex_in')\nsurf_out=mod.get_surfref('surf_out')\n\n# random shape\nshape_x = np.random.randint(1,255)\nshape_y = np.random.randint(1,255)\nshape_z = np.random.randint(1,255)\n\ndtype=np.float32 # should match src_module's datatype\n\nnumpy_array_in=np.random.randn(shape_z, shape_y, shape_x).astype(dtype).copy()\ncuda_array_in = numpy3d_to_array(numpy_array_in)\ntex_in.set_array(cuda_array_in)\n\nzeros=np.zeros_like(numpy_array_in)\ncuda_array_out = numpy3d_to_array(zeros,allow_surface_bind=True)\nsurf_out.set_array(cuda_array_out)\n\n\nblock_size_z, block_size_y, block_size_x = 8,8,8 #hardcoded, tune to your needs\ngridz = shape_z // block_size_z + 1 * (shape_z % block_size_z != 0)\ngridy = shape_y // block_size_y + 1 * (shape_y % block_size_y != 0)\ngridx = shape_x // block_size_x + 1 * (shape_x % block_size_x != 0)\ngrid = (gridx, gridy, gridz)\nblock = (block_size_x, block_size_y, block_size_z)\n\nkernel.prepare(arg_types,texrefs=[tex_in])\nkernel.prepared_call(grid, block, shape_z, shape_y, shape_x)\n\nnumpy_array_out = array_to_numpy3d(cuda_array_out)\nnumpy.testing.assert_array_almost_equal(numpy_array_out, numpy_array_in)\n","repo_name":"CIERA-Northwestern/rapidpe_gpu","sub_path":"test_pycuda/demo_3dsurf.py","file_name":"demo_3dsurf.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7109513557","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.fx\nfrom torch.fx import symbolic_trace\n\n\n\nclass Net(nn.Module):\n    def __init__(self) -> None:\n        super().__init__()\n        self.param = torch.nn.Parameter(torch.rand(3, 4))\n        self.linear = torch.nn.Linear(4, 8)\n\n    def forward(self, x):\n        return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\nnet = Net()\n# print(net)\n\nnet_traced = symbolic_trace(net)\nprint(net_traced.graph)\nprint(net_traced.code)\nprint(net_traced.graph.print_tabular())\n\n\ndef quant_and_eval():\n    import copy\n    import time \n    import torchvision\n    from torch.quantization import get_default_qconfig\n    from torch.quantization.quantize_fx import prepare_fx, convert_fx\n\n    m = torchvision.models.resnet18()\n    m.eval()\n\n    qconfig = get_default_qconfig(backend='fbgemm')\n    qconfig_dict = {\n        \"\": qconfig,\n    }\n\n    _m = copy.deepcopy(m)\n    m_prepared = prepare_fx(_m, qconfig_dict=qconfig_dict)\n    m_int8 = convert_fx(m_prepared)\n\n    m_int8.load_state_dict(m_int8.state_dict())\n    m_int8.eval()\n\n    for k in m_int8.state_dict():\n        print(k)\n\n    x = torch.rand(1, 3, 224, 224)\n\n    t0 = time.time()\n\n    for _ in range(10):\n        out1 = m(x)\n\n    t1 = time.time()\n    print(t1 - t0)\n\n    for _ in range(10):\n        out2 = m_int8(x)\n\n    t2 = time.time()\n    print(t2 - t1)\n\n\n    print(torch.argmax(out1))\n    print(torch.argmax(out2))\n\n\n    # test_loader: DataLoader\n    # evaluate_model(model, test_loader)\n    # evaluate_model(model_int8, test_loader)\n\n    # calib\n    # def calib_quant_model(model, dataloader):\n    #     model.eval()\n    #     with torch.inference_mode():\n    #         for inputs, labels in dataloader:\n    #             _ = model(inputs)\n    #     print('calib done')\n    \n    # calib_quant_model(test_loader)\n\n\n    # torch.onnx.export(m_int8, x, 'm_int8.onnx')\n\n\nif __name__ == '__main__':\n\n\n    x = torch.rand(1, 3, 224, 224)\n    
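# run the FX graph-mode post-training quantization demo defined above (rough CPU timing, no real calibration)\n    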
quant_and_eval()\n","repo_name":"lyuwenyu/AI","sub_path":"pp/pytorch/fx.py","file_name":"fx.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"17503369414","text":"import numpy as np\n\n\ndef calculate(lis):\n if(len(lis) < 9):\n raise ValueError(\"List must contain nine numbers.\")\n normal = np.array(lis)\n dim3 = normal.copy()\n dim3 = dim3.reshape(3, 3)\n # mean\n normal_mean = np.mean(normal)\n axis1_mean = np.mean(dim3, axis=0)\n axis2_mean = np.mean(dim3, axis=1)\n meanfinal = (\n [axis1_mean.tolist(), axis2_mean.tolist(), eval(str(normal_mean))])\n # mean end\n\n # variance start\n normal_variance = np.var(normal)\n axis1_variance = np.var(dim3, axis=0)\n axis2_variance = np.var(dim3, axis=1)\n varfinal = ([axis1_variance.tolist(), axis2_variance.tolist(),\n eval(str(normal_variance))])\n # variance end\n\n # standard deviation start\n nor_stan = np.std(normal)\n a1_std = np.std(dim3, axis=0)\n a2_std = np.std(dim3, axis=1)\n stanfinal = ([a1_std.tolist(), a2_std.tolist(), eval(str(nor_stan))])\n # standard deviation end\n\n # max start\n maxn = np.max(normal)\n a1max = np.max(dim3, axis=0)\n a2max = np.max(dim3, axis=1)\n maxfinal = ([a1max.tolist(), a2max.tolist(), eval(str(maxn))])\n # max end\n\n # min start\n minn = np.min(normal)\n a1min = np.min(dim3, axis=0)\n a2min = np.min(dim3, axis=1)\n minfinal = ([a1min.tolist(), a2min.tolist(), eval(str(minn))])\n # min end\n\n # sum start\n sumn = np.sum(normal)\n a1sum = np.sum(dim3, axis=0)\n a2sum = np.sum(dim3, axis=1)\n sumfinal = ([a1sum.tolist(), a2sum.tolist(), eval(str(sumn))])\n\n # sum end\n return {'mean': meanfinal, 'variance': varfinal, 'standard deviation': stanfinal, 'max': maxfinal, 'min': minfinal, 'sum': sumfinal}\n\n\nprint(calculate([0, 1, 2, 3, 4, 5, 6, 7, 8]))\n","repo_name":"Krishnasai3cks/50-Projects-in-Node.js-Python-HTML-CSS-JS","sub_path":"Python_Projects/FCC_Data_Analysis/Mean variance std/Mean_Variance_STD.py","file_name":"Mean_Variance_STD.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11105220635","text":"#from threading import Lock\nfrom flask import session, request\nfrom flask_socketio import Namespace, emit, join_room, leave_room, \\\n close_room, rooms, disconnect\n\nthread = None\nthread_lock = None #Lock()\n\ndef background_thread():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n count = 0\n while True:\n socketio.sleep(10)\n count += 1\n socketio.emit('my_response',\n {'data': 'Server generated event', 'count': count},\n namespace='/test')\n\nclass AuxNamespace(Namespace):\n def on_my_event(self, message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']})\n\n def on_my_broadcast_event(self, message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']},\n broadcast=True)\n\n def on_join(self, message):\n join_room(message['room'])\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': 'In rooms: ' + ', '.join(rooms()),\n 'count': session['receive_count']})\n\n def on_leave(self, message):\n leave_room(message['room'])\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': 'In 
rooms: ' + ', '.join(rooms()),\n 'count': session['receive_count']})\n\n def on_close_room(self, message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response', {'data': 'Room ' + message['room'] + ' is closing.',\n 'count': session['receive_count']},\n room=message['room'])\n close_room(message['room'])\n\n def on_my_room_event(self, message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']},\n room=message['room'])\n\n def on_disconnect_request(self):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': 'Disconnected!', 'count': session['receive_count']})\n disconnect()\n\n def on_my_ping(self):\n emit('my_pong')\n\n def on_connect(self):\n #global thread, socketio, thread_lock\n #with thread_lock:\n # if thread is None:\n # thread = socketio.start_background_task(\n # target=background_thread)\n emit('my_response', {'data': 'Connected', 'count': 0})\n\n def on_disconnect(self):\n print('Client disconnected', request.sid)\n","repo_name":"GenesisKernel/blockexplorer","sub_path":"genesis_block_explorer/views/genesis/aux/socketio.py","file_name":"socketio.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"11432338177","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\n\n\nclass LinearRegression(object):\n\n def __init__(self, features=np.array([]), target=np.array([]), feat_names = None):\n \"\"\"Constructor\n\n :param features: A matrix of features\n :param target: An array\n \"\"\"\n\n # the number of features\n self.num_features = len(features[0])\n # the names of the features\n self.feat_names = feat_names if feat_names is not None else ['Feature {}'.format(i+1) for i in range(self.num_features)]\n # the number of examples\n self.num_examples = len(features)\n # an array of weights\n self.parameters = np.array([0 for i in range(self.num_features + 1)])\n # add a first column of 1's to the feature matrix\n m = np.zeros((self.num_examples, self.num_features+1))+1\n m[:, 1:] = features\n # feature matrix\n self.features = m\n # target array\n self.target = target\n\n def h_function(self):\n \"\"\" Hypothesis function\n\n :return: dot product of the parameters array and the feature matrix\n \"\"\"\n return np.dot(self.parameters, np.transpose(self.features))\n\n def cost(self):\n \"\"\"\n :return: Value of the cost function\n \"\"\"\n return np.sum((self.h_function()-target)**2)\n\n def cost_der(self, parameter_index):\n \"\"\" Returns the derivative of the cost function wrt the parameter with index 'parameter_index'\n\n :param parameter_index: the index of the parameter\n :return: derivative with respect to the specified parameter of the cost function\n \"\"\"\n e = (self.h_function()-self.target)*self.features[:, parameter_index]\n return (np.sum(e))/self.num_examples\n\n def update_parameters(self, learning_rate=1):\n \"\"\" Performs a simultaneous update of parameters.\n\n :param learning_rate: learning rate\n \"\"\"\n new_parameter_list = []\n for i in range(len(self.parameters)):\n new_parameter_list.append(self.parameters[i] -\n learning_rate*self.cost_der(i))\n self.parameters = np.array(new_parameter_list)\n\n def plot_features_h(self, title='', ran=None):\n \"\"\" Shows plots of features and of the hypothesis function wrt that feature\n\n :param title: title of 
the plot\n        :param ran: the range of features that are plotted. By default, it plots all the features\n        \"\"\"\n        ran = ran if ran is not None else len(self.features[0])\n        if ran > len(self.features[0]):\n            ran = len(self.features[0])\n        for i in range(1, ran):\n            plt.plot(self.features[:, i], self.target, '.')\n            theta0 = self.parameters[0]\n            thetai = self.parameters[i]\n            xs = np.linspace(min(self.features[:, i]), max(self.features[:, i]), 100)\n            ys = [theta0 + thetai * x for x in xs] # this is the h function\n            plt.plot(xs, ys, color='r')\n            plt.xlabel('{}'.format(self.feat_names[i-1]))\n            plt.ylabel('MEDV')\n            plt.title(title)\n            plt.show()\n\n    def scaling(self):\n        \"\"\" Scales all the features so that they all are in a (-1, 1) range\n        \"\"\"\n        m = np.zeros((self.num_examples, self.num_features + 1)) + 1\n        for i in range(1, len(self.features[0])):\n            average = sum(self.features[:, i])/self.num_examples\n            maxx = max(self.features[:, i])\n            m[:, i] = (self.features[:, i]-average)/maxx\n        self.features = m\n\n    def r2(self):\n        \"\"\"Calculates the coefficient of determination\n        \"\"\"\n        enum = (self.target-self.h_function())**2\n        denom = (self.target-np.mean(self.target))**2\n        return 1 - np.sum(enum)/np.sum(denom)\n\n    def gradient_descent(self, learning_rate, freq_plots=1000, ran=None):\n        \"\"\" Performs the gradient descent algorithm.\n        Every 'freq_plots' iterations and at convergence, plots the features and the hypothesis function.\n        After convergence, shows the plot of the value of the cost function at each iteration\n        and of the actual prices vs the predicted prices,\n        and prints the coefficient of determination.\n\n        :param learning_rate: learning rate\n        :param freq_plots: how often the features and the hypothesis function are plotted.\n        By default, every 1000 iterations\n        :param ran: how many of the features are plotted every 'freq_plots' iterations.\n        By default: all of them\n        ran = 1 : none of them\n        e.g. 
ran = 5 : shows the plots of the first 4 features\n \"\"\"\n self.scaling()\n repetitions = 0\n cost_list = []\n while True:\n cost_before = self.cost()\n cost_list.append(self.cost())\n self.update_parameters(learning_rate)\n if repetitions % freq_plots == 0:\n self.plot_features_h('Iteration {}'.format(repetitions),ran)\n repetitions += 1\n if np.abs(cost_before - self.cost()) < 0.0001: # tests convergence\n self.plot_features_h('Convergence at iteration {}'.format(repetitions), ran)\n break\n cost_array = np.array(cost_list)\n plt.plot(range(repetitions), cost_array)\n plt.xlabel('REPETITIONS')\n plt.ylabel('COST')\n plt.show()\n hs = self.h_function()\n ys = self.target\n plt.plot(hs, ys, '.')\n plt.xlabel('PREDICTED PRICES')\n plt.ylabel('ACTUAL PRICES')\n plt.show()\n print(self.r2())\n\n\nif __name__ == \"__main__\":\n\n boston = datasets.load_boston()\n features = boston.data\n target = boston.target\n\n CRIM = np.vstack(features[:, 0])\n ZN = np.vstack(features[:, 1])\n INDUS = np.vstack(features[:, 2])\n CHAS = np.vstack(features[:, 3])\n NOX = np.vstack(features[:, 4])\n RM = np.vstack(features[:, 5])\n AGE = np.vstack(features[:, 6])\n DIS = np.vstack(features[:, 7])\n RAD = np.vstack(features[:, 8])\n TAX = np.vstack(features[:, 9])\n PTRATIO = np.vstack(features[:, 10])\n B = np.vstack(features[:, 11])\n LSTAT = np.vstack(features[:, 12])\n\n FEATURES = [CRIM, ZN, INDUS, CHAS, NOX, RM, AGE, DIS, RAD, TAX, PTRATIO, B, LSTAT]\n FEATURE_NAMES = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']\n INTER_NAMES = ['AGE+NOX', 'RAD+TAX', 'INDUS+NOX', 'RM+LSTAT', 'AGE+DIS', 'LSTAT+AGE', 'TAX+NOX', 'NOX+LSTAT',\n 'INDUS+LSTAT', 'INDUS+DIS', 'INDUS+AGE', 'ZN+DIS', 'TAX+INDUS']\n\n def add_interaction(features, f1, f2):\n \"\"\"\n :param features: a matrix of features\n :param f1: a feature array\n :param f2: a feature array\n :return: product of (scaled) f1 and f2 appended to features\n \"\"\"\n f1scaled = (f1 - np.mean(f1))/max(f1)\n f2scaled = (f2 - np.mean(f2)) / max(f2)\n p = f1scaled*f2scaled\n return np.append(features, p, 1)\n\n # log of target\n logtarget = np.log(target)\n # log of DIS\n logdis = np.log(DIS)\n # features + log of DIS\n featureswlogdis = np.append(features, logdis, 1)\n # the following adds to 'features + logdis' a number of columns corresponding to interactions between features\n f = featureswlogdis\n f1 = add_interaction(f, AGE, NOX)\n f2 = add_interaction(f1, RAD, TAX)\n f3 = add_interaction(f2, INDUS, NOX)\n f4 = add_interaction(f3, RM, LSTAT)\n f5 = add_interaction(f4, AGE, DIS)\n f6 = add_interaction(f5, LSTAT, AGE)\n f7 = add_interaction(f6, TAX, NOX)\n f8 = add_interaction(f7, NOX, LSTAT)\n f9 = add_interaction(f8, INDUS, LSTAT)\n f10 = add_interaction(f9, INDUS, DIS)\n f11 = add_interaction(f10, INDUS, AGE)\n f12 = add_interaction(f11, ZN, DIS)\n f13 = add_interaction(f12, TAX, INDUS)\n\n # candidates for baseline\n MEDVvsRM = LinearRegression(RM, target, ['RM']) # r2 = 0.4835253373442252\n MEDVvsLSTAT = LinearRegression(LSTAT, target, ['LSTAT']) # r2 = 0.5441462664295631\n MEDVvsALL = LinearRegression(features, target, FEATURE_NAMES) # r2 = 0.7406074029262693\n\n # improvement #1\n LOGDIS = LinearRegression(featureswlogdis, logtarget, FEATURE_NAMES+['LOGDIS']) # r2 = 0.8000014145388733\n\n # improvement #2\n INTER = LinearRegression(f13, logtarget, FEATURE_NAMES+['LOGDIS']+INTER_NAMES) # r2 = 0.8441889821747711\n\n # Run gradient descent for the implementations and prints the coefficient of determination\n 
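# (note: the reported r2 is computed on the training data itself; no train/test split is used)\n    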
# also shows that theta_0 = mean(target) if you uncomment the relevant line\n # change the value of ran depending on how many plots you want to see.\n # e.g. ran= 1 --> no plot ; ran=None --> all plots ; ran=4 --> first 3 features\n print('\\nCoefficient of determination for MEDVvsRM:')\n MEDVvsRM.gradient_descent(1.5, 600, ran=1)\n #print(MEDVvsRM.parameters[0], '=', np.mean(MEDVvsRM.target))\n\n print('\\nCoefficient of determination for MEDVvsLSTAT:')\n MEDVvsLSTAT.gradient_descent(1, 200, ran=1)\n #print(MEDVvsLSTAT.parameters[0], '=', np.mean(MEDVvsLSTAT.target))\n\n print('\\nCoefficient of determination for MEDVvsALL:')\n MEDVvsALL.gradient_descent(1, 900, ran=1)\n #print(MEDVvsALL.parameters[0], '=', np.mean(MEDVvsALL.target))\n\n print('\\nCoefficient of determination for LOGDIS:')\n LOGDIS.gradient_descent(1, 3000, ran=1)\n #print(LOGDIS.parameters[0], '=', np.mean(LOGDIS.target))\n\n print('\\nCoefficient of determination for INTER:')\n INTER.gradient_descent(1, 10000, ran=1) # takes around 20000 iterations\n #print(INTER.parameters[0], '=', np.mean(INTER.target))\n\n","repo_name":"GianCarloMilanese/ProgrammingProject","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":9800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32148140983","text":"# notebook 2 for MAE6226 \n# started 6 feb 2014\n# Ian Carr\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import *\n\n# defining space\nN = 200 #num points in each direction\nxStart,xEnd = -4.0,4.0 #setting up boundaries\nyStart,yEnd = -2.0,2.0\nx = np.linspace(xStart,xEnd,N) #making the arrays\ny = np.linspace(yStart,yEnd,N) \nX,Y = np.meshgrid(x,y) #combining the arrays into a mesh\n\n# imposing parameters\nUinf = 1.0 #defining freestream velocity\nalphaInDegrees = 0 #defining angle of attack\nalpha = alphaInDegrees*pi/180\n\n# computing the velocity on grid\nuFreestream = Uinf*cos(alpha)*np.ones((N,N),dtype=float)\nvFreestream = Uinf*sin(alpha)*np.ones((N,N),dtype=float)\n\n# computing the stream fn from velocity components\npsiFreestream = + Uinf*cos(alpha)*Y - Uinf*sin(alpha)*X\n\n# defining function for velocity field of source and sink\ndef getVelocity(strength,xs,ys,X,Y): #passing strength, positions, mesh\n u = (strength/(2*pi))*((X-xs)/((X-xs)**2+(Y-ys)**2))\n v = (strength/(2*pi))*((Y-ys)/((X-xs)**2+(Y-ys)**2))\n return u,v\n\n# defining fuction to compute stream fn of source and sink\ndef getStreamFunction(strength,xs,ys,X,Y):\n psi = (strength/(2*pi))*np.arctan2((Y-ys),(X-xs))\n return psi\n \nstrengthSource = 5.0 #strength of source\nxSource,ySource = -1.0,0.0\n\n# computing the velocity components\nuSource,vSource = getVelocity(strengthSource,xSource,ySource,X,Y)\n\n# computing the stream fn\npsiSource = getStreamFunction(strengthSource,xSource,ySource,X,Y)\n\n# superimposing the source and freestream\nu = uFreestream + uSource\nv = vFreestream + vSource\npsi = psiFreestream + psiSource\n\n# plotting\nsize = 10\nplt.figure(figsize=(size,(yEnd-yStart)/(xEnd-xStart)*size))\nplt.grid(True)\nplt.xlabel('x',fontsize=16)\nplt.ylabel('y',fontsize=16)\nplt.xlim(xStart,xEnd)\nplt.ylim(yStart,yEnd)\nplt.streamplot(X,Y,u,v,density=2.0,linewidth=1,arrowsize=1,arrowstyle='->')\nplt.scatter(xSource,ySource,c='b',s=80,marker='o')\n\n# computing stagnation point\nxStagnation = xSource - strengthSource/(2*pi*Uinf)*cos(alpha)\nyStagnation = ySource - strengthSource/(2*pi*Uinf)*sin(alpha)\n\n# adding a 
stagnation point\nplt.scatter(xStagnation,yStagnation,c='b',s=80,marker='o')\n\n# adding a line along the stagnation streamline\nif (alpha==0):\n plt.contour(X,Y,psi,\\\n levels=[-strengthSource/2,+strengthSource/2],\\\n colors='r',linewidths=2,linestyles='solid')\n\n# adding a sync to the flow\nstrengthSink = -5.0 \nxSink,ySink = 1.0,0.0\n\n# computing the velocity of the sink\nuSink,vSink = getVelocity(strengthSink,xSink,ySink,X,Y)\n\n# computing the stream function of the sink\npsiSink = getStreamFunction(strengthSink,xSink,ySink,X,Y)\n\n# superimposing the source sink and freestream\nu = uSource + uSink + uFreestream\nv = vSource + vSink + vFreestream\npsi = psiSource + psiSink + psiFreestream\n\n# plotting\nsize = 10\nplt.figure(figsize=(size,(yEnd-yStart)/(xEnd-xStart)*size))\nplt.grid(True)\nplt.xlabel('x',fontsize=16)\nplt.ylabel('y',fontsize=16)\nplt.xlim(xStart,xEnd)\nplt.ylim(yStart,yEnd)\nplt.streamplot(X,Y,u,v,density=2.0,linewidth=1,arrowsize=1,arrowstyle='->')\nplt.scatter(xSource,ySource,c='r',s=80,marker='o')\nplt.scatter(xSink,ySink,c='r',s=80,marker='o')\nif (alpha==0):\n plt.contour(X,Y,psi,levels=[0.0],colors='r',linewidths=2,linestyles='solid')\n\n# calculating coefficent of pressure\nCp = 1.0-(u**2+v**2)/Uinf**2\n\n# plotting\nsize=10\nplt.figure(figsize=(1.1*size,(yEnd-yStart)/(xEnd-xStart)*size))\nplt.xlabel('x',fontsize=16)\nplt.ylabel('y',fontsize=16)\nplt.xlim(xStart,xEnd)\nplt.ylim(yStart,yEnd)\ncontf = plt.contourf(X,Y,Cp,levels=np.linspace(-2.0,1.0,100),extend='both')\ncbar = plt.colorbar(contf)\ncbar.set_label('$C_p$',fontsize=16)\ncbar.set_ticks([-2.0,-1.0,0.0,1.0])\nplt.scatter([xSource,xSink],[ySource,ySink],c='r',s=90,marker='o')\nplt.contour(X,Y,psi,\\\n levels=[0.0],\\\n colors='r',linewidth=2,linestyles='solid')\n\nplt.show()","repo_name":"iancarr/AeroHydro","sub_path":"notebooks/source-sink-freestream.py","file_name":"source-sink-freestream.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11564519227","text":"\nfrom __future__ import division, print_function\n\nfrom re import sub as re_sub\n\nimport numpy as np\n\nfrom idepi.constants import GAPS\n\n\n__all__ = [\n 'BASE_ALPH',\n 'base_10_to_n',\n 'base_26_to_alph',\n 'get_noise',\n 'sanitize_seq',\n 'clamp'\n]\n\nBASE_ALPH = 26\n\n\ndef base_10_to_n(n, N):\n if n < 0:\n sign = -1\n elif n == 0:\n return [0]\n else:\n sign = 1\n n *= sign\n digits = []\n while n:\n digits.append(n % N)\n n //= N\n return digits\n\n\ndef base_26_to_alph(cols):\n for i, v in enumerate(cols):\n if v <= 0 and (i + 1) < len(cols):\n cols[i + 1] -= 1\n cols[i] += 26\n if cols[-1] == 0:\n cols.pop()\n alph = ''\n for v in reversed(cols):\n alph += chr(ord('a') + v - 1)\n return alph\n\n\n# def alph_to_base_26(str):\n# cols = {}\n# col_idx = 0\n# for i in range(len(str)-1, -1, -1):\n# new_val = ord(str[i]) - ord('a') + 1\n# cols[col_idx] = new_val\n# col_idx += 1\n# for i in range(col_idx):\n# if cols[i] > 25:\n# cols[i] %= 26\n# if (i+1) not in cols:\n# cols[i+1] = 0\n# cols[i+1] += 1\n# return cols\n\n\n# def base_n_to_10(cols, N):\n# num = 0\n# for k, v in cols.items():\n# num += pow(N, k) * v\n# return num\n\n\ndef get_noise(seqrecord, label='IC50'):\n from .util import seqrecord_get_values\n # just return the \"mean\" as noise\n return np.mean(seqrecord_get_values(seqrecord.description, label))\n\n\ndef sanitize_seq(seq, alphabet):\n alphdict = alphabet.todict()\n assert(len(GAPS) > 0 and len(seq) > 0 and 
len(alphdict) > 0)\n try:\n seq = str(seq)\n seq = seq.upper()\n seq = re_sub(r'[%s]' % GAPS, '-', seq)\n seq = re_sub(r'[^%s]' % ''.join(alphdict.keys()), 'X', seq)\n except TypeError:\n raise RuntimeError(\n 'something is amiss with things:\\n GAPS = %s\\n seq = %s\\n alphabet = %s\\n' % (\n GAPS, seq, alphdict)\n )\n return seq\n\n\ndef clamp(x):\n if x < 0.:\n return 0.\n if x > 1.:\n return 1.\n return x\n","repo_name":"nlhepler/idepi","sub_path":"idepi/_common.py","file_name":"_common.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5908740601","text":"from flask import Flask, request, flash, render_template, redirect, url_for\nfrom datetime import date, timedelta\nimport json\n\nfrom src.forms.RegisterMemberForm import RegisterMemberForm\nfrom src.forms.RegisterLibrarianForm import RegisterLibrarianForm\nfrom src.forms.AddBookForm import AddBookForm\nfrom src.forms.AddAuthorForm import AddAuthorForm\nfrom src.forms.AddBookGenreForm import AddBookGenreForm\nfrom src.forms.SearchBookForm import SearchBookForm \nfrom src.forms.LoanBookForm import LoanBookForm \nfrom src.forms.ImposePaymentForm import ImposePaymentForm\nfrom src.forms.ReturnBookForm import ReturnBookForm\nfrom src.forms.PayFeeForm import PayFeeForm\n\n\nfrom src.models.PersonModel import PersonModel\nfrom src.models.BookModel import BookModel \nfrom src.models.FunctionalityModel import FunctionalityModel\n\n\n\napp = Flask(__name__, template_folder=\"src/templates\")\napp.secret_key = \"parmegiano\"\napp.static_folder = \"src/static\"\n\nPerson = PersonModel()\nBook = BookModel()\nFunctionality = FunctionalityModel() \n\n@app.route(\"/\")\ndef index():\n return render_template(\"base.html\")\n\n#Member\n@app.route(\"/member/register\", methods=[\"GET\", \"POST\"])\ndef registerMember():\n form = RegisterMemberForm() \n if request.method == \"POST\": \n if form.validate():\n try:\n res = Person.registerMember(form.data)\n return redirect(url_for(\"showMember\", id=res[\"id\"]))\n\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n return render_template(\"RegisterMember.html\", form = form)\n\n@app.route(\"/member/\", methods=[\"GET\"])\ndef showMember(id):\n id = request.view_args[\"id\"]\n record = Person.getMemberById(id)\n return render_template(\"ShowDbEntry.html\", record = record)\n\n@app.route(\"/member/all\", methods=[\"GET\"])\ndef showAllMembers():\n records = Person.getAllMembers()\n return render_template(\"ShowDbEntries.html\", records=records)\n\n\n\n\n@app.route(\"/librarian/register\", methods=[\"GET\", \"POST\"])\ndef registerLibrarian():\n form = RegisterLibrarianForm() \n if request.method == \"POST\": \n if form.validate():\n try:\n res = Person.registerLibrarian(form.data)\n return redirect(url_for(\"showLibrarian\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n\n return render_template(\"RegisterLibrarian.html\", form = form)\n\n@app.route(\"/librarian/\", methods=[\"GET\"])\ndef showLibrarian(id):\n id = request.view_args[\"id\"]\n record = Person.getLibrarianById(id)\n return render_template(\"ShowDbEntry.html\", record = record )\n\n@app.route(\"/librarian/all\", methods=[\"GET\"])\ndef showAllLibrarians():\n records = Person.getAllLibrarians()\n return render_template(\"ShowDbEntries.html\", 
records=records)\n\n\n\n\n@app.route(\"/book\", methods=[\"GET\", \"POST\"])\ndef addBook():\n authors = [(author[\"id\"], author[\"first_name\"] \n + \" \" + author[\"last_name\"] \n + \" - \" + author[\"nationality\"]) for author in Book.getAllAuthors()]\n genres = [(genre[\"id\"], genre[\"name\"] ) for genre in Book.getAllGenres()]\n form = AddBookForm() \n form.authors.choices = authors\n form.genres.choices = genres\n if request.method == \"POST\": \n print(form.data)\n if form.validate():\n try:\n res = Book.addBook(form.data)\n return redirect(url_for(\"showBook\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(\"blad:\", e.with_traceback())\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n\n return render_template(\"AddBook.html\", form = form)\n\n@app.route(\"/book/\", methods=[\"GET\"])\ndef showBook(id):\n id = request.view_args[\"id\"]\n record = Book.getBookById(id)\n return render_template(\"ShowDbEntry.html\", record = record )\n\n@app.route(\"/book/all\", methods=[\"GET\"])\ndef showAllBooks():\n records = Book.getAllBooks()\n return render_template(\"ShowDbEntries.html\", records=records)\n\n@app.route(\"/book/search\", methods=[\"GET\", \"POST\"])\ndef searchBook():\n form = SearchBookForm() \n if request.method == \"POST\": \n if form.validate():\n try:\n books = Book.searchBook(form.data)\n return render_template(\"SearchBook.html\", form = form, books=books)\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n\n return render_template(\"SearchBook.html\", form = form)\n\n\n\n\n@app.route(\"/book/author\", methods=[\"GET\", \"POST\"])\ndef addAuthor():\n form = AddAuthorForm() \n if request.method == \"POST\": \n if form.validate():\n try:\n res = Book.addAuthor(form.data)\n return redirect(url_for(\"showAuthor\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n\n return render_template(\"AddAuthor.html\", form = form)\n\n@app.route(\"/book/author/\", methods=[\"GET\"])\ndef showAuthor(id):\n id = request.view_args[\"id\"]\n record = Book.getAuthorById(id)\n return render_template(\"ShowDbEntry.html\", record = record )\n\n@app.route(\"/book/author/all\", methods=[\"GET\"])\ndef showAllAuthors():\n records = Book.getAllAuthors()\n return render_template(\"ShowDbEntries.html\", records=records)\n\n\n\n\n\n@app.route(\"/book/genre\", methods=[\"GET\", \"POST\"])\ndef addBookGenre():\n form = AddBookGenreForm() \n if request.method == \"POST\": \n if form.validate():\n try:\n res = Book.addGenre(form.data)\n return redirect(url_for(\"showBookGenre\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n return render_template(\"AddBookGenre.html\", form = form)\n\n@app.route(\"/book/genre/\", methods=[\"GET\"])\ndef showBookGenre(id):\n id = request.view_args[\"id\"]\n record = Book.getGenreById(id)\n return render_template(\"ShowDbEntry.html\", record = record )\n\n@app.route(\"/book/genre/all\", methods=[\"GET\"])\ndef showAllGenres():\n records = Book.getAllGenres()\n return render_template(\"ShowDbEntries.html\", records=records)\n\n\n@app.route(\"/loan/\", methods=[\"GET\", \"POST\"])\ndef loanBook():\n form = LoanBookForm()\n if request.method == \"POST\": \n if form.validate():\n if Person.hasFeesToPay(form.data[\"id_member\"]):\n 
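# members with outstanding fees are blocked from taking out a new loan\n                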
flash(\"Masz opłaty do zapłacenia gagatku!\")\n elif Functionality.isBookLent(form.data[\"id_book\"]):\n flash(\"Książka jest już wypożyczona!\")\n else:\n try:\n loanData = {k: v for k,v in form.data.items()}\n loanData[\"loan_date\"] = date.today()\n loanData[\"due_date\"] = date.today() + timedelta(days=30)\n res = Functionality.loanBook(loanData)\n return redirect(url_for(\"showLoan\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n return render_template(\"LoanBook.html\", form = form)\n\n@app.route(\"/loan/\", methods=[\"GET\"])\ndef showLoan(id):\n id = request.view_args[\"id\"]\n record = Functionality.getLoanById(id)\n return render_template(\"ShowDbEntry.html\", record = record )\n\n@app.route(\"/loan/all\", methods=[\"GET\"])\ndef showAllLoans():\n records = Functionality.getAllLoans()\n return render_template(\"ShowDbEntries.html\", records=records)\n\n@app.route(\"/loan/return\", methods=[\"GET\", \"POST\"])\ndef returnBook():\n form = ReturnBookForm()\n if request.method == \"POST\": \n if form.validate():\n try:\n res = Functionality.returnBook(form.data)\n print(res)\n return redirect(url_for(\"showLoan\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n return render_template(\"ReturnBook.html\", form = form)\n\n\n\n\n@app.route(\"/payment/\", methods=[\"GET\", \"POST\"])\ndef imposePayment():\n form = ImposePaymentForm()\n if request.method == \"POST\": \n if form.validate():\n try:\n res = Functionality.imposePayment(form.data)\n print(res)\n return redirect(url_for(\"showPayment\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n return render_template(\"ImposePayment.html\", form = form)\n\n@app.route(\"/payment/\", methods=[\"GET\"])\ndef showPayment(id):\n id = request.view_args[\"id\"]\n record = Functionality.getPaymentById(id)\n return render_template(\"ShowDbEntry.html\", record = record )\n\n@app.route(\"/payment/all\", methods=[\"GET\"])\ndef showAllPayments():\n records = Functionality.getAllPayments()\n print(records)\n return render_template(\"ShowDbEntries.html\", records=records)\n\n@app.route(\"/payment/pay\", methods=[\"GET\", \"POST\"])\ndef payFee():\n form = PayFeeForm()\n if request.method == \"POST\": \n if form.validate():\n try:\n res = Functionality.payFee(form.data)\n return redirect(url_for(\"showPayment\", id=res[\"id\"]))\n except BaseException as e:\n flash(\"Wystąpił błąd bazy danych!\")\n print(e)\n else:\n flash(\"Wprowadzono nieprawidłowe dane!\")\n return render_template(\"PayFee.html\", form = form)\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"erzar0/projekt-bazy-danych","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33523339005","text":"# name = []\n# score = []\n\n# ## Storing multiple input values into a lists, name and score\n# for _ in range(int(input())):\n# name.append(input())\n# score.append(float(input()))\n# # Connverting Lists name and score into a dictionary\n# name_score = dict(zip(name,score))\n\n# def find_lowest_score(my_dict):\n# \"\"\"This function will take a dictionary as in input and Looping throgh all the items in the Dictionary and \n# loop though 
all the items to find keys which have highest value\"\"\"\n# for key,value in my_dict.items():\n# if value == min(my_dict.values()):\n# return key\n\n# # finding highest score \n# lowest_score = find_lowest_score(name_score)\n# print(lowest_score)\n\n# # finding the second highest by deleting max score key pair from the dicttionary and creating a fresh dictionary for calculations\n# del name_score[lowest_score]\n\n# second_lowest_score = find_lowest_score(name_score)\n# print(second_lowest_score)\n\n\nmarksheet = []\nfor _ in range(0,int(input())):\n marksheet.append([input(), float(input())])\n\nsecond_highest = sorted(list(set([marks for name, marks in marksheet])))[1]\nprint('\\n'.join([a for a,b in sorted(marksheet) if b == second_highest]))","repo_name":"rgadge/my-vscode-git","sub_path":"HackerRank-Python/11-Lowest-Second-Lowest.py","file_name":"11-Lowest-Second-Lowest.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24393443957","text":"chars = {\r\n 2:'ABC',\r\n 3:'DEF',\r\n 4:'GHI',\r\n 5:'JKL',\r\n 6:'MNO',\r\n 7:'PQRS',\r\n 8:'TUV',\r\n 9:'WXYZ'\r\n}\r\n\r\ndialing = 0\r\n\r\ndef dial(string):\r\n for i in range(len(string)):\r\n for k, v in chars.items():\r\n if string[i] in v:\r\n global dialing\r\n dialing += k + 1\r\n\r\ndial_string = input()\r\ndial(dial_string)\r\n\r\nprint(dialing)","repo_name":"andtomorrow/algorithm","sub_path":"백준/Bronze/5622. 다이얼/다이얼.py","file_name":"다이얼.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29489901989","text":"from typing import Dict, Optional, Union, Type, TYPE_CHECKING, List\nimport gym\nfrom yaml import dump\nimport numpy as np\n\nimport eagerx\nfrom eagerx.core.space import Space\nfrom eagerx.core.view import SpecView\nfrom eagerx.utils.utils import (\n replace_None,\n deepcopy,\n)\nfrom eagerx.utils.utils_sub import substitute_args\n\n\nif TYPE_CHECKING:\n from eagerx.core.entities import Engine\n\n\nclass EntitySpec(object):\n def __init__(self, params):\n super(EntitySpec, self).__setattr__(\"_params\", params)\n\n def __setattr__(self, name, value):\n raise AttributeError(\"You cannot set the new attributes to EntitySpec.\")\n\n def __str__(self):\n return dump(self._params)\n\n @property\n @deepcopy\n def params(self):\n return self._params\n\n\nclass BackendSpec(EntitySpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the selected backend.\"\"\"\n\n def initialize(self, spec_cls):\n pass\n\n @property\n def config(self) -> SpecView:\n \"\"\"Provides an API to get/set the parameters to initialize.\n\n :return: (mutable) API to get/set parameters.\n \"\"\"\n return SpecView(self, depth=[\"config\"], unlocked=True)\n\n\nclass ProcessorSpec(EntitySpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the processor.\"\"\"\n\n def initialize(self, spec_cls):\n pass\n\n @property\n def config(self) -> SpecView:\n \"\"\"Provides an API to get/set the parameters to initialize.\n\n :return: (mutable) API to get/set parameters.\n \"\"\"\n return SpecView(self, depth=[], unlocked=True)\n\n\nclass EngineStateSpec(EntitySpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the engine state.\"\"\"\n\n def initialize(self, spec_cls):\n pass\n\n @property\n def config(self) -> SpecView:\n 
\"\"\"Provides an API to get/set the parameters to initialize.\n\n :return: API to get/set parameters.\n \"\"\"\n return SpecView(self, depth=[], unlocked=True)\n\n\nclass BaseNodeSpec(EntitySpec):\n def __init__(self, params):\n super().__init__(params)\n\n def _lookup(self, depth, unlocked=False):\n name = self._params[\"config\"][\"name\"]\n return SpecView(self, depth=[depth], name=name, unlocked=unlocked)\n\n @property\n def config(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters to initialize.\n\n The default parameters are:\n\n - .. py:attribute:: Spec.config.name: str\n\n User specified unique node name.\n\n - .. py:attribute:: Spec.config.rate: float\n\n Rate (Hz) at which the :func:`~eagerx.core.entities.Node.callback` is called.\n\n - .. py:attribute:: Spec.config.process: int = 0\n\n Process in which the node is launched. See :class:`~eagerx.core.constants.process` for all options.\n\n - .. py:attribute:: Spec.config.color: str = grey\n\n Specifies the color of logged messages & node color in the GUI.\n Check-out the termcolor documentation for the supported colors.\n\n - .. py:attribute:: Spec.config.print_mode: int = 1\n\n Specifies the different modes for printing: `{1: TERMCOLOR, 2: ROS}`.\n\n - .. py:attribute:: Spec.config.log_level: int = 30\n\n Specifies the log level for the engine: `{0: SILENT, 10: DEBUG, 20: INFO, 30: WARN, 40: ERROR, 50: FATAL}`\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"config\", unlocked=True)\n\n @property\n def inputs(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.inputs`.\n\n The mutable parameters are:\n\n - .. py:attribute:: Spec.inputs..window: int = 1\n\n A non-negative number that specifies the number of messages to pass to the node's :func:`~eagerx.core.entities.Node.callback`.\n\n - *window* = 1: Only the last received input message.\n\n - *window* = *x* > 1: The trailing last *x* received input messages.\n\n - *window* = 0: All input messages received since the last call to the node's :func:`~eagerx.core.entities.Node.callback`.\n\n .. note:: With *window* = 0, the number of input messages may vary and can even be zero.\n\n - .. py:attribute:: Spec.inputs..processor: ProcessorSpec = None\n\n A processor that preprocesses the received input message before passing it\n to the node's :func:`~eagerx.core.entities.Node.callback`.\n\n - .. py:attribute:: Spec.inputs..space: dict = None\n\n This space defines the format of valid messages.\n\n - .. py:attribute:: Spec.inputs..delay: float = 0.0\n\n A non-negative simulated delay (seconds). This delay is ignored if\n :attr:`~eagerx.core.entities.Engine.simulate_delays` = True\n in the engine's :func:`~eagerx.core.entities.Engine.spec`.\n\n - .. py:attribute:: Spec.inputs..skip: bool = False\n\n Skip the dependency on this input during the first call to the node's :func:`~eagerx.core.entities.Node.callback`.\n May be necessary to ensure that the connected graph is directed and acyclic.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"inputs\")\n\n @property\n def outputs(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.outputs`.\n\n The mutable parameters are:\n\n - .. 
py:attribute:: Spec.outputs..processor: ProcessorSpec = None\n\n A processor that preprocesses the output message, returned by :func:`~eagerx.core.entities.Node.callback`,\n before publishing it.\n\n - .. py:attribute:: Spec.outputs..space: dict = None\n\n This space defines the format of valid messages.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"outputs\")\n\n @property\n def states(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.states`.\n\n The mutable parameters are:\n\n - .. py:attribute:: Spec.states..space: dict = None\n\n This space defines the format of valid messages.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"states\")\n\n def initialize(self, spec_cls):\n import eagerx.core.register as register\n\n try:\n params = register.LOOKUP_TYPES[spec_cls.callback]\n except KeyError:\n if spec_cls.__name__ == \"EnvNode\":\n params = dict()\n else:\n raise\n\n if \"targets\" in params:\n from eagerx.core.entities import ResetNode\n\n assert issubclass(\n spec_cls, ResetNode\n ), \"You can only have targets registered for nodes that inherit from the ResetNode baseclass.\"\n add_ft = True\n else:\n add_ft = False\n\n # Set default components\n for component, cnames in params.items():\n for cname, space in cnames.items():\n if component == \"outputs\":\n if cname not in self.config.outputs:\n self.config.outputs.append(cname)\n mapping = dict(\n rate=\"$(config rate)\",\n processor=None,\n space=space,\n )\n # Add feedthrough entries for each output if node is a reset node (i.e. when it has a target)\n if add_ft:\n mapping_ft = dict(\n delay=0.0,\n window=1,\n skip=False,\n processor=None,\n space=space,\n address=None,\n )\n with self.feedthroughs as d:\n d[cname] = mapping_ft\n elif component == \"inputs\":\n if cname not in self.config.inputs:\n self.config.inputs.append(cname)\n address = \"engine/outputs/tick\" if cname == \"tick\" else None\n space = eagerx.Space(shape=(), dtype=\"int64\") if cname == \"tick\" else space\n mapping = dict(\n delay=0.0,\n window=1,\n skip=False,\n processor=None,\n space=space,\n address=address,\n )\n elif component == \"targets\":\n if cname not in self.config.targets:\n self.config.targets.append(cname)\n mapping = dict(\n processor=None,\n space=space,\n address=None,\n )\n else:\n if cname not in self.config.states:\n self.config.states.append(cname)\n component = \"states\"\n mapping = dict(\n processor=None,\n space=space,\n )\n with getattr(self, component) as d:\n d[cname] = mapping\n\n def add_input(\n self,\n cname: str,\n window: int = 1,\n delay: float = 0.0,\n skip: bool = False,\n address: str = None,\n processor: Optional[ProcessorSpec] = None,\n space: Optional[gym.spaces.Space] = None,\n ):\n mapping = dict(\n window=window,\n delay=delay,\n skip=skip,\n space=space,\n address=address,\n processor=processor.params if processor else None,\n )\n with self.inputs as d:\n d[cname] = mapping\n\n def add_output(\n self,\n cname: str,\n processor: Optional[ProcessorSpec] = None,\n space: Optional[gym.spaces.Space] = None,\n ):\n mapping = dict(\n rate=\"$(config rate)\",\n space=space,\n processor=processor.params if processor else None,\n )\n with self.outputs as d:\n d[cname] = mapping\n\n def build(self, ns):\n params = self.params # Creates a deepcopy\n name = self.config.name\n entity_id = self.config.entity_id\n\n # 
Replace args in .yaml\n context = {\n \"ns\": {\"env_name\": ns, \"node_name\": name},\n \"config\": params[\"config\"],\n }\n substitute_args(params, context, only=[\"config\", \"ns\"])\n\n # Process inputs\n inputs = []\n for cname in self.config.inputs:\n assert (\n cname in params[\"inputs\"]\n ), f'Received unknown {\"input\"} \"{cname}\". Check the spec of \"{name}\" with entity_id \"{entity_id}\".'\n assert (\n \"targets\" not in params or cname not in params[\"targets\"]\n ), f'Input \"{cname}\" cannot have the same cname as a target. Change either the input or target cname. Check the spec of \"{name}\" with entity_id \"{entity_id}\".'\n n = RxInput(name=cname, **params[\"inputs\"][cname])\n inputs.append(n)\n\n # Process outputs\n outputs = []\n for cname in self.config.outputs:\n msg = f\"The rate ({params['outputs'][cname]['rate']} Hz) set for action '{cname}' does not equal the environment rate ({self.config.rate} Hz).\"\n assert params[\"outputs\"][cname][\"rate\"] == self.config.rate, msg\n assert (\n cname in params[\"outputs\"]\n ), f'Received unknown {\"output\"} \"{cname}\". Check the spec of \"{name}\" with entity_id \"{entity_id}\".'\n if \"address\" in params[\"outputs\"][cname]:\n address = params[\"outputs\"][cname].pop(\"address\")\n else:\n address = \"%s/outputs/%s\" % (name, cname)\n n = RxOutput(name=cname, address=address, **params[\"outputs\"][cname])\n outputs.append(n)\n\n states = []\n for cname in self.config.states:\n assert (\n cname in params[\"states\"]\n ), f'Received unknown {\"state\"} \"{cname}\". Check the spec of \"{name}\" with entity_id \"{entity_id}\".'\n if \"address\" in params[\"states\"][cname]: # if 'env/supervisor', the state address is pre-defined (like an input)\n n = RxState(name=cname, **params[\"states\"][cname])\n else:\n address = \"%s/states/%s\" % (name, cname)\n n = RxState(name=cname, address=address, **params[\"states\"][cname])\n states.append(n)\n\n targets = []\n if \"targets\" in self.config:\n for cname in self.config.targets:\n assert (\n cname in params[\"targets\"]\n ), f'Received unknown {\"target\"} \"{cname}\". Check the spec of \"{name}\" with entity_id \"{entity_id}\".'\n n = RxState(name=cname, **params[\"targets\"][cname])\n targets.append(n)\n\n feedthroughs = []\n if \"feedthroughs\" in params:\n assert \"targets\" in self.config, f'No targets defined for ResetNode \"{name}\".'\n assert len(self.config.targets) > 0, f'No targets selected for ResetNode \"{name}\".'\n for cname in self.config.outputs:\n # Add output details to feedthroughs\n assert (\n cname in params[\"feedthroughs\"]\n ), f'Feedthrough \"{cname}\" must directly correspond to a selected output. 
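The build() method above resolves placeholders such as "$(config rate)" through substitute_args. A rough standalone sketch of that substitution idea; the helper name and regex here are illustrative assumptions, not the real eagerx implementation:

```python
import re

_PLACEHOLDER = re.compile(r"\$\(config (\w+)\)")

def substitute_config(value, config):
    # Illustrative stand-in for an arg-substitution helper (assumption, simplified).
    if not isinstance(value, str):
        return value
    full = _PLACEHOLDER.fullmatch(value)
    if full:  # a whole-string placeholder keeps the original value type
        return config[full.group(1)]
    return _PLACEHOLDER.sub(lambda m: str(config[m.group(1)]), value)

config = {"rate": 30}  # hypothetical config
print(substitute_config("$(config rate)", config))            # -> 30
print(substitute_config("rate is $(config rate) Hz", config))  # -> "rate is 30 Hz"
```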
Check the spec of \"{name}\" with entity_id \"{entity_id}\".'\n params[\"feedthroughs\"][cname][\"space\"] = params[\"outputs\"][cname][\"space\"]\n n = RxFeedthrough(feedthrough_to=cname, **params[\"feedthroughs\"][cname])\n feedthroughs.append(n)\n\n params[\"outputs\"] = [i.build(ns=ns) for i in outputs]\n params[\"inputs\"] = [i.build(ns=ns) for i in inputs]\n params[\"states\"] = [i.build(ns=ns) for i in states]\n params[\"targets\"] = [i.build(ns=ns) for i in targets]\n params[\"feedthroughs\"] = [i.build(ns=ns) for i in feedthroughs]\n\n # Create rate dictionary with outputs\n chars_ns = len(ns) + 1\n rate_dict = dict()\n for i in params[\"outputs\"]:\n assert i[\"rate\"] is not None and isinstance(i[\"rate\"], (int, float)) and i[\"rate\"] > 0, (\n f'The rate of node \"{name}\" (and output cname \"{i[\"name\"]}\") is misspecified: rate=\"{i[\"rate\"]}\". '\n 'Make sure that it is of type(rate)=(\"int\", \"float\",) and rate > 0.'\n )\n address = i[\"address\"][chars_ns:]\n rate_dict[address] = i[\"rate\"] # {'rate': i['rate']}\n\n # Put parameters in node namespace (watch out, order of dict keys probably matters...)\n node_params = {name: params, \"rate\": rate_dict}\n return replace_None(node_params)\n\n\nclass NodeSpec(BaseNodeSpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the node.\n\n .. note:: You may encounter (or use) the syntax \"`$(config [parameter_name])`\" to couple the values of several parameters\n in the spec. This creates a coupling between parameters so that modifications to the value of one parameter\n also affect the coupled parameter value.\n\n For example, setting `spec.inputs.in_1.space.low = \"$(config low)\"` will set the value of\n `spec.inputs.in_1.space.low=spec.config.low` when the node is initialized. Hence, any change to\n `low` will also be reflected in the space parameter `low`.\n \"\"\"\n\n pass\n\n\nclass ResetNodeSpec(BaseNodeSpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the node.\n\n .. note:: You may encounter (or use) the syntax \"`$(config [parameter_name])`\" to couple the values of several parameters\n in the spec. This creates a coupling between parameters so that modifications to the value of one parameter\n also affect the coupled parameter value.\n\n For example, setting `spec.inputs.in_1.space.low = \"$(config low)\"` will set the value of\n `spec.inputs.in_1.space.low=spec.config.low` when the node is initialized. Hence, any change to\n `low` will also be reflected in the space parameter `low`\n \"\"\"\n\n @property\n def targets(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.targets`.\n\n The mutable parameters are:\n\n - .. py:attribute:: Spec.targets..processor: ProcessorSpec = None\n\n A processor that preprocesses the received state message before passing it\n to the node's :func:`~eagerx.core.entities.ResetNode.callback`.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"targets\")\n\n @property\n def feedthroughs(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of a feedthrough corresponding to registered :func:`eagerx.core.register.outputs`.\n\n The mutable parameters are:\n\n - .. 
py:attribute:: Spec.feedthroughs..processor: ProcessorSpec = None\n\n A processor that preprocesses the received input message before passing it\n to the node's :func:`~eagerx.core.entities.Node.callback`.\n\n - .. py:attribute:: Spec.feedthroughs..space: dict = None\n\n This space defines the format of valid messages.\n\n - .. py:attribute:: Spec.feedthroughs..delay: float = 0.0\n\n A non-negative simulated delay (seconds). This delay is ignored if\n :attr:`~eagerx.core.entities.Engine.simulate_delays` = True\n in the engine's :func:`~eagerx.core.entities.Engine.spec`.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"feedthroughs\")\n\n\nclass EngineSpec(BaseNodeSpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the engine.\"\"\"\n\n @property\n def config(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters to initialize.\n\n The default parameters are:\n\n - .. py:attribute:: Spec.config.rate: float\n\n Rate (Hz) at which the :func:`~eagerx.core.entities.Engine.callback` is called.\n\n - .. py:attribute:: Spec.config.process: int = 0\n\n Process in which the engine is launched. See :class:`~eagerx.core.constants.process` for all options.\n\n - .. py:attribute:: Spec.config.sync: bool = True\n\n Flag that specifies whether we run reactive or asynchronous.\n\n - .. py:attribute:: Spec.config.real_time_factor: float = 0\n\n A specified upper bound on the real-time factor. `Wall-clock-rate`=`real_time_factor`*`rate`.\n If `real_time_factor` < 1 the simulation is slower than real time.\n\n - .. py:attribute:: Spec.config.simulate_delays: bool = True\n\n Flag that specifies whether input delays are simulated.\n You probably want to set this to `False` when running in the real-world.\n\n - .. py:attribute:: Spec.config.color: str = grey\n\n Specifies the color of logged messages. Check-out the termcolor documentation for the supported colors.\n\n - .. py:attribute:: Spec.config.print_mode: int = 1\n\n Specifies the different modes for printing: `{1: TERMCOLOR, 2: ROS}`.\n\n - .. py:attribute:: Spec.config.log_level: int = 30\n\n Specifies the log level for the engine: `{0: SILENT, 10: DEBUG, 20: INFO, 30: WARN, 40: ERROR, 50: FATAL}`.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"config\", unlocked=True)\n\n\nclass ObjectSpec(EntitySpec):\n \"\"\"A specification that specifies how :class:`~eagerx.core.env.BaseEnv` should initialize the object.\n\n .. note:: You may encounter (or use) the syntax \"`$(config [parameter_name])`\" to couple the values of several parameters\n in the spec. This creates a coupling between parameters so that modifications to the value of one parameter\n also affect the coupled parameter value.\n\n For example, setting `spec.sensors.in_1.space.low = \"$(config low)\"` will set the value of\n `spec.sensors.in_1.space.low=spec.config.low` when the node is initialized. 
Hence, any change to\n `low` will also be reflected in the space parameter `low`\n \"\"\"\n\n def __init__(self, params):\n super().__init__(params)\n\n def _lookup(self, depth, unlocked=False):\n name = self._params[\"config\"][\"name\"]\n return SpecView(self, depth=[depth], name=name, unlocked=unlocked)\n\n def gui(\n self,\n engine_cls: Type[\"Engine\"],\n interactive: Optional[bool] = True,\n resolution: Optional[List[int]] = None,\n filename: Optional[str] = None,\n ) -> Union[None, np.ndarray]:\n \"\"\"Opens a graphical user interface of the object's engine implementation.\n\n .. note:: Requires `eagerx-gui`:\n\n .. highlight:: python\n .. code-block:: python\n\n pip3 install eagerx-gui\n\n :param engine_cls: The class engine (not instance!) that was used to register the engine implementation (e.g. \"PybulletEngine\").\n :param interactive: If `True`, an interactive application is launched.\n Otherwise, an RGB render of the GUI is returned.\n This could be useful when using a headless machine.\n :param resolution: Specifies the resolution of the returned render when `interactive` is `False`.\n If `interactive` is `True`, this argument is ignored.\n :param filename: If provided, the GUI is rendered to an svg file with this name.\n If `interactive` is `True`, this argument is ignored.\n :return: RGB render of the GUI if `interactive` is `False`.\n \"\"\"\n import eagerx.core.register as register\n\n spec_copy = ObjectSpec(self.params)\n engine_id = engine_cls.__module__ + \"/\" + engine_cls.__qualname__\n spec_copy._params[\"engine\"] = {}\n graph = register.add_engine(spec_copy, engine_id)\n return graph.gui(interactive=interactive, resolution=resolution, filename=filename)\n\n @property\n def engine(self) -> Union[SpecView]:\n \"\"\"Provides an API to set/get the parameters of an engine-specific implementation.\n\n The mutable parameters are:\n\n - Arguments (excluding spec) of the selected engine's :func:`~eagerx.core.entities.Engine.add_object` method.\n\n - .. py:attribute:: Spec.engine.states.: EngineState\n\n Link an :class:`~eagerx.core.specs.EngineState` to a registered state with :func:`eagerx.core.register.states`.\n\n :return: API to get/set parameters.\n \"\"\"\n return SpecView(self, depth=[\"engine\"], name=self._params[\"config\"][\"name\"])\n\n @property\n def sensors(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.sensors`.\n\n The mutable parameters are:\n\n - .. py:attribute:: Spec.sensors..rate: float = 1.0\n\n Rate (Hz) at which the sensor's :func:`~eagerx.core.entities.EngineNode.callback` is called.\n\n - .. py:attribute:: Spec.sensors..space: dict = None\n\n This space defines the format of valid messages.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"sensors\")\n\n @property\n def actuators(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.actuators`.\n\n The mutable parameters are:\n\n - .. py:attribute:: Spec.actuators..rate: float = 1.0\n\n Rate (Hz) at which the actuator's :func:`~eagerx.core.entities.EngineNode.callback` is called.\n\n - .. 
py:attribute:: Spec.actuators..window: int = 1\n\n A non-negative number that specifies the number of messages to pass to the node's\n :func:`~eagerx.core.entities.EngineNode.callback`.\n\n - *window* = 1: Only the last received input message.\n\n - *window* = *x* > 1: The trailing last *x* received input messages.\n\n - *window* = 0: All input messages received since the last call to the node's\n :func:`~eagerx.core.entities.EngineNode.callback`.\n\n .. note:: With *window* = 0, the number of input messages may vary and can even be zero.\n\n\n - .. py:attribute:: Spec.actuators..space: dict = None\n\n This space defines the format of valid messages.\n\n - .. py:attribute:: Spec.actuators..delay: float = 0.0\n\n A non-negative simulated delay (seconds). This delay is ignored if\n :attr:`~eagerx.core.entities.Engine.simulate_delays` = True\n in the engine's :func:`~eagerx.core.entities.Engine.spec`.\n\n - .. py:attribute:: Spec.actuators..skip: bool = False\n\n Skip the dependency on this input during the first call to the node's :func:`~eagerx.core.entities.EngineNode.callback`.\n May be necessary to ensure that the connected graph is directed and acyclic.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"actuators\")\n\n @property\n def states(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters of registered :func:`eagerx.core.register.engine_states`.\n\n The mutable parameters are:\n\n - .. py:attribute:: Spec.states..space: dict = None\n\n This space defines the format of valid messages.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"states\")\n\n @property\n def config(self) -> SpecView:\n \"\"\"Provides an API to set/get the parameters to initialize.\n\n The default parameters are:\n\n - Additional parameters registered with the :func:`eagerx.core.register.config` decorator.\n\n - .. py:attribute:: Spec.config.name: str\n\n User specified unique object name.\n\n - .. py:attribute:: Spec.config.actuators: list\n\n List with selected actuators. Must be a subset of the registered :func:`eagerx.core.register.actuators`.\n\n - .. py:attribute:: Spec.config.sensors: list\n\n List with selected sensors. Must be a subset of the registered :func:`eagerx.core.register.sensors`.\n\n - .. py:attribute:: Spec.config.states: list\n\n List with selected engine_states. 
Must be a subset of the registered :func:`eagerx.core.register.engine_states`.\n\n The API becomes **read-only** once the entity is added to :class:`~eagerx.core.graph.Graph`.\n\n :return: API to get/set parameters.\n \"\"\"\n return self._lookup(\"config\", unlocked=True)\n\n def initialize(self, spec_cls):\n import eagerx.core.register as register\n\n agnostic = register.LOOKUP_TYPES[spec_cls.make]\n\n # Set default components\n for component, cnames in agnostic.items():\n for cname, space in cnames.items():\n if component == \"sensors\":\n mapping = dict(\n rate=1,\n processor=None,\n space=space,\n )\n elif component == \"actuators\":\n mapping = dict(\n rate=1,\n delay=0.0,\n window=1,\n skip=False,\n processor=None,\n space=space,\n )\n else:\n component = \"states\"\n mapping = dict(\n processor=None,\n space=space,\n )\n with getattr(self, component) as d:\n d[cname] = mapping\n # Select component per default\n if cname not in getattr(self.config, component):\n getattr(self.config, component).append(cname)\n\n def _initialize_engine_config(self, engine_config):\n # Add default config\n with self.engine as d:\n d.update(engine_config)\n d[\"states\"] = {}\n # Add all states to engine-specific params\n with d.states as s:\n for cname in self.states.keys():\n s[cname] = None\n\n def _add_graph(self, graph):\n # Register EngineGraph\n nodes, actuators, sensors = graph.register()\n\n # Pop states that were not implemented.\n with self.engine.states as d:\n for cname in list(self.engine.states.keys()):\n if d[cname] is None:\n d.pop(cname)\n\n # Set engine_spec\n with self.engine as d:\n d.actuators = actuators\n d.sensors = sensors\n d.nodes = nodes\n\n def _initialize_object_graph(self):\n mapping = dict()\n for component in [\"sensors\", \"actuators\"]:\n try:\n mapping[component] = getattr(self, component)\n except AttributeError:\n continue\n\n from eagerx.core.graph_engine import EngineGraph\n\n graph = EngineGraph.create(**mapping)\n return graph\n\n def add_engine(self, engine_id):\n # Construct context & replace placeholders\n context = {\"config\": self.config.to_dict()}\n substitute_args(self._params[\"config\"], context, only=[\"config\"]) # First resolve args within the context\n substitute_args(self._params, context, only=[\"config\"]) # Resolve rest of params\n\n # Add engine entry\n import eagerx.core.register as register\n\n self._params[\"engine\"] = {}\n register.add_engine(self, engine_id)\n\n def build(self, ns, engine_id):\n params = self.params # Creates a deepcopy\n name = self.config.name\n\n # Construct context\n context = {\"ns\": {\"env_name\": ns, \"obj_name\": name}}\n substitute_args(params[\"config\"], context, only=[\"ns\"]) # First resolve args within the context\n substitute_args(params, context, only=[\"ns\"]) # Resolve rest of params\n\n # Get agnostic definition\n agnostic = dict()\n for key in list(params.keys()):\n if key not in [\"actuators\", \"sensors\", \"states\"]:\n if key not in [\"config\", engine_id]:\n params.pop(key)\n continue\n agnostic[key] = params.pop(key)\n\n # Get engine definition\n engine = params.pop(engine_id)\n nodes = engine.pop(\"nodes\")\n specific = dict()\n for key in list(engine.keys()):\n if key not in [\"actuators\", \"sensors\", \"states\"]:\n continue\n specific[key] = engine.pop(key)\n\n # Replace node names\n for key in list(nodes.keys()):\n key_sub = substitute_args(key, context, only=[\"ns\"])\n nodes[key_sub] = nodes.pop(key)\n\n # Sensors & actuators\n sensor_addresses = dict()\n dependencies = []\n for 
obj_comp in [\"actuators\", \"sensors\"]:\n for obj_cname in params[\"config\"][obj_comp]:\n try:\n entry_lst = specific[obj_comp][obj_cname]\n except KeyError:\n raise KeyError(\n f'\"{obj_cname}\" was selected in {obj_comp} of \"{name}\", but there is no implementation for it in engine \"{engine_id}\".'\n )\n\n for entry in reversed(entry_lst):\n node_name, node_comp, node_cname = entry[\"name\"], entry[\"component\"], entry[\"cname\"]\n obj_comp_params = agnostic[obj_comp][obj_cname]\n node_params = nodes[node_name]\n\n # Determine node dependency\n dependencies += entry[\"dependency\"]\n\n # Set rate\n rate = obj_comp_params[\"rate\"]\n msg_start = f'Different rate specified for {obj_comp[:-1]} \"{obj_cname}\" and enginenode \"{node_name}\": '\n msg_end = \"If an enginenode implements a sensor/actuator, their specified rates must be equal.\"\n msg_mid = f'{node_params[\"config\"][\"rate\"]} vs {rate}. '\n assert node_params[\"config\"][\"rate\"] == rate, msg_start + msg_mid + msg_end\n for o in node_params[\"config\"][\"outputs\"]:\n msg_mid = f'{node_params[\"outputs\"][o][\"rate\"]} vs {rate}. '\n assert node_params[\"outputs\"][o][\"rate\"] == rate, msg_start + msg_mid + msg_end\n\n # Set component params\n node_comp_params = nodes[node_name][node_comp][node_cname]\n if obj_comp == \"sensors\":\n # Make sure that space of node is contained within agnostic space.\n agnostic_space = Space.from_dict(obj_comp_params[\"space\"])\n node_space = Space.from_dict(node_comp_params[\"space\"])\n msg = (\n f\"The space of EngineNode `{node_name}.{node_comp}.{node_cname}` is different \"\n f\"(dtype, shape, low, or high) from the space of `{name}.{obj_comp}.{obj_cname}`: \\n\\n\"\n f\"{node_name}.{node_comp}.{node_cname}.space={node_space} \\n\\n\"\n f\"{name}.{obj_comp}.{obj_cname}.space={agnostic_space} \\n\\n\"\n )\n assert agnostic_space.contains_space(node_space), msg\n node_comp_params.update(obj_comp_params)\n node_comp_params[\"address\"] = f\"{name}/{obj_comp}/{obj_cname}\"\n sensor_addresses[f\"{node_name}/{node_comp}/{node_cname}\"] = f\"{name}/{obj_comp}/{obj_cname}\"\n else: # Actuators\n agnostic_space = Space.from_dict(obj_comp_params[\"space\"])\n node_space = Space.from_dict(node_comp_params[\"space\"])\n msg = (\n f\"The space of EngineNode `{node_name}.{node_comp}.{node_cname}` is different \"\n f\"(dtype, shape, low, or high) from the space of `{name}.{obj_comp}.{obj_cname}`: \\n\\n\"\n f\"{name}.{node_comp}.{node_cname}.space={node_space} \\n\\n\"\n f\"{name}.{obj_comp}.{obj_cname}.space={agnostic_space} \\n\\n\"\n )\n assert agnostic_space.contains_space(node_space), msg\n\n agnostic_processor = obj_comp_params.pop(\"processor\")\n node_comp_params.update(obj_comp_params)\n\n if agnostic_processor is not None:\n msg = (\n f\"A processor was defined for {node_name}.{node_comp}.{node_cname}, however the engine \"\n \"implementation also has a processor defined. You can only have one processor.\"\n )\n assert node_comp_params[\"processor\"] is None, msg\n node_comp_params[\"processor\"] = agnostic_processor\n\n # Pop rate. 
Actuators are more-or-less inputs so have no rate?\n node_comp_params.pop(\"rate\")\n # Reassign converter in case a node provides the implementation for multiple actuators\n obj_comp_params[\"processor\"] = agnostic_processor\n\n # Get set of node we are required to launch\n dependencies = list(set(dependencies))\n\n # Verify that no dependency is an unlisted actuator node.\n not_selected = [cname for cname in agnostic[\"actuators\"] if cname not in params[\"config\"][\"actuators\"]]\n for cname in not_selected:\n try:\n for entry in specific[\"actuators\"][cname]:\n node_name, node_comp, node_cname = (entry[\"name\"], entry[\"component\"], entry[\"cname\"])\n msg = (\n f'There appears to be a dependency on enginenode \"{node_name}\" for the implementation of '\n f'engine \"{engine_id}\" for object \"{name}\" to work. However, enginenode \"{node_name}\" is '\n f'directly tied to an unselected actuator \"{cname}\". '\n \"The actuator must be selected to resolve the graph.\"\n )\n assert node_name not in dependencies, msg\n except KeyError:\n # We pass here, because if cname is not selected, but also not implemented,\n # we are sure that there is no dependency.\n pass\n\n # Replace enginenode outputs that have been renamed to sensor outputs\n for node_address, sensor_address in sensor_addresses.items():\n for _, node_params in nodes.items():\n for _cname, comp_params in node_params[\"inputs\"].items():\n if node_address == comp_params[\"address\"]:\n comp_params[\"address\"] = sensor_address\n\n # Create states\n states = []\n state_names = []\n obj_comp = \"states\"\n for obj_cname in params[\"config\"][\"states\"]:\n args = agnostic[obj_comp][obj_cname]\n args[\"name\"] = f\"{name}/{obj_comp}/{obj_cname}\"\n args[\"address\"] = f\"{name}/{obj_comp}/{obj_cname}\"\n try:\n args[\"state\"] = specific[obj_comp][obj_cname]\n except KeyError:\n raise KeyError(\n f'\"{obj_cname}\" was selected in {obj_comp} of \"{name}\", but there is no implementation for it in engine \"{engine_id}\".'\n )\n states.append(RxEngineState(**args))\n state_names.append(f'{ns}/{args[\"name\"]}')\n\n # Gather node names\n params[\"node_names\"] = [f\"{ns}/{node_name}\" for node_name in list(nodes.keys()) if node_name in dependencies]\n params[\"state_names\"] = state_names\n\n # Add engine\n params[\"engine\"] = engine\n\n # Add agnostic definition\n params.update(**agnostic)\n\n # Add states\n assert \"states\" not in params[\"engine\"], \"The keyword `states` is reserved.\"\n params[\"engine\"][\"states\"] = [s.build(ns) for s in states]\n nodes = [NodeSpec(params) for name, params in nodes.items() if name in dependencies]\n return {name: replace_None(params)}, nodes\n\n\n# REQUIRED FOR BUILDING SPECS\n\n\nclass Component(object):\n def __init__(self, **kwargs):\n # Iterates over provided arguments and sets the provided arguments as class properties\n for key, value in kwargs.items():\n if key == \"__class__\":\n continue # Skip if __class__ type\n setattr(self, key, value)\n\n\nclass RxInput(Component):\n def __init__(\n self,\n name: str,\n address: str,\n window: int = 0,\n processor: Dict = None,\n space: Dict = None,\n delay: float = 0.0,\n skip: bool = False,\n dtype: str = None,\n ):\n # Store parameters as properties in baseclass\n # IMPORTANT! 
Do not define variables locally you do **not** want to store\n # on the parameter server anywhere before calling the baseclass' constructor.\n kwargs = locals().copy()\n kwargs.pop(\"self\")\n super(RxInput, self).__init__(**kwargs)\n\n # Calculate other parameters based on previously defined attributes.\n\n # Error check the parameters here.\n\n def build(self, ns=\"\"):\n params = self.__dict__.copy()\n params[\"address\"] = \"/\".join(filter(None, [ns, params[\"address\"]]))\n # Set dtype if not already set by source\n if params[\"dtype\"] is None:\n params[\"dtype\"] = params[\"space\"][\"dtype\"]\n return params\n\n\nclass RxOutput(Component):\n def __init__(\n self,\n name: str,\n address: str,\n rate: float,\n processor: Dict = None,\n space: Dict = None,\n ):\n # Store parameters as properties in baseclass\n # IMPORTANT! Do not define variables locally you do **not** want to store\n # on the parameter server anywhere before calling the baseclass' constructor.\n kwargs = locals().copy()\n kwargs.pop(\"self\")\n super(RxOutput, self).__init__(**kwargs)\n\n # Calculate other parameters based on previously defined attributes.\n\n # Error check the parameters here.\n\n def build(self, ns=\"\"):\n params = self.__dict__.copy()\n params[\"address\"] = \"/\".join(filter(None, [ns, params[\"address\"]]))\n params[\"dtype\"] = params[\"space\"][\"dtype\"]\n return params\n\n\nclass RxFeedthrough(Component):\n def __init__(\n self,\n address: str,\n feedthrough_to: str,\n window: int = 1,\n processor: Dict = None,\n space: Dict = None,\n delay: float = 0.0,\n skip: bool = False,\n dtype: str = None,\n ):\n # Store parameters as properties in baseclass\n # IMPORTANT! Do not define variables locally you do **not** want to store\n # on the parameter server anywhere before calling the baseclass' constructor.\n kwargs = locals().copy()\n kwargs.pop(\"self\")\n super(RxFeedthrough, self).__init__(**kwargs)\n\n # Calculate other parameters based on previously defined attributes.\n\n # Error check the parameters here.\n\n def build(self, ns=\"\"):\n params = self.__dict__.copy()\n params[\"address\"] = \"/\".join(filter(None, [ns, params[\"address\"]]))\n # Set dtype if not already set by source\n if params[\"dtype\"] is None:\n params[\"dtype\"] = params[\"space\"][\"dtype\"]\n return params\n\n\nclass RxState(Component):\n def __init__(\n self,\n name: str,\n address: str,\n space: Dict,\n processor: Dict = None,\n dtype: str = None,\n ):\n # Store parameters as properties in baseclass\n # IMPORTANT! Do not define variables locally you do **not** want to store\n # on the parameter server anywhere before calling the baseclass' constructor.\n kwargs = locals().copy()\n kwargs.pop(\"self\")\n super(RxState, self).__init__(**kwargs)\n\n # Calculate other parameters based on previously defined attributes.\n\n # Error check the parameters here.\n\n def build(self, ns=\"\"):\n params = self.__dict__.copy()\n params[\"address\"] = \"/\".join(filter(None, [ns, params[\"address\"]]))\n # Set dtype if not already set by source\n if params[\"dtype\"] is None:\n params[\"dtype\"] = params[\"space\"][\"dtype\"]\n return params\n\n\nclass RxEngineState(Component):\n def __init__(\n self,\n name: str,\n address: str,\n state: Dict,\n processor: Dict = None,\n space: Dict = None,\n ):\n # Store parameters as properties in baseclass\n # IMPORTANT! 
Do not define variables locally you do **not** want to store\n # on the parameter server anywhere before calling the baseclass' constructor.\n kwargs = locals().copy()\n kwargs.pop(\"self\")\n super(RxEngineState, self).__init__(**kwargs)\n\n # Calculate other parameters based on previously defined attributes.\n\n # Error check the parameters here.\n\n def build(self, ns=\"\"):\n params = self.__dict__.copy()\n params[\"address\"] = \"/\".join(filter(None, [ns, params[\"address\"]]))\n params[\"dtype\"] = params[\"space\"][\"dtype\"]\n return params\n","repo_name":"sg774/eagerx","sub_path":"eagerx/core/specs.py","file_name":"specs.py","file_ext":"py","file_size_in_byte":44906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"23042106401","text":"from django.contrib.auth.hashers import make_password\nfrom django.shortcuts import get_object_or_404\nfrom drf_extra_fields.fields import Base64ImageField\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom recipes.models import (Favorite, Ingredient, IngredientRecipe, Recipe,\n ShoppingCart, Tag)\nfrom users.models import Follow, User\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для модели User.\"\"\"\n is_subscribed = serializers.SerializerMethodField('is_subscribed_user')\n\n class Meta:\n model = User\n fields = (\n 'id', 'email', 'username', 'first_name', 'last_name', 'password',\n 'is_subscribed',\n )\n extra_kwargs = {\n 'password': {'write_only': True, 'required': True},\n }\n\n def is_subscribed_user(self, obj):\n user = self.context['request'].user\n return (\n user.is_authenticated\n and obj.following.filter(user=user).exists()\n )\n\n def create(self, validated_data):\n validated_data['password'] = (\n make_password(validated_data.pop('password'))\n )\n return super().create(validated_data)\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для модели Ingredient.\"\"\"\n\n class Meta:\n model = Ingredient\n fields = '__all__'\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для модели Tag.\"\"\"\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n\nclass IngredientRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для модели IngredientRecipe.\"\"\"\n id = serializers.ReadOnlyField(source='ingredient.id')\n name = serializers.ReadOnlyField(source='ingredient.name')\n measurement_unit = serializers.ReadOnlyField(\n source='ingredient.measurement_unit'\n )\n\n class Meta:\n model = IngredientRecipe\n fields = ('id', 'name', 'measurement_unit', 'amount',)\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для чтения модели Recipe.\"\"\"\n author = UserSerializer(read_only=True)\n tags = TagSerializer(many=True, read_only=True)\n ingredients = IngredientRecipeSerializer(\n source='ingredientrecipe_set',\n many=True\n )\n image = Base64ImageField()\n is_favorited = serializers.SerializerMethodField(read_only=True)\n is_in_shopping_cart = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n fields = (\n 'id',\n 'author',\n 'tags',\n 'ingredients',\n 'is_favorited',\n 'is_in_shopping_cart',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n model = Recipe\n\n def get_user(self):\n return self.context['request'].user\n\n def get_is_favorited(self, obj):\n user = self.get_user()\n return (\n user.is_authenticated\n and 
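The Rx* component classes above all funnel locals() into a base class that stores keyword arguments as attributes. The pattern in isolation:

```python
class Component:
    def __init__(self, **kwargs):
        # Store every provided argument as an instance attribute.
        for key, value in kwargs.items():
            if key == "__class__":  # locals() includes this cell when super() is used
                continue
            setattr(self, key, value)


class Input(Component):
    def __init__(self, name, address, window=1):
        kwargs = locals().copy()
        kwargs.pop("self")  # drop the instance itself before storing
        super().__init__(**kwargs)


i = Input("tick", "engine/outputs/tick")
print(i.name, i.address, i.window)  # -> tick engine/outputs/tick 1
```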
user.favorites.filter(recipe=obj).exists()\n )\n\n def get_is_in_shopping_cart(self, obj):\n user = self.get_user()\n return (\n user.is_authenticated\n and user.shopping_cart.filter(recipe=obj).exists()\n )\n\n\nclass IngredientRecipeCreateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating an ingredient.\"\"\"\n id = serializers.IntegerField(write_only=True)\n\n class Meta:\n model = IngredientRecipe\n fields = ('id', 'amount',)\n\n\nclass RecipeCreateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating a Recipe.\"\"\"\n ingredients = IngredientRecipeCreateSerializer(many=True)\n tags = serializers.PrimaryKeyRelatedField(\n many=True,\n queryset=Tag.objects.all()\n )\n author = UserSerializer(read_only=True)\n image = Base64ImageField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n\n def validate_ingredients(self, value):\n ingredients = []\n for item in value:\n if item in ingredients:\n raise ValidationError('Ingredients cannot be repeated!')\n ingredients.append(item)\n return value\n\n def create_ingredients(self, ingredients, recipe):\n IngredientRecipe.objects.bulk_create(\n [IngredientRecipe(\n recipe=recipe,\n ingredient=get_object_or_404(Ingredient, pk=ingredient['id']),\n amount=ingredient['amount']\n ) for ingredient in ingredients]\n )\n\n def create(self, validated_data):\n tags = validated_data.pop('tags')\n ingredients = validated_data.pop('ingredients')\n recipe = Recipe.objects.create(**validated_data)\n recipe.tags.set(tags)\n self.create_ingredients(recipe=recipe, ingredients=ingredients)\n return recipe\n\n def update(self, instance, validated_data):\n tags = validated_data.pop('tags')\n instance.tags.set(tags)\n ingredients = validated_data.pop('ingredients')\n instance.ingredients.clear()\n self.create_ingredients(recipe=instance, ingredients=ingredients)\n return super().update(instance, validated_data)\n\n def to_representation(self, instance):\n request = self.context.get('request')\n context = {'request': request}\n return RecipeSerializer(instance, context=context).data\n\n\nclass RecipePreviewSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for a recipe preview.\"\"\"\n image = Base64ImageField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'name',\n 'image',\n 'cooking_time'\n )\n\n\nclass FavoritesSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for the Favorite model.\"\"\"\n\n class Meta:\n model = Favorite\n fields = '__all__'\n validators = [\n UniqueTogetherValidator(\n queryset=Favorite.objects.all(),\n fields=('user', 'recipe'),\n message='The recipe is already in favorites'\n ),\n ]\n\n def to_representation(self, instance):\n request = self.context.get('request')\n context = {'request': request}\n return RecipePreviewSerializer(instance, context=context).data\n\n\nclass ShoppingCartSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for the ShoppingCart model.\"\"\"\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n validators = [\n UniqueTogetherValidator(\n queryset=ShoppingCart.objects.all(),\n fields=('user', 'recipe'),\n message='The recipe is already in the shopping cart'\n ),\n ]\n\n def to_representation(self, instance):\n request = self.context.get('request')\n context = {'request': request}\n return RecipePreviewSerializer(instance, context=context).data\n\n\nclass FollowSerializer(UserSerializer):\n recipes = serializers.SerializerMethodField()\n recipes_count = 
serializers.SerializerMethodField()\n\n class Meta(UserSerializer.Meta):\n fields = UserSerializer.Meta.fields + ('recipes', 'recipes_count',)\n validators = (\n UniqueTogetherValidator(\n queryset=Follow.objects.all(),\n fields=('user', 'author'),\n message='You cannot subscribe twice!'\n ),\n )\n\n def get_recipes_count(self, obj):\n return obj.recipes.count()\n\n def validate(self, data):\n user = self.context['request'].user\n author = data.get('author')\n if user == author:\n raise serializers.ValidationError(\n 'You cannot subscribe to yourself!'\n )\n return data\n\n def get_recipes(self, obj):\n request = self.context.get('request')\n limit = request.query_params.get('recipes_limit')\n queryset = obj.recipes.all()\n if limit:\n queryset = queryset[:int(limit)]\n serializer = RecipePreviewSerializer(\n queryset, many=True, read_only=True\n )\n return serializer.data\n\n\nclass SetPasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(write_only=True)\n current_password = serializers.CharField(write_only=True)\n","repo_name":"OlyaDiv/foodgram-project-react","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"35311285082","text":"from bot import (TEAMS_URL, FIXTURES_URL, TEAM_STATS_URL,\n LEAGUES_URL, PLAYERS_URL, LOGGER)\nimport bot.proxy as api\nfrom bot.utils.interpret import (image_to_base64, url_to_base64,\n base64_to_image)\n\ntid = int()\nname = str()\n\n\nasync def compete(team):\n global tid, name\n\n query = {\n \"name\": team\n }\n\n info = await api.link(TEAMS_URL, query)\n\n if info:\n name = info[0][\"team\"][\"name\"]\n tid = info[0][\"team\"][\"id\"]\n return True\n\n return False\n\n\nasync def next_ten():\n query = {\n \"season\": api.season,\n \"team\": tid,\n \"next\": 10\n }\n fixtures = await api.link(FIXTURES_URL, query)\n\n if not fixtures:\n any_query = {\n \"season\": api.season,\n \"team\": tid,\n }\n\n fixtures = await api.link(FIXTURES_URL, any_query)\n\n upcoming = dict()\n for match in fixtures:\n for side in match[\"teams\"]:\n if match[\"teams\"][side][\"id\"] == tid:\n continue\n upcoming[match[\"fixture\"][\"id\"]] = match[\"teams\"][side][\"name\"]\n\n return upcoming\n\n\nasync def all_leagues():\n query = {\n \"season\": api.season,\n \"team\": tid\n }\n leagues = await api.link(LEAGUES_URL, query)\n\n return {k[\"league\"][\"id\"]: k[\"league\"][\"name\"] for k in leagues}\n\n\nasync def team_statistics(lid: int) -> tuple:\n query = {\n \"league\": lid,\n \"season\": api.season,\n \"team\": tid\n }\n team = await api.link(TEAM_STATS_URL, query)\n\n logoURL = url_to_base64(team[\"team\"][\"logo\"])\n try:\n logoPIL = base64_to_image(logoURL)\n logoPIL.thumbnail((300, 300))\n logoURL = bytes(f\"data:image/{logoPIL.format};base64,\",\n encoding=\"utf-8\") + image_to_base64(logoPIL)\n interpretedURL = logoURL.decode(\"utf-8\")\n except Exception:\n LOGGER.info(\"resized image (api) unrecognized by pyrogram.\")\n interpretedURL = team[\"team\"][\"logo\"]\n\n minutes = team[\"goals\"][\"for\"][\"minute\"]\n goals = 0\n peaked = \"-\"\n for scope in minutes:\n bracket = minutes[scope][\"total\"]\n total = 0 if bracket is None else int(bracket)\n if total > goals:\n goals = total\n peaked = scope\n\n minutes = team[\"goals\"][\"against\"][\"minute\"]\n goals = 0\n nadired = \"-\"\n for scope in minutes:\n bracket = minutes[scope][\"total\"]\n total = 0 if bracket is None else 
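The serializers above compute per-request flags with SerializerMethodField and the request passed via context. A stripped-down sketch of that DRF pattern with hypothetical model and field names:

```python
from rest_framework import serializers

class ArticleSerializer(serializers.Serializer):
    title = serializers.CharField()
    is_bookmarked = serializers.SerializerMethodField()

    def get_is_bookmarked(self, obj):
        # DRF resolves get_<field_name> automatically; the request
        # arrives through the serializer context.
        user = self.context["request"].user
        return user.is_authenticated and obj.bookmarks.filter(user=user).exists()

# Usage: ArticleSerializer(article, context={"request": request}).data
```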
int(bracket)\n if total > goals:\n goals = total\n nadired = scope\n\n return (\n interpretedURL, team[\"team\"][\"name\"], team[\"form\"],\n [\n team[\"fixtures\"][\"wins\"][\"total\"],\n team[\"fixtures\"][\"draws\"][\"total\"],\n team[\"fixtures\"][\"loses\"][\"total\"]\n ],\n team[\"goals\"][\"for\"][\"total\"][\"total\"], peaked,\n team[\"goals\"][\"against\"][\"total\"][\"total\"], nadired,\n team[\"biggest\"][\"streak\"][\"loses\"],\n team[\"failed_to_score\"][\"total\"],\n team[\"biggest\"][\"streak\"][\"wins\"],\n team[\"clean_sheet\"][\"total\"]\n )\n\n\nasync def all_players():\n query = {\n \"team\": tid,\n \"season\": api.season\n }\n\n playersJSON = await api.link(PLAYERS_URL, query)\n\n players = dict()\n for player in playersJSON:\n pid = player[\"player\"][\"id\"]\n pname = player[\"player\"][\"name\"]\n players[pid] = pname\n\n return players\n","repo_name":"yaqoah/kogoro","sub_path":"bot/proxy/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"70431238985","text":"import sqlite3\r\nimport cryptography\r\nfrom cryptography.fernet import Fernet\r\nimport random\r\nimport hashlib as hasher\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef rast():# function that builds a random text\r\n metin= \"\"\"We take revenge during goat-yoga class.\r\n A herd of ladies arrive each Saturday, and\r\n Friday night we gorge ourselves like horses\r\n before a race. We watch, waiting for one moment:\r\n table pose. When they’re on all fours, we snap\r\n into action: climb up on their backs, pose for\r\n a picture, then release the coffee bean waterfall\r\n right into their highlighted hair. The group\r\n screams! We feign ignorance. Who else can dominate\r\n humans this way and go unpunished? Serving goat\r\n cheese with wine tonight, Brittany? Ok. But remember\r\n you’re about to go into downward dog, and none of\r\n us are housebroken.\"\"\"\r\n\r\n x=metin.split()\r\n y=random.sample(x,20)\r\n yeni=\"\"\r\n for i in y:\r\n yeni=yeni+\" \"+str(i)\r\n return yeni \r\n \r\nsifreleyici=hasher.sha256()# shared hash object for message digests\r\n\r\ncon=sqlite3.connect(\"KVT.db\")# open the database connection\r\n\r\nkullaniciAd=input(\"Username: \")\r\nparola=input(\"Password: \")\r\n\r\nsorgu = \"SELECT id FROM kullanicilar where ad='{}'\". 
format(kullaniciAd)# get the id for the entered username\r\ncursor=con.cursor()\r\ncursor.execute(sorgu)\r\nid1= cursor.fetchone()\r\n\r\nsorgu1 = "SELECT id FROM kullanicilar where parola='{}'".format(parola)# get the id for the entered password\r\ncursor1=con.cursor()\r\ncursor1.execute(sorgu1)\r\nid2= cursor1.fetchone()\r\n\r\n\r\nif id1==None or id2==None:\r\n print('Username or password is incorrect!')\r\n\r\nelif id1==id2:# allow login when the two ids match\r\n print("Login successful")\r\n islem=input('''Select the operation you want to perform:\r\n 1.Send a message\r\n 2.View messages\\n ''')\r\n\r\n if(islem=="1"):\r\n \r\n alici=input("Enter the username you want to message: ")\r\n key=Fernet.generate_key()# generate a key\r\n f=Fernet(key)\r\n\r\n sorgu2="SELECT id FROM kullanicilar where ad='{}'".format(alici)# get the id of the recipient entered on screen\r\n cursor2=con.cursor()\r\n cursor2.execute(sorgu2)\r\n aliciID = cursor2.fetchone()\r\n\r\n if aliciID==None:\r\n print('No such user exists!')\r\n else:\r\n a=input("Press 1 to send the user a message.\\nPress 2 to send the user the randomly generated text.")\r\n \r\n if a=='1':\r\n mesaj=input("Enter the message to send: ")\r\n sifreleyici.update(mesaj.encode("utf-8"))# hash the message\r\n hashliMetin = sifreleyici.hexdigest()\r\n \r\n token = f.encrypt(mesaj.encode("utf-8"))# encrypt the message\r\n \r\n sorgu3y="""INSERT INTO anahtar(aliciId, gondericiId,anahtar) VALUES ({},{},"{}") """.format(aliciID[0],id1[0],key)# insert into the key table\r\n cursor3y=con.cursor()\r\n cursor3y.execute(sorgu3y)\r\n \r\n sorgug="""SELECT anahtarId FROM anahtar where anahtar="{}" """.format(key)# get the id of the key\r\n cursorg=con.cursor()\r\n cursorg.execute(sorgug)\r\n aId=cursorg.fetchone()\r\n \r\n # insert the message details into the messages table\r\n sorgu3="""INSERT INTO mesajlarTbl(anahtarId,alanID, gonderenID,mesajIcerik,ozet) VALUES ({},{},{},"{}","{}")""".format(aId[0],aliciID[0],id1[0],token,hashliMetin)\r\n cursor3=con.cursor()\r\n cursor3.execute(sorgu3)\r\n \r\n print("Message sent!")\r\n \r\n\r\n elif a=='2':\r\n x=rast()# generate a random message\r\n print("Generated message: {}".format(x))\r\n sifreleyici.update(x.encode("utf-8"))# hash the generated message\r\n hashliMetin = sifreleyici.hexdigest()\r\n \r\n token = f.encrypt(x.encode("utf-8"))# encrypt the generated message\r\n \r\n sorgu3y="""INSERT INTO anahtar(aliciId, gondericiId,anahtar) VALUES ({},{},"{}") """.format(aliciID[0],id1[0],key)# insert into the key table\r\n cursor3y=con.cursor()\r\n cursor3y.execute(sorgu3y)\r\n \r\n sorgug="""SELECT anahtarId FROM anahtar where anahtar="{}" """.format(key)# key id\r\n cursorg=con.cursor()\r\n cursorg.execute(sorgug)\r\n aId=cursorg.fetchone()\r\n\r\n # insert the message details into the messages table\r\n sorgu3="""INSERT INTO mesajlarTbl(anahtarId,alanID, gonderenID,mesajIcerik,ozet) VALUES ({},{},{},"{}","{}")""".format(aId[0],aliciID[0],id1[0],token,hashliMetin)\r\n cursor3=con.cursor()\r\n cursor3.execute(sorgu3)\r\n \r\n print("Message sent!")\r\n \r\n \r\n elif(islem=="2"):\r\n \r\n sorgu4="SELECT gonderenID FROM mesajlarTbl where alanID = '{}'".format(id1[0])\r\n\r\n cursor4=con.cursor()\r\n cursor4.execute(sorgu4)\r\n gonderenID=cursor4.fetchone()\r\n\r\n sorgukey="SELECT * FROM anahtar where aliciId = '{}'".format(id1[0])# get the incoming message's key from the key table
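The send path above pairs Fernet encryption with a SHA-256 digest so the receiver can detect tampering. A condensed round-trip of the same idea, hashing with a fresh object per message:

```python
import hashlib
from cryptography.fernet import Fernet

key = Fernet.generate_key()
f = Fernet(key)

message = "hello".encode("utf-8")
digest = hashlib.sha256(message).hexdigest()  # integrity fingerprint
token = f.encrypt(message)                    # confidentiality

# Receiver side: decrypt, then recompute and compare the digest.
plain = f.decrypt(token)
assert hashlib.sha256(plain).hexdigest() == digest, "message was altered"
print(plain.decode("utf-8"))  # -> hello
```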
\r\n cursorkey=con.cursor()\r\n cursorkey.execute(sorgukey)\r\n size=cursorkey.fetchall()\r\n for i in size:\r\n dbkey=i[3]\r\n dbkey=dbkey[1:]\r\n \r\n sorgu5="SELECT ad FROM kullanicilar where id='{}'".format(gonderenID[0])# get the name of the sender\r\n cursor5=con.cursor()\r\n cursor5.execute(sorgu5)\r\n for i in cursor5.fetchall():\r\n gonderen=str(i[0])\r\n print("Sender: " + gonderen)\r\n\r\n sorgu6="SELECT * FROM mesajlarTbl where alanID = '{}' ".format(id1[0])# get the message details from the messages table\r\n cursor6=con.cursor()\r\n cursor6.execute(sorgu6)\r\n \r\n for i in cursor6.fetchall():\r\n vtgMesaj=str(i[4])\r\n ozetliMesaj=str(i[5])\r\n \r\n vtgMesaj=vtgMesaj[1:]\r\n vtgAnahtar=Fernet(dbkey)# wrap the key from the database in Fernet\r\n mesajCoz=vtgAnahtar.decrypt(vtgMesaj.encode('utf-8'))# decrypt the message with the key\r\n print("Incoming message: {}".format(mesajCoz.decode("utf-8")))\r\n \r\n hashliMetin = hasher.sha256(mesajCoz).hexdigest()# re-hash the decrypted message with a fresh digest\r\n \r\n if ozetliMesaj!=hashliMetin:# compare the stored digest with the one recomputed here\r\n print("Message has been tampered with!")\r\n \r\n\r\nelse:\r\n print('Username or password is incorrect!')\r\ncon.commit()\r\ncon.close()# close the database connection\r\n","repo_name":"kubracomert/Messaging-App","sub_path":"message-app.py","file_name":"message-app.py","file_ext":"py","file_size_in_byte":7285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"39635470929","text":"import pygame\nimport asset\nimport functions\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, image):\n pygame.sprite.Sprite.__init__(self)\n #rect variables\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.y = 460\n self.rect_y_cap = 460\n self.rect.x = 1\n #movement variables\n self.vel = 6\n self.jump = False\n self.jumpcount = 12\n self.jumpcount_cap = 12\n self.jumpblocker = 0\n self.left = False\n self.right = True\n self.walkblocker = 0\n #combat variables\n self.shoot_bullet = False\n self.throwblocker = 0\n self.attack = False\n self.meleblocker = 0\n self.weapon_damage = 1\n self.health = 30\n self.health_cap = 30\n self.deathblocker = 0\n self.hit_loop = 0\n\n\n def draw(self, screen):\n if self.left:\n functions.identify_death(self, screen, 0)\n functions.identify_jump_attack(self, screen, 0)\n functions.identify_jump_throw(self, screen, 0)\n functions.identify_jump(self, screen, 0)\n functions.identify_attack(self, screen, 0)\n functions.identify_throw(self, screen, 0)\n functions.identify_walk(self, screen, 0)\n \n else:\n functions.identify_death(self, screen, 1)\n functions.identify_jump_attack(self, screen, 1)\n functions.identify_jump_throw(self, screen, 1)\n functions.identify_jump(self, screen, 1)\n functions.identify_attack(self, screen, 1)\n functions.identify_throw(self, screen, 1)\n functions.identify_walk(self, screen, 1)\n\n #pygame.draw.rect(screen, (250,0,0), self.rect, 2)\n \n def mele_enemy_collision(self, player, enemy_container):\n if self.attack == True:\n for enemy in enemy_container:\n if pygame.sprite.collide_rect(player, enemy):\n enemy.health -= 
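Stepping back to the messaging script that ends above: all of its SQL is assembled with str.format, which is open to SQL injection. A safer sketch of the same lookups using sqlite3 placeholders; the in-memory table here is hypothetical:

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE kullanicilar (id INTEGER PRIMARY KEY, ad TEXT, parola TEXT)")
con.execute("INSERT INTO kullanicilar (ad, parola) VALUES (?, ?)", ("alice", "s3cret"))

# "?" placeholders let the driver escape values instead of string formatting.
row = con.execute(
    "SELECT id FROM kullanicilar WHERE ad = ? AND parola = ?",
    ("alice", "s3cret"),
).fetchone()
print(row)  # -> (1,)
con.close()
```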
self.weapon_damage\n","repo_name":"rpast/impocalypse","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4268582952","text":"import pytest\nfrom unittest import mock\n\nfrom submission import tasks\n\n\n@mock.patch('submission.helpers.send_email')\ndef test_task_send_email(mock_send_email):\n kwargs = {\n 'subject': 'this thing',\n 'reply_to': ['reply@example.com'],\n 'recipients': ['to@example.com'],\n 'text_body': 'Hello',\n }\n tasks.send_email(submission_id=1, **kwargs)\n\n assert mock_send_email.call_count == 1\n assert mock_send_email.call_args == mock.call(**kwargs)\n\n\n@mock.patch('submission.helpers.create_zendesk_ticket')\ndef test_create_zendesk_ticket(mock_create_zendesk_ticket):\n kwargs = {\n 'subject': 'subject123',\n 'full_name': 'jim example',\n 'email_address': 'test@example.com',\n 'payload': {'field': 'value'},\n 'service_name': 'some-service',\n }\n tasks.create_zendesk_ticket(submission_id=1, **kwargs)\n\n assert mock_create_zendesk_ticket.call_count == 1\n assert mock_create_zendesk_ticket.call_args == mock.call(**kwargs)\n\n\n@mock.patch('submission.helpers.send_gov_notify_email')\ndef test_task_send_gov_notify_email(mock_send_gov_notify_email):\n kwargs = {\n 'subject': 'this thing',\n 'reply_to': ['reply@example.com'],\n 'recipients': ['to@example.com'],\n 'text_body': 'Hello',\n }\n tasks.send_gov_notify_email(submission_id=1, **kwargs)\n\n assert mock_send_gov_notify_email.call_count == 1\n assert mock_send_gov_notify_email.call_args == mock.call(**kwargs)\n\n\n@mock.patch('submission.helpers.send_gov_notify_letter')\ndef test_task_send_gov_notify_letter(mock_send_gov_notify_letter):\n kwargs = {\n 'template_id': '123456',\n 'personalisation': 'Hello',\n }\n tasks.send_gov_notify_letter(submission_id=1, **kwargs)\n\n assert mock_send_gov_notify_letter.call_count == 1\n assert mock_send_gov_notify_letter.call_args == mock.call(**kwargs)\n\n\n@mock.patch('submission.helpers.send_pardot')\ndef test_task_send_pardot(mock_send_pardot):\n kwargs = {\n 'pardot_url': 'http://www.example.com/some/submission/path/',\n 'payload': {'field': 'value'},\n }\n tasks.send_pardot(submission_id=1, **kwargs)\n\n assert mock_send_pardot.call_count == 1\n assert mock_send_pardot.call_args == mock.call(**kwargs)\n\n\n@pytest.mark.django_db\n@mock.patch('submission.helpers.send_gov_notify_email')\ndef test_task_send_buy_from_uk_enquiries_as_csv(mock_send_gov_notify_email):\n kwargs = {\n }\n tasks.send_buy_from_uk_enquiries_as_csv(**kwargs)\n\n assert mock_send_gov_notify_email.call_count == 1\n","repo_name":"uktrade/directory-forms-api","sub_path":"submission/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34013080320","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport plotly.express as px\nimport db as db\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output\n\n\napp = dash.Dash()\napp.config['suppress_callback_exceptions'] = True\napp.config.suppress_callback_exceptions = True\n\nlang_ms = db.key_ms[::2]\nvalue_ms = db.key_ms[1::2]\n\nlang_syd = db.key_syd[::2]\nvalue_syd = db.key_syd[1::2]\n\naurin_ms_lang = ['ar', 'de', 'el', 'it', 'zh']\naurin_ms_value = [11234, 13571, 
60919, 80456, 17984]\n\naurin_syd_lang = ['ar', 'de', 'el', 'it', 'zh']\naurin_syd_value = [40117, 13571, 44697, 49655, 29208]\n\ncolors = {\n 'background': '#111111',\n 'background2': '#cccccc',\n 'text': '#000000'\n}\n\napp.layout = html.Div(style={'backgroundColor': colors['background2']}, children=[\n html.Br(),\n\n html.H1(\n children='Melbourne/Sydney',\n style={\n 'textAlign': 'center',\n 'color': colors['text'],\n 'id':'graph',\n 'font-family':'Sans-serif',\n 'font-size':36\n }\n ),\n html.Div(children='Twitter', style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n html.Br(),\n html.Button('Bar Graph', id='button1',\n type='submit', n_clicks_timestamp=0),\n html.Button('Bar Graph 2', id='button2',\n type='submit', n_clicks_timestamp=0),\n html.Div(id='p_section'),\n html.Button('Melbourne Map', id='mapbutton1',\n type='submit', n_clicks_timestamp=0),\n html.Button('Sydney Map', id='mapbutton2',\n type='submit', n_clicks_timestamp=0),\n # dcc.Graph(\n # id='bar-graph',figure={})\n html.Br(),\n html.Br(),\n html.Div(id='map_area', style={\n 'position': 'absolute', 'top': '80%', 'left': '30%'}),\n\n\n])\n\n\n@app.callback(\n dash.dependencies.Output('p_section', 'children'),\n [dash.dependencies.Input('button1', 'n_clicks_timestamp'),\n Input('button2', 'n_clicks_timestamp')\n ])\ndef update_chart(btn1, btn2):\n btn_state = [int(btn1), int(btn2)]\n if all(v == 0 for v in btn_state):\n return html.H1(\n children=' ',\n style={\n 'textAlign': 'center',\n 'color': colors['text'],\n }\n )\n max_index = btn_state.index(max(i for i in btn_state if i is not None))\n if max_index == 0:\n fig = dcc.Graph(\n id='bar-graph',\n figure={\n 'data': [{'x': lang_ms, 'y': value_ms, 'type': 'bar', 'name': 'Melbourne'},\n {'x': lang_syd, 'y': value_syd, 'type': 'bar', 'name': u'Sydney'}],\n 'layout': {'title': 'Twitter Data', 'xaxis': dict(title='Language', titlefont=dict(family='Courier New, monospace', size=20, color='blue')),\n 'yaxis': dict(title='Tweet Count', titlefont=dict(family='Helvetica, monospace', size=20, color='blue'), range=[0, 1000], tick=20, autorange=False)}\n })\n return fig\n else:\n fig = dcc.Graph(\n id='bar-graph',\n figure={\n 'data': [{'x': aurin_ms_lang, 'y': aurin_ms_value, 'type': 'bar', 'name': 'Melbourne'},\n {'x': aurin_syd_lang, 'y': aurin_syd_value, 'type': 'bar', 'name': u'Sydney'}],\n 'layout': {'title': 'Aurin-Data', 'xaxis': dict(title='Language', titlefont=dict(family='Courier New, monospace', size=20, color='blue')),\n 'yaxis': dict(title='Tweet Count', titlefont=dict(family='Helvetica, monospace', size=20, color='blue'), range=[0, 50000], tick=20, autorange=False)}\n })\n return fig\n\n\n@app.callback(\n dash.dependencies.Output('map_area', 'children'),\n [dash.dependencies.Input('mapbutton1', 'n_clicks_timestamp'),\n Input('mapbutton2', 'n_clicks_timestamp')])\ndef update_chart(btn1, btn2):\n btn_state = [int(btn1), int(btn2)]\n if all(v == 0 for v in btn_state):\n return html.H1(\n children=' ',\n style={\n 'textAlign': 'center',\n 'color': colors['text'],\n }\n )\n max_index = btn_state.index(max(i for i in btn_state if i is not None))\n if max_index == 0:\n fig = html.Iframe(src=\"static/melbourne/melb.html\",\n height='700px', width='800px')\n return fig\n else:\n return html.Iframe(src=\"/static/Sydney/Sydney.html\", height='700px', width='800px')\n\n\nif __name__ == '__main__':\n app.run_server(debug=False, dev_tools_ui=False,\n 
dev_tools_props_check=False)\n","repo_name":"shivmistry605/COMP90024-28","sub_path":"Frontend/button_my.py","file_name":"button_my.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7257342417","text":"\r\n\"\"\"\r\n This gets the training data from the data folder and saves it to a large\r\n csv file for ease of use\r\n\"\"\"\r\nimport pandas as pd\r\nimport os\r\n\r\n# locations of data\r\n\r\ntraining_loc_b = \"/../medical data/training_setB/\"\r\ntraining_loc_a = \"/../medical data/training/\"\r\n\r\ndef copyData(data_folder_location, csv_file_location):\r\n DATA = pd.DataFrame()\r\n old_percent = 0\r\n df = pd.DataFrame()\r\n\r\n for index, filename in enumerate(os.listdir(data_folder_location)):\r\n temp_df = pd.read_csv(data_folder_location + filename, sep='|')\r\n df = pd.concat([df, temp_df]).reset_index(drop=True)\r\n\r\n percent = int((index/20336)*100)\r\n if percent > old_percent:\r\n old_percent = percent\r\n print(str(percent) + \"%\")\r\n\r\n\r\n df.to_csv(data_folder_location + csv_file_location,\r\n encoding='utf-8', index=False)\r\n\r\n return DATA\r\n\r\ndata_folder_location = os.getcwd() + training_loc_a\r\ncsv_file_location = '/../concated_training_data_A.csv'\r\n\r\nif not os.path.isfile(data_folder_location + csv_file_location):\r\n TRAIN_DATA_A = copyData(data_folder_location, csv_file_location)\r\n\r\ndata_folder_location = os.getcwd() + training_loc_b\r\ncsv_file_location = '/../concated_training_data_B.csv'\r\n\r\nif not os.path.isfile(data_folder_location + csv_file_location):\r\n TRAIN_DATA_B = copyData(data_folder_location, csv_file_location)\r\n\r\n\r\nelse:\r\n TRAIN_DATA_A = pd.read_csv(os.getcwd() + '/../medical data/concated_training_data_A.csv')\r\n TRAIN_DATA_B = pd.read_csv(os.getcwd() + '/../medical data/concated_training_data_B.csv')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"willmorcombe/Early-classification-of-sepsis","sub_path":"Data Preparation/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17312368652","text":"\nimport collections\nimport networkx as nx\nimport numpy as np \nfrom random import randint\n\nclass Board:\n\tdef __init__(self):\n\t\tself.matrix = np.zeros((2,3))\n\t\tself.matrix[0,0] = 5\n\t\tself.matrix[0,1] = 1\n\t\tself.matrix[0,2] = 0\n\t\tself.matrix[1,0] = 4\n\t\tself.matrix[1,1] = 3\n\t\tself.matrix[1,2] = 2\n\t\tself.goal = np.zeros((2,3))\n\t\tself.goal[0,0] = 0\n\t\tself.goal[0,1] = 1\n\t\tself.goal[0,2] = 2\n\t\tself.goal[1,0] = 3\n\t\tself.goal[1,1] = 4\n\t\tself.goal[1,2] = 5\n\n\t\tself.hamDistance = 5\n\t\tself.manDistance = 5\n\n\tdef __str__(self):\n\t\treturn np.array_str(self.matrix)\n\n\tdef hamming(self):\n\t\tself.hamming = 0\n\t\tfor i in range(1,2):\n\t\t\tfor j in range(1,3):\n\t\t\t\t\tif self.matrix[i,j] != self.goal[i,j]:\n\t\t\t\t\t\tself.hamming += 1\n\t\treturn self.hamDistance\n\n\tdef manhattan(self):\n\t\tself.manDistance = 0\n\t\tfor i in range(1,2):\n\t\t\tfor j in range(1,3):\n\t\t\t\t\tif self.matrix[i,j] != self.goal[i,j]:\n\t\t\t\t\t\tself.manDistance += i+j\n\t\treturn self.manDistance \n\nclass SolverDFS:\n\tdef __init__(self):\n\t\t\tself.stateGraph = nx.Graph()\n\n\tdef solve(self, board):\n\t\t\tself.stateGraph.add_node(board)\n\t\t\t\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n","repo_name":"fredericboileau/McGill","sub_path":"COMP- 
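# The loader above grows a DataFrame with pd.concat inside the loop, which
# re-copies all accumulated rows on every iteration; a common alternative
# (a sketch only - 'data_dir' is a placeholder path, and the files are
# '|'-separated like the originals) collects the frames and concatenates once.
import os
import pandas as pd

def load_folder(data_dir):
    frames = [pd.read_csv(os.path.join(data_dir, name), sep="|")
              for name in sorted(os.listdir(data_dir))]
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()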
AI/python-AI/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73135350345","text":"import datetime\nimport time\nimport pytz\nimport requests\nimport GlobalVariable\nimport JDServiceAPI\nimport NoticePush\nimport NoticeTemplate\nfrom urllib.parse import quote\n\n\n# 获取当天时间和当天积分\ndef todayPointIncome():\n today_total_point = 0\n today_date = \"\"\n res = requests.get(GlobalVariable.jd_base_url + \"todayPointIncome\", headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n result = res_json[\"result\"]\n today_total_point = result[\"todayTotalPoint\"]\n todayDate = result[\"todayDate\"]\n today_date = datetime.datetime.strptime(todayDate, \"%Y%m%d\").strftime(\"%Y年%m月%d日\")\n else:\n errorMessage = res.json()['error']['message']\n print(errorMessage)\n print(\"Request todayPointIncome failed!\")\n GlobalVariable.final_result[\"today_date\"] = today_date\n GlobalVariable.final_result[\"today_total_point\"] = str(today_total_point)\n return today_total_point\n\n\n# 获取今日总积分\ndef pinTotalAvailPoint():\n total_avail_point = 0\n res = requests.get(GlobalVariable.jd_base_url + \"pinTotalAvailPoint\", headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n result = res_json[\"result\"]\n total_avail_point = result[\"totalAvailPoint\"]\n else:\n print(\"Request pinTotalAvailPoint failed!\")\n GlobalVariable.final_result[\"total_avail_point\"] = str(total_avail_point)\n return total_avail_point\n\n\n# 路由账户信息\ndef routerAccountInfo(mac):\n params = {\n \"mac\": mac,\n }\n res = requests.get(GlobalVariable.jd_base_url + \"routerAccountInfo\", params=params, headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n result = res_json[\"result\"]\n accountInfo = result[\"accountInfo\"]\n mac = accountInfo[\"mac\"]\n amount = accountInfo[\"amount\"]\n bindAccount = accountInfo[\"bindAccount\"]\n GlobalVariable.service_headers[\"pin\"] = quote(bindAccount)\n recentExpireAmount = accountInfo[\"recentExpireAmount\"]\n recentExpireTime = accountInfo[\"recentExpireTime\"]\n recentExpireTime_str = datetime.datetime.fromtimestamp(recentExpireTime / 1000).strftime(\"%Y-%m-%d %H:%M:%S\")\n account_info = {\"amount\": str(amount), \"bindAccount\": str(bindAccount),\n \"recentExpireAmount\": str(recentExpireAmount), \"recentExpireTime\": recentExpireTime_str}\n index = GlobalVariable.findALocation(mac)\n if index != -1:\n point_info = GlobalVariable.final_result[\"pointInfos\"][index]\n point_info.update(account_info)\n else:\n print(\"Find mac failure!\")\n else:\n print(\"Request routerAccountInfo failed!\")\n\n\n# 路由活动信息\ndef routerActivityInfo(mac):\n params = {\n \"mac\": mac,\n }\n res = requests.get(GlobalVariable.jd_base_url + \"router:activityInfo\", params=params,\n headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n result = res_json[\"result\"]\n # finishActivity = result[\"finishActivity\"]\n if result[\"routerUnderwayResult\"] is None:\n exit\n else:\n totalIncomeValue = result[\"routerUnderwayResult\"][\"totalIncomeValue\"]\n satisfiedTimes = result[\"routerUnderwayResult\"][\"satisfiedTimes\"]\n activity_info = {\"mac\": mac, \"totalIncomeValue\": totalIncomeValue, \"satisfiedTimes\": satisfiedTimes}\n index = GlobalVariable.findALocation(mac)\n if index != -1:\n point_info = GlobalVariable.final_result[\"pointInfos\"][index]\n 
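# Corrected sketch of the two heuristics for the 2x3 board above: the original
# loops start at index 1 (so row 0 and column 0 are never checked), hamming()
# shadows itself with the instance attribute self.hamming = 0 and returns the
# untouched hamDistance, and manhattan() adds a mismatched cell's own
# coordinates rather than its distance to the goal position.
import numpy as np

def hamming(matrix, goal):
    # number of misplaced tiles, ignoring the blank (0)
    return int(np.sum((matrix != goal) & (matrix != 0)))

def manhattan(matrix, goal):
    dist = 0
    for tile in range(1, matrix.size):                 # skip the blank
        r1, c1 = (int(a[0]) for a in np.where(matrix == tile))
        r2, c2 = (int(a[0]) for a in np.where(goal == tile))
        dist += abs(r1 - r2) + abs(c1 - c2)
    return dist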
point_info.update(activity_info)\n else:\n print(\"Request routerActivityInfo failed!\")\n\n\n# 收益信息\ndef todayPointDetail():\n params = {\n \"sortField\": \"today_point\",\n \"sortDirection\": \"DESC\",\n \"pageSize\": \"30\",\n \"currentPage\": \"1\"\n }\n MACS = []\n res = requests.get(GlobalVariable.jd_base_url + \"todayPointDetail\", params=params, headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n result = res_json[\"result\"]\n todayDate = result[\"todayDate\"]\n totalRecord = result[\"pageInfo\"][\"totalRecord\"]\n pointInfos = result[\"pointInfos\"]\n GlobalVariable.final_result[\"todayDate\"] = datetime.datetime.strptime(todayDate, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n GlobalVariable.final_result[\"totalRecord\"] = str(totalRecord)\n GlobalVariable.final_result[\"pointInfos\"] = pointInfos\n for info in pointInfos:\n mac = info[\"mac\"]\n MACS.append(mac)\n routerActivityInfo(mac)\n routerAccountInfo(mac)\n pointOperateRecordsShow(mac)\n\n JDServiceAPI.getListAllUserDevices()\n\n for mac in MACS:\n JDServiceAPI.getControlDevice(mac, 2)\n JDServiceAPI.getControlDevice(mac, 3)\n else:\n errorMessage = res.json()['error']['message']\n print(errorMessage)\n print(\"Request todayPointDetail failed!\")\n\n\n# 点操作记录显示\ndef pointOperateRecordsShow(mac):\n params = {\n \"source\": 1,\n \"mac\": mac,\n \"pageSize\": GlobalVariable.records_num,\n \"currentPage\": 1\n }\n point_records = []\n res = requests.get(GlobalVariable.jd_base_url + \"pointOperateRecords:show\", params=params,\n headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n result = res_json[\"result\"]\n pointRecords = result[\"pointRecords\"]\n for pointRecord in pointRecords:\n recordType = pointRecord[\"recordType\"]\n pointAmount = pointRecord[\"pointAmount\"]\n createTime = pointRecord[\"createTime\"]\n createTime_str = datetime.datetime.fromtimestamp(createTime / 1000).strftime(\"%Y-%m-%d\")\n point_record = {\"recordType\": recordType, \"pointAmount\": pointAmount, \"createTime\": createTime_str}\n point_records.append(point_record)\n index = GlobalVariable.findALocation(mac)\n if index != -1:\n point_info = GlobalVariable.final_result[\"pointInfos\"][index]\n point_info.update({\"pointRecords\": point_records})\n else:\n print(\"Request pointOperateRecordsShow failed!\")\n\n\n# 解析设备名称\ndef resolveDeviceName(DEVICENAME):\n if \"\" == DEVICENAME:\n # print(\"未设置自定义设备名\")\n pass\n else:\n devicenames = DEVICENAME.split(\"&\")\n for devicename in devicenames:\n mac = devicename.split(\":\")[0]\n name = devicename.split(\":\")[1]\n GlobalVariable.device_name.update({mac: name})\n\n\n# 解析设备ip\ndef resolveDeviceIP(DEVICE_IP):\n if \"\" == DEVICE_IP:\n # print(\"未设置自定义IP\")\n pass\n else:\n deviceIPs = DEVICE_IP.split(\"&\")\n for deviceIP in deviceIPs:\n mac = deviceIP.split(\":\")[0]\n ip = deviceIP.split(\":\")[1]\n GlobalVariable.device_ip.update({mac: ip})\n\n\n# 检测更新\ndef checkForUpdates():\n remote_address = \"https://raw.githubusercontent.com/leifengwl/JDRouterPush/main/config.ini\"\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36\"\n }\n res = requests.get(url=remote_address, headers=GlobalVariable.headers)\n if res.status_code == 200:\n res_json = res.json()\n GlobalVariable.final_result[\"announcement\"] = res_json[\"announcement\"]\n if res_json[\"version\"] != GlobalVariable.version:\n GlobalVariable.final_result[\"updates_version\"] = 
res_json[\"version\"]\n GlobalVariable.final_result[\"update_log\"] = res_json[\"updateLog\"]\n else:\n print(\"欢迎使用JDRouterPush!\")\n else:\n print(\"checkForUpdate failed!\")\n\n\n# region 通知结果\n\n# 结果显示\ndef resultDisplay():\n today_date = GlobalVariable.final_result[\"today_date\"]\n today_total_point = GlobalVariable.final_result[\"today_total_point\"]\n title = today_date + \"到账积分:\" + today_total_point\n if GlobalVariable.final_result.get(\"todayDate\") is None:\n push(\"信息获取失败,无权限\", \"请检查wskey是否有效\")\n return\n todayDate = GlobalVariable.final_result[\"todayDate\"]\n total_avail_point = GlobalVariable.final_result[\"total_avail_point\"]\n totalRecord = GlobalVariable.final_result[\"totalRecord\"]\n pointInfos = GlobalVariable.final_result[\"pointInfos\"]\n content = \"\"\n point_infos = \"\"\n bindAccount = \"\"\n # 更新检测\n if GlobalVariable.final_result.get(\"updates_version\"):\n content = content + \"**JDRouterPush更新提醒:**\" \\\n + \"\\n```\\n最新版:\" + GlobalVariable.final_result[\"updates_version\"] \\\n + \" 当前版本:\" + GlobalVariable.version\n if GlobalVariable.final_result.get(\"update_log\"):\n content = content + \"\\n\" + GlobalVariable.final_result[\"update_log\"] + \"\\n```\"\n if GlobalVariable.final_result.get(\"announcement\"):\n content = content + \"\\n> \" + GlobalVariable.final_result[\"announcement\"] + \" \\n\\n\"\n for pointInfo in pointInfos:\n mac = pointInfo[\"mac\"]\n todayPointIncome = pointInfo.get(\"todayPointIncome\",\"获取失败\")\n allPointIncome = pointInfo.get(\"allPointIncome\",\"获取失败\")\n amount = pointInfo.get(\"amount\",\"获取失败\")\n bindAccount = pointInfo.get(\"bindAccount\",\"获取失败\")\n recentExpireAmount = pointInfo.get(\"recentExpireAmount\",\"获取失败\")\n recentExpireTime = pointInfo.get(\"recentExpireTime\",\"获取失败\")\n satisfiedTimes = \"\"\n if pointInfo.get(\"satisfiedTimes\"):\n satisfiedTimes = pointInfo[\"satisfiedTimes\"]\n\n point_infos += \"\\n\" + \"- \" + GlobalVariable.device_name.get(str(mac[-6:]), GlobalVariable.device_list[mac][\n \"device_name\"]) + \"==>\" \\\n + \"\\n - 今日积分:\" + str(todayPointIncome) \\\n + \"\\n - 可用积分:\" + str(amount) \\\n + \"\\n - 总收积分:\" + str(allPointIncome)\n if satisfiedTimes != \"\":\n point_infos += \"\\n - 累计在线:\" + str(satisfiedTimes) + \"天\"\n if pointInfo.get(\"runInfo\"):\n point_infos += \"\\n - 当前网速:\" + pointInfo[\"speed\"] \\\n + \"\\n - 当前IP:\" + pointInfo[\"wanip\"] \\\n + \"\\n - 当前模式:\" + pointInfo[\"model\"] \\\n + \"\\n - 固件版本:\" + pointInfo[\"rom\"]\n if pointInfo.get(\"pluginInfo\"):\n point_infos += \"\\n - 插件状态:\" + pointInfo[\"status\"] \\\n + \"\\n - 缓存大小:\" + pointInfo[\"cache_size\"]\n point_infos += \"\\n - 在线时间:\" + pointInfo.get(\"onlineTime\", \"---\") \\\n + \"\\n - 最近到期积分:\" + str(recentExpireAmount) \\\n + \"\\n - 最近到期时间:\" + recentExpireTime \\\n + \"\\n - 最近\" + str(GlobalVariable.records_num) + \"条记录:\"\n pointRecords = pointInfo[\"pointRecords\"]\n if pointInfo.get(\"pointRecords\") is not None:\n for pointRecord in pointRecords:\n recordType = pointRecord[\"recordType\"]\n recordType_str = \"\"\n if recordType == 1:\n recordType_str = \"积分收入:\"\n else:\n recordType_str = \"积分支出:\"\n pointAmount = pointRecord[\"pointAmount\"]\n createTime = pointRecord[\"createTime\"]\n point_infos = point_infos + \"\\n - \" + \\\n createTime + \" \" + recordType_str + str(pointAmount)\n notifyContentJson = {\"content\": content, \"date\": todayDate, \"total_today\": today_total_point,\n \"avail_today\": total_avail_point, \"account\": bindAccount, \"devicesCount\": totalRecord,\n 
\"detail\": point_infos}\n\n push(title,notifyContentJson)\n\ndef push(title,content):\n\n if isinstance(content,str):\n markdownContent = content\n normalContent = content\n else:\n # mk模板\n markdownContent = NoticeTemplate.markdownTemplate().format(**content)\n # 普通模板\n normalContent = NoticeTemplate.normalTemplate().format(**content)\n\n NoticePush.server_push(title, markdownContent.replace(\"- ***\", \"```\"))\n NoticePush.push_plus(title, markdownContent)\n # print(\"标题->\", title)\n # print(\"内容->\\n\", markdownContent)\n\n\n NoticePush.telegram_bot(title, normalContent)\n NoticePush.bark(title, normalContent)\n NoticePush.enterprise_wechat(title, normalContent)\n\n\n # 信息输出测试\n print(\"标题->\", title)\n # print(\"内容->\\n\", normalContent)\n\n# endregion\n\n# 处理IP\ndef handleIP(wanip, ipSegment):\n print(\"当前IP:%s ===> 期待IP:%s\" % (wanip, ipSegment))\n wanip_list = wanip.split(\".\")\n ipSegment_list = ipSegment.split(\".\")\n for wanip, ipSegment in zip(wanip_list, ipSegment_list):\n if wanip == ipSegment or ipSegment == \"*\":\n pass\n else:\n if \"<\" in ipSegment:\n ip = ipSegment.split(\"<\")[1]\n if int(wanip) >= int(ip):\n return False\n elif \">\" in ipSegment:\n ip = ipSegment.split(\">\")[1]\n if int(wanip) <= int(ip):\n return False\n else:\n return False\n return True\n\n\n# ip切换\ndef networkSegmentSwitch():\n resolveDeviceIP(GlobalVariable.NETWORK_SEGMENT)\n todayPointDetail()\n if GlobalVariable.final_result.get(\"pointInfos\"):\n pointInfos = GlobalVariable.final_result[\"pointInfos\"]\n for pointInfo in pointInfos:\n mac = pointInfo[\"mac\"]\n wanip = pointInfo[\"wanip\"]\n if GlobalVariable.device_ip.get(str(mac[-6:])) is not None:\n ipSegment = GlobalVariable.device_ip.get(str(mac[-6:]))\n if handleIP(wanip, ipSegment):\n print(\"ip段符合\")\n else:\n print(\"IP段不符合\")\n # 重启路由器\n JDServiceAPI.getControlDevice(mac, 4)\n print(\"等待重启。。。\")\n time.sleep(30)\n raise Exception('重新启动')\n else:\n raise Exception('获取IP失败')\n\n\n# 主操作\ndef main():\n if GlobalVariable.RECORDSNUM.isdigit():\n GlobalVariable.records_num = int(GlobalVariable.RECORDSNUM)\n resolveDeviceName(GlobalVariable.DEVICENAME)\n checkForUpdates()\n todayPointIncome()\n pinTotalAvailPoint()\n todayPointDetail()\n resultDisplay()\n\n\n# endregion\n\ndef runTest(i):\n if i > 10:\n return\n try:\n if GlobalVariable.WSKEY is None or GlobalVariable.WSKEY.strip() == '':\n print(\"未获取到环境变量'WSKEY',执行中止\")\n return\n GlobalVariable.headers[\"wskey\"] = GlobalVariable.WSKEY\n GlobalVariable.service_headers[\"tgt\"] = GlobalVariable.WSKEY\n if GlobalVariable.NETWORK_SEGMENT is None or GlobalVariable.NETWORK_SEGMENT.strip() == '':\n main()\n else:\n hourNow = datetime.datetime.now(pytz.timezone('PRC')).hour\n if hourNow < 6:\n print(\"当前时间小于6点,执行IP切换\")\n networkSegmentSwitch()\n else:\n print(\"当前时间大于6点,执行信息推送\")\n main()\n except Exception as e:\n print(\"出现错误:\", e)\n print(\"准备重新执行...\")\n time.sleep(3)\n runTest(++i)\n\n\n# 读取配置文件\nif __name__ == '__main__':\n runTest(0)\n\n","repo_name":"leifengwl/JDRouterPush","sub_path":"JDRouterPush.py","file_name":"JDRouterPush.py","file_ext":"py","file_size_in_byte":15925,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"81"} +{"seq_id":"3767091548","text":"import os\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom pathlib import Path\nimport torch.utils.data as data\n\ndef tensor_load_rgbimage(filename, size=None, scale=None, keep_asp=False):\n img = Image.open(filename).convert('RGB')\n if size is not None:\n if 
isinstance(size, tuple):\n img = img.resize(size, Image.ANTIALIAS)\n elif keep_asp:\n size2 = int(size * 1.0 / img.size[0] * img.size[1])\n img = img.resize((size, size2), Image.ANTIALIAS)\n else:\n img = img.resize((size, size), Image.ANTIALIAS)\n\n elif scale is not None:\n img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)\n img = np.array(img).transpose(2, 0, 1)\n img = torch.from_numpy(img).float()\n return img\n\n\ndef tensor_save_rgbimage(tensor, filename, cuda=False):\n if cuda:\n img = tensor.clone().cpu() * 255\n else:\n img = tensor.clone() * 255\n img = img.clamp(0, 255).numpy()\n img = img.transpose(1, 2, 0).astype('uint8')\n img = Image.fromarray(img)\n img.save(filename)\n\n\ndef tensor_save_bgrimage(tensor, filename, cuda=False):\n (b, g, r) = torch.chunk(tensor, 3)\n tensor = torch.cat((r, g, b))\n tensor_save_rgbimage(tensor, filename, cuda)\n\n\ndef gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / (ch * h * w)\n return gram\n\n\ndef subtract_imagenet_mean_batch(batch):\n \"\"\"Subtract ImageNet mean pixel-wise from a BGR image.\"\"\"\n tensortype = type(batch.data)\n mean = tensortype(batch.data.size())\n mean[:, 0, :, :] = 103.939\n mean[:, 1, :, :] = 116.779\n mean[:, 2, :, :] = 123.680\n if batch.is_cuda:\n mean = mean.cuda()\n return batch - mean\n\n\ndef add_imagenet_mean_batch(batch):\n \"\"\"Add ImageNet mean pixel-wise from a BGR image.\"\"\"\n tensortype = type(batch.data)\n mean = tensortype(batch.data.size())\n mean[:, 0, :, :] = 103.939\n mean[:, 1, :, :] = 116.779\n mean[:, 2, :, :] = 123.680\n return batch + mean\n\ndef imagenet_clamp_batch(batch, low, high):\n batch[:,0,:,:].data.clamp_(low-103.939, high-103.939)\n batch[:,1,:,:].data.clamp_(low-116.779, high-116.779)\n batch[:,2,:,:].data.clamp_(low-123.680, high-123.680)\n\n\ndef preprocess_batch(batch):\n batch = batch.transpose(0, 1)\n (r, g, b) = torch.chunk(batch, 3)\n batch = torch.cat((b, g, r))\n batch = batch.transpose(0, 1)\n return batch\n\nclass FlatFolderDataset(data.Dataset):\n def __init__(self, root, transform):\n super(FlatFolderDataset, self).__init__()\n self.root = root\n self.paths = list(Path(self.root).glob('*'))\n self.transform = transform\n\n def __getitem__(self, index):\n path = self.paths[index]\n img = Image.open(str(path)).convert('RGB')\n img = self.transform(img)\n return img\n\n def __len__(self):\n return len(self.paths)\n\n def name(self):\n return 'FlatFolderDataset'\n \n# def init_vgg16(model_folder):\n# \"\"\"load the vgg16 model feature\"\"\"\n# if not os.path.exists(os.path.join(model_folder, 'vgg16.weight')):\n# if not os.path.exists(os.path.join(model_folder, 'vgg16.t7')):\n# os.system(\n# 'wget http://cs.stanford.edu/people/jcjohns/fast-neural-style/models/vgg16.t7 -O ' + os.path.join(model_folder, 'vgg16.t7'))\n# vgglua = load_lua(os.path.join(model_folder, 'vgg16.t7'))\n# vgg = Vgg16()\n# for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):\n# dst.data[:] = src\n# torch.save(vgg.state_dict(), os.path.join(model_folder, 'vgg16.weight'))\n\n\nclass StyleLoader():\n def __init__(self, style_folder, style_size, batch_size, cuda=True):\n self.folder = style_folder\n self.style_size = style_size\n self.files = os.listdir(style_folder)\n self.cuda = cuda\n self.batch_size = batch_size\n \n def get(self, i):\n idx = i%len(self.files)\n filepath = os.path.join(self.folder, self.files[idx])\n style = 
tensor_load_rgbimage(filepath, self.style_size) \n style = style.unsqueeze(0).repeat(self.batch_size, 1, 1, 1)\n style = preprocess_batch(style)\n if self.cuda:\n style = style.cuda()\n return style\n # style_v = Variable(style, requires_grad=False)\n # return style_v\n\n def size(self):\n return len(self.files)\n","repo_name":"StanwieCB/stanford-cs221-proj","sub_path":"utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34665593623","text":"#!/usr/bin/env python3\n\nimport rlcard\nfrom rlcard.models import limitholdem_rule_models\nfrom rlcard.utils import set_global_seed, tournament\nfrom mcts.mcts_agent import MCTS_Agent\nfrom rlcard.utils import Logger\nimport pandas as pd\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser(description='MCTS values')\nparser.add_argument('-d', metavar='duration', type=float, action='store',\n help='duration for agent to search')\nparser.add_argument('-e', metavar='explore', type=float,\n help='exploration parameter')\nparser.add_argument('-n', metavar='name', type=str,\n help='name for output dir')\nparser.add_argument('-ma', metavar='model_action', type=str,\n help='model action file path')\nparser.add_argument('-mh', metavar='model_hand_rank', type=str,\n help='model hand rank file path')\n\nargs = parser.parse_args()\nname = args.n\nduration = args.d\nexplore = args.e\nmodel_action = args.ma\nmodel_hand_rank = args.mh\n\n# Make environment\nenv = rlcard.make('limit-holdem', config={'seed': 0})\neval_env = rlcard.make('limit-holdem', config={'seed': 10})\n#episode_num = 5\nnum_tournaments = 25\n# episode_num = 100\n# evaluate_every = 10\nevaluate_num = 1000\n\nlog_dir = name\nlogger = Logger(log_dir)\n\n\n# Set a global seed\nset_global_seed(0)\n\n# Set up agents\nagent1 = limitholdem_rule_models.LimitholdemRuleAgentV1()\nagent2 = MCTS_Agent(action_num=env.action_num, duration=duration, exploration=explore,\n model_action=model_action, model_hand_rank=model_hand_rank)\nenv.set_agents([agent2, agent1])\neval_env.set_agents([agent2, agent1])\n\n\nfor i in range(num_tournaments):\n logger.log_performance(i * 10, tournament(eval_env, evaluate_num)[0])\n\n# for episode in range(episode_num):\n#\n# # Generate data from the environment\n# trajectories, _ = env.run(is_training=True)\n#\n# # print(trajectories)\n#\n# # Evaluate the performance. 
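# Quick shape check for the Gram-matrix helper defined above: for a batch of
# feature maps shaped (b, ch, h, w) it returns (b, ch, ch), normalised by
# ch * h * w, which is what a style-loss comparison expects.
import torch

def gram_matrix(y):
    b, ch, h, w = y.size()
    features = y.view(b, ch, h * w)
    return features.bmm(features.transpose(1, 2)) / (ch * h * w)

features = torch.randn(2, 64, 32, 32)
assert gram_matrix(features).shape == (2, 64, 64)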
Play with random agents.\n# if episode % evaluate_every == 0:\n# logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])\n\n# Close files in the logger\nlogger.close_files()\n\npd.DataFrame.to_csv(agent2.action_df, os.path.join(log_dir, 'action.csv'))\n\n# Plot the learning curve\nlogger.plot(name)\n","repo_name":"zachwdawson/zbot","sub_path":"zbot/mcts/simulations/rule_based_test.py","file_name":"rule_based_test.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73839491464","text":"import abc\nimport re\n\nimport numpy as np\nfrom sklearn.tree import _tree\nfrom sklearn.tree._export import _BaseTreeExporter\nfrom sklearn.tree._reingold_tilford import buchheim, Tree\nfrom sklearn.tree import _criterion\nfrom numbers import Integral\n\n\nclass TreeProxy(object):\n \"\"\"Attach node_dict attr for any tree object\"\"\"\n\n def __init__(self, tree, nodes_ext):\n self.tree = tree\n self.nodes_ext = nodes_ext\n\n def __getattribute__(self, name):\n if name == 'nodes_ext':\n return object.__getattribute__(self, \"nodes_ext\")\n else:\n self_tree = object.__getattribute__(self, \"tree\")\n return object.__getattribute__(self_tree, name)\n\n\ndef _color_brew(n):\n \"\"\"Generate n colors with equally spaced hues.\n Parameters\n ----------\n n : int\n The number of colors required.\n Returns\n -------\n color_list : list, length n\n List of n tuples of form (R, G, B) being the components of each color.\n \"\"\"\n color_list = []\n\n # Initialize saturation & value; calculate chroma & value shift\n s, v = 0.75, 0.9\n c = s * v\n m = v - c\n\n for h in np.arange(25, 385, 360. / n).astype(int):\n # Calculate some intermediate values\n h_bar = h / 60.\n x = c * (1 - abs((h_bar % 2) - 1))\n # Initialize RGB with same hue & chroma as our color\n rgb = [(c, x, 0),\n (x, c, 0),\n (0, c, x),\n (0, x, c),\n (x, 0, c),\n (c, 0, x),\n (c, x, 0)]\n r, g, b = rgb[int(h_bar)]\n # Shift the initial RGB values to match value and store\n rgb = [(int(255 * (r + m))),\n (int(255 * (g + m))),\n (int(255 * (b + m)))]\n color_list.append(rgb)\n\n return color_list\n\n\n# MPL\nclass _TreeExporter(_BaseTreeExporter):\n \"\"\"\n Fix sklearn convert color bug and add text label on edge of decision tree\n \"\"\"\n def __init__(\n self,\n max_depth=None,\n feature_names=None,\n treatment_names=None,\n class_names=None,\n label=\"all\",\n filled=False,\n impurity=True,\n node_ids=False,\n proportion=False,\n rounded=False,\n precision=3,\n fontsize=None,\n include_uncertainty=False,\n uncertainty_level=0.1,\n ):\n\n super().__init__(\n max_depth=max_depth,\n feature_names=feature_names,\n class_names=class_names,\n label=label,\n filled=filled,\n impurity=impurity,\n node_ids=node_ids,\n proportion=proportion,\n rounded=rounded,\n precision=precision,\n )\n self.fontsize = fontsize\n\n # validate\n if isinstance(precision, Integral):\n if precision < 0:\n raise ValueError(\n \"'precision' should be greater or equal to 0.\"\n \" Got {} instead.\".format(precision)\n )\n else:\n raise ValueError(\n \"'precision' should be an integer. 
Got {} instead.\".format(\n type(precision)\n )\n )\n\n # The depth of each node for plotting with 'leaf' option\n self.ranks = {\"leaves\": []}\n # The colors to render each node with\n self.colors = {\"bounds\": None}\n\n self.characters = [\"#\", \"[\", \"]\", \"<=\", \"\\n\", \"\", \"\"]\n self.bbox_args = dict()\n if self.rounded:\n self.bbox_args[\"boxstyle\"] = \"round\"\n\n self.arrow_args = dict(arrowstyle=\"<-\")\n\n self.include_uncertainty = include_uncertainty\n self.uncertainty_level = uncertainty_level\n self.treatment_names = treatment_names\n\n def get_color(self, value):\n # Find the appropriate color & intensity for a node\n if self.colors[\"bounds\"] is None:\n # Classification tree\n color = list(self.colors[\"rgb\"][np.argmax(value)])\n sorted_values = sorted(value, reverse=True)\n if len(sorted_values) == 1:\n alpha = 0\n else:\n alpha = (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1])\n else:\n # Regression tree or multi-output\n color = list(self.colors[\"rgb\"][0])\n alpha = (value - self.colors[\"bounds\"][0]) / (\n self.colors[\"bounds\"][1] - self.colors[\"bounds\"][0]\n )\n # unpack numpy scalars\n alpha = float(alpha)\n # compute the color as alpha against white\n color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]\n # Return html color code in #RRGGBB format\n return \"#%02x%02x%02x\" % tuple(color) # Fix color format error\n\n def _make_tree(self, node_id, et, criterion, nodes_ext, depth=0):\n # traverses _tree.Tree recursively, builds intermediate\n # \"_reingold_tilford.Tree\" object\n name = self.node_to_str(TreeProxy(et, nodes_ext=nodes_ext), node_id, criterion=criterion)\n if et.children_left[node_id] != _tree.TREE_LEAF and (\n self.max_depth is None or depth <= self.max_depth\n ):\n children = [\n self._make_tree(\n et.children_left[node_id], et, criterion, depth=depth + 1, nodes_ext=nodes_ext\n ),\n self._make_tree(\n et.children_right[node_id], et, criterion, depth=depth + 1, nodes_ext=nodes_ext\n ),\n ]\n else:\n return Tree(name, node_id)\n return Tree(name, node_id, *children)\n\n def recurse(self, node, tree, ax, max_x, max_y, text_pos, depth=0):\n import matplotlib.pyplot as plt\n\n kwargs = dict(\n bbox=self.bbox_args.copy(),\n ha=\"center\",\n va=\"center\",\n zorder=100 - 10 * depth,\n xycoords=\"axes fraction\",\n arrowprops=self.arrow_args.copy(),\n )\n kwargs[\"arrowprops\"][\"edgecolor\"] = plt.rcParams[\"text.color\"]\n\n if self.fontsize is not None:\n kwargs[\"fontsize\"] = self.fontsize\n\n # offset things by .5 to center them in plot\n xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y)\n\n if self.max_depth is None or depth <= self.max_depth:\n if self.filled:\n kwargs[\"bbox\"][\"fc\"] = self.get_fill_color(tree, node.tree.node_id)\n else:\n kwargs[\"bbox\"][\"fc\"] = ax.get_facecolor()\n\n if node.parent is None:\n # root\n ax.annotate(node.tree.label, xy, **kwargs)\n else:\n xy_parent = (\n (node.parent.x + 0.5) / max_x,\n (max_y - node.parent.y - 0.5) / max_y,\n )\n ax.annotate(node.tree.label, xy_parent, xy, **kwargs)\n\n text_pos_mapping = {\n 1: ('yes', 'right', -0.015),\n -1: ('no', 'left', 0.015)\n # 0: ('center', 0, '')\n }\n\n if text_pos in [1, -1]:\n text_pos_config = text_pos_mapping[text_pos]\n ax.text((xy_parent[0] - xy[0])/2 + xy[0] + text_pos_config[2],\n (xy_parent[1] - xy[1])/2 + xy[1],\n text_pos_config[0], va=\"center\", ha=text_pos_config[1], rotation=0)\n\n n_children = len(node.children)\n for i, child in enumerate(node.children):\n if i == 0:\n next_text_pos = 1\n 
elif i == n_children - 1:\n next_text_pos = -1\n else:\n next_text_pos = 0\n self.recurse(child, tree, ax, max_x, max_y, text_pos=next_text_pos, depth=depth + 1)\n\n else:\n xy_parent = (\n (node.parent.x + 0.5) / max_x,\n (max_y - node.parent.y - 0.5) / max_y,\n )\n kwargs[\"bbox\"][\"fc\"] = \"grey\"\n ax.annotate(\"\\n (...) \\n\", xy_parent, xy, **kwargs)\n\n def export(self, decision_tree, node_dict, ax=None):\n\n import matplotlib.pyplot as plt\n from matplotlib.text import Annotation\n if ax is None:\n ax = plt.gca()\n ax.clear()\n ax.set_axis_off()\n my_tree = self._make_tree(0, decision_tree.tree_, decision_tree.criterion, nodes_ext=node_dict)\n draw_tree = buchheim(my_tree)\n\n # important to make sure we're still\n # inside the axis after drawing the box\n # this makes sense because the width of a box\n # is about the same as the distance between boxes\n max_x, max_y = draw_tree.max_extents() + 1\n ax_width = ax.get_window_extent().width\n ax_height = ax.get_window_extent().height\n\n scale_x = ax_width / max_x\n scale_y = ax_height / max_y\n self.recurse(draw_tree, decision_tree.tree_, ax, max_x, max_y, text_pos=0) # update inoke recurse\n\n anns = [ann for ann in ax.get_children() if isinstance(ann, Annotation)]\n\n # update sizes of all bboxes\n renderer = ax.figure.canvas.get_renderer()\n\n for ann in anns:\n ann.update_bbox_position_size(renderer)\n\n if self.fontsize is None:\n # get figure to data transform\n # adjust fontsize to avoid overlap\n # get max box width and height\n extents = [ann.get_bbox_patch().get_window_extent() for ann in anns]\n max_width = max([extent.width for extent in extents])\n max_height = max([extent.height for extent in extents])\n # width should be around scale_x in axis coordinates\n size = anns[0].get_fontsize() * min(\n scale_x / max_width, scale_y / max_height\n )\n for ann in anns:\n ann.set_fontsize(size)\n\n return anns\n\n @abc.abstractmethod\n def node_replacement_text(self, tree, node_id, criterion):\n raise NotImplemented\n\n def node_to_str(self, tree, node_id, criterion):\n text = super().node_to_str(tree, node_id, criterion)\n replacement = self.node_replacement_text(tree, node_id, criterion)\n if replacement is not None:\n # HACK: it's not optimal to use a regex like this, but the base class's node_to_str doesn't expose any\n # clean way of achieving this\n text = re.sub(\"value = .*(?=\" + re.escape(self.characters[5]) + \")\",\n # make sure we don't accidentally escape anything in the substitution\n replacement.replace('\\\\', '\\\\\\\\'),\n text,\n flags=re.S)\n return text\n\n\nclass _CateTreeExporter(_TreeExporter):\n\n def __init__(self, include_uncertainty=False, uncertainty_level=0.1,\n *args, treatment_names=None, **kwargs):\n self.include_uncertainty = include_uncertainty\n self.uncertainty_level = uncertainty_level\n self.treatment_names = treatment_names\n super().__init__(*args, **kwargs)\n\n def get_fill_color(self, tree, node_id):\n\n # Fetch appropriate color for node\n if 'rgb' not in self.colors:\n # red for negative, green for positive\n self.colors['rgb'] = [(233, 150, 60), (6, 42, 220)]\n\n # in multi-target use mean of targets\n tree_min = np.min(np.mean(tree.value, axis=1)) - 1e-12\n tree_max = np.max(np.mean(tree.value, axis=1)) + 1e-12\n\n node_val = np.mean(tree.value[node_id])\n\n if node_val > 0:\n value = [max(0, tree_min) / tree_max, node_val / tree_max]\n elif node_val < 0:\n value = [node_val / tree_min, min(0, tree_max) / tree_min]\n else:\n value = [0, 0]\n\n return self.get_color(value)\n\n def 
node_replacement_text(self, tree, node_id, criterion):\n # Write node mean CATE\n node_info = tree.nodes_ext[node_id]\n node_string = '------CATE mean------' + self.characters[4]\n value_text = \"\"\n mean = node_info['mean']\n if hasattr(mean, 'shape') and (len(mean.shape) > 0):\n if len(mean.shape) == 1:\n for i in range(mean.shape[0]):\n value_text += \"{}\".format(np.around(mean[i], self.precision))\n if 'ci' in node_info:\n value_text += \" ({}, {})\".format(np.around(node_info['ci'][0][i], self.precision),\n np.around(node_info['ci'][1][i], self.precision))\n if i != mean.shape[0] - 1:\n value_text += \", \"\n value_text += self.characters[4]\n elif len(mean.shape) == 2:\n for i in range(mean.shape[0]):\n for j in range(mean.shape[1]):\n value_text += \"{}\".format(np.around(mean[i, j], self.precision))\n if 'ci' in node_info:\n value_text += \" ({}, {})\".format(np.around(node_info['ci'][0][i, j], self.precision),\n np.around(node_info['ci'][1][i, j], self.precision))\n if j != mean.shape[1] - 1:\n value_text += \", \"\n value_text += self.characters[4]\n else:\n raise ValueError(\"can only handle up to 2d values\")\n else:\n value_text += \"{}\".format(np.around(mean, self.precision))\n if 'ci' in node_info:\n value_text += \" ({}, {})\".format(np.around(node_info['ci'][0], self.precision),\n np.around(node_info['ci'][1], self.precision))\n value_text += self.characters[4]\n node_string += value_text\n\n # Write node std of CATE\n node_string += \"------CATE std------\" + self.characters[4]\n std = node_info['std']\n value_text = \"\"\n if hasattr(std, 'shape') and (len(std.shape) > 0):\n if len(std.shape) == 1:\n for i in range(std.shape[0]):\n value_text += \"{}\".format(np.around(std[i], self.precision))\n if i != std.shape[0] - 1:\n value_text += \", \"\n elif len(std.shape) == 2:\n for i in range(std.shape[0]):\n for j in range(std.shape[1]):\n value_text += \"{}\".format(np.around(std[i, j], self.precision))\n if j != std.shape[1] - 1:\n value_text += \", \"\n if i != std.shape[0] - 1:\n value_text += self.characters[4]\n else:\n raise ValueError(\"can only handle up to 2d values\")\n else:\n value_text += \"{}\".format(np.around(std, self.precision))\n node_string += value_text\n return node_string\n\n\nclass _PolicyTreeMPLExporter(_TreeExporter):\n\n def __init__(self, *args, show_all_treatments=True, **kwargs):\n self.show_all_treatments = show_all_treatments\n super().__init__(*args, **kwargs)\n\n self.node_dict = None\n\n def get_fill_color(self, tree, node_id):\n if 'rgb' not in self.colors:\n self.colors['rgb'] = _color_brew(tree.n_outputs) # [(179, 108, 96), (81, 157, 96)]\n\n node_val = tree.value[node_id][:, 0]\n node_val = node_val - np.min(node_val)\n if np.max(node_val) > 0:\n node_val = node_val / np.max(node_val)\n return self.get_color(node_val)\n\n def ensure_treatments(self, value):\n if self.treatment_names is not None:\n return self.treatment_names\n else:\n return [\"T%s%s%s\" % (self.characters[1], i,self.characters[2]) for i in range(len(value))]\n\n def node_replacement_text(self, tree, node_id, criterion):\n # NOTE does not calc node_dict yet\n # if self.node_dict is not None:\n # return self._node_replacement_text_with_dict(tree, node_id, criterion)\n value = tree.value[node_id][:, 0]\n\n node_string = \"\"\n if not self.show_all_treatments:\n node_string = 'value = %s' % np.round(value[1:] - value[0], self.precision)\n\n # if tree.children_left[node_id] == _tree.TREE_LEAF: # NOTE: for all node not only leaf\n\n treatments = 
self.ensure_treatments(value)\n treatments_str = \"------CATE mean------\\n\"\n if self.show_all_treatments:\n for k, v in zip(treatments, value):\n treatments_str += f\"{k}={np.round(v, self.precision)}\\n\"\n\n else:\n treatments_str += \"Treatment: \"\n if self.treatment_names:\n # f\"{}={}\"\n treatments_str += self.treatment_names[np.argmax(value)]\n else:\n treatments_str += \"T%s%s%s\" % (self.characters[1],\n np.argmax(value),\n self.characters[2])\n node_string += treatments_str\n\n return node_string\n\n def _node_replacement_text_with_dict(self, tree, node_id, criterion):\n\n # Write node mean CATE\n node_info = self.node_dict[node_id]\n node_string = 'CATE' + self.characters[4]\n value_text = \"\"\n mean = node_info['mean']\n if hasattr(mean, 'shape') and (len(mean.shape) > 0):\n if len(mean.shape) == 1:\n for i in range(mean.shape[0]):\n value_text += \"{}\".format(np.around(mean[i], self.precision))\n if 'ci' in node_info:\n value_text += \" ({}, {})\".format(np.around(node_info['ci'][0][i], self.precision),\n np.around(node_info['ci'][1][i], self.precision))\n if i != mean.shape[0] - 1:\n value_text += \", \"\n value_text += self.characters[4]\n else:\n raise ValueError(\"can only handle up to 1d values\")\n else:\n value_text += \"{}\".format(np.around(mean, self.precision))\n if 'ci' in node_info:\n value_text += \" ({}, {})\".format(np.around(node_info['ci'][0], self.precision),\n np.around(node_info['ci'][1], self.precision))\n value_text += self.characters[4]\n node_string += value_text\n\n if tree.children_left[node_id] == _tree.TREE_LEAF:\n # Write recommended treatment and value - cost\n value = tree.value[node_id][:, 0]\n node_string += 'value - cost = %s' % np.round(value[1:], self.precision) + self.characters[4]\n\n value = tree.value[node_id][:, 0]\n treatments = self.ensure_treatments(value)\n\n if self.show_all_treatments:\n treatments_str = \"\"\n for k, v in zip(treatments, value):\n treatments_str += f\"{k}={v}\"\n\n else:\n treatments_str = \"Treatment: \"\n if self.treatment_names:\n # f\"{}={}\"\n treatments_str += self.treatment_names[np.argmax(value)]\n else:\n treatments_str += \"T%s%s%s\" % (self.characters[1],\n np.argmax(value),\n self.characters[2])\n\n node_string += treatments_str\n node_string += self.characters[4]\n\n return node_string\n","repo_name":"DataCanvasIO/YLearn","sub_path":"ylearn/effect_interpreter/_export.py","file_name":"_export.py","file_ext":"py","file_size_in_byte":19603,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"81"} +{"seq_id":"38783096008","text":"# This program rotates the given array of num elements by d elements\n\n\ndef array_rotation(arr1, d1):\n test_arr = []\n new_arr = []\n for j in range(d1):\n element = arr1.pop()\n test_arr.append(element)\n test_arr.sort()\n new_arr = test_arr + arr1\n return new_arr\n\n\narr = []\n\nnum = int(input(\"Enter the number of elements in the array: \"))\n\nfor i in range(num):\n arr.append(int(input(\"Enter an array number: \")))\n\nd = int(input(\"By how many elements should the array be rotated: \"))\n\nprint(\"The given array is:\", arr)\nprint(\"The array after rotation is:\", array_rotation(arr, d))\n\ninput()\n","repo_name":"cartoonshow57/SmallPythonPrograms","sub_path":"array_rotation.py","file_name":"array_rotation.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39922033937","text":"#!/usr/bin/env python\n# -*- coding: 
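# Corrected sketch of the rotation helper above: popping d elements and then
# sorting them only restores their order by coincidence (for example,
# rotating [1, 3, 2] by 2 pops [2, 3], sorts it to [2, 3], and yields
# [2, 3, 1] instead of [3, 2, 1]); plain slicing rotates correctly.
def rotate_right(arr, d):
    if not arr:
        return []
    d %= len(arr)
    return arr[-d:] + arr[:-d] if d else arr[:]

assert rotate_right([1, 2, 3, 4, 5], 2) == [4, 5, 1, 2, 3]
assert rotate_right([1, 3, 2], 2) == [3, 2, 1]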
utf-8 -*-\n\n################################################################################\n# Unit testing code for create_trilinos_github_test_failure_issue.py #\n################################################################################\n\n\nimport sys\nimport imp\nimport shutil\nimport unittest\n\nimport create_trilinos_github_test_failure_issue as CTGTFI\n\n\nclass test_stripGentConfigBuildName(unittest.TestCase):\n\n def test_1(self):\n genConfigBuildName = CTGTFI.stripGentConfigBuildName(\n \"PR-10472-test-ats2_cuda-10.1.243-gnu-8.3.1-blah-blah-package-enables-911\")\n genConfigBuildName_expected = \\\n \"ats2_cuda-10.1.243-gnu-8.3.1-blah-blah-package-enables\"\n self.assertEqual(\n genConfigBuildName, genConfigBuildName_expected)\n\n\nclass test_getUniqueGenConfigBuildNamesList(unittest.TestCase):\n\n def test_1(self):\n genConfigBuildNamesList = CTGTFI.getUniqueGenConfigBuildNamesList(\n [\n \"PR-10472-test-ats2_cuda-10.1.243-gnu-8.3.1-blah-blah-package-enables-911\",\n \"PR-10472-test-ats2_cuda-10.1.243-gnu-8.3.1-blah-blah-package-enables-912\",\n \"PR-10472-test-sems-rhel7_blah-blah-124\",\n \"PR-10473-test-ats2_cuda-10.1.243-gnu-8.3.1-blah-blah-package-enables-913\",\n \"PR-10472-test-sems-rhel7_blah-blah-125\",\n \"PR-10473-test-sems-rhel7_blah-blah-126\",\n ]\n )\n genConfigBuildNamesList_expected = [\n \"ats2_cuda-10.1.243-gnu-8.3.1-blah-blah-package-enables\",\n \"sems-rhel7_blah-blah\",\n ]\n self.assertEqual(\n genConfigBuildNamesList, genConfigBuildNamesList_expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"trilinos/Trilinos","sub_path":"commonTools/framework/github_issue_creator/create_trilinos_github_test_failure_issue_unit_tests.py","file_name":"create_trilinos_github_test_failure_issue_unit_tests.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"81"} +{"seq_id":"2190638646","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\n'''\nGiven an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? 
Find all unique triplets in the array which gives the sum of zero.\n\nNote:\n\nThe solution set must not contain duplicate triplets.\n\nExample:\n\nGiven array nums = [-1, 0, 1, 2, -1, -4],\n\nA solution set is:\n[\n [-1, 0, 1],\n [-1, -1, 2]\n]\n'''\n\nclass Solution:\n def threeSum(self, nums):\n n = len(nums)\n solutions = set()\n nums = sorted(nums)\n for i in range(0, n-2):\n a = nums[i]\n front = i + 1\n back = n - 1\n while front < back:\n b = nums[front]\n c = nums[back]\n if a+b+c == 0:\n sol = (a, b, c)\n solutions.add(sol)\n front += 1\n back -= 1\n elif a+b+c > 0:\n back -= 1\n else:\n front += 1\n ret = []\n for s in solutions:\n r = []\n for e in s:\n r.append(e)\n ret.append(r)\n return ret\n\n\n\n\ndef main():\n solution = Solution()\n nums = [-1, 0, 1, 2, -1, -4]\n s = solution.threeSum(nums)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zanekoch/Interview-Practice","sub_path":"leetCode/3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37725584469","text":"from random import random\nfrom time import time\nfrom typing import Any, Callable\n\nimport pytest\n\nfrom py9lib.io_ import ratelimit\n\n\ndef foo():\n return None\n\n\ndef timeit(f: Callable[..., Any], n_calls: int) -> float:\n t0 = time()\n for i in range(n_calls):\n f()\n return time() - t0\n\n\ndef test_basic():\n\n with pytest.raises(ValueError):\n ratelimit(1, 0)\n\n with pytest.raises(ValueError):\n ratelimit(0, 1)\n\n # test we don't crash\n ratelimit(1, 1)(foo)()\n\n\ndef test_simple():\n\n get_f = lambda: ratelimit(3, 0.1)(foo)\n\n assert timeit(get_f(), 1) < 0.01\n assert timeit(get_f(), 3) < 0.01\n assert timeit(get_f(), 5) > 0.2\n\n get_f = ratelimit(1, 0.2)(foo)\n assert timeit(get_f, 1) < 0.01\n assert timeit(get_f, 2) > 0.2\n\n\ndef test_microfuzz():\n\n for pool in range(1, 4):\n for n_calls in range(1, 4):\n for _ in range(5):\n timeit(ratelimit(pool, 1e-6 + random() * 1e-3)(foo), n_calls)\n","repo_name":"qdbp/py9lib","sub_path":"tests/test_ratelimit.py","file_name":"test_ratelimit.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5877533817","text":"import tensorflow as tf\nimport cv2\nimport numpy as np\nimport os\n\nclass Inference:\n def __init__(self, model_path):\n self.model = tf.keras.models.load_model(model_path)\n\n def preprocess_image(self, image_path):\n custom_image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n custom_image = cv2.resize(custom_image, (28, 28))\n custom_image = cv2.threshold(custom_image, 200, 255, cv2.THRESH_BINARY)[1]\n custom_image = custom_image.reshape(1, 28, 28, 1)\n return custom_image\n\n def predict(self, preprocessed_image):\n predictions = self.model.predict(preprocessed_image)\n predicted_digit = np.argmax(predictions)\n return predicted_digit\n\n def mnist_result(self, single_digit='image_processing_text/processed_image/single_digit'):\n\n\n file_value = {}\n for file in os.listdir(single_digit):\n if file.endswith(\".png\"):\n img = self.preprocess_image(os.path.join(single_digit, file))\n res = self.predict(img)\n file_value[file] = res\n return file_value\n\nif __name__ =='__main__':\n# Usage\n model_path = \"mnist_model_final.h5\"\n image_path = 'image_processing_text/processed_image/single_digit/left_digit_section_5.png'\n\n digit_predictor = Inference(model_path)\n 
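# Sketch of the same two-pointer 3Sum with explicit duplicate skipping instead
# of collecting triplets into a set; the O(n^2) scan is unchanged.
def three_sum(nums):
    nums = sorted(nums)
    out = []
    for i in range(len(nums) - 2):
        if i and nums[i] == nums[i - 1]:
            continue                              # skip duplicate anchors
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            s = nums[i] + nums[lo] + nums[hi]
            if s < 0:
                lo += 1
            elif s > 0:
                hi -= 1
            else:
                out.append([nums[i], nums[lo], nums[hi]])
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1
                lo, hi = lo + 1, hi - 1
    return out

assert three_sum([-1, 0, 1, 2, -1, -4]) == [[-1, -1, 2], [-1, 0, 1]]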
print(digit_predictor.mnist_result())\n","repo_name":"vanshpundir/Text-Extractor","sub_path":"model/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19590166585","text":"from langchain.document_loaders import TextLoader\nfrom langchain.chains.summarize import load_summarize_chain\nfrom langchain.indexes import VectorstoreIndexCreator\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.llms import OpenAI\nfrom langchain.chains import RetrievalQA\n\nimport os\n\nmodel = ChatOpenAI(model='gpt-3.5-turbo')\n\n\nOPENAI_API_KEY = os.environ['OPENAI_API_KEY']\n\ndef build_reference_for_qa(path):\n input_files = [f'{path}.c']\n output_file = 'gpt_reference.txt'\n\n with open(output_file, 'w', encoding='utf-8') as outfile:\n for file_name in input_files:\n with open(file_name, 'r', encoding='utf-8') as infile:\n contents = infile.read()\n outfile.write(f\"\\nThis is {file_name}.\\n\")\n outfile.write(contents + '\\n')\n \n return output_file\n\n\ndef loading(des):\n from langchain.document_loaders import UnstructuredFileLoader\n return UnstructuredFileLoader(des)\n\ndef split_files(loader):\n from langchain.text_splitter import RecursiveCharacterTextSplitter\n document = loader.load() \n text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200) # Changed from 1000 -> 500 incase the file is not big enough\n return text_splitter.split_documents(document)\n\ndef create_qa(loader):\n from langchain.embeddings import OpenAIEmbeddings\n from langchain.vectorstores import Chroma\n from langchain import VectorDBQA\n \n embeddings = OpenAIEmbeddings()\n db = Chroma.from_documents(split_files(loader), embeddings)\n retriever = db.as_retriever(search_kwargs={\"k\": 1})\n # splited = split_files(loader)\n # docsearch = Chroma.from_documents(split_files(loader), embeddings)\n # return VectorDBQA.from_chain_type(llm=model, chain_type=\"map_rerank\", vectorstore=docsearch,return_source_documents=True)\n \n return RetrievalQA.from_chain_type(llm=model, chain_type=\"stuff\", retriever=retriever)\n \n # index_creator = VectorstoreIndexCreator(\n # vectorstore_cls=Chroma, \n # embedding=OpenAIEmbeddings(),\n # text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n # ).from_loaders([loader])\n # return index_creator\n \n# def qa_with_docs(loader,query):\n# from langchain.chains.question_answering import load_qa_chain\n# docs = split_files(loader)\n# chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n# # chain.run(input_documents=loader.load(), question=query)\n \n# #little modification here\n# chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)\n \n\ndef query_about_files(query,qa):\n # return query.query_with_sources(qa)\n # return qa.run(query)\n \n return qa({\"query\": query}, return_only_outputs=True)\n\n\ndef summerize_chain(llm,file):\n chain = load_summarize_chain(llm, chain_type=\"stuff\", verbose=True)\n chain.run(file)\n \n## All those functions above is about processing the file and creating a chain\n## Now, we will try to focus on designed the prompt.\n\ndef build_prompt_for_qa(query):\n from langchain import PromptTemplate\n categories = [\"Pwn\",\"Reverse\"]\n main_prompt = \"\"\"\n Description: You are PwnGPT: an analyst in the midst of a Capture the Flag (CTF) competition. 
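# Preprocessing sketch with explicit [0, 1] scaling: most Keras MNIST models
# are trained on normalised floats, while predict() above receives raw 0-255
# uint8 values. Whether scaling is needed depends on how mnist_model_final.h5
# was trained, so treat this as an assumption; the threshold value and the
# 28x28 size mirror the original.
import cv2
import numpy as np

def preprocess(path):
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (28, 28))
    _, img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
    return (img.astype("float32") / 255.0).reshape(1, 28, 28, 1)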
\n Your task is to help contestants analyze decompiled C files derived from binary files they provide.\n You must give the possibility of the vulnerability first\n Keep in mind that you only have access to the C language files and are not able to ask for any additional information about the files.\n When you give respones, you must give the location of the vulnerability, and the reason why it is a vulnerability, else, you cannot respone.\n Utilize your expertise to analyze the C files thoroughly and provide valuable insights to the contestants.\n Prompt: A contestant in the CTF competition has just submitted a decompiled C file to you for analysis. \n They are looking for any potential vulnerabilities, weaknesses, or clues that might assist them in the competition. \n Using only the information provided in the C file, offer a detailed analysis, highlighting any areas of interest or concern.\n DO NOT GENERATED INFOMATION THAT IS UNSURE\n \n And here are some examples: \n \"\"\"\n examples = [\n # {\n # \"query\": \"Can you find any buffer overflow vulnerabilities in this code?\",\n # \"answer\": \"YES, Upon analysis of the code, I have identified a potential buffer overflow vulnerability in the function read_input. The function uses gets() to read user input into a fixed-size buffer without checking the input length, which could lead to an overflow. Consider replacing gets() with a safer alternative, such as fgets(), and validate the input size to prevent this issue.\"\n # },{\n # \"query\": \"Does this code have any race condition vulnerabilities?\",\n # \"answer\": \"YES, Upon analysis, I have identified a potential race condition vulnerability in the function write_to_file. The function opens a file using fopen() and performs write operations without proper synchronization mechanisms, such as locks. This could lead to data corruption or unauthorized access if multiple processes or threads access the file simultaneously. To mitigate this issue, consider implementing proper file locking mechanisms or using atomic file operations.\"\n # },\n {\n \"query\": \"Is there any insecure use of xxx in this code?\",\n \"answer\": \"0% NO, the xxx in this code is secure. The code uses the xxx() function from the xxx, which is a secure xxxx. \"\n },{\n \"query\": \"Is there a xxx vulnerability in this code?\",\n \"answer\": \"100% Yes, there is a xxx vulnerability in the xxxxx() function. The function uses printf() to print user input without specifying a proper format string. This could allow an attacker to exploit the vulnerability to read or write arbitrary memory locations. To fix this issue, use a xxx that matches the expected input, such as printf(\\\"%s\\\", user_input);.\"\n },\n # {\n # \"query\": \"Does this code have any heap-based buffer overflow vulnerabilities?\",\n # \"answer\": \"NO, there do not appear to be any heap-based buffer overflow vulnerabilities in the code. The functions that handle memory allocation and manipulation use malloc() and memcpy() appropriately, ensuring that the allocated buffer size matches the data being copied. Additionally, input validation checks are in place to prevent the possibility of buffer overflows.\"\n # },{\n # \"query\": \"Are there any double-free vulnerabilities in this code?\",\n # \"answer\": \"NO, I could not find any double-free vulnerabilities in the code. The functions that deal with memory management use free() appropriately, and there are no instances where the same memory block is freed more than once. 
This indicates that proper care has been taken to prevent memory management issues in the code.\"\n # },{\n # \"query\": \"Can you find any use-after-free vulnerabilities in this code?\",\n # \"answer\": \"YES, Upon analysis, I have identified a use-after-free vulnerability in the process_data() function. The function frees a memory block using free() and then proceeds to access the same memory block later in the code. This could be exploited by an attacker to execute arbitrary code or cause a crash. To resolve this issue, ensure that memory is accessed only before it is freed and that any pointers to freed memory are properly nullified.\"\n {\n \"query\": \"Does this code have any xxx vulnerabilities?\",\n \"answer\": \"50% MAYBE, I might detected a xxx in the code. Specifically in the xxxx() function. The code uses xxx to xxx, which could lead to an overflow. Consider replacing xxx with a safer alternative, such as xxx(), and validate the input size to prevent this issue.\"\n },\n # \"query\": \"Can you find any SQL injection vulnerabilities in this code?\",\n # \"answer\": \"NO, there are no SQL injection vulnerabilities present in this code. The code does not interact with any databases, and all user input is handled using safe string manipulation functions. This demonstrates good coding practices and ensures that the code is not susceptible to SQL injection attacks.\"\n # },\n ]\n \n example_template = \"User: {query}\\GPT: {answer}\"\n example_prompt = PromptTemplate(\n input_variables=[\"query\", \"answer\"],\n template=example_template\n )\n suffix = \"User: {query}\\nGPT: \"\n from langchain import FewShotPromptTemplate\n few_shot_prompt_template = FewShotPromptTemplate(\n examples=examples,\n example_prompt=example_prompt,\n prefix=main_prompt,\n suffix=suffix,\n input_variables=[\"query\"],\n example_separator=\"\\n\\n\"\n )\n return few_shot_prompt_template.format(query=query)\n\ndef generate_pwntools_templates():\n prompt = \"\"\"\n After analysising the function of every function of the source code;\n You will need to generate a pwntools template that can be use by Python with the source provided.\n the template should be looking like this: (Everything in the [] is a according to the program.)\n \n [function_name]([arguement]):\n [code]\n \n For example; This is a function that can be use to interact with `delete` function in a certain heap exploition program:\n \n def deletenote(id):\n p.recvuntil('option--->>')\n p.sendline('4')\n p.recvuntil('note:')\n p.sendline(str(id))\n \n HINT: YOU WILL ONLY NEED TO GENERATE THE MAIN FUNCTION OF THE SOURCE CODE.\n \"\"\"\n \n return prompt","repo_name":"DDizzzy79/Ret2GPT","sub_path":"langchain_preprocess/prompt_builder.py","file_name":"prompt_builder.py","file_ext":"py","file_size_in_byte":9804,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"81"} +{"seq_id":"14316052978","text":"from kivy.app import App\r\nfrom kivy.clock import Clock\r\nfrom kivy.core.text import LabelBase\r\nfrom kivy.core.window import Window\r\nfrom kivy.utils import get_color_from_hex\r\nfrom kivy.lang import Builder\r\nfrom time import strftime\r\n__version__ = \"1.0.3\"\r\nkvfile = Builder.load_string(\"\"\"\r\n \r\n
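# Minimal stand-alone version of the few-shot prompt assembly above; note the
# original example_template contains "\G" (a literal backslash-G) where a
# "\n" newline between the User and GPT turns was presumably intended. The
# imports match the ones the file itself uses; the example content here is
# illustrative only.
from langchain import FewShotPromptTemplate, PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["query", "answer"],
    template="User: {query}\nGPT: {answer}",
)
few_shot = FewShotPromptTemplate(
    examples=[{"query": "Is gets() safe?",
               "answer": "100% YES, vulnerable - gets() cannot bound its input."}],
    example_prompt=example_prompt,
    prefix="You analyse decompiled C files for vulnerabilities.",
    suffix="User: {query}\nGPT: ",
    input_variables=["query"],
    example_separator="\n\n",
)
print(few_shot.format(query="Is strcpy() into a fixed buffer safe?"))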