', re.S | re.M)\n\n get_text_list(\"https://www.neihan-8.com/wenzi//\")\n","repo_name":"1987617587/lsh_py","sub_path":"pachong/PCdemo1/day05/刘士豪_20200327/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"26845916834","text":"import re\nimport requests\n\n# List the input files\ninput_files = ['ins1.txt', 'ins2.txt', 'ins3.txt']\n\n# Iterate through the input files\nfor input_file in input_files:\n # Open the current input file and read its contents into a list\n with open(input_file, 'r') as f:\n lines = f.readlines()\n\n # Iterate through the list of URLs\n for line in lines:\n # Download the JavaScript file from the URL\n response = requests.get(line)\n contents = response.text\n\n # Use a regular expression to find all URLs\n urls = re.findall(r'https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+', contents)\n\n # Print the URLs\n for url in urls:\n print(url)\n","repo_name":"zhirobyte/Python-Repo","sub_path":"filterjs.py","file_name":"filterjs.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"44563814341","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 14/3/2023\n@author: ZhizhuoYin\n\"\"\"\n\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom torch.nn.functional import softmax\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom scipy.stats import wasserstein_distance\n\n\ndef forward(model, loader, device, writer, epoch, user_edge_dict = None,is_user = False, is_item=True, optimizer=None, train_flag=True, max_item_id=0, last_update=0):\n if train_flag:\n model.train()\n else:\n model.eval()\n hit20, mrr20, hit10, mrr10, hit5, mrr5, hit1, mrr1 = [], [], [], [], [], [], [], []\n\n mean_loss = 0.0\n itemlist = []\n edgerepeat = []\n item_edges = [[], []]\n edgelist = [[] for i in range(max_item_id+1)] # elements [receiver,times,index]\n globalItem2IndMapper = {}\n itemindex = 0\n\n for i, batch in enumerate(loader):\n if train_flag:\n optimizer.zero_grad()\n x = [it[0] for it in batch.x.tolist()]\n edge_index = batch.edge_index.tolist()\n edge_count = batch.edge_count.tolist()\n itemlist += list(filter(lambda d: d not in globalItem2IndMapper,x))\n\n item = itemlist\n if is_item:\n for it in x:\n if it not in globalItem2IndMapper:\n globalItem2IndMapper[it] = itemindex\n itemindex += 1\n\n for k in range(len(edge_index[0])):\n isexist = 0\n for receiver in edgelist[x[edge_index[0][k]]]:\n if receiver[0] == x[edge_index[1][k]]:\n receiver[1] += 1\n isexist = 1\n break\n if not isexist:\n item_edges[0] += [x[edge_index[0][k]]-1]\n item_edges[1] += [x[edge_index[1][k]]-1]\n edgelist[x[edge_index[0][k]]].append([x[edge_index[1][k]],1,len(edgerepeat)])\n edgerepeat += [edge_count[k]]\n\n usredgelist = [[], []]\n userid = batch.userid.tolist()\n if is_user == True:\n for u in userid:\n for v in userid:\n v = int(v)\n u = int(u)\n if (u in user_edge_dict) and (v in user_edge_dict):\n if v in user_edge_dict[u]['in']:\n usredgelist[0].append(v)\n usredgelist[1].append(u)\n if v in user_edge_dict[u]['out']:\n usredgelist[0].append(u)\n usredgelist[1].append(v)\n if is_item:\n usredgelist = torch.tensor(usredgelist, dtype=torch.long)\n item_edge_index = torch.tensor(item_edges,dtype=torch.long)\n item = torch.tensor(item,dtype=torch.long)\n scores = model(batch.to(device),train_flag=train_flag, is_user=is_user, is_item=is_item, user_edge_list=usredgelist.to(device) ,item=item.to(device),item_edge_index=item_edge_index.to(device), max_item_id=max_item_id)\n else:\n scores = model(batch.to(device), train_flag=train_flag, is_user=is_user, is_item=is_item, max_item_id=max_item_id)\n targets = batch.y - 1\n loss = model.loss_function(scores, targets)\n\n if train_flag:\n loss.backward()\n optimizer.step()\n writer.add_scalar('loss/train_batch_loss', loss.item(), last_update + i)\n else:\n sub_scores = scores.topk(20)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit20.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr20.append(0)\n else:\n mrr20.append(1 / (np.where(score == target)[0][0] + 1))\n\n sub_scores = scores.topk(10)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit10.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr10.append(0)\n else:\n mrr10.append(1 / (np.where(score == target)[0][0] + 1))\n\n sub_scores = scores.topk(5)[1] # batch * top_k indices\n for score, target in 
zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit5.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr5.append(0)\n else:\n mrr5.append(1 / (np.where(score == target)[0][0] + 1))\n\n sub_scores = scores.topk(1)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit1.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr1.append(0)\n else:\n mrr1.append(1 / (np.where(score == target)[0][0] + 1))\n\n mean_loss += loss / batch.num_graphs\n\n if train_flag:\n writer.add_scalar('loss/train_loss', mean_loss.item(), epoch)\n else:\n writer.add_scalar('loss/test_loss', mean_loss.item(), epoch)\n hit20 = np.mean(hit20) * 100\n mrr20 = np.mean(mrr20) * 100\n print(str(hit20)+'\\t'+str(mrr20))\n writer.add_scalar('index/hit20', hit20, epoch)\n writer.add_scalar('index/mrr20', mrr20, epoch)\n hit10 = np.mean(hit10) * 100\n mrr10 = np.mean(mrr10) * 100\n print(str(hit10)+'\\t'+str(mrr10))\n writer.add_scalar('index/hit10', hit10, epoch)\n writer.add_scalar('index/mrr10', mrr10, epoch)\n hit5 = np.mean(hit5) * 100\n mrr5 = np.mean(mrr5) * 100\n print(str(hit5)+'\\t'+str(mrr5))\n writer.add_scalar('index/hit5', hit5, epoch)\n writer.add_scalar('index/mrr5', mrr5, epoch)\n hit1 = np.mean(hit1) * 100\n mrr1 = np.mean(mrr1) * 100\n print(str(hit1)+'\\t'+str(mrr1))\n writer.add_scalar('index/hit1', hit1, epoch)\n writer.add_scalar('index/mrr1', mrr1, epoch)\n return [[hit20,hit10,hit5,hit1],[mrr20,mrr10,mrr5,mrr1],epoch]\n return []\n\ndef forward_entropy(model, loader, device, max_item_id=0):\n for i, batch in enumerate(loader):\n scores = softmax(model(batch.to(device), train_flag=False, max_item_id=max_item_id), dim=1)\n dis_score = Categorical(scores)\n if i == 0:\n entropy = dis_score.entropy()\n else:\n entropy = torch.cat((entropy, dis_score.entropy()))\n \n pro = entropy.cpu().detach().numpy()\n weights = np.exp((pd.Series(pro).rank() / len(pro)).values)\n return weights / np.sum(weights)\n\n\ndef forward_cross_entropy(model, loader, device, max_item_id=0):\n for i, batch in enumerate(loader):\n scores = softmax(model(batch.to(device),train_flag=False, max_item_id= max_item_id), dim=1)\n targets = batch.y - 1\n if i == 0:\n cross_entropy = torch.nn.functional.cross_entropy(scores, targets, reduction='none')\n else:\n cross_entropy = torch.cat((cross_entropy, torch.nn.functional.cross_entropy(scores, targets, reduction='none')))\n\n pro = cross_entropy.cpu().detach().numpy()\n return pro / pro.sum()\n\n\ndef forward_wass(model, loader, device, max_item_id=0):\n distance = []\n for i, batch in enumerate(loader):\n\n scores = softmax(model(batch.to(device), train_flag=False, max_item_id = max_item_id), dim=1)\n targets = batch.y - 1\n\n targets_1hot = torch.zeros_like(scores).scatter_(1, targets.view(-1, 1), 1).cpu().numpy()\n distance += list(wasserstein_distance(score, target) for score, target in zip(scores.cpu().numpy(), targets_1hot))\n\n weights = np.exp((pd.Series(distance).rank() / len(distance)).values)\n return weights / np.sum(weights)\n","repo_name":"Williamy946/GIUA-GNN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74287833527","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\n\ndef plot_embedding(data, label, title):\n x_min, x_max = np.min(data, 0), np.max(data, 0)\n data = (data - x_min) / (x_max - x_min)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n for i in range(data.shape[0]):\n plt.text(data[i, 0], data[i, 1], str(label[i]),\n color=plt.cm.Set1(label[i] / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n plt.xticks([])\n plt.yticks([])\n plt.title(title)\n return fig\n\ninputn = np.load(\"input.npy\") # (500, 929, 2)\natt_out = np.load(\"att_out.npy\") # (500, 929, 2)\natt_out2 = np.load(\"att_out2.npy\") # (500, 929, 2)\natt_out3 = np.load(\"att_out3.npy\") # (500, 929, 2)\nlabel = np.load(\"label_test500.npy\") # (500,)\nprint(label.shape)\n\nselect_f = inputn\n#select_f = att_out\n#select_f = att_out2\n#select_f = att_out3\n\nfig = plt.figure()\ntsne = TSNE(n_components=2, init='pca', random_state=0)\nstack = np.concatenate((select_f[:,:,0], select_f[:,:,1]), axis=1)\nprint(stack.shape)\nresult = tsne.fit_transform(stack)\nprint(result.shape)\n#fig = plot_embedding(result, label,'t-SNE embedding of the digits')\nx_min, x_max = np.min(result, 0), np.max(result, 0)\nresult = (result - x_min) / (x_max - x_min)\n\ncolor = [\"#B0E0E6\",\"#EE6363\"]\n#color = [\"#B0E0E6\",\"#EE00EE\"]\n\nax = plt.subplot(111)\nfor i in range(result.shape[0]):\n if(label[i] == 0):\n s1 = plt.scatter(result[i, 0], result[i, 1],s=20,color=color[label[i]])\nfor i in range(result.shape[0]):\n if(label[i] == 1):\n s2 = plt.scatter(result[i, 0], result[i, 1],s=20,color=color[label[i]])\nplt.xlabel('Dimension 1')\nplt.ylabel('Dimension 2')\nplt.title('t-SNE embedding of the input layer')\n#plt.title('t-SNE embedding of the global attention layer')\n#plt.title('t-SNE embedding of the 1st MHA layer')\n#plt.title('t-SNE embedding of the 2nd MHA layer')\nplt.legend((s1,s2),('0','1') ,loc = 'best')\nplt.show()","repo_name":"Liuzhe30/AttADR","sub_path":"visulization/vis-tsne-representation.py","file_name":"vis-tsne-representation.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"19436182783","text":"import collections\nimport re\n\n\ndef read_stopwords():\n with open('./stopwords.txt', \"r\") as file:\n stopwords = file.read().split(',')\n # Remove newline characters and make the words lowercase\n stopwords = set([word.strip().lower() for word in stopwords])\n return stopwords\n\n\ndef remove_stopwords_bigrams(bigrams):\n stopwords = read_stopwords()\n filtered_bigrams = []\n for b1, b2 in bigrams:\n if b1.lower() not in stopwords and b2.lower() not in stopwords:\n filtered_bigrams.append((b1, b2))\n return filtered_bigrams\n\n\ndef get_bigram_frequencies(in_file, out_file):\n # Open the input file and read in the contents\n print('Reading in file...')\n with open(in_file, 'r') as infile:\n text = infile.read()\n\n # Tokenize the words in the text\n print('Tokenizing words...')\n words = re.findall(r'\\b[^\\W\\d_]{2,}\\b', text)\n\n # Normalize the case of the words\n print('Normalizing case...')\n words = [word.lower() for word in words]\n\n # Generate the bigrams\n print('Generating bigrams...')\n bigrams = [(words[i], words[i + 1]) for i in range(len(words) - 1)]\n\n print('Removing stopwords...')\n bigrams = remove_stopwords_bigrams(bigrams)\n\n # Count the frequency of each bigram\n print('Counting bigram frequency...')\n bigram_counts = collections.Counter(bigrams)\n\n # Sort the bigrams by frequency\n print('Sorting bigrams by frequency...')\n sorted_bigrams = sorted(bigram_counts.items(), key=lambda x: x[1], reverse=True)\n\n # Open the output file and write the bigram frequencies to it\n print('Writing to output file...')\n with open(out_file, 'w') as outfile:\n for bigram, count in sorted_bigrams:\n # ignore less than\n if count < 1000:\n continue\n outfile.write(f'{bigram[0]} {bigram[1]},{count}\\n')\n\n\nif __name__ == '__main__':\n # Test the function\n # Download oscar corpus from here https://www.kaggle.com/code/bmukhtar/starter-kazakh-oscar-corpus-05b5dbd5-d\n get_bigram_frequencies('kk.txt', 'kk_bigrams.txt')\n","repo_name":"BMukhtar/KazakhSpellingAndSuggestion","sub_path":"generate_bigrams.py","file_name":"generate_bigrams.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"19802888956","text":"from distutils.core import setup\nimport os.path\n\nREADME = os.path.join(os.path.dirname(__file__), 'README.md')\n\nversion = '1.0'\n\nwith open(README) as fp:\n longdesc = fp.read()\n\nsetup(name='ignore-from-github',\n include_package_data=True,\n version=version,\n description='Add common sets of ignored file types to your .gitignore easily',\n long_description=longdesc,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Software Development',\n 'Intended Audience :: Developers'\n ],\n author='Anson Rosenthal',\n author_email='anson.rosenthal@gmail.com',\n license='MIT License',\n url='https://github.com/anrosent/ignore.git',\n scripts=['ignore']\n)\n","repo_name":"anrosent/ignore","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"16883905593","text":"from typing import Sized\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\n\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\ndef calculate(image1, image2):\n # 灰度直方图算法\n # 计算单通道的直方图的相似值\n hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])\n hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])\n # 计算直方图的重合度\n degree = 0\n for i in range(len(hist1)):\n if hist1[i] != hist2[i]:\n degree = degree + \\\n (1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))\n else:\n degree = degree + 1\n degree = degree / len(hist1)\n return degree\n\ndef classify_hist_with_split(image1, image2, size=(255,255)):\n image1 = cv2.resize(image1, size)\n image2 = cv2.resize(image2, size)\n sub_image1 = cv2.split(image1)\n sub_image2 = cv2.split(image2)\n sub_data = 0\n for im1, im2 in zip(sub_image1, sub_image2):\n sub_data += calculate(im1, im2)\n sub_data = sub_data / 3\n return sub_data\n\n\ntem2018 = cv2.imread(r\"D:/Program/reefStudy/data/tem2018.png\")\ntem2019 = cv2.imread(r\"D:/Program/reefStudy/data/tem2019.png\")\ntem2020 = cv2.imread(r\"D:/Program/reefStudy/data/tem2020.png\")\ntem2021 = cv2.imread(r\"D:/Program/reefStudy/data/tem2021.png\")\n\npicList = [tem2018,tem2019,tem2020,tem2021]\n\ntem2018 = cv2.resize(tem2018, (657,398))\ntem2019 = cv2.resize(tem2019, (657,398))\ntem2020 = cv2.resize(tem2020, (657,398))\ntem2021 = cv2.resize(tem2021, (657,398))\n\n# tmp1 = cv2.addWeighted(tem2018,0.5,tem2019,0.5,0)\n# tmp2 = cv2.addWeighted(tem2020,0.5,tem2021,0.5,0)\n# tmp3 = cv2.addWeighted(tmp1,0.5,tmp2,0.5,0)\n\n# globalreef = cv2.imread(r\"D:/Program/reefStudy/data/gr2020.png\")\n# globalreef = cv2.resize(globalreef, (657,398))\n\n# tmp3 = cv2.subtract(tem2021,tem2020)\n# print(classify_hist_with_split(tmp3,tem2021))\n# tmp3 = cv2.addWeighted(globalreef,0.8,tmp3,0.2,0)\n\ngr2018 = cv2.imread(r\"D:/Program/reefStudy/data/gr2018.png\")\ngr2019 = cv2.imread(r\"D:/Program/reefStudy/data/gr2019.png\")\ngr2020 = cv2.imread(r\"D:/Program/reefStudy/data/gr2020.png\")\n\nplt.plot([0.24361189,0.2487901])\nplt.plot([0.22682571411132812,0.266563355922699])\nplt.legend([\"水温变化速率\",\"珊瑚变化速率\"])\nplt.title(\"水温变化速率和珊瑚变化速率比较\")\nplt.show()\n\n\n\n# tmp3 = cv2.cvtColor(tmp3, cv2.COLOR_BGR2GRAY)\n\n\n# cv2.imshow('tmp3',tmp3)\n# cv2.waitKey()","repo_name":"MicosLiang/reefStudy","sub_path":"temAndReef.py","file_name":"temAndReef.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"25975129049","text":"import subprocess\nimport os\nimport time\n\n\ndef getProjectName():\n projectName = input('Project name: ')\n return projectName\n\n\ndef getFlutterPath():\n userProfile = os.environ.get('USERPROFILE')\n flutterPath = f'{userProfile}\\\\Downloads\\\\flutter_windows_3.0.5-stable\\\\flutter\\\\bin\\\\flutter.bat'\n\n return flutterPath\n\n\ndef askForTypeOfProject():\n print('Select the type of project:')\n typeOfProject = int(input(\n '1. Basic Riverpod structure project\\n2. Responsive riverpod structure project\\n'))\n\n while typeOfProject not in range(1, 3):\n typeOfProject = askForTypeOfProject()\n\n return typeOfProject\n\n\ndef askForFeaturesInProject():\n featuresString = input(\n 'This project will be using feature first approach.\\nEnter the features you want in your app:\\nExample: auth, chat, call, products, home or type skip to skip this step\\n')\n\n if featuresString.lower() == 'skip':\n return []\n else:\n featuresList = featuresString.split(',')\n features = []\n\n for feature in featuresList:\n features.append(feature.strip())\n\n if 'home' in features:\n features.remove('home')\n return features\n\n\ndef createFlutterProject(projectName):\n flutterPath = getFlutterPath()\n runTerminalCommand(f'{flutterPath} create {projectName}')\n\n\ndef flutterPubGet(projectName):\n flutterPath = getFlutterPath()\n runTerminalCommand(f'{flutterPath} pub get',\n directoryName=f'.\\{projectName}')\n\n\ndef addFlutterPackage(packageName, directoryName):\n flutterPath = getFlutterPath()\n runTerminalCommand(f'{flutterPath} pub add {packageName}',\n directoryName=directoryName)\n\n\ndef runTerminalCommand(command, directoryName=''):\n try:\n if (len(directoryName) > 0):\n process = subprocess.Popen(command, cwd=directoryName)\n process.wait()\n else:\n process = subprocess.Popen(command)\n process.wait()\n except ():\n print('some error occured during while executing some commands')\n\n\ndef createFile(filePath, content):\n\n with open(filePath, 'w') as f:\n f.write(content)\n print(f\"File {filePath} created successfully.\")\n\n\ndef createFolders(folders):\n for i in range(len(folders)):\n if i > 0:\n if doesFolderExists(folders[i - 1]):\n os.mkdir(folders[i])\n else:\n os.mkdir(folders[i])\n\n\ndef createFiles(files):\n for filePath in files:\n createFile(filePath, files[filePath])\n\n\ndef doesFolderExists(filePath):\n while not os.path.exists(filePath):\n print(f'Creating {filePath} ...')\n time.sleep(0.1)\n\n files = filePath.split('\\\\')\n return True\n","repo_name":"Nitin-Poojary/startup-code-generator-flutter","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27347211972","text":"import boto3\nfrom constants import (\n TABLE_NAME,\n TABLE_READ_CAPACITY_UNITS,\n TABLE_WRITE_CAPACITY_UNITS,\n AWS_REGION,\n)\nfrom init import logger, statistics\n\n\ndef create_table(\n table_name: str = TABLE_NAME,\n ReadCapacityUnits: int = TABLE_READ_CAPACITY_UNITS,\n WriteCapacityUnits: int = TABLE_WRITE_CAPACITY_UNITS,\n aws_region: str = AWS_REGION,\n) -> bool:\n \"\"\" Creates DynamoB table \"\"\"\n\n try:\n client = boto3.client(\"dynamodb\", region_name=aws_region)\n response = client.list_tables()\n tables = [\n table for table in response[\"TableNames\"] if table == table_name\n ]\n\n if len(tables) > 0:\n logger.warning(\n f'Table \"{table_name}\" already exists. Skipping table creation.'\n )\n return False\n else:\n logger.info(\n f'Table \"{table_name}\" does not exist. Starting creation process...'\n )\n except Exception as e:\n logger.error(e)\n raise\n\n logger.info(\"Creating DB table...\")\n logger.debug(\n f\"Context Parameters: {create_table.__name__} => {create_table.__code__.co_varnames}\"\n )\n try:\n dynamodb = boto3.resource(\"dynamodb\", region_name=aws_region)\n table = dynamodb.create_table(\n TableName=table_name,\n AttributeDefinitions=[\n {\"AttributeName\": \"ts\", \"AttributeType\": \"S\"}\n ],\n KeySchema=[{\"AttributeName\": \"ts\", \"KeyType\": \"HASH\"}],\n ProvisionedThroughput={\n \"ReadCapacityUnits\": int(ReadCapacityUnits),\n \"WriteCapacityUnits\": int(WriteCapacityUnits),\n },\n )\n logger.info(\"Table created successfully.\")\n logger.debug(table)\n except dynamodb.exceptions.ResourceInUseException as e:\n logger.warning(\n f'Table \"{table_name}\" already exists. Skipping table creation.'\n )\n logger.debug(e)\n return False\n\n return True\n\n\ndef seed_db_table(\n db_objects: list = None,\n table_name: str = TABLE_NAME,\n aws_region: str = AWS_REGION,\n) -> bool:\n \"\"\" Insert DB objects into table \"\"\"\n\n logger.info(\"Inserting data into DB...\")\n logger.debug(\n f\"Context Parameters: {seed_db_table.__name__} => {seed_db_table.__code__.co_varnames}\"\n )\n\n try:\n dynamodb = boto3.resource(\"dynamodb\", region_name=aws_region)\n table = dynamodb.Table(table_name)\n\n with table.batch_writer() as batch:\n for item in db_objects:\n batch.put_item(Item=item)\n\n statistics.append([\"seed_db_table\", len(db_objects)])\n\n logger.info(f\"{len(db_objects)} item(s) were inserted in DB.\")\n except Exception as e:\n logger.error(e)\n raise\n\n return True\n","repo_name":"will666/wasabi-cli","sub_path":"manage/src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27657430835","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm # tqdm是显示循环进度条的库\n\n\nclass CliffWalkingEnv:\n def __init__(self, ncol, nrow):\n self.ncol = ncol # 列\n self.nrow = nrow # 行\n self.x = 0 # 记录当前智能体位置的横坐标\n self.y = self.nrow - 1 # 记录当前智能体位置的纵坐标\n\n def step(self, action): # 外部调用这个函数来改变当前位置\n # 区别在于:没有定义P矩阵\n # 4种动作, change[0]:上, change[1]:下, change[2]:左, change[3]:右。坐标系原点(0,0)\n # 定义在左上角\n change = [[0, -1], [0, 1], [-1, 0], [1, 0]]\n self.x = min(self.ncol - 1, max(0, self.x + change[action][0]))\n self.y = min(self.nrow - 1, max(0, self.y + change[action][1]))\n next_state = self.y * self.ncol + self.x\n reward = -1\n done = False\n # 第三行\n if self.y == self.nrow - 1 and self.x > 0: # 下一个位置在悬崖或者目标\n done = True\n if self.x != self.ncol - 1:# 不在第11列\n reward = -100\n return next_state, reward, done\n\n def reset(self): # 回归初始状态,坐标轴原点在左上角\n self.x = 0 # 列0\n self.y = self.nrow - 1 # 行 3\n return self.y * self.ncol + self.x","repo_name":"MengxueTao/testGIT-HandsOnRL","sub_path":"C5_TD_CliffWalkingEnv.py","file_name":"C5_TD_CliffWalkingEnv.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"533426544","text":"import os\nfrom teradataml.common.exceptions import TeradataMlException\nfrom teradataml.common.messages import Messages\nfrom teradataml.common.messagecodes import MessageCodes\n\n\nclass _ConfigureSuper(object):\n\n def __init__(self):\n pass\n\n def _SetKeyValue(self, name, value):\n super().__setattr__(name, value)\n\n def _GetValue(self, name):\n return super().__getattribute__(name)\n\n\ndef _create_property(name):\n storage_name = '_' + name\n\n @property\n def prop(self):\n return self._GetValue(storage_name)\n\n @prop.setter\n def prop(self, value):\n self._SetKeyValue(storage_name, value)\n\n return prop\n\n\nclass _Configure(_ConfigureSuper):\n \"\"\"\n Options to configure database related values.\n \"\"\"\n\n default_varchar_size = _create_property('default_varchar_size')\n column_casesensitive_handler = _create_property('column_casesensitive_handler')\n vantage_version = _create_property('vantage_version')\n val_install_location = _create_property('VAL_install_location')\n byom_install_location = _create_property('BYOM_install_location')\n sandbox_container_id = _create_property('sandbox_container_id')\n temp_table_database = _create_property('temp_table_database')\n temp_view_database = _create_property('temp_view_database')\n read_nos_function_mapping = _create_property('read_nos_function_mapping')\n write_nos_function_mapping = _create_property('write_nos_function_mapping')\n\n\n def __init__(self, default_varchar_size=1024, column_casesensitive_handler = False,\n vantage_version=\"vantage1.1\", val_install_location=None,\n byom_install_location=None, sandbox_container_id=None,\n temp_table_database=None, temp_view_database=None, database_version=None,\n read_nos_function_mapping=\"read_nos\", write_nos_function_mapping=\"write_nos\"):\n \"\"\"\n PARAMETERS:\n default_varchar_size:\n Specifies the size of varchar datatype in Teradata Vantage, the default\n size is 1024.\n User can configure this parameter using options.\n Types: int\n Example:\n teradataml.options.configure.default_varchar_size = 512\n\n column_casesensitive_handler:\n Specifies a boolean value that sets the value of this option to True or\n False.\n One should set this to True, when ML Engine connector property is\n CASE-SENSITIVE, else set to False, which is CASE-INSENSITIVE.\n Types: bool\n Example:\n # When ML Engine connector property is CASE-SENSITIVE, set this\n # parameter to True.\n teradataml.options.configure.column_casesensitive_handler = True\n\n vantage_version:\n Specifies the Vantage version of the system teradataml is connected to.\n Types: string\n Example:\n # Set the Vantage Version\n teradataml.options.configure.vantage_version = \"vantage1.1\"\n\n val_install_location:\n Specifies the name of the database where Vantage Analytic Library functions\n are installed.\n Types: string\n Example:\n # Set the Vantage Analytic Library install location to 'SYSLIB'\n # when VAL functions are installed in 'SYSLIB'.\n teradataml.options.configure.val_install_location = \"SYSLIB\"\n\n byom_install_location:\n Specifies the name of the database where Bring Your Own Model functions\n are installed.\n Types: string\n Example:\n # Set the BYOM install location to 'SYSLIB'\n # when BYOM functions are installed in 'SYSLIB'.\n teradataml.options.configure.byom_install_location = \"SYSLIB\"\n\n sandbox_container_id:\n Specifies the id of sandbox container that will be used by test_script method.\n Types: string\n Example:\n # Set the sandbox_container_id.\n 
teradataml.options.configure.sandbox_container_id = '734rfjsls3'\n\n database_version:\n Specifies the actual database version of the system teradataml is connected to.\n Types: string\n Example:\n # Set the Vantage Version\n teradataml.options.configure.database_version = \"17.05a.00.147\"\n \n read_nos_function_mapping:\n Specifies the function mapping name for the read_nos table operator function.\n Types: string\n Example:\n # Set the read nos function mapping name\n teradataml.options.configure.read_nos_function_mapping = \"read_nos_fm\"\n \n write_nos_function_mapping:\n Specifies the function mapping name for the write_nos table operator function.\n Types: string\n Example:\n # Set the write nos function mapping name\n teradataml.options.configure.write_nos_function_mapping = \"write_nos_fm\"\n\n \"\"\"\n super().__init__()\n super().__setattr__('default_varchar_size', default_varchar_size)\n super().__setattr__('column_casesensitive_handler', column_casesensitive_handler)\n super().__setattr__('vantage_version', vantage_version)\n super().__setattr__('val_install_location', val_install_location)\n super().__setattr__('byom_install_location', byom_install_location)\n super().__setattr__('sandbox_container_id', sandbox_container_id)\n super().__setattr__('temp_table_database', temp_table_database)\n super().__setattr__('temp_view_database', temp_view_database)\n super().__setattr__('database_version', database_version)\n super().__setattr__('read_nos_function_mapping', read_nos_function_mapping)\n super().__setattr__('write_nos_function_mapping', write_nos_function_mapping)\n\n \n # internal configurations\n # These configurations are internal and should not be\n # exported to the user's namespace.\n super().__setattr__('_validate_metaexpression', False)\n # Internal parameter, that should be used while testing to validate whether\n # Garbage collection is being done or not.\n super().__setattr__('_validate_gc', False)\n # Internal parameter, that is used for checking if sto sandbox image exists on user's system\n super().__setattr__('_latest_sandbox_exists', False)\n # Internal parameter, that is used for checking whether a container was started by\n # teradataml.\n super().__setattr__('_container_started_by_teradataml', None)\n # Internal parameter, that is used for specifying the global model cataloging schema name which\n # will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_database', None)\n # Internal parameter, that is used for specifying the global model cataloging table name which\n # will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_table', None)\n # Internal parameter, that is used for specifying the license information as a string, file\n # path or column name which will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_license', None)\n # Internal parameter, that is used for specifying the source where the license came from\n # which will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_license_source', None)\n # Internal parameter, that is used for specifying the license table name\n # where the license is stored\n super().__setattr__('_byom_model_catalog_license_table', None)\n # Internal parameter, that is used for specifying the schema name where\n # the license table is stored\n super().__setattr__('_byom_model_catalog_license_database', None)\n # Internal parameter, that is used for specifying the URL to be used as\n # base URL in UES REST calls\n super().__setattr__('ues_url', 
None)\n # Internal parameter, that is used for specifying the Authentication token to be used\n # in UES REST calls\n super().__setattr__('auth_token', None)\n # Internal parameter, that is used to specify the certificate file in a secured HTTP request.\n super().__setattr__('certificate_file', False)\n # Internal parameter, that is used for specify the maximum size of the file\n # allowed by UES to upload it.\n super().__setattr__('_ues_max_file_upload_size', 10)\n # Internal parameter, that is used to specify the default environment,\n super().__setattr__('_default_user_env', None)\n\n # Internal parameter, that is used to post the Code verifier in OAuth work flow.\n super().__setattr__('_oauth_end_point', None)\n\n # Internal parameter, that is used for specifying the client id in OAuth work flow.\n super().__setattr__('_oauth_client_id', None)\n\n # Internal parameter, that is used for specifying the ID of Authentication token.\n super().__setattr__('_id_auth_token', None)\n\n # Internal parameter, that is used for specifying the Authentication token expiry time.\n super().__setattr__('_auth_token_expiry_time', None)\n\n # Internal parameter, that is used for specifying the refresh token to be used\n # in UES REST calls\n super().__setattr__('_refresh_token', None)\n\n # Internal parameter, that is used for specifying the refresh token to be used\n # in UES REST calls\n super().__setattr__('_pf_token_username_label', \"pf.username\")\n\n # Internal parameter, that is used for specifying the refresh token to be used\n # in UES REST calls\n super().__setattr__('_pf_token_password_label', \"pf.pass\")\n\n def __setattr__(self, name, value):\n if hasattr(self, name):\n if name == 'default_varchar_size':\n if not isinstance(value, int):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'int'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n if value <= 0:\n raise TeradataMlException(Messages.get_message(MessageCodes.TDMLDF_POSITIVE_INT, name,\n \"greater than\"),\n MessageCodes.TDMLDF_POSITIVE_INT)\n elif name == '_ues_max_file_upload_size':\n if type(value) != int:\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'int'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n if value < 0:\n raise TeradataMlException(Messages.get_message(MessageCodes.TDMLDF_POSITIVE_INT, name,\n \"greater than or equal to\"),\n MessageCodes.TDMLDF_POSITIVE_INT)\n elif name in ['column_casesensitive_handler', '_validate_metaexpression',\n '_validate_gc', '_latest_sandbox_exists']:\n\n if not isinstance(value, bool):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'bool'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n elif name == 'certificate_file':\n if not isinstance(value, str):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n\n if not os.path.exists(value):\n msg_code = MessageCodes.EXECUTION_FAILED\n raise TeradataMlException(Messages.get_message(msg_code,\n \"read contents of file '{}'\".format(value),\n 'File does not exist.'),\n msg_code)\n\n if not os.path.isfile(value):\n msg_code = MessageCodes.EXECUTION_FAILED\n raise TeradataMlException(Messages.get_message(msg_code,\n \"read contents of file '{}'\".format(value),\n 'Not a file.'),\n msg_code)\n\n elif name == 'vantage_version':\n if not isinstance(value, str):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str'),\n 
MessageCodes.UNSUPPORTED_DATATYPE)\n valid_versions = ['vantage1.0', 'vantage1.1', 'vantage1.3', 'vantage2.0']\n value = value.lower()\n if value not in valid_versions:\n raise TeradataMlException(Messages.get_message(MessageCodes.INVALID_ARG_VALUE,\n value,\n name,\n \"a value in {}\".format(valid_versions)),\n MessageCodes.INVALID_ARG_VALUE)\n\n elif name in ['val_install_location', 'byom_install_location', 'database_version',\n 'read_nos_function_mapping', 'write_nos_function_mapping',\n '_byom_model_catalog_database', '_byom_model_catalog_table',\n '_byom_model_catalog_license', '_byom_model_catalog_license_source']:\n if not isinstance(value, str):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n\n elif name in {'ues_url', 'auth_token', '_oauth_end_point', '_oauth_client_id',\n '_id_auth_token', '_refresh_token', '_pf_token_username_label',\n '_pf_token_password_label'}:\n\n if not isinstance(value, str):\n raise TypeError(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name, 'str'))\n\n if len(value) == 0:\n raise ValueError(Messages.get_message(MessageCodes.ARG_EMPTY, name))\n\n if name == 'ues_url':\n value = value[: -1] if value.endswith(\"/\") else value\n\n elif name in ['sandbox_container_id', '_container_started_by_teradataml',\n 'temp_table_database', 'temp_view_database',\n \"_byom_model_catalog_license_table\", \"_byom_model_catalog_license_database\"]:\n if not isinstance(value, str) and not isinstance(value, type(None)):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str or None'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n\n elif name in {'_auth_token_expiry_time'}:\n\n if not isinstance(value, float):\n raise TypeError(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name, 'float'))\n\n super().__setattr__(name, value)\n else:\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(self.__class__.__name__, name))\n\n\nconfigure = _Configure()","repo_name":"Teradata/teradata-dataiku-plugin","sub_path":"python-lib/teradataml/options/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":16556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3720328649","text":"import nltk\nimport logging as log\nfrom nltk import pos_tag, ne_chunk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tree import Tree\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\nimport recEntities\nfrom fuzzywuzzy import fuzz, process\nimport parse_tree\nimport db_handler\nimport util\nimport retry\nimport info\n\n\ndef recColoumns_temp(query_text):\n query_text_words = query_text.split()\n\n stem_columns = recEntities.init_datababse()\n # print(stem_columns)\n\n verb_to_col = recEntities.wrap_convert(stem_columns)\n\n print(verb_to_col)\n print(\"\\n\\n\")\n matched_words_col = {}\n # for col, col_var in verb_to_col.items():\n # for word in col_var:\n # res = process.extractOne(word, query_text_words)\n # if(res[1] > 70):\n # print(\"Column is \" + col)\n # print(\"Matched word is \" + res[0])\n # print(\"With accuracy \" + str(res[1]))\n # matched_words_col[res[0]] = col\n # print(\"\\n\\n\")\n\n for word in query_text_words:\n max_acc = 0\n col_mat = ''\n for col, col_var in verb_to_col.items():\n res = process.extractOne(word, col_var)\n if(res[1] > 70):\n print(\"word is \"+word)\n print(\"Column is \" + col)\n print(\"Matched word is \" + res[0])\n print(\"With accuracy \" + str(res[1]))\n if(res[1] > max_acc):\n print(res[1], max_acc, col, col_mat)\n max_acc = res[1]\n col_mat = col\n print(\"\\n\\n\")\n if(col_mat == ''):\n continue\n matched_words_col[word] = col_mat\n return matched_words_col\n\ndef recColoumns(query_text):\n if 'movie' in query_text:\n query_text.replace('movie', 'title')\n if 'movies' in query_text:\n query_text.replace('movies', 'title')\n\n query_text_words = query_text.split()\n\n if 'I' in query_text_words:\n query_text_words.remove('I')\n if 'i' in query_text_words:\n query_text_words.remove('i')\n \n stem_columns = recEntities.init_datababse()\n # print(stem_columns)\n\n verb_to_col = recEntities.wrap_convert(stem_columns)\n\n print(verb_to_col)\n print(\"\\n\\n\")\n matched_words_col = {}\n for col, col_var in verb_to_col.items():\n for word in col_var:\n res = process.extractOne(word, query_text_words)\n if(res[1] > 70):\n print(\"Column is \" + col)\n print(\"Matched word is \" + res[0])\n print(\"With accuracy \" + str(res[1]))\n matched_words_col[res[0]] = col\n print(\"\\n\\n\")\n return matched_words_col\n\ndef get_relationship(query_text, intent_info):\n str_parse_tree = parse_tree.get_parse_tree(query_text)\n matched_words_col = recColoumns_temp(query_text)\n\n if not matched_words_col:\n rows = retry.no_col_match(query_text)\n if rows:\n rows = [row.tolist() for row in rows]\n print(rows)\n return [rows]\n\n \n print(str_parse_tree)\n print(matched_words_col)\n db_inp_dic = {}\n col_type, col_pos = util.get_col_pos()\n print(col_pos)\n adj_dic = util.get_adj(query_text)\n rows = []\n for key, value in matched_words_col.items():\n pos_tag = col_pos[value]\n\n node, val = parse_tree.get_relation(str_parse_tree, key, pos_tag)\n\n print(\"\\n\\n\\n\\n\")\n print(node)\n print(\"\\n\\n\\n\\n\")\n print(val)\n\n if(val != False and val is not None):\n db_inp_dic[value.lower()] = val\n \n print(db_inp_dic)\n\n rows.append(db_handler.db_select(db_inp_dic, intent_info, col_type, adj_dic))\n print(rows)\n else:\n matched_rows = retry.retry(value, query_text)\n \n rows.append( [row.tolist() for row in matched_rows])\n print(rows)\n return rows\n\ndef get_intent_col(text):\n matched_words_col = 
recColoumns_temp(text)\n print('matched_words_col')\n \n print(matched_words_col)\n if not matched_words_col:\n return ['Title']\n elif 'movie' in text or 'movies' in text:\n return ['Title']\n cols = [val for key, val in matched_words_col.items()]\n return cols\n\ndef get_intent_info(query_text):\n intent = util.get_intent(query_text)\n \n for key, val in intent.items():\n if val:\n query_text = query_text.replace(key, '')\n cols = get_intent_col(query_text)\n\n number = util.get_number(query_text)\n\n \n intent_info = {'cols':cols, 'number':number, 'intent':intent}\n\n return intent_info\n\ndef chunking(tag_words):\n # grammar = r\"\"\"inter : {
????}\n # intent : {????+??}\"\"\"\n\n grammar = r\"\"\"inter : {????}\"\"\"\n\n parser = nltk.RegexpParser(grammar)\n chunked = parser.parse(tag_words)\n\n # print(chunked)\n # for subtree in chunked.subtrees(filter=lambda t: t.label() == 'intent'):\n # print(subtree.label())\n intent_text = ''\n inter_text = ''\n # for subtree in chunked.subtrees(filter=lambda t: t.label() == 'intent'):\n # intent_text = \" \".join([text for text, pos in subtree.leaves()])\n for subtree in chunked.subtrees(filter=lambda t: t.label() == 'inter'):\n inter_text = \" \".join([text for text, pos in subtree.leaves()])\n \n q = []\n i = []\n f = True\n for chunk in chunked:\n if type(chunk) != Tree:\n if f:\n i.append(chunk[0])\n else:\n q.append(chunk[0])\n else:\n f = False\n \n query_text = \" \".join(q)\n intent_text = \" \".join(i)\n # log.info(intent_text)\n print(\"Intent text is ---\" + intent_text)\n print(\"Intermediate text is ---\" + inter_text) \n print(\"query is ---\" + query_text)\n print(\"\\n\\n\\n\\n\\n\")\n return intent_text, inter_text, query_text\n\n\ndef chunkIntent(tag_words):\n\n grammar = r\"\"\"intent : {????+??}\"\"\"\n parser = nltk.RegexpParser(grammar)\n chunked = parser.parse(tag_words)\n\n # print(chunked)\n for subtree in chunked.subtrees(filter=lambda t: t.label() == 'Chunk'):\n print(subtree)\n\ndef groupNounVerb(tag_words):\n proper_nouns = []\n verbs = []\n nouns = []\n\n proper_nouns = get_continuous_chunks(tag_words)\n\n is_noun = lambda pos : pos[:2] == 'NN'\n \n\n for word, pos in tag_words:\n if pos.startswith('V'):\n verbs.append(word)\n if is_noun(pos):\n nouns.append(word)\n\n split_proper_nouns = []\n for proper_noun in proper_nouns:\n split_proper_nouns += proper_noun.split()\n \n temp_nouns = [noun for noun in nouns if noun not in split_proper_nouns]\n nouns = temp_nouns\n return nouns, proper_nouns, verbs\n\n\ndef filter(sentence):\n words = word_tokenize(sentence)\n\n # filtered_words = remove_stopwords(words)\n tag_words = tagging(words)\n # print(tag_words)\n nouns, proper_nouns, verbs = groupNounVerb(tag_words)\n split_input = []\n split_input = chunking(tag_words)\n # print(Intent_classification_final.predict(split_input[0]))\n print(\"\\n\\n\\n\")\n print(\"nouns \" + str(nouns))\n print(\"proper nouns \" + str(proper_nouns))\n print(\"verbs \" + str(verbs))\n print(\"\\n\\n\\n\")\n intent_info = get_intent_info(split_input[0])\n rows = get_relationship(split_input[2], intent_info)\n final_rows, intent_info = info.filter_info(rows, intent_info)\n return final_rows, intent_info\n\ndef remove_stopwords(words):\n stop_words = list(stopwords.words('english'))\n filtered_words = [word for word in words if word not in stop_words] \n return filtered_words\n\ndef tagging(words):\n return pos_tag(words)\n\ndef get_continuous_chunks(tagged_words):\n chunked = ne_chunk(tagged_words)\n # print(chunked)\n continuous_chunk = []\n current_chunk = []\n\n for i in chunked:\n if type(i) == Tree:\n current_chunk.append(\" \".join([token for token, pos in i.leaves()]))\n elif current_chunk:\n named_entity = \" \".join(current_chunk)\n if named_entity not in continuous_chunk:\n continuous_chunk.append(named_entity)\n current_chunk = []\n else:\n continue\n\n if continuous_chunk:\n named_entity = \" \".join(current_chunk)\n if named_entity not in continuous_chunk:\n continuous_chunk.append(named_entity)\n\n return continuous_chunk\n \n\nif __name__ == \"__main__\":\n filter(\"get movies of 2016\" 
)\n","repo_name":"karthikbhat13/databot","sub_path":"nlu_module/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"36389964295","text":"from application1 import app\nfrom flask import render_template\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\tsome1 = {'username': 'mike'}\n\tpostser = [\n\t{\n\t\t'author': {'username': 'John'},\n\t\t'body' : 'Beuatiful day in Portland!'\n\t},\n\t{\n\t\t'author' : {'username': 'Susan'},\n\t\t'body' : 'The Avengers is a cool movie'\n\t}\n\t]\n\n\treturn render_template('index.html', title='Home', user=some1, posts=postser)\n\n''' \n\n@app.route('/test1/')\ndef index1(name):\n\tsome1 = {'username': name+\"\\'s\"}\n\treturn render_template('index.html', title='Home', user=some1)\n'''\n\n","repo_name":"muthu-kr/blognew","sub_path":"application1/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73904313855","text":"from collections import Counter\nfrom itertools import groupby\n\nFONTSIZE = 15\n\nimport matplotlib\nmatplotlib.use('Agg')\nmatplotlib.rc('font', size=FONTSIZE)\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom numba import njit\nimport numpy as np\nfrom scipy.stats import spearmanr, pearsonr, norm, uniform\nimport tqdm\n\nimport crisper\n\nfrom bananas.pipelines import mean_warszycki_logki\nfrom bananas.worlds import (\n BalancedAgglomerativeClustering,\n CrossValidation,\n KernelTSNE,\n Morgan,\n MurckoScaffoldSplit,\n PaperSplit,\n SMILESToMol,\n SpectralClustering,\n StoredCopy,\n TanimotoMinMaxRepresentationMaker,\n TwoClassLogisticRegression,\n)\nfrom elderberries.benchmarks2018.problem import (\n Benchmarks2018StructuralSimilarity,\n Benchmarks2018ProblemClassificationSummary,\n)\nfrom elderberries.benchmarks2018.solutions import (\n fingerprinter_by_name,\n)\n\ndef target_name(target_uid):\n return {\n \"CHEMBL214\": \"5-HT1A\",\n \"CHEMBL224\": \"5-HT2A\",\n \"CHEMBL225\": \"5-HT2C\",\n \"CHEMBL3371\": \"5-HT6\",\n \"CHEMBL3155\": \"5-HT7\",\n \"CHEMBL226\": \"A1\",\n \"CHEMBL251\": \"A2A\",\n \"CHEMBL217\": \"D2\",\n \"CHEMBL264\": \"H3\",\n \"CHEMBL216\": \"M1\",\n }[target_uid]\n\nweighted_accuracy = Benchmarks2018ProblemClassificationSummary.metrics[\"Weighted_Accuracy\"][0]\naccuracy = Benchmarks2018ProblemClassificationSummary.metrics[\"Accuracy\"][0]\n\nspearman = lambda x, y: spearmanr(x,y)[0]\n\nto_pki = lambda logki: 9. - logki\n\ndef _table(rows, cols, content, delimiter='\\t'):\n result = [delimiter.join([''] + list(cols)) + '\\n']\n for row_name, row in zip(rows, content):\n result.append(delimiter.join([row_name] + list(row)) + '\\n')\n return ''.join(result)\n\ndef _arr_header_to_html(arr, header):\n from herbivores._html import (\n to_arr_header,\n columns_width,\n to_html,\n sanitize_html,\n doc_template,\n style_template,\n table_style_1,\n div_style_1,\n href,\n tablesorter,\n )\n href_chembl_compound = lambda uid: href(\n \"https://www.ebi.ac.uk/chembl/compound/inspect/{}\".format(uid),\n uid,\n )\n href_chembl_document = lambda uid: href(\n \"https://www.ebi.ac.uk/chembl/doc/inspect/{}\".format(uid),\n uid,\n )\n width = columns_width(arr, header, 30)\n arr, header = sanitize_html(arr), sanitize_html(header)\n for i, key in enumerate(header):\n if \"uid\" in key and not \"doc\" in key:\n arr[:,i] = np.vectorize(href_chembl_compound, otypes=(np.str,))(arr[:,i])\n if \"uid\" in key and \"doc\" in key:\n arr[:,i] = np.vectorize(href_chembl_document, otypes=(np.str,))(arr[:,i])\n return doc_template(\n style_template(\n table_style_1(\"data_table\"),\n div_style_1(None),\n ) + '\\n' + tablesorter(),\n to_html(arr, header, width, \"data_table\"),\n )\n\ndef jj_thresholded_ki(N=10, N_SPLITS=5, target_uid=\"CHEMBL214\", split_name=\"cv\", C=10., class_weight=\"balanced\", weighted_score=True):\n\n from elderberries.benchmarks2018.problem import Benchmarks2018StructuralSimilarity\n\n preds = {}\n scores = np.zeros((N_SPLITS,N,N), dtype=np.float)\n lspace = np.linspace(0.,3.,10)\n for i in range(N):\n for j in range(N):\n if i <= j:\n thresholds = tuple((lspace[x] for x in (i,j)))\n dataset = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=thresholds,\n )[\"final\"]\n\n if split_name == \"cv\":\n split_ = CrossValidation(\n source=dataset,\n n_groups=N_SPLITS,\n seed=43,\n )\n elif split_name == \"bac\":\n split_ = BalancedAgglomerativeClustering(\n 
source=Benchmarks2018StructuralSimilarity(source=dataset),\n kernel=\"kernel\",\n n_groups=N_SPLITS,\n )\n else:\n raise ValueError(\"split_name: {}\".format(split_name))\n\n for n_split, split in enumerate(split_.get_splits()):\n tr, te = split.get_train(), split.get_test()\n fpr = Morgan(\n radius=4,\n use_chirality=True,\n use_bond_types=True,\n use_features=False,\n converter=SMILESToMol(),\n )\n fp_tr = fpr(source=tr)\n fp_te = fpr(source=te)\n repr_maker = TanimotoMinMaxRepresentationMaker(\n fingerprint=fp_tr)\n repr_tr = repr_maker(fingerprint=fp_tr)\n repr_te = repr_maker(fingerprint=fp_te)\n model = TwoClassLogisticRegression(\n source=repr_tr,\n C=C,\n class_weight=class_weight,\n )\n pred = StoredCopy(source=model.predict(source=repr_te))\n preds[(n_split, i, j)] = (te, pred)\n\n crisper.evaluate(\n *[k for tup in preds.values() for k in tup],\n label=\"J&J\"\n )\n\n for (n_split, i, j), (te, pred) in tqdm.tqdm(preds.items()):\n if weighted_score:\n scores[n_split, i, j] = weighted_accuracy(None, te, pred)\n else:\n scores[n_split, i, j] = accuracy(None, te, pred)\n scores_ = scores.mean(axis=0)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(scores_, origin=\"lower\", vmin=sorted(set(scores_.ravel()))[1], vmax=sorted(scores_.ravel())[-1])\n lspace_ = np.array([\"{:.2f}\".format(to_pki(x)) for x in lspace])\n idx = np.arange(0,N,2)\n ax.set_xticks(idx)\n ax.set_xticklabels(lspace_[idx])\n ax.set_yticks(idx)\n ax.set_yticklabels(lspace_[idx])\n ax.set_xlabel(\"Inactivity threshold (pKi)\")\n ax.set_ylabel(\"Activity threshold (pKi)\")\n if weighted_score:\n ax.set_title(\"Weighted Accuracy\")\n else:\n ax.set_title(\"Accuracy\")\n fig.colorbar(im, ax=ax)\n fig.tight_layout()\n return fig\n\ndef fingercheats(\n target_uids, fpr_names, include_earliest_year=None,\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False):\n cor = np.zeros((len(target_uids), len(fpr_names)), dtype=np.float)\n cor2 = np.zeros((len(target_uids), len(fpr_names)), dtype=np.float)\n for i, target_uid in enumerate(target_uids):\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n for j, fpr_name in enumerate(fpr_names):\n fpr = fingerprinter_by_name[fpr_name]\n a = np.array(fpr(source=ds).data[(\"fingerprint\", \"data\")].sum(axis=1)).ravel()\n b = ds.data[\"value\"]\n cor[i,j] = spearmanr(a,b)[0]\n cor2[i,j] = pearsonr(a,b)[0]\n\n fig = plt.figure(figsize=(16,6))\n\n ax = fig.add_subplot(121)\n fig.colorbar(ax.imshow(cor), ax=ax, orientation=\"horizontal\")\n ax.set_title(\"Spearman rank-order correlation coefficient\")\n ax.set_yticks(np.arange(len(target_uids)))\n ax.set_yticklabels([target_name(u) for u in target_uids])\n ax.set_xticks(range(0, len(fpr_names), 2))\n ax.set_xticklabels([\"FP{}\".format(i+1) for i in range(0, len(fpr_names), 2)])\n\n ax = fig.add_subplot(122)\n fig.colorbar(ax.imshow(cor2), ax=ax, orientation=\"horizontal\")\n ax.set_title(\"Pearson correlation coefficient\")\n ax.set_yticks(np.arange(len(target_uids)))\n ax.set_yticklabels([target_name(u) for u in target_uids])\n ax.set_xticks(range(0, len(fpr_names), 2))\n ax.set_xticklabels([\"FP{}\".format(i+1) for i in range(0, len(fpr_names), 2)])\n\n return (\n fig,\n ''.join([\"FP{}: {}\\n\".format(i+1, fpr_name) \\\n for i, fpr_name in enumerate(fpr_names)]),\n )\n\ndef fingercheats_thr(\n target_uids, 
fpr_names, threshold=2., include_earliest_year=None,\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False):\n from sklearn.linear_model import LogisticRegression\n from sklearn.metrics import balanced_accuracy_score\n result = '\\t'.join([''] + [\"FP{}\".format(i+1) for i in range(len(fpr_names))]) + '\\n'\n acc = np.zeros((len(target_uids), len(fpr_names)), dtype=np.float)\n for i, target_uid in enumerate(target_uids):\n row = \"{}\\t\".format(target_name(target_uid))\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=threshold,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n for j, fpr_name in enumerate(fpr_names):\n fpr = fingerprinter_by_name[fpr_name]\n X = np.array(fpr(source=ds).data[(\"fingerprint\", \"data\")].sum(axis=1)).reshape(-1,1)\n y = ds.data[\"value\"].ravel()\n assert set(y) == set([0., 1.])\n lr = LogisticRegression(class_weight=\"balanced\")\n lr.fit(X, y)\n acc[i,j] = balanced_accuracy_score(y, lr.predict(X))\n row += \"{:.3f}\\t\".format(acc[i,j])\n result += row + '\\n'\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_subplot(111)\n fig.colorbar(ax.imshow(acc), ax=ax, orientation=\"horizontal\")\n ax.set_title(\"Weighted accuracy\")\n ax.set_yticks(np.arange(len(target_uids)))\n ax.set_yticklabels([target_name(u) for u in target_uids])\n ax.set_xticks(range(0, len(fpr_names), 2))\n ax.set_xticklabels([\"FP{}\".format(i+1) for i in range(0, len(fpr_names), 2)])\n return (\n fig,\n result,\n ''.join([\"FP{}: {}\\n\".format(i+1, fpr_name) \\\n for i, fpr_name in enumerate(fpr_names)]),\n )\n\ndef min_max_mean_per_paper(\n target_uids,\n include_earliest_year,\n ic50_conversion_strategy,\n fit_ic50,\n min_paper_size):\n fig = plt.figure(figsize=(12.3, len(target_uids)*4))\n counter = 0\n axes = []\n results = []\n for target_uid in target_uids:\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n result = []\n doc_uid = d.data[\"doc_uid\"]\n value = to_pki(d.data[\"value\"])\n key = lambda x: x[0]\n for k, g in groupby(sorted(zip(doc_uid, value), key=key), key):\n gu, gv = zip(*g)\n if len(gv) >= min_paper_size:\n tup = (np.min(gv), np.max(gv), np.mean(gv))\n result.append(tup)\n results.append(tup)\n for h in zip(*result):\n counter += 1\n ax = fig.add_subplot(len(target_uids), 3, counter)\n axes.append(ax)\n ax.hist(h, bins=43, range=(value.min(), value.max()))\n if counter % 3 == 1:\n ax.set_ylabel(target_name(target_uid) + '\\n')\n ax.set_xlabel({\n 1: \"Min pKi per paper (earliest)\",\n 2: \"Max pKi per paper (earliest)\",\n 0: \"Mean pKi per paper (earliest)\",\n }[counter % 3])\n xlim = (np.array(results).min()-.1, np.array(results).max()+.1)\n [ax.set_xlim(xlim) for ax in axes] \n fig.tight_layout()\n return fig\n\ndef how_many_records_per_paper(\n target_uids,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True):\n fig = plt.figure(figsize=(4.3, len(target_uids)*4))\n for i, target_uid in enumerate(target_uids):\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n 
)[\"final\"]\n ax = fig.add_subplot(len(target_uids), 1, i+1)\n v = list(Counter(d.data[\"doc_uid\"]).values())\n ax.hist(v, bins=int(np.max(v)))\n ax.set_xlabel(\"Earliest records per paper\")\n ax.set_ylabel(target_name(target_uid) + \"\\n\")\n ax.set_yscale(\"log\", nonposy='clip')\n fig.tight_layout()\n return fig\n\ndef earliest_year_variants(target_uids):\n def compare_year(*ds):\n uids = np.array(sorted(set.union(*[set(d.data[\"uid\"]) for d in ds])))\n years = np.empty((len(uids),len(ds)),dtype=np.float)\n years.fill(np.nan)\n for i, d in enumerate(ds):\n idx = np.searchsorted(uids, d.data[\"uid\"])\n years[idx,i] = d.data[\"year\"]\n return years\n result = [\n \"Reference method: 'all_bioactivity_records'\\n\",\n \"Other:\\n\",\n \" 'Ki_IC50_records'\\n\",\n \" 'Ki_records'\\n\",\n \"target: differing/total\\n\",\n ]\n for target_uid in target_uids:\n ds = []\n for include_earliest_year in [\"all_bioactivity_records\", \"Ki_IC50_records\", \"Ki_records\"]:\n ds.append(mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n )[\"final\"])\n y = compare_year(*ds)\n a = np.all(\n np.logical_or(\n y == np.nanmax(y, axis=1).reshape(-1,1),\n np.isnan(y)\n ),\n axis=1,\n )\n result.append(\"{}: {}/{}\\n\".format(target_name(target_uid), len(a)-sum(a), len(a)))\n return ''.join(result)\n\ndef activity_variants(target_uids, conversion_strategies, reference_idx):\n def compare_Ki(*ds):\n uids = np.array(sorted(set.union(*[set(d.data[\"uid\"]) for d in ds])))\n value = np.empty((len(uids),len(ds)),dtype=np.float)\n value.fill(np.nan)\n for i, d in enumerate(ds):\n idx = np.searchsorted(uids, d.data[\"uid\"])\n assert np.all(uids[idx] == d.data[\"uid\"])\n value[idx,i] = d.data[\"value\"]\n return value\n\n fig = plt.figure(figsize=(4*len(conversion_strategies),4*len(target_uids)))\n fig2 = plt.figure(figsize=(4*len(conversion_strategies),4*len(target_uids)))\n ax_counter = 0\n for target_uid in target_uids:\n ds = []\n corrections = []\n for ic50_conversion_strategy, fit_ic50, _ in conversion_strategies:\n dct = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )\n ds.append(dct[\"final\"])\n correction = None\n if fit_ic50:\n for n in reversed(dct[\"data_nodes\"]):\n try:\n correction = n.data[\"IC50_correction\"]\n break\n except KeyError:\n pass\n assert correction is not None\n else:\n correction = 0.\n corrections.append(correction)\n value = compare_Ki(*ds)\n ref_label = conversion_strategies[reference_idx][2]\n for i, (_, fit_ic50, label) in enumerate(conversion_strategies):\n ax_counter += 1\n ax = fig.add_subplot(len(target_uids),len(conversion_strategies),ax_counter)\n ax.scatter(to_pki(value[:,reference_idx]), to_pki(value[:,i]), s=8)\n if fit_ic50:\n ax.set_title(\"(coefficient: {:.3f})\".format(2*10**(-corrections[i])))\n ax.set_xlabel(\"{} (reference)\".format(ref_label))\n ax.set_ylabel(\n target_name(target_uid) + '\\n\\n' + label if i == 0 else label\n )\n ax = fig2.add_subplot(len(target_uids),len(conversion_strategies),ax_counter)\n ax.hist(to_pki(ds[i].data[\"value\"]), bins=43)\n ax.set_xlabel(label)\n if i == 0:\n ax.set_ylabel(target_name(target_uid) + '\\n')\n fig.tight_layout()\n fig2.tight_layout()\n return fig, fig2\n\ndef median_thresholded_activity_variants(\n 
target_uids, conversion_strategies):\n medians = np.zeros(\n (len(target_uids), len(conversion_strategies)),\n dtype=np.float,\n )\n for i, target_uid in enumerate(target_uids):\n for j, (ic50_conversion_strategy, fit_ic50, _) in enumerate(conversion_strategies):\n medians[i,j] = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=\"median\",\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"].data[\"value_threshold\"]\n medians = to_pki(medians)\n labels = [l for _, _, l in conversion_strategies]\n fig = plt.figure(figsize=(10,7))\n ax = fig.add_subplot(111)\n im = ax.imshow(medians.T)\n\n ax.set_xticks(range(len(target_uids)))\n ax.set_xticklabels([target_name(u) for u in target_uids])\n# ax.set_xlabel(\"Target\")\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)\n\n ax.set_yticks(range(len(conversion_strategies)))\n ax.set_yticklabels(labels)\n# ax.set_ylabel(\"log Ki variant\")\n\n fig.colorbar(im, ax=ax, orientation='horizontal')\n ax.set_title(\"Median pKi\", fontsize=int(FONTSIZE*1.5))\n\n fig.tight_layout()\n txt = _table(\n rows=np.array([target_name(u) for u in target_uids]),\n cols=np.array(labels),\n content=np.vectorize(lambda f: \"{:.3f}\".format(f))(medians),\n delimiter='\\t'\n )\n return fig, txt\n\ndef density_bias(target_uids):\n def _distance_to_nth_neighbour(kernel, value):\n result = []\n for row in reversed(np.sort(kernel, axis=0)):\n result.append(spearman(row, value))\n return np.array(result, dtype=np.float)\n def _n_neighbours_in_radius(kernel, value):\n result = []\n lsp = np.linspace(0,1,201)\n for thr in lsp:\n x = np.sum(kernel>=thr, axis=1)\n result.append(spearman(x, value))\n return lsp, np.array(result, dtype=np.float)\n def _stationary(kernel, value, n=None):\n if n is not None:\n mask = np.zeros(kernel.shape, dtype=np.bool)\n for i, row in enumerate(kernel):\n mask[i,np.argsort(row)[-n:]] = True\n kernel = 0.001 * np.ones(kernel.shape, dtype=np.float)\n kernel[mask] = 1.\n _a = kernel/kernel.sum(axis=0).reshape(1,-1)\n a = _a - np.eye(len(value))\n b = np.zeros(len(value)+1)\n a = np.concatenate((a, np.ones(len(value)).reshape(1,-1)), axis=0)\n b[-1] = 1.\n x = np.linalg.lstsq(a,b)[0]\n return spearman(x, value)\n fig = plt.figure(figsize=(8, 4*len(target_uids)))\n for i, target_uid in enumerate(target_uids):\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False,\n )[\"final\"]\n c_doc_uid = Counter(ds.data[\"doc_uid\"])\n x = np.vectorize(lambda uid: c_doc_uid[uid])(ds.data[\"doc_uid\"])\n y = value = to_pki(ds.data[\"value\"])\n kernel = Benchmarks2018StructuralSimilarity(source=ds).data[\"kernel\"]\n result1 = _distance_to_nth_neighbour(kernel, value)\n lsp2, result2 = _n_neighbours_in_radius(kernel, value)\n _min, _max = min(np.nanmin(result1), np.nanmin(result2)), max(np.nanmax(result1), np.nanmax(result2))\n\n ax = fig.add_subplot(len(target_uids),2,2*i+1)\n x = np.arange(len(result1))\n mask = np.logical_not(np.isnan(result1))\n ax.plot(x[mask], result1[mask])\n ax.set_xlabel(\"Distance-sorted neighbours\")\n ax.set_ylabel(target_name(target_uid) + \"\\n\\nSpearman's Rho\")\n ax.set_ylim((_min-.05, _max+.05))\n\n ax = fig.add_subplot(len(target_uids),2,2*i+2)\n mask = np.logical_not(np.isnan(result2))\n ax.plot(lsp2[mask], result2[mask])\n 
ax.set_xlabel(\"Similarity threshold\")\n ax.set_ylabel(\"Spearman's Rho\")\n ax.set_ylim((_min-.05, _max+.05))\n\n fig.tight_layout()\n return fig\n\ndef similar_compounds(target_uid, n_top, n_bottom, n_random, seed=43):\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False,\n )[\"final\"]\n uid = ds.data[\"uid\"]\n kernel = Benchmarks2018StructuralSimilarity(source=ds).data[\"kernel\"]\n ix, iy = np.tril_indices(kernel.shape[0], -1)\n idx = np.argsort(kernel[ix, iy])\n l = len(idx)\n idx = idx[np.sort(np.concatenate((\n np.arange(n_bottom),\n np.arange(l-n_top, l),\n n_bottom + np.random.RandomState(seed=seed).choice(\n l - n_top - n_bottom,\n size=n_random,\n replace=False,\n )\n )))]\n ix, iy = ix[idx], iy[idx]\n uid1, uid2 = uid[ix], uid[iy]\n sim = np.vectorize(lambda f: \"~{:.4f}\".format(f))(kernel[ix, iy])\n arr = np.stack((uid1, uid2, sim), axis=1)\n header = np.array([\"uid\", \"uid\", \"similarity\"])\n return _arr_header_to_html(arr, header)\n\ndef same_paper_cross_paper(target_uids):\n fig = plt.figure(figsize=(len(target_uids)*4, 4))\n for i, target_uid in enumerate(target_uids):\n d = Benchmarks2018StructuralSimilarity(source=mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"])\n kernel = d.data[\"kernel\"]\n same_paper = d.data[\"doc_uid\"].reshape(1,-1) == d.data[\"doc_uid\"].reshape(-1,1)\n cross_paper = np.logical_not(same_paper)\n same_paper[range(len(same_paper)),range(len(same_paper))] = False\n ax = fig.add_subplot(1,len(target_uids),i+1)\n ax.hist(kernel.ravel()[same_paper.ravel()], bins=43, label=\"same paper\", alpha=.5, density=True)\n ax.hist(kernel.ravel()[cross_paper.ravel()], bins=43, label=\"cross paper\", alpha=.5, density=True)\n ax.legend()\n ax.set_xlabel(\"Structural similarity\")\n ax.set_title(target_name(target_uid))\n fig.tight_layout()\n return fig\n\ndef year_structural_pareto(target_uids):\n @njit\n def _first(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n raise ValueError()\n result = []\n for i, target_uid in enumerate(target_uids):\n result.append(\"TARGET: {}\".format(target_name(target_uid)))\n result.append(\"\")\n d = Benchmarks2018StructuralSimilarity(source=mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"])\n kernel = d.data[\"kernel\"]\n year = d.data[\"year\"]\n idx = np.flip(np.argsort(kernel.ravel()))\n delta_year = np.abs(year.reshape(-1,1) - year.reshape(1,-1)).ravel()[idx]\n for dy in sorted(set(delta_year.ravel())-set([0,0.])):\n _idx = idx[_first(delta_year, dy)]\n i, j = _idx // kernel.shape[0], _idx % kernel.shape[0]\n result.append(\"SIMILARITY: {:.3f}, DELTA YEAR: {}\".format(\n kernel[i,j],\n int(dy)\n ))\n for m in (i,j):\n result.append(\"UID: {}, SMILES: {}, VALUE: {}, YEAR: {}, DOC_UID: {}\".format(\n d.data[\"uid\"][m],\n d.data[\"smiles\"][m],\n d.data[\"value\"][m],\n int(d.data[\"year\"][m]),\n d.data[\"doc_uid\"][m],\n ))\n result.append(\"\")\n return '\\n'.join(result) + '\\n'\n\ndef aaaiiaii(value, groups, kernel, time_split):\n from numba 
import jit, njit\n result_all = np.zeros((kernel.size, 4), dtype=np.float)\n result_all_groups = np.zeros((kernel.size, 4), dtype=np.float)\n result_all_counter = np.zeros(4, dtype=np.int)\n result_nearest = np.empty((kernel.shape[0],2), dtype=np.float)\n result_nearest.fill(np.nan)\n @njit\n def f(value, groups, kernel, result_all, result_all_groups, result_all_counter, result_nearest):\n for i in range(kernel.shape[0]):\n for j in range(kernel.shape[1]):\n if groups[i] > groups[j] or (groups[i] < groups[j] and not time_split): # test to train\n idx = 3-(2*int(value[i])+int(value[j])) # aa ai ia ii\n result_all[result_all_counter[idx], idx] = kernel[i,j]\n result_all_groups[result_all_counter[idx], idx] = groups[i]\n result_all_counter[idx] += 1\n if np.isnan(result_nearest[i, value[j]]) or kernel[i,j] > result_nearest[i, value[j]]:\n result_nearest[i, value[j]] = kernel[i,j]\n f(value, groups, kernel, result_all, result_all_groups, result_all_counter, result_nearest)\n return {\n \"aa\": (result_all[:result_all_counter[0],0], result_all_groups[:result_all_counter[0],0]),\n \"ai\": (result_all[:result_all_counter[1],1], result_all_groups[:result_all_counter[1],1]),\n \"ia\": (result_all[:result_all_counter[2],2], result_all_groups[:result_all_counter[2],2]),\n \"ii\": (result_all[:result_all_counter[3],3], result_all_groups[:result_all_counter[3],3]),\n \"nearest_i\": result_nearest[:,0],\n \"nearest_a\": result_nearest[:,1],\n }\n\ndef splits_analysis(target_uids):\n def plot(value, groups, kernel, axes, split_label, time_split=False):\n dct = aaaiiaii(value, groups, kernel, time_split)\n\n not_nan_mask = np.logical_not(np.logical_or(\n np.isnan(dct[\"nearest_a\"]),\n np.isnan(dct[\"nearest_i\"]),\n ))\n aa, ai, ia, ii = (\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==0],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==0],\n )\n\n histtype, linewidth = \"step\", 3\n axes[0].hist(\n aa, bins=43, label=\"AA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].hist(\n ai, bins=43, label=\"AI\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].hist(\n ia, bins=43, label=\"IA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].hist(\n ii, bins=43, label=\"II\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].set_xlim((0.,1.))\n axes[0].set_xlabel(\"Nearest neighbour similarity\")\n axes[0].set_ylabel(split_label + '\\n')\n axes[0].legend()\n\n S = 8\n axes[1].scatter(ia, ii, label=\"inactive\", c=\"green\", s=S, alpha=.3)\n axes[1].scatter(aa, ai, label=\"active\", c=\"xkcd:sky blue\", s=S, alpha=.3)\n axes[1].scatter(ia.mean(), ii.mean(), facecolors=\"none\", edgecolors='red', s=150)\n axes[1].scatter(ia.mean(), ii.mean(), c=\"green\", marker=\"x\", s=43)\n axes[1].scatter(aa.mean(), ai.mean(), facecolors=\"none\", edgecolors=\"red\", s=150)\n axes[1].scatter(aa.mean(), ai.mean(), c=\"blue\", marker=\"x\", s=43)\n axes[1].plot([0.2, 0.9], [0.2, 0.9])\n axes[1].set_aspect(\"equal\")\n axes[1].legend()\n axes[1].set_xlabel(\"Nearest active similarity\")\n axes[1].set_ylabel(\"Nearest inactive similarity\")\n\n return [np.mean(x) for x in (aa, ai, ia, ii)]\n\n figs = []\n muv_result = []\n for target_uid in target_uids:\n muv_result.append(target_name(target_uid))\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=2.,\n 
include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n value = d.data[\"value\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n kernel = kd.data[\"kernel\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd, \n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n year_groups = d.data[\"year\"]\n fig = plt.figure(figsize=(8,24))\n fig.axes_counter = 0\n def _axes():\n axes = []\n for _ in range(2):\n fig.axes_counter += 1\n axes.append(fig.add_subplot(6,2,fig.axes_counter))\n return axes\n for groups, split_label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n ):\n aa, ai, ia, ii = plot(value, groups, kernel, _axes(), split_label)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n aa, ai, ia, ii = plot(value, year_groups, kernel, _axes(), split_label=\"time split\", time_split=True)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n fig.tight_layout()\n figs.append(fig)\n\n return tuple(['\\n'.join(muv_result)+'\\n'] + figs)\n\ndef splits_analysis_3_columns(target_uids):\n def plot(value, groups, kernel, axes, split_label, time_split=False):\n dct = aaaiiaii(value, groups, kernel, time_split)\n for k in [\"aa\", \"ai\", \"ia\", \"ii\"]:\n axes[0].hist(\n dct[k][0], bins=43, label=k.upper(),\n density=True, histtype=\"step\", linewidth=3,\n )\n axes[0].set_xlim((0.,1.))\n axes[0].set_xlabel(\"All pairs similarity\")\n axes[0].set_ylabel(split_label + '\\n')\n axes[0].legend()\n\n not_nan_mask = np.logical_not(np.logical_or(\n np.isnan(dct[\"nearest_a\"]),\n np.isnan(dct[\"nearest_i\"]),\n ))\n aa, ai, ia, ii = (\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==0],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==0],\n )\n\n histtype, linewidth = \"step\", 3\n axes[1].hist(\n aa, bins=43, label=\"AA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].hist(\n ai, bins=43, label=\"AI\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].hist(\n ia, bins=43, label=\"IA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].hist(\n ii, bins=43, label=\"II\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].set_xlim((0.,1.))\n axes[1].set_xlabel(\"Nearest neighbour similarity\")\n axes[1].legend()\n\n S = 8\n axes[2].scatter(ia, ii, label=\"inactive\", c=\"green\", s=S, alpha=.3)\n axes[2].scatter(aa, ai, label=\"active\", c=\"xkcd:sky blue\", s=S, alpha=.3)\n axes[2].scatter(ia.mean(), ii.mean(), facecolors=\"none\", edgecolors='red', s=150)\n axes[2].scatter(ia.mean(), ii.mean(), c=\"green\", marker=\"x\", s=43)\n axes[2].scatter(aa.mean(), ai.mean(), facecolors=\"none\", edgecolors=\"red\", s=150)\n 
axes[2].scatter(aa.mean(), ai.mean(), c=\"blue\", marker=\"x\", s=43)\n axes[2].plot([0.2, 0.9], [0.2, 0.9])\n axes[2].set_aspect(\"equal\")\n axes[2].legend()\n axes[2].set_xlabel(\"Nearest active similarity\")\n axes[2].set_ylabel(\"Nearest inactive similarity\")\n\n return [np.mean(x) for x in (aa, ai, ia, ii)]\n\n figs = []\n muv_result = []\n for target_uid in target_uids:\n muv_result.append(target_name(target_uid))\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=2.,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n value = d.data[\"value\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n kernel = kd.data[\"kernel\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd, \n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n year_groups = d.data[\"year\"]\n fig = plt.figure(figsize=(12,24))\n fig.axes_counter = 0\n def _axes():\n axes = []\n for _ in range(3):\n fig.axes_counter += 1\n axes.append(fig.add_subplot(6,3,fig.axes_counter))\n return axes\n for groups, split_label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n ):\n aa, ai, ia, ii = plot(value, groups, kernel, _axes(), split_label)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n aa, ai, ia, ii = plot(value, year_groups, kernel, _axes(), split_label=\"time split\", time_split=True)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n fig.tight_layout()\n figs.append(fig)\n\n return tuple(['\\n'.join(muv_result)+'\\n'] + figs)\n\ndef splits_analysis_2(target_uids):\n fig = plt.figure(figsize=(4*(len(target_uids)+1),4))\n for i, target_uid in enumerate(target_uids):\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=2.,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n value = d.data[\"value\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n kernel = kd.data[\"kernel\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd, \n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n year_groups = d.data[\"year\"]\n ax = fig.add_subplot(1,len(target_uids),i+1)\n for groups, label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold 
split\"),\n ):\n dct = aaaiiaii(value, groups, kernel, time_split=False)\n x = np.maximum(dct[\"nearest_a\"], dct[\"nearest_i\"])\n ax.hist(\n x, bins=43, label=label,\n density=True, histtype=\"step\", linewidth=1,\n )\n dct = aaaiiaii(value, year_groups, kernel, time_split=True)\n x = np.maximum(dct[\"nearest_a\"], dct[\"nearest_i\"])\n x = x[np.logical_not(np.isnan(x))]\n ax.hist(\n x, bins=43, label=\"time split\",\n density=True, histtype=\"step\", linewidth=3,\n )\n ax.set_xlabel(\"Nearest neighbour similarity\")\n ax.set_title(target_name(target_uid))\n if i == len(target_uids) - 1:\n ax.legend(fontsize=\"small\", bbox_to_anchor=(1.04,1))\n fig.tight_layout()\n return fig\n\ndef simplest_dataset_hist(mus):\n fig = plt.figure(figsize=(4*len(mus), 8))\n alpha = .6\n for i, mu in enumerate(mus):\n\n ax = fig.add_subplot(2,len(mus),i+1)\n xs = np.linspace(-4.3,4.3,437)\n ax.fill_between(\n xs, norm(loc=mu).pdf(xs),\n label='\"inactive\"', alpha=alpha,\n )\n ax.fill_between(\n xs, norm(loc=-mu).pdf(xs),\n label='\"active\"', alpha=alpha,\n )\n ax.set_xlabel(\"mean: {:.1f}\".format(mu))\n ax.set_ylim((0.,0.6))\n ax.legend()\n if i == 0:\n ax.set_ylabel(\"Normal\\n\")\n\n ax = fig.add_subplot(2,len(mus),len(mus)+i+1)\n xs = np.linspace(-2.1,2.1,437)\n ax.fill_between(\n xs, uniform(loc=mu-1, scale=2.).pdf(xs),\n label='\"inactive\"', alpha=alpha,\n )\n ax.fill_between(\n xs, uniform(loc=-mu-1, scale=2.).pdf(xs),\n label='\"active\"', alpha=alpha,\n )\n ax.set_xlabel(\"mean: {:.1f}\".format(mu))\n ax.set_ylim((0.,0.7))\n ax.legend()\n if i == 0:\n ax.set_ylabel(\"Uniform\\n\")\n\n fig.tight_layout()\n return fig\n\ndef muv_on_simplest_dataset(mus, ns):\n def dataset(mu, n_train, n_test, distr, seed=43):\n if isinstance(n_train, int):\n rng = np.random.RandomState(seed=43)\n if distr == \"normal\":\n distr = rng.normal\n acc = norm.cdf(mu)\n elif distr == \"uniform\":\n distr = lambda size: rng.uniform(size=size) * 2. 
- 1.\n acc = min(1., .5 + .5*abs(mu))\n else:\n raise ValueError(distr)\n tr0 = distr(size=n_train) + mu\n tr1 = distr(size=n_train) - mu\n te0 = distr(size=n_test) + mu\n te1 = distr(size=n_test) - mu\n def _dist(x,y):\n return np.abs(x.reshape(-1,1)-y.reshape(1,-1))\n aa = np.min(_dist(te1, tr1), axis=1).mean()\n ai = np.min(_dist(te1, tr0), axis=1).mean()\n ia = np.min(_dist(te0, tr1), axis=1).mean()\n ii = np.min(_dist(te0, tr0), axis=1).mean()\n return {\n \"acc\": acc,\n \"muv\": aa - ai,\n \"atomwise\": aa - ai + ii - ia,\n }\n elif n_train == \"infty\" and n_test == \"infty\":\n if distr == \"normal\":\n aa, ai, ia, ii = 0., 0., 0., 0.\n return {\n \"acc\": norm.cdf(mu),\n \"muv\": aa - ai,\n \"atomwise\": aa - ai + ii - ia,\n }\n elif distr == \"uniform\":\n aa, ai, ia, ii = 0., min(mu**2,1.), min(mu**2,1.), 0.\n return {\n \"acc\": min(1., .5 + .5*abs(mu)),\n \"muv\": aa - ai,\n \"atomwise\": aa - ai + ii - ia,\n }\n else:\n raise ValueError()\n else:\n raise ValueError()\n result_normal = np.zeros((3, len(ns), len(mus)), dtype=np.float)\n result_uniform = np.zeros((3, len(ns), len(mus)), dtype=np.float)\n for i, n in enumerate(ns):\n for j, mu in enumerate(mus):\n d = dataset(mu, n, n, \"normal\")\n result_normal[0,i,j] = d[\"acc\"]\n result_normal[1,i,j] = d[\"muv\"]\n result_normal[2,i,j] = d[\"atomwise\"]\n\n d = dataset(mu, n, n, \"uniform\")\n result_uniform[0,i,j] = d[\"acc\"]\n result_uniform[1,i,j] = d[\"muv\"]\n result_uniform[2,i,j] = d[\"atomwise\"]\n fig = plt.figure(figsize=(24,6))\n axes = [fig.add_subplot(1,4,i+1) for i in range(4)]\n alpha = .6\n s = 43\n for i, n in enumerate(ns):\n label = \"4 x {}\".format(n) if n != \"infty\" else '∞'\n\n ax = axes[0]\n ax.scatter(\n result_normal[1,i,:], result_normal[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure (MUV part)\")\n ax.set_title(\"Normal\")\n\n ax = axes[1]\n ax.scatter(\n result_normal[2,i,:], result_normal[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure\")\n ax.set_title(\"Normal\")\n\n ax = axes[2]\n ax.scatter(\n result_uniform[1,i,:], result_uniform[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure (MUV part)\")\n ax.set_title(\"Uniform\")\n\n ax = axes[3]\n ax.scatter(\n result_uniform[2,i,:], result_uniform[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure\")\n ax.set_title(\"Uniform\")\n\n [ax.legend(loc=\"lower left\", title=\"Benchmark size\", fontsize=\"small\") for ax in axes]\n fig.tight_layout()\n return fig\n\ndef splits_tsne(target_uids):\n S = 8\n fig = plt.figure(figsize=(30,4*len(target_uids)))\n counter = 0\n for target_uid in target_uids:\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n tsne = KernelTSNE(\n source=kd,\n kernel=\"kernel\",\n n_components=2,\n perplexity=43.,\n early_exaggeration=43.,\n learning_rate=4343.,\n ).data[\"tsne\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n 
).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n\n for c, split_label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n (d.data[\"year\"], \"time split\")):\n counter += 1\n ax = fig.add_subplot(len(target_uids),6,counter)\n a = ax.scatter(tsne.T[0], tsne.T[1], s=S, c=c)\n ax.set_xlim((-105,105))\n ax.set_ylim((-105,105))\n ax.set_aspect(\"equal\")\n ax.set_xlabel(split_label)\n if split_label == \"paper split\":\n ax.set_ylabel(target_name(target_uid) + '\\n')\n bar = fig.colorbar(a)\n bar.locator = MaxNLocator(integer=True)\n bar.update_ticks()\n fig.tight_layout()\n return fig\n\ndef noise_analysis(\n target_uids,\n delta_measurement_threshold,\n delta_measurement_upper_threshold,\n ic50_conversion_strategy,\n fit_ic50):\n fig = plt.figure(figsize=(16,len(target_uids)*4))\n N_PLOTS = 4\n counter = 0\n t1 = []\n t2 = []\n for target_uid in target_uids:\n _d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )\n all_values = _d[\"data_nodes\"][-2] # threshold: None -> -2, not None -> -3\n mean_values = _d[\"final\"]\n\n count_uid = Counter(all_values.data[\"uid\"])\n how_many_samples = np.vectorize(lambda uid: count_uid[uid])(all_values.data[\"uid\"])\n\n uid_to_mean_value = dict(zip(mean_values.data[\"uid\"], mean_values.data[\"value\"]))\n a = np.vectorize(lambda uid: uid_to_mean_value[uid])(all_values.data[\"uid\"])\n b = all_values.data[\"value\"]\n\n two_measurements_same_paper = []\n two_measurements_different_paper = []\n key = lambda x: x[0]\n for k, g in groupby(sorted(zip(all_values.data[\"uid\"], all_values.data[\"smiles\"], all_values.data[\"value\"], all_values.data[\"doc_uid\"]), key=key), key):\n gu, gs, gv, gdu = zip(*g)\n if len(gu) == 2:\n if np.abs(gv[0]-gv[1]) > delta_measurement_threshold:\n if np.abs(gv[0]-gv[1]) <= delta_measurement_upper_threshold:\n if gdu[0] == gdu[1]:\n two_measurements_same_paper.append(gv)\n else:\n two_measurements_different_paper.append(gv)\n else:\n t1.append(\"TARGET: {}, UID: {}, SMILES: {}, DOC1: {}, VALUE1: {}, DOC2: {}, VALUE2: {}\".format(\n target_name(target_uid),\n gu[0],\n gs[0],\n gdu[0],\n gv[0],\n gdu[1],\n gv[1],\n ))\n\n _a = np.array(two_measurements_same_paper)\n _b = np.array(two_measurements_different_paper)\n _a = np.abs(_a[:,0]-_a[:,1])\n _b = np.abs(_b[:,0]-_b[:,1])\n\n counter += 1\n ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n ax.hist(_a, bins=43)\n ax.set_xlabel(\"pKi abs. difference, same paper\")\n ax.set_ylabel(target_name(target_uid) + \"\\n\")\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n\n counter += 1\n ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n ax.hist(_b, bins=43)\n ax.set_xlabel(\"pKi abs. 
difference, two papers\")\n t2.append(\"TARGET UID: {}, SAME: {:.3f} [{} SAMPLES], DIFFERENT: {:.3f} [{} SAMPLES]\".format(\n target_name(target_uid),\n np.mean(np.square(_a))/2,\n len(_a),\n np.mean(np.square(_b))/2,\n len(_b),\n ))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n\n result = []\n for j in range(1,np.max(list(count_uid.values()))):\n mask = how_many_samples > j\n result.append(np.square(b[mask]-a[mask]).mean())\n\n counter += 1\n ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n ax.scatter(to_pki(a), to_pki(b), s=8)\n ax.set_xlabel(\"Mean pKi\")\n ax.set_ylabel(\"Reported pKi\")\n\n counter += 1\n count_count_uid = Counter(count_uid.values())\n x = np.array([\n count_count_uid[1],\n count_count_uid[2],\n len(mean_values.data[\"uid\"])-count_count_uid[1]-count_count_uid[2],\n ])\n assert sum(x) == len(mean_values.data[\"uid\"])\n ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n ax.bar(x=[0,1,2], height=x)\n ax.set_xticks(np.arange(3))\n ax.set_xticklabels([\"1\", \"2\", \">2\"])\n ax.set_xlabel(\"Records per SMILES\")\n ax.set_yscale(\"log\", nonposy='clip')\n\n for rect, label in zip(ax.patches, x):\n ax.text(\n rect.get_x() + rect.get_width() / 2,\n rect.get_height() + 5,\n label,\n ha='center',\n va='bottom',\n bbox=dict(\n boxstyle=\"square\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n ),\n )\n\n fig.tight_layout()\n return fig, '\\n'.join(t1)+'\\n', '\\n'.join(t2)+'\\n'\n\ndef how_many_active_inactive(target_uids, conversion_strategies, threshold):\n result = np.empty((len(target_uids), len(conversion_strategies)), dtype=np.object)\n result.fill(\"\")\n for i, target_uid in enumerate(target_uids):\n for j, (ic50_conversion_strategy, fit_ic50, _) in enumerate(conversion_strategies):\n dct = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=threshold,\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )\n result[i,j] = \"a:{} ia:{} p:{}\".format(\n (dct[\"final\"].data[\"value\"] == 1.).sum(),\n (dct[\"final\"].data[\"value\"] == 0.).sum(),\n len(set(dct[\"data_nodes\"][-3].data[\"doc_uid\"])),\n )\n rows = [target_name(u) for u in target_uids]\n cols = list(list(zip(*conversion_strategies))[2])\n return _table(rows, cols, result, '\\t')\n\ndef ic50_delta(target_uids, conversion_strategies):\n result = np.empty((len(target_uids), len(conversion_strategies)), dtype=np.object)\n result.fill(\"\")\n for i, target_uid in enumerate(target_uids):\n for j, (ic50_conversion_strategy, name) in enumerate(conversion_strategies):\n n = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=True,\n )[\"data_nodes\"][-2]\n assert n.__class__.__name__ == \"FitOriginalIC50ToKi\"\n result[i,j] = \"{:.3f} / {}\".format(\n 2*10**(-n.data[\"IC50_correction\"]),\n n.data[\"how_many_uids_to_estimate_correction\"],\n )\n rows = [target_name(u) for u in target_uids]\n cols = list(list(zip(*conversion_strategies))[1])\n return _table(rows, cols, result, '\\t')\n","repo_name":"lesniak43/ananas","sub_path":"fruits/elderberries/benchmarks2018/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":52363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"4898997950","text":"import pdb\n\ndef print_func(n):\n if n == 0: # funksiyanı bitirən əsas hal.\n pdb.set_trace()\n return 0\n elif n > 0:\n print(n)\n return print_func(n - 1) # rekursiv çağırış\n\n\nif __name__ == \"__main__\":\n pdb.set_trace()\n print_func(4)\n","repo_name":"AzePUG/Data_Structures_Algo_Python","sub_path":"Source_Code/python_kodlar/fesil2/fesil2_2.5_pdb.py","file_name":"fesil2_2.5_pdb.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"az","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"}
+{"seq_id":"1753048167","text":"\"\"\"expertreview URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom expertreviewapp import views\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('',views.mainhome,name='mainhome'),\r\n path('login/',views.login,name='login'),\r\n path('cusreg',views.cusreg,name='cusreg'),\r\n path('expertreg',views.expertreg,name='expertreg'),\r\n path('addvehicle',views.addvehicle,name='addvehicle'),\r\n path('company',views.company,name='company'),\r\n path('adminviewcus',views.adminviewcus,name='adminviewcus'),\r\n path('adminviewexpert',views.adminviewexpert,name='adminviewexpert'),\r\n path('adminviewvehicle',views.adminviewvehicle,name='adminviewvehicle'),\r\n path('adminhome',views.adminhome,name='adminhome'),\r\n path('deletevehicle',views.deletevehicle,name='deletevehicle'),\r\n path('experthome',views.experthome,name='experthome'),\r\n path('companyhome',views.companyhome,name='companyhome'),\r\n path('expertviewvehicle',views.expertviewvehicle,name='expertviewvehicle'),\r\n path('comvvehicle',views.comviewvehicle,name='comvvehicle'),\r\n path('expertreview',views.expertreview,name='expertreview'),\r\n path('expertviewreviews',views.expertviewreviews,name='expertviewreviews'),\r\n path('cushome',views.cushome,name='cushome'),\r\n path('cusviewreviews',views.cusviewreviews,name='cusviewreviews'),\r\n path('custviewvehicle',views.custviewvehicle,name='expertreview'),\r\n \r\n path('expcardetails',views.expcardetails,name='expcardetails'),\r\n path('custcardetails',views.custcardetails,name='custcardetails'),\r\n path('adminreview',views.adminreview,name='adminreview'),\r\n path('adminreviewmore',views.adminreviewmore,name='adminreviewmore'),\r\n path('adminupdatereview',views.adminupdatereview,name='adminupdatereview'),\r\n \r\n path('expertprofile',views.expertprofile,name='expertprofile'),\r\n path('cusprofile',views.cusprofile,name='cusprofile'),\r\n path('req',views.req),\r\n path('expapp',views.expapp),\r\n path('exprem',views.exprem),\r\n path('cusvreq',views.cusvreq),\r\n path('expertvreq',views.expertvreq),\r\n path('comviewvehicle',views.comviewvehicle),\r\n path('inchat',views.inchat,name=\"inchat\"),\r\n path('sfChatPer',views.sfChatPer,name=\"sfChatPer\"),\r\n \r\n \r\n \r\n \r\n \r\n]","repo_name":"Rithw/Main-Project","sub_path":"expertreview/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"24242308963","text":"from collections import namedtuple\n\nimport gdb\n\ndef _load_pwndbg():\n try:\n import pwndbg\n except:\n return None\n\n from gdb_comments.integrations.pwndbg_patch import load\n load()\n\n from gdb_comments.integrations import pwndbg_utils\n return pwndbg_utils\n\ndef _load_peda():\n # PEDA was never designed to be imported. Instead of writing an overt rant\n # here, I will simply list a series of facts and let the astute reader draw\n # their own conclusions (and my appologies to the keen gramarian for my use\n # of the gender-neutral, singular \"they\").\n #\n # Typically in Python, when you want to import something, you type\n # `import something` at the top of your file and it Just Works. Sadly, peda\n # cannot be imported this way. The main file (that contains the majority of\n # code in peda) is a 6,000+ line script that contains at least two classes\n # and 50 lines of initialization code that is not guarded inside of a\n # standard `if __name__ == '__main__'` construct.\n #\n # In the event that you could convince Python to load this file, peda would\n # generate a second instance of the PEDA class which would be in direct\n # violation of the comment above the instance stating\n #\n # # global instances of PEDA() and PEDACmd()\n # peda = PEDA()\n #\n # Typically, a project implicitly demonstrates how to import itself via its\n # test suite. However, peda has no tests and therefore cannot serve as a\n # reference on importing itself.\n #\n # With that said, I know that the peda object exists in memory. I can (and\n # do) `import gdb` and potentially the global namespace accessible through\n # the GDB interpreter is available through that import although I could\n # never find it. I wouldn't be surprised if a knowledgable someone came\n # across this comment and just so happened to know how to access the\n # interpreter environment through `import gdb`. However, I was unable to\n # find it.\n #\n # And that finally brings us to the third and current solution. Given that\n # the peda object is sitting somewhere in memory and this code is getting\n # executed under the same Python process, this code should be able to find\n # the peda object. 
A quick search on SO yielded a simple, yet horrific,\n # answer: just get a list of every object known to the garbage collector.\n # From there, find one with the correct class name (although I need to\n # compare strings because I don't actually have a reference to the PEDA\n # class).\n #\n # If you have had the patience to read this rather lengthy wall of text, my\n # hope is that you will understand why the next few lines of code exist and\n # why I am not a terrible person for writing them.\n import gc\n\n peda = None\n for obj in gc.get_objects():\n if str(obj.__class__) == \"\":\n peda = obj\n break\n if peda is None:\n return None\n\n from gdb_comments.integrations.peda_patch import load\n load(peda)\n\n from gdb_comments.integrations import peda_utils\n return peda_utils\n\ndef _make_utils():\n _utils = None\n if _utils is None:\n _utils = _load_pwndbg()\n\n # Loading PEDA is very inefficient so make sure it's the last thing we try.\n if _utils is None:\n _utils = _load_peda()\n\n if _utils is None:\n raise EnvironmentError('Could not find a supported environment to load comments.')\n return _utils.info, _utils.error\n\ninfo, error = _make_utils()\nutils = _make_utils()\n","repo_name":"supersam654/gdb-comments","sub_path":"gdb_comments/integrations/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"10208468839","text":"#invoer\nuur_vertrek_thuis = int(input('Geef uur vertrek thuis: '))\nminuten_vertrek_thuis = int(input('Geef minuten vertrek thuis: '))\nuur_aankomst_bij_vriendin = int(input('Geef uur aankomst bij vriendin: '))\nminuten_aankomst_bij_vriendin = int(input('Geef minuten aankomst bij vriendin: '))\nuur_vertrek_van_vriendin = int(input('Geef uur vertrek van vriendin: '))\nminuten_vertrek_van_vriendin = int(input('Geef minuten vertrek van vriendin: '))\nuur_aankomst_thuis = int(input('Geef uur aankomst thuis: '))\nminuten_aankomst_thuis = int(input('Geef minuten aankomst thuis: '))\n\n#berekening reistijd heen of terug\nresultaat = ((1440 - (uur_vertrek_thuis * 60 + minuten_vertrek_thuis)) + (uur_aankomst_thuis * 60 + minuten_aankomst_thuis)) % 1440\nresultaat -= ((1440 - (uur_aankomst_bij_vriendin * 60 + minuten_aankomst_bij_vriendin)) + (uur_vertrek_van_vriendin * 60 + minuten_vertrek_van_vriendin)) % 1440\nresultaat /= 2\n\n\n#berekening tijdstip\ncorrecte_minuten_aankomst_thuis = int((minuten_vertrek_van_vriendin + (resultaat % 60)) % 60)\ncorrecte_uur_aankomst_thuis = int(((uur_vertrek_van_vriendin + (resultaat // 60)) + ((minuten_vertrek_van_vriendin +resultaat % 60)) // 60) % 24)\nprint(correcte_uur_aankomst_thuis)\nprint(correcte_minuten_aankomst_thuis)\n\n#15:45 945 18:05 1085 140\n# 16:30 990 17:14 1024 34\n# 53\n# python console gebruiken als rekenmachine\n#21 14 11 45 22 58 14 59 2 14\n#15 1 17 5 18 1 18 23 19 14\n#557213823281659284\n\n\n\n\n\n","repo_name":"astilleman/Informatica5","sub_path":"04 - Variabelen/De gestopte klok.py","file_name":"De gestopte klok.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11387493080","text":"#!usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nATM module.\n\"\"\"\n\n__author__ = 'Ziang Lu'\n\nfrom atm.dispatcher import (\n FiveDispatcher, HundredDispatcher, OneDispatcher, TenDispatcher,\n TwentyDispatcher\n)\n\n\nclass AtmMachine:\n __slots__ = ['_first_dispatcher']\n\n def __init__(self):\n \"\"\"\n Default constructor.\n \"\"\"\n self._first_dispatcher = HundredDispatcher.get_instance(\n TwentyDispatcher.get_instance(\n TenDispatcher.get_instance(\n FiveDispatcher.get_instance(OneDispatcher.get_instance())\n )\n )\n )\n\n def withdraw(self, requested_amount: int) -> None:\n \"\"\"\n Withdraws the given amount of money from this ATM.\n :param requested_amount: int\n :return: None\n \"\"\"\n # Delegate to the dispatchers to handle this withdraw request\n self._first_dispatcher.dispatch(requested_amount)\n","repo_name":"Ziang-Lu/Design-Patterns","sub_path":"4-Behavioral Patterns/8-Chain of Responsibility Pattern/Usage 2-One or More Receivers Handle Request/Python/atm/atm_machine.py","file_name":"atm_machine.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"15130103600","text":"class Solution:\n def minCost(self, nums: List[int], cost: List[int]) -> int:\n res=0\n nums = sorted(zip(nums,cost))\n total = sum(cost)//2\n for num,cost in nums:\n res+=cost\n if res>total:\n mid = num\n break\n return sum(abs(mid-n)*c for n,c in nums)","repo_name":"iamcvarma/DSA-leetcode","sub_path":"2448-minimum-cost-to-make-array-equal/2448-minimum-cost-to-make-array-equal.py","file_name":"2448-minimum-cost-to-make-array-equal.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"26914915515","text":"import ipaddress as IP\nfrom os import system as linux\nlinux(\"clear\")\n\nip = '192.168.0.100'\n\nendereco = IP.ip_address(ip)\nrede = IP.ip_network(ip)\n\nprint(f\"rede: {rede}\")\n","repo_name":"Lucas20santos/BancoCarregourDataEngineer","sub_path":"FundamentosArquiteturaSistema/codigos/ips.py","file_name":"ips.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"5107642347","text":"import collections as co\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.ticker import NullLocator\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nimport numpy as np\nimport os.path as op\nimport io\n# ---------------------------------------------------------------------------\n\nFORMAT = 'png'\n\n# ---------------------------------------------------------------------------\n\nScatterplotData = co.namedtuple('ScatterplotData', 'label shape level x y')\nPixel = co.namedtuple('Pixel', 'x y')\nScatterplotMetaData = co.namedtuple('ScatterplotMetaData',\n 'readout ligand concentration time')\nPointSpec = co.namedtuple('PointSpec', 'label shape level')\nResponseData = co.namedtuple('ResponseData', 'metadata data')\nMarkerSpec = co.namedtuple('MarkerSpec', 'marker color')\n\nmarker_map = {\n 'triangle': MarkerSpec('^', 'orange'),\n 'circle': MarkerSpec('o', 'mediumpurple'),\n 'square': MarkerSpec('s', 'mediumseagreen'),\n }\n\ndpi = 72.0\n\ncmap_bwr = LinearSegmentedColormap.from_list('bwr', ['blue', 'white', 'red'])\n\ndef scatterplot(points, metadata, lims=None, outpath='/dev/null',\n display=False):\n f = Figure(figsize=(300 / dpi, 300 / dpi), dpi=dpi)\n ax = f.gca()\n for p in points:\n if p.level is None:\n # overrides cmap\n color = marker_map[p.shape].color\n else:\n color = p.level\n ax.scatter(p.x, p.y, c=color, vmin=0, vmax=1, linewidth=0.5,\n marker=marker_map[p.shape].marker, s=100, cmap=cmap_bwr)\n if lims is None:\n all_data = sum(([p.x, p.y] for p in points), [])\n dmin = min(all_data)\n dmax = max(all_data)\n drange = dmax - dmin\n lims = dmin - drange * 0.1, dmax + drange * 0.1\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_aspect('equal')\n ax.set_xlabel(build_label(metadata[0]))\n ax.set_ylabel(build_label(metadata[1]))\n for loc in 'top', 'right':\n ax.spines[loc].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n f.subplots_adjust(left=0.2, bottom=0.15, right=1, top=1, wspace=0, hspace=0)\n plt.setp(f, 'facecolor', 'none')\n\n canvas = FigureCanvasAgg(f)\n f.set_canvas(canvas)\n\n # must always be called, even if outpath is '/dev/null', so that the\n # returned figure object yields consistent pixel coordinates\n canvas.print_png(outpath)\n\n if display:\n plt.show()\n\n return f\n\ndef pixels(points, figure):\n transform = figure.gca().transData.transform\n # see http://matplotlib.org/devel/transformations.html#matplotlib.transforms.Transform.transform\n height = figure.canvas.get_width_height()[1]\n return tuple(Pixel(int(round(q[0])), int(round(height - q[1])))\n for q in transform(np.array([(p.x, p.y) for p in points])))\n\n\ndef build_label(metadata):\n readout, ligand, concentration, time = metadata\n if readout is not None and all(x is None for x in (ligand, concentration, time)):\n # basal\n label = 'basal %s (a.u.)' % readout\n elif all(x is not None for x in metadata):\n # ligand response\n label = '%s [%s]\\n(fold change over basal)' % (readout, ligand)\n else:\n raise ValueError(\"unknown combination of metadata values\")\n return label\n\n\ndef legend_categorical(target_dir):\n # this just generates pieces, still need to manually assemble them\n # into the final result\n f = Figure(figsize=(300/dpi, 300/dpi), dpi=dpi)\n ax = f.gca()\n for subtype, shape in (('HER2amp', 'triangle'),\n ('TN', 'circle'),\n ('HR+', 'square')):\n ax.plot(0, 0, marker=marker_map[shape].marker, 
mfc=marker_map[shape].color,\n label=subtype, ls='none')\n ax.legend(prop={'size': 12})\n filename = op.join(target_dir, 'legend-categorical.png')\n canvas = FigureCanvasAgg(f)\n canvas.print_png(filename)\n\n\ndef legend_graded(target_dir):\n # this just generates pieces, still need to manually assemble them\n # into the final result\n f = Figure(figsize=(300/dpi, 300/dpi), dpi=dpi)\n ax = f.gca()\n for subtype, shape in (('HER2amp', 'triangle'),\n ('TN', 'circle'),\n ('HR+', 'square')):\n ax.plot(0, 0, marker=marker_map[shape].marker, label=subtype, mfc='none', ls='none')\n ax.set_xlabel('Subtype')\n cax = ax.imshow([[0,1]], cmap=cmap_bwr)\n cbar = f.colorbar(cax, ticks=[0, 0.5, 1], orientation='horizontal')\n cbar.ax.set_xticklabels(['Weak', 'Medium', 'Strong'])\n cbar.ax.set_xlabel('Lapatinib response')\n plt.setp(cbar.ax.get_xticklines(), alpha=0)\n ax.legend(prop={'size': 12})\n f.set_facecolor('none')\n filename = op.join(target_dir, 'legend-graded.png')\n canvas = FigureCanvasAgg(f)\n canvas.print_png(filename)\n\n\nif __name__ == '__main__':\n points = (ScatterplotData('AU-565', 'triangle', 0.554, 4.308, 4.311),\n ScatterplotData('BT-20', 'circle', 0.043, 3.843, 3.877),\n ScatterplotData('BT-474', 'triangle', 0.496, 3.455, 3.535),\n ScatterplotData('BT-483', 'square', 1.000, 3.805, 3.685),\n ScatterplotData('BT-549', 'circle', 0.873, 3.333, 3.197),\n ScatterplotData('CAMA-1', 'square', 1.000, 3.343, 3.230),\n ScatterplotData('HCC1187', 'circle', 0.403, 3.818, 3.723),\n ScatterplotData('HCC1395', 'circle', 0.859, 3.682, 3.720),\n ScatterplotData('HCC1419', 'triangle', 0.501, 4.068, 4.051),\n ScatterplotData('HCC1428', 'square', 0.640, 3.590, 3.376),\n ScatterplotData('HCC1806', 'circle', 0.246, 3.877, 3.843),\n ScatterplotData('HCC1937', 'circle', 0.854, 3.862, 3.727),\n ScatterplotData('HCC1954', 'triangle', 0.162, 4.032, 3.996),\n ScatterplotData('HCC202', 'triangle', 0.838, 4.199, 4.197),\n ScatterplotData('HCC38', 'circle', 1.000, 3.919, 3.838),\n ScatterplotData('HCC70', 'circle', 0.000, 4.263, 4.307),\n ScatterplotData('MCF7__b', 'square', 1.000, 3.148, 2.951),\n ScatterplotData('MDA-MB-134-VI', 'square', 1.000, 3.442, 3.475),\n ScatterplotData('MDA-MB-157', 'circle', 0.921, 3.294, 2.611),\n ScatterplotData('MDA-MB-175-VII', 'square', 0.163, 4.052, 3.831),\n ScatterplotData('MDA-MB-231__a', 'circle', 0.860, 3.903, 3.524),\n ScatterplotData('MDA-MB-361', 'triangle', 0.994, 3.092, 2.991),\n ScatterplotData('MDA-MB-436', 'circle', 0.950, 3.781, 3.635),\n ScatterplotData('MDA-MB-453', 'circle', 0.889, 3.290, 3.424),\n ScatterplotData('SK-BR-3__a', 'triangle', 0.608, 3.986, 3.999),\n ScatterplotData('T47D', 'square', 0.921, 3.804, 3.835),\n ScatterplotData('UACC-812', 'triangle', 0.537, 3.908, 3.907),\n ScatterplotData('UACC-893', 'triangle', 0.539, 3.677, 3.709),\n ScatterplotData('ZR-75-1', 'square', 1.000, 3.884, 3.569))\n metadata = (ScatterplotMetaData(readout='pErk', ligand='EGF', concentration='100', time=None),\n ScatterplotMetaData(readout='pErk', ligand='EPR', concentration='100', time=None))\n lims = (1.518, 4.395)\n\n scatterplot(points, metadata, lims, display=True)\n","repo_name":"hmslincs/hmslincs","sub_path":"src/scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"79"}
+{"seq_id":"74305061694","text":"import tweepy\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\n\nfrom twitter_auth import authenticate_twitter_app\nfrom db_stuff import UserDB\n\nclass MyStreamListener(tweepy.StreamListener):\n \"\"\"\n Twitter listener, collects streaming tweets and output to a file\n \"\"\"\n\n def __init__(self, output_file=\"alc_tweets.db\", max_tweets=1000):\n super(MyStreamListener, self).__init__()\n self.max_tweets = max_tweets\n self.num_tweets = 0\n self.good_tweets = 0\n self.db = UserDB(output_file)\n\n def on_status(self, status):\n #print(status.text)\n tweet = status._json\n self.num_tweets=self.num_tweets+1\n \n text = \"\"\n \n # catching extended tweets (only way i found to do this with streaming API)\n try:\n text = status.extended_tweet['full_text']\n except:\n text = status.text\n\n if status.place != None:\n print(\"Inserting tweet of length: \" + str(len(text)))\n print(\"Text: \" + text)\n print(\"Country code: \" + status.place.country_code)\n self.good_tweets=self.good_tweets+1\n self.db.insert_tweet(int(status.user.id_str), text, status.place.country_code)\n self.db.save_changes()\n\n # Stops streaming when it reaches the limit\n if self.num_tweets <= self.max_tweets:\n if self.num_tweets % 100 == 0: # just to see some progress...\n print(str(self.num_tweets) + \" collected -> \" + str(self.good_tweets) + \" are applicable\")\n return True\n else:\n return False\n\n def on_error(self, status):\n print(status)\n return False\n \n def __del__(self):\n pass\n\n\nif __name__ == '__main__':\n\n print(\"Run Listener for crawling twitter data\")\n\n #Define search content\n key_words =[\"alcohol,beer,wine,drunk,drinking alcohol,party alcohol\"]\n\n\n l = MyStreamListener(max_tweets=100000)\n\n # Create you Stream object with authentication\n auth = authenticate_twitter_app()\n stream = tweepy.Stream(auth=auth, listener=l)\n\n # Filter Twitter Streams to capture data by the keywords:\n stream.filter(track=key_words,languages=['en'])\n\n# try out db stuff\n \n","repo_name":"DanielSudy/SMTAlcoholConsumption","sub_path":"sentiment_analysis/alcohol_streamer.py","file_name":"alcohol_streamer.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39507001799","text":"# 309. Best Time to Buy and Sell Stock with Cooldown\n# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/description/\n\nfrom functools import lru_cache\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n \n\n # Solution 1 - Dfs with memoization \n\n # cache = {}\n\n # @lru_cache\n # def dfs(i, canBuy):\n # # if (i, canBuy) in cache:\n # # return cache[(i, canBuy)]\n\n # if i >= len(prices):\n # return 0\n\n \n # res = dfs(i+1, canBuy)\n\n # if canBuy:\n # res = max(dfs(i+1, not canBuy) - prices[i], res)\n # else:\n # res = max(dfs(i+2, not canBuy) + prices[i], res)\n\n # # cache[(i, canBuy)] = res\n\n # return res\n\n # return dfs(0, True)\n\n\n # Solution 2 - Dynamic programming (Bottom up) approach with tabulation \n n = len(prices)\n\n stock = [0] * (n)\n no_stock = [0] * (n)\n sold = [0] * (n)\n\n stock[0] = -prices[0]\n\n\n for i in range(1, n):\n stock[i] = max(stock[i-1], no_stock[i-1] - prices[i])\n no_stock[i] = max(no_stock[i-1], sold[i-1])\n sold[i] = stock[i-1] + prices[i]\n\n return max(sold[n-1], no_stock[n-1])\n\n\n # Solution 3 - Space optimisation. You would only need three variables to hold previous state and nothing else hence space can be optimised to be constant. \n\n # n = len(prices)\n\n # stock = -prices[0]\n # no_stock = 0\n # sold = 0\n\n # for i in range(1, n):\n # prev_stock = stock\n # stock = max(stock, no_stock - prices[i])\n # no_stock = max(no_stock, sold)\n # sold = prev_stock + prices[i]\n\n\n # return max(sold, no_stock)\n\n\n\n \n\n \n# Example 1:\n\n# Input: prices = [1,2,3,0,2]\n# Output: 3\n# Explanation: transactions = [buy, sell, cooldown, buy, sell]\n \n# Example 2:\n\n# Input: prices = [1]\n# Output: 0\n\n\n\n\n\n\n\n\n","repo_name":"anoopanni/leetcode","sub_path":"BuySellStock.py","file_name":"BuySellStock.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71553703615","text":"import os\nimport argparse\n\nimport paddle\n\nfrom arch_unet import UNet\nfrom utils import load_pretrained_model\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Model export.')\n parser.add_argument(\n '--save_dir',\n dest='save_dir',\n help='The directory for saving the exported model',\n type=str,\n default='./output')\n parser.add_argument(\n '--model_path',\n dest='model_path',\n help='The path of model for export',\n type=str,\n default=None)\n\n return parser.parse_args()\n\n\ndef main(args):\n\n net = UNet(in_nc=3,\n out_nc=3,\n n_feature=48)\n\n if args.model_path:\n para_state_dict = paddle.load(args.model_path)\n net.set_dict(para_state_dict)\n print('Loaded trained params of model successfully.')\n\n\n shape = [-1, 3, 256, 256]\n\n new_net = net\n\n new_net.eval()\n new_net = paddle.jit.to_static(\n new_net,\n input_spec=[paddle.static.InputSpec(shape=shape, dtype='float32')])\n save_path = os.path.join(args.save_dir, 'model')\n paddle.jit.save(new_net, save_path)\n\n # yml_file = os.path.join(args.save_dir, 'deploy.yaml')\n # with open(yml_file, 'w') as file:\n # transforms = cfg.export_config.get('transforms', [{\n # 'type': 'Normalize'\n # }])\n # data = {\n # 'Deploy': {\n # 'transforms': transforms,\n # 'model': 'model.pdmodel',\n # 'params': 'model.pdiparams'\n # }\n # }\n # yaml.dump(data, file)\n\n print(f'Model is saved in {args.save_dir}.')\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)","repo_name":"txyugood/Neighbor2Neighbor_Paddle","sub_path":"export_model.py","file_name":"export_model.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"74035403456","text":"## Augmentation ##\r\n#Image shifts via the width_shift_range and height_shift_range arguments.\r\n#Image flips via the horizontal_flip and vertical_flip arguments.\r\n#Image rotations via the rotation_range argument\r\n#Image brightness via the brightness_range argument.\r\n#Image zoom via the zoom_range argument.\r\n\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\r\n\r\n# Construct an instance of the ImageDataGenerator class\r\n# Pass the augmentation parameters through the constructor. \r\n\r\ndatagen = ImageDataGenerator(\r\n rotation_range=40, # Random rotation between 0 and 40\r\n width_shift_range=0.2, # % shift\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest') # can also try nearest, constant, reflect, wrap\r\n\r\n\r\n\r\n############## Loading a single image and do the augmentation ##############\r\n\r\n#Using flow method to augment the image\r\n# Loading a sample image \r\n#Can use any library to read images but they need to be in an array form\r\n#If using keras load_img convert it to an array first\r\n\r\nimg = load_img('F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/images/000001.jpg') # this is a PIL image\r\nx = img_to_array(img) # this is a Numpy array with shape (500, 353, 3)\r\n\r\n# Reshape the input image because ...\r\n#x: Input data to datagen.flow must be Numpy array of rank 4 or a tuple.\r\n#First element represents the number of images\r\nx = x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 500, 353, 3)\r\n\r\n# the .flow() command below generates batches of randomly transformed images\r\n# and saves the results to the `augmented_output/` directory\r\ni = 0\r\nfor batch in datagen.flow(x, batch_size=1,\r\n save_to_dir='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/augmented_output', save_prefix='man_with_puppy', save_format='jpeg'):\r\n i += 1\r\n if i > 4:\r\n break # otherwise the generator would loop indefinitely\r\n \r\n \r\n\r\n####################### Multiple images ######################\r\n\r\n#Manually read each image and create an array to be supplied to datagen via flow method\r\ndataset = []\r\n\r\nimport numpy as np\r\nfrom skimage import io\r\nimport os\r\nfrom PIL import Image\r\n\r\nimage_directory = 'F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/images/'\r\nSIZE = 400\r\ndataset = []\r\n\r\nmy_images = os.listdir(image_directory)\r\nfor i, image_name in enumerate(my_images):\r\n if (image_name.split('.')[1] == 'jpg'):\r\n image = io.imread(image_directory + image_name)\r\n image = Image.fromarray(image, 'RGB')\r\n image = image.resize((SIZE,SIZE))\r\n dataset.append(np.array(image))\r\n\r\nx = np.array(dataset) # this is a Numpy array with shape (7, 400, 400, 3)\r\n\r\ni = 0\r\nfor batch in datagen.flow(x, batch_size=1,\r\n save_to_dir='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/augmented_output', save_prefix='augments', save_format='jpeg'):\r\n i += 1\r\n if i > 27:\r\n break # otherwise the generator would loop indefinitely\r\n \r\n\r\n###################### accessing image in Multiclass problem #####################\r\n# Read directly from the folder structure using flow_from_directory\r\n\r\ni = 0\r\nfor batch in datagen.flow_from_directory(directory='F:/AI assignment/AI assignment/Convolutional 
Neural Network_Assign_module_9/Augment_images/', \r\n batch_size=16, \r\n target_size=(400, 400),\r\n color_mode=\"rgb\",\r\n save_to_dir='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/augmented_output', \r\n save_prefix='augments', \r\n save_format='png'):\r\n i += 1\r\n if i > 4:\r\n break \r\n\r\n#Creates 32 images for each class. \r\n \r\n#Once data is augmented, you can use it to fit a model via: fit.generator\r\n#instead of fit()\r\n#model = \r\n#fit model on augmented data\r\n#model.fit_generator(datagen.flow(x))","repo_name":"anandkvvlr/AI_assignment-works","sub_path":"CNN/module_9.py","file_name":"module_9.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
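The record above stops short of the training step mentioned in its closing comments. As a minimal, hypothetical sketch (not part of the original file), assuming a compiled Keras `model` and labels `y` to go with the image array `x` and the `datagen` defined in that snippet, the augmented batches could be fed straight into training; in current Keras, `fit()` accepts the generator directly (the older name for this was `fit_generator()`):

    # Hypothetical sketch: train on augmented batches produced on the fly.
    # `model`, `x`, `y` are assumed to exist as in the record above.
    model.fit(
        datagen.flow(x, y, batch_size=16),   # yields randomly transformed batches
        steps_per_epoch=len(x) // 16,        # roughly one pass over the data per epoch
        epochs=10,
    )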
+{"seq_id":"6471816742","text":"import re\nimport json\nimport time\nimport datetime\nimport pandas as pd\n\nwith open(\"./AtomicCards.json\") as card_data:\n j_data = json.load(card_data)\n meta_data = j_data[\"meta\"]\n data = j_data[\"data\"]\n keys = list(data.keys())\n\nwith open(\"./SetList.json\") as set_data:\n set_j_data = json.load(set_data)\n set_meta_data = set_j_data[\"meta\"]\n set_data = set_j_data[\"data\"]\n dated = { time.mktime(datetime.datetime.strptime(s[\"releaseDate\"], \"%Y-%m-%d\").timetuple()) \\\n : (s[\"code\"] if \"parentCode\" not in s.keys() else s[\"parentCode\"], s[\"releaseDate\"]) for s in set_data}\n date_list = list(dated.keys())\n date_list.sort()\n sorted_dated = [dated[x] for x in date_list]\n\nINVALID_SETS = [\"PCEL\", \"PRM\"]\nFORBIDDEN_SETS = [\"UST\", \"UNH\", \"UGL\", \"UND\", \"AFR\", \"PCEL\", \"HHO\"]\n\ndef has_forb_set(c):\n ps = c[\"printings\"]\n if len(list(filter(lambda x : x not in FORBIDDEN_SETS, ps))) == 0:\n return True\n return False\n\nFORBIDDEN_TYPES = [\"Dungeon\"]\n\ndef has_forb_type(c):\n if c[\"type\"] in FORBIDDEN_TYPES:\n return True\n return False\n\ndouble_face_re = re.compile(r\"(.+) // (.+)\")\n\ndef get_print(c):\n valid_printings = list(filter(lambda x : len(x) <= 3 and x not in INVALID_SETS, c[\"printings\"]))\n for i in range(len(sorted_dated)):\n c_set = sorted_dated[i]\n if c_set[0] in valid_printings:\n return(c_set)\n\n# remove UN-sets\n# remove Dungeon type\n# check two-face\n\nrows = []\nignore_count = 0\n\nfor key in keys:\n for d in data[key]:\n if not(has_forb_type(d)) and not(has_forb_set(d)):\n if \"side\" in d.keys():\n m = double_face_re.match(d[\"name\"])\n if m:\n name = m.group(1) if d[\"side\"] == \"a\" else m.group(2)\n else:\n name = d[\"name\"]\n else:\n name = d[\"name\"]\n \n printing = get_print(d)\n if printing:\n colours = d[\"colors\"]\n red = \"R\" in colours\n green = \"G\" in colours\n black = \"B\" in colours\n white = \"W\" in colours\n blue = \"U\" in colours\n text = \"{EMPTY}\" if \"text\" not in d.keys() else d[\"text\"]\n text = re.sub(r\" \\({Q} is the untap symbol.\\)\", \"\", text).lower()\n if \"{q}\" in text:\n print(text)\n \n rows.append({\n \"name\" : name.lower(),\n \"printed\" : printing[1],\n \"r\" : int(red),\n \"g\" : int(green),\n \"b\" : int(black),\n \"w\" : int(white),\n \"u\" : int(blue),\n \"text\" : text,\n \"subtypes\" : d[\"subtypes\"],\n \"types\" : d[\"types\"]\n })\n else:\n print(\"Row ignored\")\n\n\ndataframe = pd.DataFrame(rows, columns=[\"name\", \"printed\", \"r\", \"g\", \"b\", \"w\", \"u\", \"text\", \"types\",\"subtypes\"])\ndataframe.to_csv(\"sanitized_cards.csv\", sep=\"|\")\n\n","repo_name":"Pickersgill/cardclassifier","sub_path":"datamine/acrew.py","file_name":"acrew.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"15790003641","text":"from .life import *\nfrom .impact import *\n\nclass Enemy(Life):\n def __init__(self, game, animations, attack_timming, attack_end, attack_cooltime, direction = 1, location = vec(0, 0), speed = 0, attack_point = 0, hp = 0, area = 50, aggro_area= 50, die_mp = 15):\n self.groups = game.all_sprites, game.visibles, game.lifes, game.enemies\n Life.__init__(self, game, self.groups, animations, direction=direction, location=location, speed = speed, attack_point= attack_point, hp = hp)\n self.area = area\n self.aggro_area = aggro_area\n self.attack_timming = attack_timming\n self.attack_end = attack_end\n self.attack_cooltime = attack_cooltime\n self.start_time = 0\n self.allow = True\n self.none_operate = ['공격', '바닥충돌']\n self.die_mp = die_mp\n self.fly_states = ['부유', '추락']\n\n def movestate_update(self):\n if self.state != '죽음':\n if self.floor_contact():\n if not self.operation:\n if self.velocity.x == 0:\n if not self.state in self.none_operate:\n self.state_set('통상')\n self.operation = True\n if self.walk_control_l:\n self.walk_l()\n if self.walk_control_r:\n self.walk_r()\n if not (self.walk_control_l or self.walk_control_r):\n self.state_set('통상')\n self.friction_switch = False\n else:\n if self.friction_switch:\n self.velocity.x -= ((self.velocity.x > 0) * 2 - 1) * friction * TIME\n if -20 < (self.velocity.x) < 20:\n self.velocity.x = 0\n else:\n self.friction_switch = False\n if not self.state == '부유':\n if self.state == '추락':\n self.state_set('바닥충돌')\n else:\n if not self.state in self.none_operate:\n if self.walk_control_l or self.walk_control_r:\n self.state_set('걷기')\n else:\n self.state_set('통상')\n self.move()\n\n\n if self.velocity.y > 0 and self.state != '부유':\n self.velocity.y = 0\n\n else:\n self.velocity += self.acceleration * TIME\n if not self.state in self.fly_states:\n if self.velocity.y > 0:\n self.state_set('추락')\n else:\n self.state_set('부유')\n\n if self.ceiling_contact() and self.velocity.y < 0:\n self.velocity.y = 0\n \n self.rect.center += self.velocity * TIME\n\n self.physics_update()\n\n if self.state == '공격' or self.state == '넉백':\n if self.game.player.rect.centerx < self.rect.centerx:\n self.walk_control_l = True\n self.walk_control_r = False\n else:\n self.walk_control_r = True\n self.walk_control_l = False\n\n if self.state == '공격' and self.attack_end >= self.animation.p_frame >= self.attack_timming:\n self.make_attack()\n \n if not self.allow and ((time.time() - self.start_time) > self.attack_cooltime):\n self.allow = True\n \n if self.die_check():\n self.state_set('죽음')\n\n\n def move(self):\n if self.operation:\n if abs(self.game.player.rect.centerx - self.rect.centerx) < self.aggro_area:\n if self.game.player.rect.centerx < self.rect.centerx:\n if not self.walk_control_l:\n self.walk_r_cancel()\n self.walk_l()\n else:\n if not self.walk_control_r:\n self.walk_l_cancel()\n self.walk_r()\n else: \n self.walk_r_cancel()\n self.walk_l_cancel()\n\n if abs(self.game.player.rect.centerx - self.rect.centerx) < self.area:\n self.attack()\n\n def update(self):\n self.animation_end()\n self.animation_update()\n self.movestate_update()\n self.end_damaged()\n \n def animation_end(self):\n if self.animation.end_check():\n if self.state in self.none_operate:\n self.operation = True\n\n if self.state == '죽음':\n self.delete()\n self.game.player.heal_mp(self.die_mp)\n \n elif self.state == '바닥충돌':\n if self.walk_control_l or self.walk_control_r:\n self.state_set('걷기')\n else:\n self.state_set('통상')\n \n elif self.state == '공격':\n 
self.state_set('통상')\n if self.walk_control_l:\n self.walk_l()\n if self.walk_control_r:\n self.walk_r()\n if not (self.walk_control_l or self.walk_control_r):\n self.state_set('통상')\n\n \n def attack(self):\n if self.operation and self.allow:\n self.operation_cancel()\n self.state_set('공격')\n self.allow = False\n self.start_time = time.time()\n \n def floated(self, v):\n self.state_set('부유')\n self.operation_cancel()\n self.velocity += v\n \n def make_attack(self):\n pass","repo_name":"jwcho2005/Hihi","sub_path":"class_data/classes/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3306262237","text":"import numpy as np\nfrom tqdm import tqdm\n\nfrom maths import deg2rad, norm\nfrom rays import Ray\n\nRAYS_PER_PIXEL = 10\n\n\nclass Camera:\n def __init__(self, pos, dir, fov, resX, resY, clip_dst=0.1):\n \"\"\"\n camera coord space:\n --------> X\n |\n |\n |\n Y V z into screen\n\n\n \"\"\"\n self.pos = np.array(pos)\n self.dir = norm(np.array(dir))\n self.fov = deg2rad(fov)\n self.resX = resX\n self.resY = resY\n self.fovX = self.fov\n self.fovY = 2 * np.arctan2(np.tan(self.fovX / 2), self.resX / self.resY)\n self.clip_dst = clip_dst\n\n def set_direction(self, direction):\n self.dir = norm(direction)\n\n def get_ray_dir(self, px, py):\n # Dimensions of near clip plane\n clip_plane_X = 2 * np.tan(self.fovX / 2) * self.clip_dst\n clip_plane_Y = 2 * np.tan(self.fovY / 2) * self.clip_dst\n\n # Center camera view\n px_offset = px - self.resX // 2\n py_offset = py - self.resY // 2\n\n pixel_pos_cam_space = np.array(\n [\n clip_plane_X * px_offset / self.resX,\n clip_plane_Y * py_offset / self.resY,\n self.clip_dst,\n ]\n )\n pixel_pos_world_space = norm(\n np.matmul(self.cam_to_world_matrix(), pixel_pos_cam_space)\n )\n\n return pixel_pos_world_space\n\n def heading(self):\n dir_x = self.dir[0]\n dir_y = self.dir[1]\n # +x = 'north' = 0 rad\n heading = np.arctan2(dir_y, dir_x)\n\n return heading\n\n def elevation(self):\n dir_z = self.dir[2]\n # vertical up = pi/2, horizontal = 0, etc.\n return np.arcsin(dir_z)\n\n def cam_to_world_matrix(self):\n cam_x_in_world = np.array(\n [-np.sin(self.heading()), np.cos(self.heading()), 0.0]\n )\n cam_y_in_world = norm(\n np.array(\n [\n -self.dir[0] * np.sin(self.elevation()),\n -self.dir[1] * np.sin(self.elevation()),\n np.cos(self.elevation()),\n ]\n )\n )\n cam_z_in_world = self.dir\n\n matrix = np.column_stack([cam_x_in_world, cam_y_in_world, cam_z_in_world])\n\n return matrix\n\n def world_to_cam_matrix(self):\n return np.linalg.inv(self.cam_to_world_matrix())\n\n def draw(self, scene):\n pixel_data = np.zeros((self.resX, self.resY, 3))\n for n in tqdm(range(RAYS_PER_PIXEL)):\n for px in range(self.resX):\n for py in range(self.resY):\n ray = Ray(self.pos, self.get_ray_dir(px, py))\n pixel_data[px, py, :] += ray.trace(scene)\n\n return pixel_data / pixel_data.max()\n\n\nif __name__ == \"__main__\":\n cam = Camera([0, 0, 0], [1, 0, 0], 90, 800, 600)\n print(cam.elevation())\n print(cam.heading())\n print(cam.cam_to_world_matrix())\n print(cam.world_to_cam_matrix())\n print()\n print(cam.get_ray_dir(401, 301))\n print(cam.dir)\n","repo_name":"franklinscudder/RayTracer","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"22937180238","text":"import pyautogui as pg\nimport time\nimport webbrowser\n# while True:\n# time.sleep(4)\n# pyautogui.typewrite('Hello! Motherfu**ing :D')\n# time.sleep(2)\n# pyautogui.press('enter')\n\n# time.sleep(2)\n# print(pg.position())\n# pg.moveTo(562, 755, 2)\n# pg.leftClick()\n\nurl = \"https://www.facebook.com/messages/t/100017290625742\"\nwebbrowser.get().open(url)\nprint(pg.position())\n# pg.moveTo(970, 1079, 2)\n# pg.moveTo(1026, 1052, 2)\n# pg.leftClick(1026, 1052, 1)\n# pg.keyDown('ctrl')\n# pg.press('t')\n# pg.keyUp('ctrl')\n# pg.moveTo(661, 479)\n# pg.leftClick()\n# pg.typewrite(\"hello bạn\")\n# pg.press('enter')\ntime.sleep(10)\n\nfor i in range(6):\n pg.keyDown('alt')\n for j in range(i):\n pg.press('tab')\n pg.press('enter')\n pg.keyUp('alt')\n pg.moveTo(959, 1026)\n pg.leftClick()\n pg.typewrite(\"hello bạn\")\n pg.press('enter')\n #pg.hotkey('alt', 'tab', 'enter')\n","repo_name":"nxhawk/AI-helper","sub_path":"function/auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"33363039108","text":"\"\"\"\nA general instrument class that returns a status for each command sent\nor recieved from its instrument. This allows it to be used with the \"com\"\nfunction in the main algorithm, when visa fails it does not halt the\nwhole program but reports the failure instead.\n\"\"\"\nimport visa\nimport time\nclass INSTRUMENT(object):\n \n \"\"\" This instrument class really is only a function to read and write to\n some instrument, using pyvisa. It has a general 'dictionary' to which\n more key word arguments can be added, and more sub functions can be\n used to make instruments specific. It can be used very generally, as just a semd\n and recieve class which also wraps each communication with a check to see if the\n communication was sucesful. \"\"\"\n\n def __init__(self,inst_bus,letter, **kwargs):\n self.com = {'label':'','address':'', 'Ranges':[], 'measure_seperation':'0', 'NoError':'',\\\n 'reset':'','status':'','init':'','Make_Safe':'', 'error':'', \\\n 'SettleTime':'0', 'DCVRange':'', 'SetVoltage':'', 'operate':'', \\\n 'standby':'','MeasureSetup':'','SingleMsmntSetup':''} #command dictionary\n self.com.update(kwargs) #update dictionary to include all sent commands.\n self.label = self.com[\"label\"]\n self.com.update(label=str(letter)+str(kwargs['label']) )\n self.range = eval(self.com['Ranges']) #Use eval here or string operations? Like split multiple times.\n self.address = self.com['address']\n #ensure values are ints\n try:\n self.com_settle_time = float(self.com['SettleTime'])\n except:\n print(\"settle time made into 1 on \"+str(self.com['label']+\", from unreadable: \"+str(self.com['SettleTime'])))\n self.com_settle_time = 1\n try:\n self.measure_seperation = float(self.com['measure_seperation'])\n except:\n print(\"measure seperation made into 0 on \"+str(self.com['label']+\", from unreadable: \"+str(self.com['measure_seperation'])))\n self.measure_seperation = 0\n \n self.inst_bus = inst_bus #save the instrument bus, either visa or the simulated visa\n\n def create_instrument(self):\n\n \"\"\"\n Needs to be called prior to any commands being sent or recieved.\n Creates the visa instrument object, to which commands will be sent\n and recieved. 
\n \"\"\"\n\n success = False\n string = str(time.strftime(\"%Y.%m.%d.%H.%M.%S, \", time.localtime()))+' Creating '+self.label+': '\n try:\n self.rm = self.inst_bus.ResourceManager()\n self.inst = self.rm.open_resource(self.address)\n string = string+\"success\"\n success = True\n except: #There are a number of issues visa might raise?\n string = string+\"visa failed at address \"+str(self.address)\n return [success,None,string]\n \n def send(self,command):\n \"\"\"\n From here a command is sent to the instrument, surrounded by the try block.\n If the command fails, it does not halt the problem but sends back a failed status.\n \"\"\"\n success = False #did we read successfully\n #string to be printed and saved in log file\n string = str(time.strftime(\"%Y.%m.%d.%H.%M.%S, \", time.localtime()))+' '+self.label+': ' \n\n try:\n self.inst.write(command)\n print(command)\n time.sleep(self.com_settle_time)\n \n string = string+str(command)\n success = True\n except self.inst_bus.VisaIOError:\n string = string+\"visa failed\"\n return [success,None,string]\n \n def read_instrument(self):\n \"\"\"\n Similar to the send function, but reads and expects a return value too.\n \"\"\"\n val = '0' #value to be returned, string-type like instruments\n success = False #did we read successfully\n #string to be printed and saved in log file\n string = str(time.strftime(\"%Y.%m.%d.%H.%M.%S, \", time.localtime()))+' reading '+self.label+': ' \n try:\n time.sleep(self.measure_seperation)\n val = self.inst.read()\n string = string+str(val)\n success = True\n except self.inst_bus.VisaIOError:\n string = string+\"visa failed\"\n return [success,val,string]\n\n def initialise_instrument(self):\n \"\"\"A specific instrument command to the ref-step algorithm,\ninitialises instruments with a set of commands\"\"\"\n success,nothing,string = self.send(self.com['init'])\n \n \n \n return [success,nothing,string]\n\n def make_safe(self):\n \"\"\"specific to the ref-step algorithm, should turn instruments off\"\"\"\n success,nothing,string = self.send(self.com['Make_Safe'])\n\n return [success,nothing,string]\n \n def inst_status(self):\n \"\"\"specific to the ref-step algorithm, used for reading status\"\"\"\n success,nothing,string = self.send(self.com['status'])\n\n return [success,nothing,string]\n\n def reset_instrument(self):\n \"\"\"specific to the ref-step algorithm, reset routine\"\"\"\n success,nothing,string = self.send(self.com['reset'])\n\n return [success,nothing,string]\n \n def set_DCrange(self, value):\n \"\"\"specific to the ref-step algorithm, setting a DC voltage\"\"\"\n \n \n \n line = str(self.com['DCVRange'])\n line = line.replace(\"$\",str(value))\n out = self.send(line)\n \n return out\n \n def query_error(self):\n \"\"\"specific to the ref-step algorithm, reading the instruments error\"\"\"\n success,nothing,string = self.send(self.com['error'])\n\n return [success,nothing,string]\n \n def set_DCvalue(self, value):\n \"\"\"specific to the ref-step algorithm, set a DC value for sources\"\"\"\n line = str(self.com['SetVoltage'])\n line = line.replace('$V',str(value)+'V')\n out = self.send(line)\n return out\n\n def Operate(self):\n \"\"\"specific to the ref-step algorithm, operates sources\"\"\"\n success,nothing,string = self.send(self.com['operate'])\n\n return [success,nothing,string]\n \n def Standby(self):\n \"\"\"specific to the ref-step algorithm, puts sources on standby\"\"\"\n success,nothing,string = self.send(self.com['standby'])\n\n return [success,nothing,string]\n\n def 
MeasureSetup(self):\n \"\"\"specific to the ref-step algorithm, pre measurement sequence set up\"\"\"\n success,nothing,string = self.send(self.com['MeasureSetup'])\n\n return [success,nothing,string]\n\n def SingleMsmntSetup(self):\n \"\"\"specific to the ref-step algorithm, should any commands be sent prior to an individual measurement\"\"\"\n success,nothing,string = self.send(self.com['SingleMsmntSetup'])\n\n return [success,nothing,string]\n\n","repo_name":"AtillaTheFun/RefStep","sub_path":"Sphinx_documentation_attempt/modules/gpib_inst.py","file_name":"gpib_inst.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
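A brief usage sketch for the INSTRUMENT wrapper in the record above, assuming pyvisa is installed and imported as `visa` (as the original module does); the address, label, and command strings below are hypothetical placeholders, not values from the original file. Note that 'Ranges' must be passed as a string because the class eval()s it, and every call returns a [success, value, log-string] triple:

    import visa  # pyvisa's legacy entry point, as imported in the record above

    dvm = INSTRUMENT(visa, "A",
                     label="DVM",                     # hypothetical
                     address="GPIB0::22::INSTR",      # hypothetical
                     Ranges="[0.1, 1, 10, 100]",      # a string, because the class eval()s it
                     SettleTime="1",
                     error="SYST:ERR?")               # hypothetical SCPI command

    ok, _, log = dvm.create_instrument()
    print(log)
    if ok:
        print(dvm.send("*IDN?"))        # write a command; returns [success, None, log]
        print(dvm.read_instrument())    # read a reply; returns [success, value, log]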
+{"seq_id":"26041130959","text":"# coding: utf-8\n\nimport pickle\nimport h5py\nimport torch\nimport torch.utils.data as data\nfrom args import train_caption_pkl_path\nfrom args import feature_h5_path, feature_h5_feats\n\n\nclass V2TDataset(data.Dataset):\n '''\n Video to Text数据集的描述类,用来加载和提供数据\n 支持MSR-VTT和MSVD数据集\n 构造的时候需要以下输入:\n 1. 提供文本特征的pkl文件\n 2. 包含视频帧信息的h5文件\n 提供文本和视频h5特征,以及根据caption的id来返回数据\n '''\n\n def __init__(self, cap_pkl, feature_h5):\n with open(cap_pkl, 'rb') as f:\n self.captions, self.lengths, self.video_ids = pickle.load(f)\n h5_file = h5py.File(feature_h5, 'r')\n self.video_feats = h5_file[feature_h5_feats]\n\n def __getitem__(self, index):\n '''\n 返回一个训练样本对(包含视频frame特征和对应的caption)\n 根据caption来找对应的video,所以要求video存储的时候是按照id升序排列的\n '''\n caption = self.captions[index]\n length = self.lengths[index]\n video_id = self.video_ids[index]\n video_feat = torch.from_numpy(self.video_feats[video_id])\n return video_feat, caption, length, video_id\n\n def __len__(self):\n return len(self.captions)\n\n\nclass VideoDataset(data.Dataset):\n '''\n 仅提供视频特征以及相应ID的数据加载类,\n 之所以单独提供这个类是希望加速评价指标的计算\n '''\n def __init__(self, eval_range, feature_h5):\n self.eval_list = tuple(range(*eval_range))\n h5_file = h5py.File(feature_h5, 'r')\n self.video_feats = h5_file[feature_h5_feats]\n\n def __getitem__(self, index):\n '''\n 返回一个训练样本对(包含视频特征和对应的ID)\n '''\n video_id = self.eval_list[index]\n video_feat = torch.from_numpy(self.video_feats[video_id])\n return video_feat, video_id\n\n def __len__(self):\n return len(self.eval_list)\n\n\ndef train_collate_fn(data):\n '''\n 用来把多个数据样本合并成一个minibatch的函数\n '''\n # 根据video的长度对数据进行排序\n data.sort(key=lambda x: x[-1], reverse=True)\n\n videos, captions, lengths, video_ids = zip(*data)\n\n # 把视频合并在一起(把2D Tensor的序列变成3D Tensor)\n videos = torch.stack(videos, 0)\n\n # 把caption合并在一起(把1D Tensor的序列变成一个2D Tensor)\n captions = torch.stack(captions, 0)\n return videos, captions, lengths, video_ids\n\n\ndef eval_collate_fn(data):\n '''\n 用来把多个数据样本合并成一个minibatch的函数\n '''\n data.sort(key=lambda x: x[-1], reverse=True)\n\n videos, video_ids = zip(*data)\n\n # 把视频合并在一起(把2D Tensor的���列变成3D Tensor)\n videos = torch.stack(videos, 0)\n\n return videos, video_ids\n\n\ndef get_train_loader(cap_pkl, feature_h5, batch_size=10, shuffle=True, num_workers=3, pin_memory=True):\n v2t = V2TDataset(cap_pkl, feature_h5)\n data_loader = torch.utils.data.DataLoader(dataset=v2t,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=train_collate_fn,\n pin_memory=pin_memory)\n return data_loader\n\n\ndef get_eval_loader(cap_pkl, feature_h5, batch_size=200, shuffle=False, num_workers=1, pin_memory=False):\n vd = VideoDataset(cap_pkl, feature_h5)\n data_loader = torch.utils.data.DataLoader(dataset=vd,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=eval_collate_fn,\n pin_memory=pin_memory)\n return data_loader\n\n\nif __name__ == '__main__':\n train_loader = get_train_loader(train_caption_pkl_path, feature_h5_path)\n print(len(train_loader))\n d = next(iter(train_loader))\n print(d[0].size())\n print(d[1].size())\n print(len(d[2]))\n","repo_name":"arieshx/ssta_video_caption","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"}
+{"seq_id":"74588075774","text":"import pymongo\n\n\nclass LocalData:\n\n def __init__(self, host, port, dbname):\n # self.client = pymongo.MongoClient('mongodb://%s:%s@%s:%d/%s' % (settings.\n # MONGO_USER, settings.MONGO_PWD,\n # host, port,\n # settings.\n # MONGO_AUTHDB))[dbname]\n self.client = pymongo.MongoClient(host, port,\n socketTimeoutMS=20000)[dbname]\n self.collection = self.client['test_data']\n\n\nif __name__ == '__main__':\n LocalData('47.100.39.147', 9017, 'lilytest').collection.insert({'time': '2018', 'time2': '2019'})\n","repo_name":"BockeyE/pyprac1","sub_path":"Functions/DBConnector/pmongo_test.py","file_name":"pmongo_test.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"6470805302","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"This module implements SqueezeNet models.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"SqueezeNet1_0\", \"SqueezeNet1_1\",\n]\n\nfrom abc import ABC\n\nimport torch\n\nfrom mon.coreml import layer as mlayer, model as mmodel\nfrom mon.foundation import pathlib\nfrom mon.globals import MODELS\nfrom mon.vision.classify import base\n\n_current_dir = pathlib.Path(__file__).absolute().parent\n\n\n# region Model\n\nclass SqueezeNet(base.ImageClassificationModel, ABC):\n \"\"\"SqueezeNet.\n \n See Also: :class:`mon.vision.enhance.base.ImageEnhancementModel`\n \"\"\"\n \n configs = {}\n zoo = {}\n map_weights = {}\n \n def load_weights(self):\n \"\"\"Load weights. It only loads the intersection layers of matching keys\n and shapes between the current model and weights.\n \"\"\"\n if isinstance(self.weights, dict) \\\n and self.weights[\"name\"] in [\"imagenet\"]:\n state_dict = mmodel.load_state_dict_from_path(\n model_dir=self.zoo_dir, **self.weights\n )\n model_state_dict = self.model.state_dict()\n \"\"\"\n for k in self.model.state_dict().keys():\n print(f\"\\\"{k}\\\": \")\n for k in state_dict.keys():\n print(f\"\\\"{k}\\\"\")\n \"\"\"\n for k, v in state_dict.items():\n if \"features.\" in k:\n k = k.replace(\"features.\", \"\")\n else:\n continue\n model_state_dict[k] = v\n if self.weights[\"num_classes\"] == self.num_classes:\n model_state_dict[\"13.conv.bias\"] = state_dict[\"classifier.1.bias\"]\n model_state_dict[\"13.conv.weight\"] = state_dict[\"classifier.1.weight\"]\n self.model.load_state_dict(model_state_dict)\n else:\n super().load_weights()\n\n\n@MODELS.register(name=\"squeezenet-1.0\")\nclass SqueezeNet1_0(SqueezeNet):\n \"\"\"SqueezeNet-1.0.\n \n See Also: :class:`mon.vision.enhance.base.ImageEnhancementModel`\n \"\"\"\n \n configs = {}\n zoo = {\n \"imagenet\": {\n \"name\" : \"imagenet\",\n \"path\" : \"https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth\",\n \"file_name\" : \"squeezenet-1.0-imagenet.pth\",\n \"num_classes\": 1000,\n },\n }\n map_weights = {}\n \n def __init__(self, *args, **kwargs):\n kwargs |= {\n \"config\" : \"squeezenet-1.0.yaml\",\n \"name\" : \"squeezenet\",\n \"variant\": \"squeezenet-1.0\"\n }\n super().__init__(*args, **kwargs)\n\n\n@MODELS.register(name=\"squeezenet-1.1\")\nclass SqueezeNet1_1(SqueezeNet):\n \"\"\"SqueezeNet-1.1.\n \n See Also: :class:`mon.vision.enhance.base.ImageEnhancementModel`\n \"\"\"\n \n configs = {}\n zoo = {\n \"imagenet\": {\n \"name\" : \"imagenet\",\n \"path\" : \"https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth\",\n \"file_name\" : \"squeezenet-1.1-imagenet.pth\",\n \"num_classes\": 1000,\n },\n }\n map_weights = {}\n \n def __init__(self, *args, **kwargs):\n kwargs |= {\n \"config\" : \"squeezenet-1.1.yaml\",\n \"name\" : \"squeezenet\",\n \"variant\": \"squeezenet-1.1\"\n }\n super().__init__(*args, **kwargs)\n# endregion\n","repo_name":"phlong3105/deepacov2","sub_path":"src/mon/vision/classify/squeezenet.py","file_name":"squeezenet.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"69823912255","text":"\"\"\"\nGiven a value N, if we want to make change for N cents, \nand we have infinite supply of each of S = { S1, S2, .. , Sm} valued coins,\nhow many ways can we make the change? The order of coins doesn’t matter.\n\nFor example, \nFor N = 4 and S = {1,2,3}, there are four solutions: {1,1,1,1},{1,1,2},{2,2},{1,3}. \nSo output should be 4. \nFor N = 10 and S = {2, 5, 3, 6}, there are five solutions: {2,2,2,2,2}, {2,2,3,3}, {2,2,6}, {2,3,5} and {5,5}. \nSo output should be 5.\n\nTo count the total number of solutions, we can divide all set solutions into two sets.\n1) Solutions that do not contain mth coin (or Sm).\n2) Solutions that contain at least one Sm.\n\nLet count(S[], m, n) be the function to count the number of solutions, \nthen it can be written as sum of count(S[], m-1, n) and count(S[], m, n-Sm).\n\nwhere m is the size of coin set.\n\"\"\"\n\n\ndef coin_change(coin_set, m, sum):\n # We need n+1 rows as the table is constructed\n # in bottom up manner using the base case 0 value\n # case (n = 0)\n table = [[0 for x in range(m)] for y in range(sum + 1)]\n\n # Fill the entries for 0 value case (n = 0)\n for i in range(m):\n table[0][i] = 1\n\n # Fill rest of the table entries in bottom up manner\n for i in range(1, sum + 1):\n for j in range(m):\n # Count of solutions including S[j]\n x = table[i - coin_set[j]][j] if i - coin_set[j] >= 0 else 0\n # Count of solutions excluding S[j]\n y = table[i][j - 1] if j >= 1 else 0\n # total count\n table[i][j] = x + y\n\n return table[sum][m - 1]\n\n\ndef coin_change_recursive(coin_set, m, sum):\n\n # If n is 0 then there is 1\n # solution (do not include any coin)\n if sum == 0:\n return 1\n\n # If n is less than 0 then no\n # solution exists\n if sum < 0:\n return 0\n\n # If there are no coins and n\n # is greater than 0, then no\n # solution exist\n if m <= 0 and sum > 0:\n return 0\n\n # count is sum of solutions (i)\n # including S[m-1] (ii) excluding S[m-1]\n return coin_change_recursive(coin_set, m - 1, sum) + coin_change_recursive(coin_set, m, sum - coin_set[m - 1])\n\n\nif __name__ == '__main__':\n coin_list = [1, 2, 3]\n m = len(coin_list)\n sum = 4\n print(coin_change(coin_list, m, sum))\n","repo_name":"liquidpie/algorithms-py","sub_path":"dynamic_programming/coin_change_permutations.py","file_name":"coin_change_permutations.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71587430016","text":"from torch import nn\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom deep_shuffling.dataset import create_playlist_dataset, PlaylistDataset\nfrom deep_shuffling.neuralsort import NeuralSort\nfrom deep_shuffling.softsort import SoftSort\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.use('qtagg')\nn_batch_size = 1\nepochs = 32\nmaximum_playlist_length = 1024\nn_embed = 16\nn_heads = 8\ndevice = torch.device('cuda')\ntorch.cuda.manual_seed(1337)\ntorch.random.manual_seed(1337)\n\n\ndef project_p(P_hat):\n dim = 512\n P = torch.zeros_like(P_hat, device='cuda')\n b_idx = torch.arange(1).repeat([1, dim]).view(dim, 1).transpose(\n dim0=1, dim1=0).flatten().type(torch.cuda.LongTensor)\n r_idx = torch.arange(dim).repeat(\n [1, 1]).flatten().type(torch.cuda.LongTensor)\n c_idx = torch.argmax(P_hat, dim=-1).flatten() # this is on cuda\n brc_idx = torch.stack((b_idx, r_idx, c_idx))\n\n P[brc_idx[0], brc_idx[1], brc_idx[2]] = 1\n P_hat = (P - P_hat).detach() + P_hat\n return P_hat\n\n\nclass MultiheadAttentionBlock(nn.Module):\n def __init__(self, in_features: int, n_embed: int, n_heads: int):\n super().__init__()\n self.query = nn.Linear(in_features=in_features, out_features=n_embed, device=device)\n self.key = nn.Linear(in_features=in_features, out_features=n_embed, device=device)\n self.value = nn.Linear(in_features=in_features, out_features=n_embed, device=device)\n self.multiheadattention = nn.MultiheadAttention(embed_dim=n_embed,\n num_heads=n_heads,\n dropout=0,\n batch_first=True,\n device=device)\n\n def forward(self, x, mask):\n q = self.query(x)\n k = self.key(x)\n v = self.value(x)\n x = self.multiheadattention(query=q,\n key=k,\n value=v,\n key_padding_mask=mask,\n need_weights=False,\n attn_mask=None,\n average_attn_weights=True)\n return x\n\n\nclass ShuffleModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.b1 = MultiheadAttentionBlock(in_features=2, n_embed=n_embed, n_heads=n_embed // 2)\n self.relu1 = nn.ReLU()\n self.b2 = MultiheadAttentionBlock(in_features=n_embed, n_embed=n_embed, n_heads=n_embed // 2)\n self.relu2 = nn.ReLU()\n self.b3 = MultiheadAttentionBlock(in_features=n_embed, n_embed=1, n_heads=1)\n self.sort = SoftSort()#NeuralSort(tau=1)\n self.l1 = nn.Linear(in_features=2, out_features=n_embed, bias=False, device=device)\n self.l2 = nn.Linear(in_features=n_embed, out_features=1, bias=False, device=device)\n\n\n def forward(self, inp: dict[str, torch.tensor]):\n # x: {\"constant\", \"must_vary\"}\n xc = inp[\"constant\"]\n mask: torch.Tensor = inp[\"mask\"]\n #x, _ = self.b1(xc, mask)\n x = self.l1(xc)\n x = self.relu1(x)\n x = self.l2(x)\n #x, _ = self.b2(x, mask)\n #x = self.relu2(x)\n #x, _ = self.b3(x, mask)\n B, N, _ = x.shape\n x = torch.reshape(x, shape=(B, N))\n x = torch.masked_fill(x, mask=mask, value=-torch.inf)\n x = self.sort(x)\n return x\n\n\nclass PermutationMatrixLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, M: torch.Tensor):\n B, N, N = M.shape\n M2 = torch.square(M)\n M_abs = torch.abs(M)\n column_loss = torch.sum(torch.sum(M_abs, dim=2, keepdim=True) - torch.pow(torch.sum(M2, dim=2, keepdim=True), exponent=0.5), dim=1, keepdim=True)\n row_loss = torch.sum(torch.sum(M_abs, dim=1, keepdim=True) - torch.pow(torch.sum(M2, dim=1, keepdim=True), exponent=0.5), dim=2, keepdim=True)\n loss = torch.squeeze(column_loss + row_loss)/N\n return loss\n\n\nclass ShuffleLoss(nn.Module):\n def __init__(self, lambd: float):\n super(ShuffleLoss, 
self).__init__()\n self.avg_pooling = torch.nn.AvgPool2d(kernel_size=(3, 1))\n self.permutation_matrix_loss = PermutationMatrixLoss()\n self.lambd = lambd\n\n def forward(self, permutation_matrix, features):\n features_sorted = torch.bmm(permutation_matrix, features[:, :])\n avg_feats = torch.sum(features_sorted, dim=-2)\n shifted_features = torch.roll(features_sorted, -1, -2)\n pooling = self.avg_pooling(features_sorted)\n noise_squared_diff = (features_sorted[:, :-1, :] - shifted_features[:, :-1, :]) ** 2\n noise_loss = torch.sum(noise_squared_diff)**0.5\n pooling_squared_diff = (pooling - avg_feats) ** 2\n global_level_loss = torch.sum(pooling_squared_diff)\n #permutation_matrix_loss = self.permutation_matrix_loss(permutation_matrix)\n loss = noise_loss + global_level_loss# + self.lambd*permutation_matrix_loss\n return loss\n\n\ndef train(model: nn.Module, dataset: PlaylistDataset):\n data_loader = DataLoader(dataset=dataset,\n batch_size=1)\n criterion = ShuffleLoss(lambd=1)\n optimizer = torch.optim.AdamW(model.parameters(),\n lr=0.01, )\n torch.autograd.set_detect_anomaly(True)\n for i in range(1000):\n for playlist in data_loader:\n criterion.zero_grad()\n out: torch.Tensor = model(playlist)\n # print(torch.argmax(out[0, 0, :]))\n loss = criterion(out, playlist[\"constant\"])\n print(loss.item())\n loss.backward()\n optimizer.step()\n return model\n\n\ndef apply_model(playlist, model):\n n = playlist[\"n\"]\n B, N, D = playlist[\"constant\"].shape\n p = model(playlist)\n p_star = project_p(p)[0, :, :]\n print(p_star)\n d_star = p_star @ playlist[\"constant\"][0, :n, :]\n\n d_line = (d_star[:, di] for di in range(D))\n for line in d_line:\n l = line.tolist()\n plt.scatter(list(range(n)), l)\n plt.show()\n\n\nif __name__ == \"__main__\":\n model = ShuffleModel()\n dataset = create_playlist_dataset()\n model = train(model=model, dataset=dataset)\n data_loader = DataLoader(dataset=dataset,\n batch_size=1)\n for playlist in data_loader:\n apply_model(playlist, model=model)\n","repo_name":"AdamSkarboJonsson/deep-playlist-shuffling","sub_path":"supervised_learning/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2360555174","text":"from helpers import cmd\nimport os.path\n\n# Have to go one folder up\ncmd.setBase(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\ncommands = [\n\t# Django\n\tcmd.relative(\"\"),\n\t[\"git\", \"submodule\", \"init\"],\n\t[\"git\", \"submodule\", \"update\"],\n\tcmd.relative(\"server/dobby\"),\n\t[\"git\", \"submodule\", \"init\"],\n\t[\"git\", \"submodule\", \"update\"],\n\tcmd.relative(\"server/djangoserver\"),\n\t[\"python\", \"manage.py\", \"syncdb\"],\n\t[\"python\", \"load_default_data.py\"]\n]\n\ndef run():\n\tcmd.run(commands)\n\n# And run.\nif __name__ == \"main\":\n\trun()","repo_name":"ialexi/Contacts","sub_path":"commands/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"79"}
+{"seq_id":"72207716736","text":"\"\"\"day12\"\"\"\n\nfrom collections import deque\nimport numpy as np\n\nDEBUG, TEST = False, False\nDAY = \"12\"\n\n\nclass Graph:\n def __init__(self, graph: np.ndarray):\n self.graph = graph\n self.rows = graph[:, 0].size\n self.cols = graph[0].size\n self.edges = np.array([[0 for c in range(self.cols)] for r in range(self.rows)])\n\n def addEdge(self, r: int, c: int, height: int):\n self.edges[r, c] = height\n\n def bfs(self, start: str, target: str):\n queue: deque[tuple[tuple[int, int], int]] = deque()\n visited = set()\n # using array of vectors instead of tuples for that sweet sweet vector addition\n dirs = [\n np.array([0, 1]),\n np.array([0, -1]),\n np.array([1, 0]),\n np.array([-1, 0]),\n ]\n\n for r, row in enumerate(self.graph):\n for c, col in enumerate(row):\n if col == start:\n queue.appendleft(((r, c), 0))\n\n while queue:\n node, height = queue.pop()\n\n if self.graph[node] == target:\n return height\n\n if node not in visited:\n visited.add(node)\n\n for d in dirs:\n neighbor = node + d\n if 0 <= neighbor[0] < self.rows and 0 <= neighbor[1] < self.cols:\n if self.edges[tuple(neighbor)] <= 1 + self.edges[node]:\n queue.appendleft((tuple(neighbor), height + 1))\n\n\ndef solve(graph, start):\n heightMap = {letter: i for i, letter in enumerate(\"abcdefghijklmnopqrstuvwxyz\")}\n heightMap[\"S\"] = 0\n heightMap[\"E\"] = 25\n\n g = Graph(np.array(graph))\n\n for r, row in enumerate(graph):\n for c, col in enumerate(row):\n g.addEdge(r, c, heightMap[col])\n\n print(g.bfs(start, \"E\"))\n\n\nif __name__ == \"__main__\":\n # TEST = True\n # DEBUG = True\n datasets = [f\"./day{DAY}/day{DAY}input.txt\", f\"./day{DAY}/testday{DAY}input.txt\"]\n filename = datasets[1] if TEST else datasets[0]\n with open(file=filename, mode=\"r\", encoding=\"utf8\") as file:\n lines = [list(line.strip()) for line in file.readlines()]\n solve(lines, \"S\")\n solve(lines, \"a\")\n","repo_name":"m-ttaylor/adventofcode2022","sub_path":"day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71577294334","text":"from tasmanium import logger\nfrom tasmanium.registrars import Given\n\nl = logger.getLogger(__name__)\n\n\n@Given(\"a user which exists\")\ndef create_user_that_exists(context):\n l.info(f\"hello from the step 'Given a user which exists' - his data are {context.data_table} \")\n\n if context.data_table[0]['name'] == 'First B butterfly':\n context.attach_plaintext(data=\"A failing file in a failing test.\")\n assert False, \"assertion failed intentionally\"\n\n context.attach_plaintext(filename=\"success.txt\", data=\"this step succeeded!\", description=\"Some description of this file.\")\n context.attach_plaintext(filename=\"success2.txt\", data=\"This step succeeded as well!\")\n\n with open(\"D:/cool-crab.png\", \"rb\") as f:\n context.step.attach_image(filename=\"cool-crab.png\", data=f.read(), description=\"We can do pictures as well!\")\n\n context.attach_plaintext(filename=\"success3.txt\", data=\"Hi there! I am inside the file wee!\",\n description=\"Attach files anytime inside the step!\")\n\n with open(\"D:/lipsum.txt\", \"r\") as f:\n context.attach_plaintext(filename=\"lipsum.txt\", data=f.read(), description=\"This is a long file, check it out.\")\n\n # browser = webdriver.Remote(\n # desired_capabilities=webdriver.DesiredCapabilities.FIREFOX,\n # command_executor='http://localhost:4444/wd/hub'\n # )\n # from time import sleep\n # for _ in range(10):\n # browser.get(\"https://www.seznam.cz\")\n # sleep(1)\n # browser.get(\"https://www.google.com\")\n # sleep(1)\n # browser.get(\"https://www.atlas.cz\")\n # sleep(1)\n # browser.get(\"https://www.novinky.cz\")\n # sleep(1)\n # browser.get(\"https://www.yahoo.com\")\n\n\n@Given(\"a user which {status}\")\ndef create_user_doing_something(context, status):\n l.info(f\"hello from the step 'Given a user which {{status}}' - i am '{status}' right now\")\n l.info(f\"my docstring type is {context.docstring_type}, my docstring is {context.docstring}\")\n l.info(f\"my docstring parsed as json is {context.docstring_json}\")\n context.attach_plaintext(filename=f\"success-status.txt\", data=f\"i am '{status}' right now\")\n","repo_name":"Dri0m/tasmanium","sub_path":"steps/subcategory/given.py","file_name":"given.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29813307039","text":"import logging\nimport os, time, gc, argparse, math\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config, Conv1D\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport copy\nfrom util import init_para_frompretrained, num_params, prepare_dataset, linear_schedule, switch_schedule\nfrom model import VAEModel\nimport nltk\nfrom bi_training_core import train_step, Device\nfrom bi_loss import bidirectional_loss\nfrom bi_eval_step import validate_step, plot_input_distribution, generate_samples\n\nnltk.download('punkt')\nnltk.download('stopwords')\n# devices = '0'\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = devices\n\n\ndef main():\n logger = logging.getLogger(\"transformers\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument('experiment', type=str)\n\n # Default parameters are set based on single GPU training\n parser.add_argument('--lr', type=float, default=5e-5)\n parser.add_argument(\"--seed\", type=int, default=0)\n\n parser.add_argument('--data_type', type=str, default='t1', choices=['t' + str(i) for i in range(9)], help=\"t: type\")\n parser.add_argument('--model_type', type=str, default='cvae', choices=['cvae', 'ae_vae_fusion'])\n parser.add_argument('--iterations', type=int, default=101640 * 4) # wp 850001 wi 300001 ax 300001 yp 800001\n parser.add_argument('--dataset', type=str, default='wi', choices=['ax', 'yp', 'wp', 'wi'], help=\"Dataset to use for training\")\n parser.add_argument('--warmup', type=int, default=10000,\n help=\"Amount of iterations to warmup, then decay. (-1 for no warmup and decay)\")\n\n parser.add_argument('--switch-time', type=float, default=0,\n help=\"Percentage of iterations to spend on short sequence training.\")\n parser.add_argument('--data-dir', type=str, default='data')\n parser.add_argument('--out-dir', type=str, default='out')\n parser.add_argument('--load', type=str, help='path to load model from') # , default='out/test/'\n parser.add_argument('--workers', default=1, type=int, metavar='N',\n help='number of data loading workers')\n # use GPU\n parser.add_argument('--gpu', default=0, type=int)\n parser.add_argument('--no_gpu', action=\"store_true\")\n\n parser.add_argument('--fp16', action='store_true', help=\"Train using FP16?\")\n parser.add_argument('--fp16_opt_level', default='O0', type=str, required=False)\n\n # KL cost annealing, increase beta from beta_0 to 1 in beta_warmup steps\n parser.add_argument('--beta_0', default=1.00, type=float)\n parser.add_argument('--beta_warmup', type=int, default=50000)\n # cyc_vae parameters\n parser.add_argument('--cycle', type=int, default=101640)\n\n parser.add_argument('--add_input', action=\"store_true\")\n parser.add_argument('--add_attn', action=\"store_true\")\n parser.add_argument('--add_softmax', action=\"store_true\")\n parser.add_argument('--attn_proj_vary', action=\"store_true\")\n\n parser.add_argument('--learn_prior', action=\"store_true\")\n\n parser.add_argument('--train_batch_size', type=int, default=1)\n parser.add_argument('--val_batch_size', type=int, default=1)\n parser.add_argument('--test_batch_size', type=int, default=1)\n\n parser.add_argument('--short_seq_len', type=int, default=512)\n parser.add_argument('--long_seq_len', type=int, default=1024)\n\n # Loss weighting args\n parser.add_argument('--fwd_loss_weight', type=float, default=1, help=\"Weight multiplier for forward loss.\")\n parser.add_argument('--bkwd_loss_weight', type=float, default=1, 
help=\"Weight multiplier for backward loss.\")\n parser.add_argument('--all_sentence_loss_weight', type=float, default=1, help=\"Weight multiplier for all previous sentence loss (0 to A -> B).\")\n parser.add_argument('--prompt_loss_weight', type=float, default=1, help=\"Weight multiplier for backward prompt loss.\")\n \n # Reload args\n parser.add_argument('--reload_path', type=str, default='')\n parser.add_argument('--reload_iters', type=int, default=0)\n\n # NOTE: Use for changing the arguments of the program\n args = parser.parse_args()\n\n if args.model_type == 'cvae':\n args.learn_prior = True\n else:\n args.learn_prior = False\n\n devices = '0'\n\n # GPU\n if not torch.cuda.is_available():\n args.no_gpu = True\n\n gpu = not args.no_gpu\n if gpu:\n logger.info(f\"There are {torch.cuda.device_count()} available GPUs!\")\n logger.info('Using GPU devices {}'.format(devices))\n torch.cuda.set_device(args.gpu)\n logger.info('Current single GPU: {}'.format(torch.cuda.current_device()))\n\n Device.set_device(devices, args.gpu if gpu else \"cpu\")\n\n # randomness\n np.random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n if gpu: torch.cuda.manual_seed(args.seed); torch.cuda.manual_seed_all(args.seed)\n\n logger.info('\\n*******************************************************************************\\n')\n logger.debug(\"the configuration:\")\n logger.debug(str(args).replace(',', '\\n'))\n\n logger.info('Loading models...')\n\n logger.setLevel(logging.WARNING)\n save_folder = os.path.join(args.out_dir, args.experiment)\n os.makedirs(save_folder, exist_ok=True)\n t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)\n # importlib.reload(logger)\n # logger.basicConfig(filename=os.path.join(save_folder, 'train.log'), level=logger.INFO, format='%(asctime)s--- %(message)s')\n cache_dir = os.path.join(args.out_dir, 'model_cache')\n os.makedirs(cache_dir, exist_ok=True)\n # Load pre-trained teacher tokenizer (vocabulary)\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir=cache_dir)\n # Hack to allow tokenizing longer sequences.\n tokenizer.max_len = int(1e12)\n gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir=cache_dir)\n logger.info(f'gpt2_params: {num_params(gpt2_model)}') # gpt2: 124439808\n config = GPT2Config()\n config.n_ctx = 1024\n\n # add special tokens\n special_tokens = {\n 'sentence_fwd': '',\n 'sentence_bkwd': ''\n }\n # special_tokens_dict = {\n # 'pad_token': '<|startoftext|>',\n # 'cls_token': '<|startofcond|>',\n # 'sep_token': '<|sepofcond|>',\n # 'mask_token': '<|endofcond|>'\n # }\n # num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n logger.info('We have added', len(special_tokens), 'special tokens')\n # # Notice: resize_token_embeddings expect to receive the full size of the new vocab\n # gpt2_model.resize_token_embeddings(len(tokenizer))\n # assert tokenizer.pad_token == '<|startoftext|>'\n\n VAE = VAEModel(config, add_input=args.add_input, add_attn=args.add_attn, add_softmax=args.add_softmax,\n attn_proj_vary=args.attn_proj_vary, learn_prior=args.learn_prior)\n init_para_frompretrained(VAE.transformer, gpt2_model.transformer, share_para=True)\n init_para_frompretrained(VAE.encoder, gpt2_model.transformer, share_para=False)\n if args.learn_prior:\n init_para_frompretrained(VAE.encoder_prior, VAE.encoder, share_para=True)\n VAE.encoder_prior.averageSelfAttention.attention_weights = VAE.encoder.averageSelfAttention.attention_weights\n \n VAE.lm_head.weight = gpt2_model.lm_head.weight\n if 
VAE.add_softmax:\n VAE.lm_head_rep = Conv1D(*gpt2_model.lm_head.weight.size())\n # VAE.lm_head_rep = LM_head_rep(*gpt2_model.lm_head.weight.size()[::-1])\n logger.setLevel(logging.INFO)\n logger.info(f'VAE_params: {num_params(VAE)}') # 286694400\n args.load = args.reload_path\n if args.load:\n logger.info('Loading model weights...')\n state = torch.load(os.path.join(args.load), map_location=\"cpu\")\n if 'module' in list(state.keys())[0]: # model_path is data parallel model with attr 'module'\n state_copy = copy.copy(state)\n keys = state_copy.keys()\n for k in keys:\n state[k.replace('module.', '')] = state.pop(k)\n VAE.load_state_dict(state)\n gc.collect()\n logger.info('Done.')\n\n # fix pre-trained parameters before certain iterations\n tuning_all_after_iters = 40000\n tuning_all = False\n for name, parameter in VAE.named_parameters():\n # logger.info((name, parameter.requires_grad))\n new_pars = ['c_z', 'attention_weights', 'mean', 'logvar', 'input_proj', 'attn_proj', 'Nu_fc1', 'Nu_fc2', 'lm_head_rep']\n\n if not any([True if n in name else False for n in new_pars]):\n parameter.requires_grad = False\n\n logger.info('Setup data...')\n curr_seq_len = args.short_seq_len\n train_loader, val_loader, test_loader = prepare_dataset(\n args.data_dir, args.dataset, tokenizer,\n args.train_batch_size, curr_seq_len,\n args.val_batch_size, curr_seq_len,\n args.test_batch_size, curr_seq_len,\n make_test=True,\n num_workers=args.workers, data_type=args.data_type\n )\n logger.info('Done.')\n\n logger.info('Wrapping models and optimizers...')\n\n # Apply linear scaling rule to increase batch size for short sequence training.\n curr_batch_size = args.train_batch_size\n curr_seq_len = args.short_seq_len\n lr_schedule = switch_schedule(linear_schedule(args), curr_batch_size / curr_seq_len,\n int(args.iterations * args.switch_time))\n VAE = VAE.to(Device.device)\n VAE.train()\n\n optimizer = torch.optim.AdamW(VAE.parameters(), lr=args.lr)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_schedule)\n\n loss_fn = nn.CrossEntropyLoss(reduction='none')\n logger.info('Done.')\n\n logger.info(\"Begin training iterations\")\n max_val_batches = 20000 # max num. 
of val batches\n logger.info(\"Total iteration: %d\" % args.iterations)\n e = 0 # number of epoch\n\n num_iters = 0\n # Resume training from a checkpoint\n if args.load:\n num_iters = int(args.reload_iters)\n logger.info(\"Resume training from iteration %d\" % num_iters)\n\n optimizer.zero_grad()\n beta = args.beta_0\n\n def eval_step():\n '''Evaluates the performance of the model after a training step'''\n\n logger.info(\"Measuring Input distribution...\")\n plot_input_distribution(VAE, tokenizer, args.model_type, test_loader, args.dataset, num_iters, save_folder)\n logger.info(\"Validation Step...\")\n validate_step(VAE, tokenizer, args.model_type, val_loader, num_iters, max_val_batches, loss_fn, save_folder)\n logger.info(\"Generate output samples...\")\n generate_samples(VAE, tokenizer, args, test_loader, num_iters, save_folder)\n\n def calculate_loss(x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask):\n '''Calculates the loss of the model forward, backward, and for the sentence combinations'''\n\n # This computes a training step going from input to output and computes the losses\n # NORMAL LOSS, Prompt -> Story\n if args.fwd_loss_weight > 0:\n loss_forward, ce_loss_forward, kl_loss_forward = train_step(VAE, optimizer, x_mask, x_tokens, y_mask, y_tokens,\n input_tokens, target_tokens, mask, loss_fn, beta, args.model_type)[-1]\n else:\n loss_forward, ce_loss_forward, kl_loss_forward = 0, 0, 0\n\n # PROMPT LEVEL LOSS, Story -> Prompt\n if args.prompt_loss_weight > 0:\n loss_prompt_backward, ce_loss_prompt_backward, kl_loss_prompt_backward = train_step(VAE, optimizer, y_mask, y_tokens, x_mask, x_tokens,\n target_tokens, input_tokens, mask, loss_fn, beta, args.model_type)[-1]\n else:\n loss_prompt_backward, ce_loss_prompt_backward, kl_loss_prompt_backward = 0, 0, 0\n\n # BIDIRECTIONAL LOSSES\n\n # This finds the total loss for the previous sentence, Sentence B -> Sentence A and Sentence A -> Sentence B\n if args.bkwd_loss_weight > 0:\n previous_sentence_loss_output = bidirectional_loss(\"previous_sentence\", VAE, optimizer, y_mask,\n y_tokens, mask, loss_fn, beta, args.model_type, tokenizer, curr_batch_size, curr_seq_len, input_tokens)\n (total_loss_sentence_b_a, total_loss_sentence_a_b, total_ce_loss_sentence_b_a,\n total_ce_loss_sentence_a_b, total_kl_loss_sentence_b_a, total_kl_loss_sentence_a_b) = previous_sentence_loss_output\n else:\n total_loss_sentence_b_a, total_loss_sentence_a_b, total_ce_loss_sentence_b_a, total_ce_loss_sentence_a_b, total_kl_loss_sentence_b_a, total_kl_loss_sentence_a_b = 0, 0, 0, 0, 0, 0\n \n # This finds the total loss for all previous sentences, Sentence B -> All Previous Sentences\n if args.all_sentence_loss_weight > 0:\n all_previous_sentences_loss_output = bidirectional_loss(\"all_previous_sentences\", VAE, optimizer, y_mask,\n y_tokens, mask, loss_fn, beta, args.model_type, tokenizer, curr_batch_size, curr_seq_len, input_tokens)\n (total_loss_all_previous_sentences, total_ce_loss_all_previous_sentences, total_kl_loss_all_previous_sentences) = all_previous_sentences_loss_output\n else:\n total_loss_all_previous_sentences, total_ce_loss_all_previous_sentences, total_kl_loss_all_previous_sentences = 0, 0, 0\n\n # TOTAL LOSSES\n loss = (args.fwd_loss_weight*loss_forward) + (args.prompt_loss_weight*loss_prompt_backward) + \\\n (args.bkwd_loss_weight*total_loss_sentence_b_a) + \\\n (args.bkwd_loss_weight*total_loss_sentence_a_b) + (args.all_sentence_loss_weight*total_loss_all_previous_sentences)\n\n ce_loss = 
(args.fwd_loss_weight*ce_loss_forward) + (args.prompt_loss_weight*ce_loss_prompt_backward) + \\\n (args.bkwd_loss_weight*total_ce_loss_sentence_b_a) + \\\n (args.bkwd_loss_weight*total_ce_loss_sentence_a_b) + (args.all_sentence_loss_weight*total_ce_loss_all_previous_sentences)\n\n kl_loss = (args.fwd_loss_weight*kl_loss_forward) + (args.prompt_loss_weight*kl_loss_prompt_backward) + \\\n (args.bkwd_loss_weight*total_kl_loss_sentence_b_a) + \\\n (args.bkwd_loss_weight*total_kl_loss_sentence_a_b) + (args.all_sentence_loss_weight*total_kl_loss_all_previous_sentences)\n\n return loss, ce_loss, kl_loss\n\n # eval_step()\n torch.save(VAE.state_dict(), os.path.join(save_folder,\n 'model_' + '{:07d}'.format(num_iters) +\n f'_bidirectional_{args.fwd_loss_weight}_{args.bkwd_loss_weight}_{args.all_sentence_loss_weight}_{args.prompt_loss_weight}' + '.pt')\n )\n\n e = 0\n while num_iters < args.iterations:\n # Run epoch\n st = time.time()\n\n # Training\n logger.info('\\n----------------------------------------------------------------------')\n logger.info(\"Training loop. Batches: %d\" % len(train_loader))\n\n with tqdm(total=len(train_loader)) as pbar:\n for i, (x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask) in enumerate(train_loader):\n # NOTE: Swaps all the variables for the bidirectional running of the program\n # if num_iters % args.cycle >= args.cycle - args.beta_warmup:\n # beta = min(1.0, beta + (1. - args.beta_0) / args.beta_warmup)\n\n if not tuning_all and num_iters >= tuning_all_after_iters:\n for name, parameter in VAE.named_parameters():\n # logger.info((name, parameter.requires_grad))\n parameter.requires_grad = True\n tuning_all = True\n\n try:\n loss, ce_loss, kl_loss = calculate_loss(x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask)\n except RuntimeError as e:\n if 'out of memory' in str(e):\n logger.info('| WARNING: ran out of memory, skipping batch')\n torch.cuda.empty_cache()\n gc.collect()\n continue\n else:\n raise e\n\n if num_iters % 100 == 0:\n logger.info(f\"CURRENT ITERATION: {num_iters}\")\n logger.info(f\"CURRENT LOSS: Loss: {loss}, CE: {ce_loss}, KL: {kl_loss}\")\n\n lr = scheduler.get_last_lr()[0]\n # Log to Tensorboard\n t_writer.add_scalar('loss', loss, num_iters)\n t_writer.add_scalar('ppl', math.exp(min(ce_loss, 10)), num_iters)\n t_writer.add_scalar('lr', lr, num_iters)\n t_writer.add_scalar('iter_time', time.time() - st, num_iters)\n t_writer.add_scalar('kl', kl_loss, num_iters)\n t_writer.add_scalar('beta', beta, num_iters)\n\n if args.model_type == 'ae_vae_fusion':\n # Output is never defined. 
Raise error\n raise NotImplementedError()\n loss, ce_loss, kl_loss = output[0]\n # Log to Tensorboard\n t_writer.add_scalar('ae_loss', loss, num_iters)\n t_writer.add_scalar('ae_kl', kl_loss, num_iters)\n\n st = time.time()\n\n if args.warmup != -1:\n scheduler.step()\n \n end = num_iters >= args.iterations\n if end: break\n num_iters += 1\n pbar.update(1)\n\n if num_iters % args.cycle == 0:\n beta = args.beta_0\n logger.info('KL annealing restart')\n\n if num_iters % 10000 == 0:\n eval_step()\n\n if num_iters % 5000 == 0:\n logger.info('Saving model...')\n logger.info(\"Iteration completed: %d, remained %d\" % (num_iters, args.iterations - num_iters))\n logger.info(\"Saving model...\")\n logger.info('\\n------------------------------------------------------')\n torch.save(VAE.state_dict(), os.path.join(save_folder,\n 'model_' + '{:07d}'.format(num_iters) +\n f'_bidirectional_{args.fwd_loss_weight}_{args.bkwd_loss_weight}_{args.all_sentence_loss_weight}_{args.prompt_loss_weight}' + '.pt')\n )\n\n if args.switch_time > 0 and num_iters == int(args.iterations * args.switch_time):\n logger.info(\"Switch to long sequence training\")\n curr_seq_len = args.long_seq_len\n curr_batch_size = args.train_batch_size\n train_loader, val_loader, test_loader = prepare_dataset(\n args.data_dir, args.dataset, tokenizer,\n args.train_batch_size, curr_seq_len,\n args.val_batch_size, curr_seq_len,\n args.test_batch_size, curr_seq_len,\n make_test=True,\n num_workers=args.workers, data_type=args.data_type\n )\n\n if not end:\n e += 1\n logger.info(\"Training loop. The ith epoch completed: %d\" % e)\n\n torch.save(VAE.state_dict(), os.path.join(save_folder,\n 'model_' + '{:07d}'.format(num_iters) +\n f'_bidirectional_{args.fwd_loss_weight}_{args.bkwd_loss_weight}_{args.all_sentence_loss_weight}_{args.prompt_loss_weight}' + '.pt'))\n logger.info(\"Training complete.\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"AIRC-ASR/AIRC-ASR-Experimental","sub_path":"bidirectional_predictions/TransformerCVAE/train_bidirectional.py","file_name":"train_bidirectional.py","file_ext":"py","file_size_in_byte":19509,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"8256244384","text":"import vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\n\nfrom vktools import Keyboard, ButtonColor, Text, Carousel, Element\n\nvk = vk_api.VkApi(token=\"token\")\n\n\ndef send_message(user_id, message, carousel=None):\n values = {\n \"user_id\": user_id,\n \"message\": message,\n \"random_id\": 0\n }\n\n if carousel is not None:\n values[\"template\"] = carousel.add_carousel()\n\n vk.method(\"messages.send\", values)\n\n\nfor event in VkLongPoll(vk).listen():\n if event.type == VkEventType.MESSAGE_NEW and event.to_me:\n text = event.text.lower()\n user_id = event.user_id\n\n if text == \"test carousel\":\n carousel = Carousel(\n [\n Element(\n \"Title 1\",\n \"Description 1\",\n \"-203980592_457239030\", # photo_id\n \"https://vk.com/fsoky\", # redirect url, if user click on element\n [Text(\"Button 1\", ButtonColor.POSITIVE)]\n ),\n Element(\n \"Title 2\",\n \"Description 2\",\n \"-203980592_457239030\", # photo_id\n \"https://vk.com/fsoky\", # redirect url, if user click on element\n [Text(\"Button 2\", ButtonColor.PRIMARY)]\n )\n ]\n )\n\n send_message(user_id, \"VkTools Carousel by Fsoky ~\", carousel=carousel)","repo_name":"Fsoky/vktools","sub_path":"examples/template_example.py","file_name":"template_example.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"}
+{"seq_id":"9484296564","text":"#Nearest neighbour approach to classification of breast tissue after pre-processing data and removing bad fields from the dataset(NNBTClassifier+)\r\n\r\nimport datetime\r\nprint(datetime.datetime.now())\r\n\r\ndef dist(l1, l2):\r\n temp = 0\r\n for x in [2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,27,29]:\r\n temp += (float(l1[x]) - float(l2[x])) ** 2\r\n distance = temp ** 0.5\r\n return distance\r\n\r\nf = open(\"wbtdPP.txt\", \"r\")\r\n\r\ndata = []\r\nrecord = []\r\ndList = []\r\nnnList = []\r\ndiagnosis = []\r\n\r\nfor line in f:\r\n sTemp = str(line) \r\n record = list(sTemp.split(\",\"))\r\n data.append(record)\r\n\r\nf.close()\r\n\r\nprint(\"Total size of dataset: \", len(data), \" records found.\")\r\n\r\n#Building a prediction list\r\n\r\nfor i in range(0, len(data)):\r\n dList = []\r\n for j in range(0, len(data)):\r\n if i != j:\r\n d = dist(data[i], data[j])\r\n dList.append(d)\r\n for z in range(0, len(dList)):\r\n if dList[z] == min(dList):\r\n nnList.append(z)\r\n print(\".\", end = \"\")\r\nprint()\r\ncorrectpred = 0\r\n\r\nfor q in range(0, len(data)):\r\n if data[q][1] == data[nnList[q]][1]:\r\n correctpred += 1\r\naccuracy = (correctpred / int(len(data))) * 100\r\n\r\nprint(\"Accuracy of NNBTPP+ Classifier =\", accuracy, \"%\")\r\nprint(\"No. of correct predictions =\", correctpred)\r\nprint(datetime.datetime.now())\r\ninput()\r\n","repo_name":"akkivasu/Breast-Tissue-Analysis","sub_path":"NNBTClassifierPP+.py","file_name":"NNBTClassifierPP+.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14253534820","text":"import sys\nsys.stdin = open('W.txt')\n\n\ndef dfs(r, c, idx):\n global cnt\n if visited[r][c] != -1: # 이미 지난 지점이면 일단 컷!\n if visited[r][c] == idx: # 시작지점에 다시 도달했을 때만 +1\n cnt += 1\n return\n\n visited[r][c] = idx # 방문체크\n d = mat[r][c]\n new_r, new_c = r + dirs[d][0], c + dirs[d][1]\n if 0 <= new_r < N and 0 <= new_c < M:\n dfs(new_r, new_c, idx)\n\n\nN, M = map(int, input().split())\nmat = [list(input()) for _ in range(N)]\nvisited = [[-1]*M for _ in range(N)]\ndirs = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)} # 방향설정 딕셔너리\ncnt = idx = 0\nfor i in range(N):\n for j in range(M):\n dfs(i, j, idx)\n idx += 1\nprint(cnt)","repo_name":"woohree/ALGO2ITHM_STUDY","sub_path":"baekjoon/07월/0725 스도쿠 피리부는사나이 순서 로봇시뮬레이션 단어덧셈/g3_16724_피리부는사나이/woohree.py","file_name":"woohree.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"28709433345","text":"import urllib.request\nimport json\n\n# Header declarations for output print.\nhostTitle = \"HOST DETAILS\\n\"\ncountryTitle = \"COUNTRY DETAILS\\n\"\n\n# Format printing constants.\ndotNumber = 70\ncountryPadding = 50\ndetailsPadding = 40\n\n'''\nAll available types of responses for IP along with their urls.\n'''\n\nipValidTypes = ['plain', 'json', 'jsonp']\nipPlain = 'https://get.geojs.io/v1/ip'\nipJson = 'https://get.geojs.io/v1/ip.json'\nipLookup = {'plain' : ipPlain, 'json' : ipJson}\n\n'''\nAll available types of responses for country along with their urls.\n'''\n\ncountryValidTypes = ['plain', 'plainfull', 'json', 'jsonp']\ncountryPlain = 'https://get.geojs.io/v1/ip/country'\ncountryFullPlain = 'https://get.geojs.io/v1/ip/country/full'\ncountryJson = 'https://get.geojs.io/v1/ip/country/{ip address}.json'\ncountryLookup = {'plain' : countryPlain, 'plainfull' : countryFullPlain, 'json' : countryJson}\n\n'''\nAll available types of responses for all geo data along with their urls.\n'''\ngeoJson = 'https://get.geojs.io/v1/ip/geo/{ip address}.json'\n\n'''\nAll available types of responses for DNS PTR records.\n'''\nptrPlain = 'https://get.geojs.io/v1/dns/ptr'\n\n# Gets the response of a url that returns plain text as response.\ndef getPlainResponse(url):\n return urllib.request.urlopen(url).read().decode().strip()\n\n# Gets the response of a url that returns json as response and replaces the default argument '{ip address}' with the IP address whose country we're looking\ndef getJsonResponse(url, ipAddress):\n response = urllib.request.urlopen(url.replace('{ip address}',ipAddress)).read().decode()\n outDict = json.loads(response)\n return outDict\n\n# Gets host's IP address, having default 'returnType' as 'plain', which can be changed accordingly.\ndef getIP(returnType = 'plain'):\n if isinstance(returnType,str):\n returnType = returnType.lower()\n if returnType in ipValidTypes:\n if returnType == 'plain':\n return getPlainResponse(ipLookup[returnType])\n else:\n return getJsonResponse(ipLookup[returnType],'')\n else:\n raise ValueError('\\'returnType\\' does not belong in valid types: ' + str(ipValidTypes))\n else:\n raise TypeError('\\'returnType\\' must be of type \\'str\\'(' + type(returnType).__name__ + ' was given).')\n\n# Gets the country of a specific IP address.\ndef getCountry(ipAddress, returnType = 'plain'):\n if not isinstance(ipAddress,str):\n raise TypeError('\\'ipAddress\\' is not an instance of \\'str\\'('+ type(ipAddress).__name__ + ' was given).')\n if isinstance(returnType,str):\n returnType = returnType.lower()\n if returnType in countryValidTypes:\n if returnType == 'plain':\n return getPlainResponse(countryLookup[returnType] + '/' + ipAddress)\n elif returnType == 'plainfull':\n return getPlainResponse(countryLookup[returnType] + '/' + ipAddress)\n else:\n return getJsonResponse(countryLookup[returnType], ipAddress)\n else:\n raise ValueError('\\'returnType\\' does not belong in valid types: ' + str(countryValidTypes))\n else:\n raise TypeError('\\'returnType\\' must be of type \\'str\\'(' + type(returnType).__name__ + ' was given).')\n\n# Gets all available geodata for a specific IP address. 
\ndef getGeoData(ipAddress):\n if isinstance(ipAddress, str):\n return getJsonResponse(geoJson, ipAddress)\n else:\n raise TypeError(\"\\'ipAddress\\' is not an instance of list.\")\n\n# Gets the DNS PTR record of an IP address, if possible.\ndef getPTR(ipAddress):\n if not isinstance(ipAddress, str):\n raise TypeError(\"\\'ipAddress\\' is not an instance of list.\")\n return getPlainResponse(ptrPlain)\n\n# Gets all country information for an IP address.\ndef showCountryDetails(ip=''):\n result = \"\"\n if ip == '':\n ip = getIP('plain')\n countryData = getCountry(ip, 'json')\n result += '-' * dotNumber + '\\n'\n result += (dotNumber//2 - len(countryTitle)//2) * ' ' + countryTitle\n result += '-' * dotNumber + '\\n'\n for key, value in countryData.items():\n cleanKey = key.replace('_',' ').capitalize() + ':'\n cleanKey = cleanKey.ljust(countryPadding, ' ')\n result += cleanKey + str(value) + '\\n'\n result += '-' * dotNumber + '\\n'\n print(result)\n\n# Get all available information provided for a specific IP address (country, location, region, etc.).\ndef showIpDetails(ip=''):\n result = \"\"\n if ip == '':\n ip = getIP('plain')\n country = getCountry(ip, 'plainFull')\n result += '-' * dotNumber + '\\n'\n result += (dotNumber//2 - len(hostTitle)//2) * ' ' + hostTitle\n result += '-' * dotNumber + '\\n'\n result += 'Country: '.ljust(countryPadding,' ') + country + '\\n'\n geoData = getGeoData(ip)\n ptrData = getPTR(ip)\n for key, value in geoData.items():\n cleanKey = key.replace('_',' ').capitalize() + ':'\n cleanKey = cleanKey.ljust(countryPadding,' ')\n result += cleanKey + str(value) + '\\n'\n result += '-' * dotNumber + '\\n'\n print(result)\n","repo_name":"VasilisG/IP-location-tracker","sub_path":"geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"}
+{"seq_id":"42011932485","text":"#see the readme.md file for description and data from typing import Any, Union, Tuple, List\r\n\r\nimport random\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\nshots = 0 #global variable to count the total number of shots\r\n\r\ndef ship_position(ship): #returns a list of tuples giving all coordinates of a ship\r\n ship_pos = [(ship[0], ship[1])]\r\n if ship[2] == True:\r\n for i in range(1, ship[3]):\r\n ship_pos.append((ship[0], ship[1] + i))\r\n elif ship[2] == False:\r\n for i in range(1, ship[3]):\r\n ship_pos.append((ship[0] + i, ship[1]))\r\n return ship_pos\r\n\r\n\r\ndef is_sunk(ship):\r\n if ship[3] == len(ship[4]):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef ship_type(ship):\r\n if ship[3] == 4:\r\n return \"battleship\"\r\n elif ship[3] == 3:\r\n return \"cruiser\"\r\n elif ship[3] == 2:\r\n return \"destroyer\"\r\n else:\r\n return \"submarine\"\r\n\r\n\r\ndef is_open_sea(row, column, fleet):\r\n if (row > 9 or row < 0) or (column > 9 or column < 0):\r\n return False\r\n else:\r\n for ship in fleet:\r\n ship_pos = ship_position(ship)\r\n for pos in ship_pos:\r\n if row == pos[0]:\r\n if column == pos[1] or column == pos[1]+1 or column == pos[1]-1:\r\n return False\r\n if row == pos[0]-1:\r\n if column == pos[1] or column == pos[1] + 1 or column == pos[1] - 1:\r\n return False\r\n if row == pos[0]+1:\r\n if column == pos[1] or column == pos[1]+1 or column == pos[1]-1:\r\n return False\r\n return True\r\n\r\n\r\ndef ok_to_place_ship_at(row, column, horizontal, length, fleet):\r\n hits = set()\r\n tempship = (row, column, horizontal, length, hits)\r\n ok = True\r\n ship_pos = ship_position(tempship)\r\n for pos in ship_pos:\r\n if is_open_sea(pos[0], pos[1], fleet) == False:\r\n ok = False\r\n if ok == True:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef place_ship_at(row, column, horizontal, length, fleet):\r\n hits = set()\r\n new_ship = (row, column, horizontal, length, hits)\r\n fleet.append(new_ship)\r\n\r\n\r\ndef randomly_place_all_ships():\r\n fleet = []\r\n finished = False\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n #battleship\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 4, fleet) == True:\r\n place_ship_at(row,col,horiz,3,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n\r\n #re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n #cruisers\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 3, fleet) == True:\r\n place_ship_at(row,col,horiz,3,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 3, fleet) == True:\r\n place_ship_at(row,col,horiz,3,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n #destroyers\r\n while 
finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 2, fleet) == True:\r\n place_ship_at(row,col,horiz,2,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 2, fleet) == True:\r\n place_ship_at(row,col,horiz,2,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 2, fleet) == True:\r\n place_ship_at(row,col,horiz,2,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n #submarines\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 1, fleet) == True:\r\n place_ship_at(row,col,horiz,1,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 1, fleet) == True:\r\n place_ship_at(row,col,horiz,1,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 1, fleet) == True:\r\n place_ship_at(row,col,horiz,1,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n while finished == False:\r\n if ok_to_place_ship_at(row, col, horiz, 1, fleet) == True:\r\n place_ship_at(row,col,horiz,1,fleet)\r\n finished = True\r\n else:\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n finished = False\r\n # re-randomize the values\r\n row = random.randint(0, 10)\r\n col = random.randint(0, 10)\r\n horiz = random.choice([True, False])\r\n\r\n return fleet\r\n\r\n\r\ndef check_if_hits(row, column, fleet):\r\n hit = False\r\n check_hit = (row, column)\r\n for ship in fleet:\r\n ship_pos = ship_position(ship)\r\n for pos in ship_pos:\r\n if check_hit == pos:\r\n hit = True\r\n if hit == True:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef hit(row, column, fleet):\r\n check_hit = (row, column)\r\n for ship in fleet:\r\n ship_pos = ship_position(ship)\r\n for pos in ship_pos:\r\n if check_hit == pos:\r\n ship[4].add(check_hit)\r\n if ship[3] == 
len(ship[4]):\r\n print(\"You sank a \" + ship_type(ship))\r\n if are_unsunk_ships_left(fleet) == False:\r\n print(\"You win! Total shots = \" + str(shots))\r\n return (fleet, ship)\r\n\r\n\r\n\r\ndef are_unsunk_ships_left(fleet):\r\n sunk_count = 0\r\n for ship in fleet:\r\n if is_sunk(ship) == True:\r\n sunk_count +=1\r\n if len(fleet) == sunk_count:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n#the following 3 functions are all needed for createbuttons()\r\ndef labelx(t, c, r, b):\r\n ttk.Label(b, text=t).grid(column=c, row=r, sticky=W, padx=5)\r\n\r\ndef labely(t, c, r, b):\r\n ttk.Label(b, text=t).grid(column=c, row=r, sticky=W)\r\n\r\ndef shoot(r, c, fleet, b):\r\n global shots\r\n if check_if_hits(r, c, fleet) == True:\r\n print(\"You hit!\")\r\n hit(r, c, fleet) #the hit function then deals with checking if a ship has been sunk and communicating that\r\n b.configure(bg=\"green\")\r\n shots +=1\r\n else:\r\n print(\"You missed\")\r\n b.configure(bg=\"red\")\r\n shots += 1\r\n\r\n\r\n\r\ndef createbuttons(board, tempfleet): #this creates the axis and the button grid\r\n for i in range(0, 10):\r\n labelx(i, i + 2, 1, board)\r\n\r\n for j in range(0, 10):\r\n labely(j, 1, j + 2, board)\r\n\r\n for i in range(2, 12):\r\n for j in range(2, 12):\r\n btn = Button(board)\r\n btn.config(width=3, command=lambda r=i-2, c=j-2, fleet=tempfleet, b=btn: shoot(r, c, fleet, b)) #this feeds the button corrdinates into shoot() when the button is clicked\r\n btn.grid(column=i, row=j)\r\n\r\n\r\ndef quitter(): #this is needed for the quit button\r\n sys.exit()\r\n\r\ndef main():\r\n\r\n current_fleet = randomly_place_all_ships()\r\n\r\n #the following sets up the board\r\n root = Tk()\r\n title = ttk.Label(root)\r\n title.configure(text=\"Battleships\", anchor=\"center\")\r\n title.grid(column = 1, row = 1)\r\n subtitle = ttk.Label(root)\r\n subtitle.configure(text=\"Click on a square to shoot!\", anchor=\"center\")\r\n subtitle.grid(column=1, row=2)\r\n\r\n board = ttk.Frame(root, padding=\"5 5 5 5\")\r\n board.grid(column=1, row=3, sticky=(N, W, E, S))\r\n root.columnconfigure(0, weight=1)\r\n root.rowconfigure(0, weight=1)\r\n\r\n #each square is a button, created using loops\r\n createbuttons(board, current_fleet)\r\n\r\n #this creates the lower portion of the board and the quit button\r\n scores = ttk.Frame(root, padding=\"5 5 5 5\")\r\n scores.grid(column=1, row=4, sticky=(W, E))\r\n quit_button = Button(scores)\r\n quit_button.configure(text=\"Quit\", bg=\"red\", command=quitter)\r\n quit_button.grid(column=1, row=1)\r\n\r\n\r\n root.mainloop()\r\n\r\nif __name__ == '__main__': #keep this in\r\n main()\r\n","repo_name":"franc17/battleships","sub_path":"battleships.py","file_name":"battleships.py","file_ext":"py","file_size_in_byte":10712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74426490174","text":"import cv2\nimport pandas as pd\nfrom .ops import LabelRatio2Coord, clipping_coordinate\nfrom data_process.file_utils.basic import TraverseDir, PathHandler\n\n# cv2.putText(影像, 文字, 座標, 字型, 大小, 顏色, 線條寬度, 線條種類)\n\ndef PlotBox(img, bbox, info=None):\n # Color set\n color_set = {'0': (0, 255, 255), # yellow\n '1': (255, 255, 0), # blue\n '2': (0, 255, 0)} # green\n h, w, _ = img.shape\n if 'x1' in bbox and bbox['x1'] < 1:\n bbox = LabelRatio2Coord(img, bbox)\n if bbox is False:\n return False\n text_coord = clipping_coordinate(img, [bbox['x1'] - w*0.01, bbox['y1'] - h*0.01])\n if info is not None and 'label' in info:\n print('plot', text_coord, img.shape)\n cv2.putText(img, str(bbox['label']),\\\n tuple(text_coord), cv2.FONT_HERSHEY_SIMPLEX,\\\n w*0.002, (0, 255, 255), 2, cv2.LINE_AA)\n cv2.rectangle(img, (bbox['x1'], bbox['y1']),\\\n (bbox['x2'], bbox['y2']), color_set[str(bbox['label'])], 2)\n else:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']),\\\n (bbox['x2'], bbox['y2']), color_set['0'], 2)\n\n\ndef ReadYoloLabel(label_path, bbox_format):\n \"\"\"\n bbox_format: 'xyxy' or 'xywh'\n\n returns:\n bbox_list : list of bbox dicts\n *** ratio\n *** clipping\n \"\"\"\n bbox_list = []\n f = open(label_path, 'r')\n for i in f:\n i = i.split(' ')\n bbox = dict()\n label = int(i[0])\n bbox['label'] = label\n if bbox_format == 'xyxy':\n x_center = float(i[1])\n y_center = float(i[2])\n w_box = float(i[3])\n h_box = float(i[4])\n x1 = x_center-w_box/2\n x2 = x_center+w_box/2\n y1 = y_center-h_box/2\n y2 = y_center+h_box/2\n bbox['x_center'] = x_center\n bbox['y_center'] = y_center\n bbox['w_box'] = w_box\n bbox['h_box'] = h_box\n bbox['x1'] = x1\n bbox['x2'] = x2\n bbox['y1'] = y1\n bbox['y2'] = y2\n elif bbox_format == 'xywh':\n x_center = float(i[1])\n y_center = float(i[2])\n w_box = float(i[3])\n h_box = float(i[4])\n bbox['x_center'] = x_center\n bbox['y_center'] = y_center\n bbox['w_box'] = w_box\n bbox['h_box'] = h_box\n bbox_list.append(bbox)\n return bbox_list\n\n\ndef WriteYoloLabel(label_path, bbox_list):\n f = open(label_path, 'w')\n for bbox in bbox_list:\n f.write('%d %f %f %f %f\\n'%(bbox['label'],\\\n bbox['x_center'],\\\n bbox['y_center'],\\\n bbox['w_box'],\\\n bbox['h_box']))\n f.close()\n return True\n\n\ndef WriteYoloLabelListFile(label_file_list):\n pass\n\n\ndef ReadGTFile(gt_file_path, answer_column):\n answer_dict = dict()\n df = pd.read_csv(gt_file_path)\n for i, lpnumber in df.iterrows():\n if isinstance(lpnumber[answer_column], str):\n ans = lpnumber[answer_column].strip()\n answer_dict[ans] = 0\n return answer_dict\n\n\ndef ReadBBoxPredictFile(file_path):\n \"\"\"\n Args:\n file path : str\n\n File format:\n image_name:\n (percentage) (abs)\n ,,,,,\n ...\n end\n\n example:\n image_name:a.jpg\n full,98%,19,30,37,50\n ...\n end\n\n Returns:\n imgs_bbox : dict\n\n {img_name1: [bbox1, bbox2, ...],\n img_name2: [bbox1, bbox2, ...],\n ...\n }\n \"\"\"\n f = open(file_path, 'r')\n imgs_bbox = {}\n img_bbox = []\n imgs_name = []\n for l in f:\n if 'image_name:' in l or 'end' in l:\n if len(img_bbox) != 0:\n img_bbox.sort(key = lambda x: x['conf'], reverse=True)\n imgs_bbox[l] = img_bbox.copy()\n img_bbox = []\n # record image name\n img_name = l.split(':')[-1]\n imgs_name.append(img_name)\n else:\n # Read bboxes!\n l = l.split(',')\n bbox = dict()\n bbox['label'] = l[0]\n bbox['conf'] = float(l[1].split('%')[0])\n bbox['x1'] = int(l[2])\n bbox['y1'] = int(l[3])\n bbox['x2'] = int(l[4])\n bbox['y2'] = int(l[5])\n\n img_bbox.append(bbox)\n return 
imgs_bbox\n\n\ndef ReadBBoxYoloLabels(dir_path):\n img_file_list = TraverseDir(dir_path, '.jpg', check_exist='txt')\n imgs_bbox = {}\n for img_path in img_file_list:\n label_path = PathHandler(img_path, 'find_txt')\n img = cv2.imread(img_path)\n bboxes = ReadYoloLabel(label_path, 'xyxy')\n abs_bbox_list = []\n for bbox in bboxes:\n bbox = LabelRatio2Coord(img, bbox)\n abs_bbox_list.append(bbox)\n\n imgs_bbox[img_path] = abs_bbox_list.copy()\n return imgs_bbox\n\n\ndef ReadLandmarkFile(file_path, w, h):\n f = open(file_path, 'r')\n preds = []\n for line in f:\n l = line.split(',')\n x = float(l[0]) * w\n y = float(l[1]) * h\n preds.append((x, y))\n if len(preds) > 0:\n return [preds]\n return None\n\n\ndef WriteLandmarkFile(Landmarks, file_path, w, h):\n f = open(file_path, 'w')\n if Landmarks == None:\n f.close()\n return\n for i in range(1, 68+1):\n landmark = Landmarks[i]\n x_ratio = max(min(landmark.x/w, 1.), 0.)\n y_ratio = max(min(landmark.y/h, 1.), 0.)\n # print(x_ratio, y_ratio)\n f.write(str(x_ratio)+\",\"+str(y_ratio)+\"\\n\")\n f.close()\n","repo_name":"heathcliffYang/data_process","sub_path":"src/data_process/label_utils/label_io.py","file_name":"label_io.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71664801536","text":"# https://www.hackerrank.com/challenges/ctci-balanced-brackets/problem\n\n# Given n strings of brackets, determine whether each sequence of brackets is balanced.\n# If a string is balanced, print YES on a new line; otherwise, print NO on a new line.\n\n# Input Format\n# The first line contains a single integer n denoting the number of strings.\n# Each line i of the n subsequent lines consists of a single string s denoting a sequence of brackets.\n\n# Output Format\n# For each string, print whether or not the string of brackets is balanced on a new line.\n# If the brackets are balanced, print YES; otherwise, print NO.\n\n\n# https://codereview.stackexchange.com/questions/180567/checking-for-balanced-brackets-in-python\ndef is_matched(expression):\n opening = tuple('({[')\n closing = tuple(')}]')\n mapping = dict(zip(opening, closing))\n queue = []\n\n for letter in expression:\n if letter in opening:\n queue.append(mapping[letter])\n elif letter in closing:\n if not queue or letter != queue.pop():\n return False\n return not queue\n\nt = int(input().strip())\nfor a0 in range(t):\n expression = input().strip()\n if is_matched(expression) == True:\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"ck-unifr/hackerrank-cracking-the-code-interview","sub_path":"stacks-balanced-brackets.py","file_name":"stacks-balanced-brackets.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27961438406","text":"from starlette.applications import Starlette\nfrom starlette.responses import JSONResponse\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.templating import Jinja2Templates\nfrom starlette.routing import Route\nimport uvicorn\nimport os\nimport sys\nimport logging\nfrom random import uniform\nimport run_generation\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# Needed to avoid cross-domain issues\nresponse_header = {\n 'Access-Control-Allow-Origin': '*'\n}\n\nEOG_TOKEN = '<|endofgenre|>'\nEOT_TOKEN = '<|endoftitle|>'\nEOS_TOKEN = '<|endoftext|>'\n\ndef generate_text(params):\n \"\"\"Generate text using transformers.\"\"\"\n prompt = ''\n if not params['genre'] and not params['title'] and not params['prefix']:\n prompt += EOS_TOKEN\n if params['genre']:\n prompt += params['genre'] + EOG_TOKEN\n if params['title']:\n prompt += params['title'] + EOT_TOKEN\n if params['prefix']:\n prompt += params['prefix']\n text = run_generation.main([\n '--model_type=gpt2',\n '--model_name_or_path=app/output',\n f\"--prompt={prompt}\" if prompt else '--prompt=\"\"',\n f'--temperature={float(params[\"temp\"]) if params[\"temp\"] else uniform(0.7, 1)}',\n f'--top_p={float(params[\"top_p\"]) if params[\"top_p\"] else 0}',\n '--num_samples=1',\n '--length=256',\n f'--stop_token={EOS_TOKEN}'\n ])\n return prompt+text\n\ndef parse_text(text):\n \"\"\"Parse text.\"\"\"\n logging.info(text)\n if len(text.split(EOS_TOKEN)[0]) > 0:\n main = text.split(EOS_TOKEN)[0]\n else:\n # eos_token can be at the beginning\n main = text.split(EOS_TOKEN)[1]\n if EOG_TOKEN in main:\n genre = main.split(EOG_TOKEN)[0]\n main = main.split(EOG_TOKEN)[1]\n else:\n genre = ''\n if EOT_TOKEN in main:\n title = main.split(EOT_TOKEN)[-2]\n main = main.split(EOT_TOKEN)[-1]\n else:\n title = ''\n plot = '.'.join(main.split('.')[:-1])+'.'\n return {\n 'genre': genre.strip(),\n 'title': title.strip(),\n 'plot': plot.strip()\n }\n\nasync def generate(request):\n \"\"\"Generate text and return the parsed result as a dict.\"\"\"\n if request.method == 'GET':\n params = request.query_params\n elif request.method == 'POST':\n params = await request.json()\n elif request.method == 'HEAD':\n return JSONResponse({'text': ''}, headers=response_header)\n logging.info(params)\n return JSONResponse(parse_text(generate_text(params)), headers=response_header)\n\nasync def homepage(request):\n \"\"\"Return HTML homepage.\"\"\"\n return templates.TemplateResponse('index.html', {'request': request})\n\nroutes = [\n Route(\"/\", endpoint=homepage),\n Route(\"/generate\", endpoint=generate, methods=[\"GET\", \"POST\"]),\n]\n\napp = Starlette(routes=routes, debug=True)\napp.mount('/static', StaticFiles(directory='app/static'))\ntemplates = Jinja2Templates(directory='app/templates')\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get('PORT', 5000)), log_level=\"info\")","repo_name":"polakowo/textai","sub_path":"MoviePlots/text_generation/with-titles/app/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"}
+{"seq_id":"26064560717","text":"import sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom utils import EncryptionUtility\n\ndef test_encryption_decryption():\n original_message = \"Secret Message\"\n key = EncryptionUtility.generate_key()\n encrypted_message = EncryptionUtility.encrypt_message(original_message, key)\n decrypted_message = EncryptionUtility.decrypt_message(encrypted_message, key)\n\n assert original_message == decrypted_message","repo_name":"Cdaprod/cda.CredKeeper","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1934243896","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass LeNet(nn.Module):\n\tdef __init__(self):\n\t\tsuper(LeNet, self).__init__()\n\t\tself.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n\t\tself.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n\t\tself.conv2_drop = nn.Dropout2d()\n\t\tself.fc1 = nn.Linear(320, 50)\n\t\tself.fc2 = nn.Linear(50, 10)\n\n\tdef forward(self, x):\n\t\tx = F.relu(F.max_pool2d(self.conv1(x), 2))\n\t\tx = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n\t\tx = x.view(-1, 320)\n\t\tx = F.relu(self.fc1(x))\n\t\tx = F.dropout(x, training=self.training)\n\t\tx = self.fc2(x)\n\t\treturn F.log_softmax(x)\n\t\n\tdef name(self):\n\t\treturn 'LeNet'\n\nclass MLPNet(nn.Module):\n def __init__(self):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(28*28, 500)\n self.fc2 = nn.Linear(500, 256)\n self.fc3 = nn.Linear(256, 10)\n self.ceriation = nn.CrossEntropyLoss()\n def forward(self, x, target):\n x = x.view(-1, 28*28)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n loss = self.ceriation(x, target)\n return x, loss\n def name(self):\n return 'MLPNet'","repo_name":"jackyko1991/MNIST-pytorch","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"5029301295","text":"''' *****************************************************************************\n * Name: Arbaaz Khan\n * Language: python3 \n *\n * Description: Implementation of maximum heap datastructure.\n *\n * Written: 8/1/2018\n * Last updated: 8/1/2018\n * \n * TIME COMPLEXITIES:\n * -----------------------------------------------------------------\n * | Operations | WorstCase | AverageCase | BestCase |\n * -----------------------------------------------------------------\n * | insertion | bigO(-) | bigO(-) | bigO(-) |\n * -----------------------------------------------------------------\n * | deletion | bigO(-) | bigO(-) | bigO(-) |\n * -----------------------------------------------------------------\n * | traversal | bigO(-) | bigO(-) | bigO(-) |\n * -----------------------------------------------------------------\n * | searching | bigO(-) | bigO(-) | bigO(-) |\n * -----------------------------------------------------------------\n * | bubbleUp | bigO(-) | bigO(-) | bigO(-) |\n * -----------------------------------------------------------------\n * | bubbleDown | bigO(-) | bigO(-) | bigO(-) |\n * -----------------------------------------------------------------\n * | findMax | bigO(1) | bigO(1) | bigO(1) |\n * -----------------------------------------------------------------\n *\n * % python maxheap.py\n *\n***************************************************************************** '''\nclass Heap(object):\n HEAP_SIZE = 10\n\n def __init__(self):\n self.heap = [0]*self.HEAP_SIZE\n self.current_position = -1\n\n def insert(self,item):\n if self.isFull():\n print(\"Heap is full!\")\n else:\n self.current_position += 1\n self.heap[self.current_position] = item\n self.bubbleUp(self.current_position)\n\n def isFull(self):\n if self.current_position+1 == self.HEAP_SIZE:\n return True\n else:\n return False\n def isEmpty(self):\n if self.current_position == -1:\n return True\n else:\n return False\n\n def bubbleUp(self,pos): \n ### Ensures that the new item inserted maintains the rule of max-heap. ###\n # pos holds the index of the item whose position has to be checked that whether it follows the rule of the max heap\n # the item in question is compared with it's parent (pos-1/2), since in max heap the parent is greater than it's children\n # the item has to be swapped with it's parent if is found to be greater than it's parent. After swapping the item moves to\n # it's parent's place, now it's position is again checked by comparing it with it's parent. For this the pos is updated as \n # the item has moved to it's parent's place. Hence the parent index is also updated.\n if pos < 0: #Don't perform bubbleup if index becomes negative\n return\n parent_index = (pos-1)//2 #floor int value is used\n while parent_index >= 0 and self.heap[pos] >= self.heap[parent_index]:\n temp = self.heap[parent_index]\n self.heap[parent_index] = self.heap[pos]\n self.heap[pos] = temp\n pos = parent_index\n parent_index = (pos-1)//2\n\n def findMax(self):\n if not self.isEmpty():\n return self.heap[0]\n else:\n print(\"Heap is empty!\")\n\n def heapSort(self):\n # It works by putting the largest item in the last node in each iteration. 
\n # It swaps the root node with the last node\n # \n # \n for i in range(self.current_position+1):\n temp = self.heap[0]\n self.heap[0] = self.heap[self.current_position-i]\n self.heap[self.current_position-i] = temp\n self.bubbleDown(self.current_position-i-1)\n \n def bubbleDown(self,pos):\n root_index = 0\n if pos<0:\n return\n while root_index < pos:\n if((2*root_index+1 <= pos) and (2*root_index+2 <= pos)): \n if (self.heap[root_index] < self.heap[2*root_index+1]) and (self.heap[root_index] < self.heap[2*root_index+2]):\n if self.heap[2*root_index+1] > self.heap[2*root_index+2]:\n temp = self.heap[root_index]\n self.heap[root_index] = self.heap[2*root_index+1]\n self.heap[2*root_index+1] = temp\n root_index = 2*root_index+1\n else:\n temp = self.heap[root_index]\n self.heap[root_index] = self.heap[2*root_index+2]\n self.heap[2*root_index+2] = temp\n root_index = 2*root_index+2\n elif (self.heap[root_index] < self.heap[2*root_index+1]):\n temp = self.heap[root_index]\n self.heap[root_index] = self.heap[2*root_index+1]\n self.heap[2*root_index+1] = temp\n root_index = 2*root_index+1\n elif (self.heap[root_index] < self.heap[2*root_index+2]):\n temp = self.heap[root_index]\n self.heap[root_index] = self.heap[2*root_index+2]\n self.heap[2*root_index+2] = temp\n root_index = 2*root_index+2\n else:\n break\n elif (2*root_index+1 <= pos):\n if self.heap[root_index] < self.heap[2*root_index+1]:\n temp = self.heap[root_index]\n self.heap[root_index] = self.heap[2*root_index+1]\n self.heap[2*root_index+1] = temp\n root_index = 2*root_index+1\n else:\n break\n else:\n break\n\n def show(self):\n for i in range(self.current_position+1):\n print(self.heap[i])\n\nheap = Heap()\nheap.insert(5)\nheap.insert(4)\nheap.insert(10)\nheap.insert(3)\nheap.insert(2)\nheap.insert(100)\nheap.insert(12)\nheap.insert(40)\nheap.show()\nprint(\"Max value = \",heap.findMax())\nheap.heapSort()\nprint(\"After heapsort\")\nheap.show()\n","repo_name":"arzzon/PythonLearning","sub_path":"DataStructures/Heap/MaxHeap/maxheap_old_first_approach.py","file_name":"maxheap_old_first_approach.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"9113060347","text":"import numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import f1_score as f1\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn import svm\nimport warnings\nfrom sklearn.model_selection import GridSearchCV\nimport lyp_preprocessing as lyp\nimport kent\nimport util\nfrom sklearn.tree import DecisionTreeClassifier\nimport collections\nfrom gensim.models import KeyedVectors\n#from xgboost import XGBClassifier\nimport mord\nimport re\n\ndef get_para(view, like, dislike, comment):\n \"\"\"\n :param view: number of view, NumPy array shape (n_examples, 1)\n :param like: number of like, NumPy array shape (n_examples, 1)\n :param dislike: number dislike, NumPy array shape (n_examples, 1)\n :param comment: number of comment, NumPy array shape (n_examples, 1)\n :return: parameter, NumPy array shape (n_examples, 1), float\n \"\"\"\n return (like - 1.5 * dislike) * comment / view\n\ndef label(view, parameter, view_bar, para_bar):\n \"\"\"\n Args:\n view: number of view, NumPy array shape (n_examples, 1)\n parameter: the enmotional trend of the reflects from viewers, NumPy array shape (n_examples, 1)\n view_bar: number dislike, NumPy array shape (n_examples, 1)\n para_bar: bars of parameters, a list (2,)\n\n Returns:\n label, NumPy array shape (n_examples, 1), int\n 0: Not hot\n 1: Negative, dislike >> like\n 2: Controdictory, dislike ~= like\n 3: Positive, like >> dislike\n \"\"\"\n label = np.zeros(np.shape(view))\n n = len(view)\n [bar1, bar2] = para_bar\n for i in range(n):\n if view[i] < view_bar:\n label[i] = 0\n elif parameter[i] < bar1:\n label[i] = 1\n elif parameter[i] < bar2:\n label[i] = 2\n else:\n label[i] = 3\n return label\n\n\ndef loadGolveModel(glove_file):\n f = open(glove_file, 'r', encoding='UTF-8')\n model = {}\n for line in f:\n splitline = line.split()\n word = splitline[0].replace(\"'\", \"\")\n embedding = np.array([float(val) for val in splitline[1: ]])\n model[word] = embedding\n print(\"Done.\", len(model), \"words loaded!\")\n return model\n\n\ndef load_index_dic(glove_file):\n f = open(glove_file, 'r', encoding='UTF-8')\n dic = []\n for line in f:\n splitline = line.split()\n dic.append(splitline[0])\n f.close()\n return dic\n\n\ndef glove_embedding_one_string(string, dictionary):\n words = string.lower().split()\n new_words = [re.sub('[{}!#?,.:\";@$%^&*()_+-=|[]:;\">/?<,.~]', '', word) for word in words]\n temp = [dictionary[i] for i in new_words if i in dictionary.keys()]\n temp = np.array(temp)\n return np.sum(temp, axis=0)\n\n\ndef glove_embedding(list, dictionary):\n n, t = len(list), 0\n l = dictionary['a'].shape[0]\n temp = np.zeros((n, l))\n for i in list:\n temp[t] = glove_embedding_one_string(i, dictionary)\n t += 1\n return np.array(temp)\n\n\ndef get_token(string, header, k):\n \"\"\"\n Word embedding for token\n Function: remove the punctuation, lowercases words, and covert the words to sequences of integers\n :param string: A list of word, lenth: n\n header: type of string\n k: size of dictionary\n :return: A list of integers, representing the word\n Site: https://towardsdatascience.com/recurrent-neural-networks-by-example-in-python-ffd204f99470\n \"\"\"\n if header == 'tags':\n tokenizer = Tokenizer(num_words=k, # Word with top k frequency\n filters='!@#$%^&*()_+-=\\|{}[]:;\">/?<,.~',\n lower=True, 
split='|')\n else:\n tokenizer = Tokenizer(num_words=k,\n filters='!@#$%^&*()_+-=\\|{}[]:;\">/?<,.~',\n lower=True)\n\n tokenizer.fit_on_texts(string)\n sequences = tokenizer.texts_to_sequences(string)\n return sequences\n\ndef one_hot(string, k):\n \"\"\"\n One hot word embedding\n :param string: A list of strings\n k: size of dictionary\n :return: A matrix of integers reflecting the string\n dim: n-examples x m-size of dictionary\n Type: np.array\n \"\"\"\n t = Tokenizer(num_words=k,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True, split=' ')\n t.fit_on_texts(string)\n encoded_docs = t.texts_to_matrix(string, mode='binary')\n return np.array(encoded_docs)\n\n\ndef one_hot_test(train, test, k):\n t = Tokenizer(num_words=k,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True, split=' ')\n t.fit_on_texts(train)\n encoded_docs = t.texts_to_matrix(test, mode='binary')\n return np.array(encoded_docs)\n\n\ndef word_embedding(csv_path, dictionary):\n \"\"\"\n Get the structured input data\n :param csv_path: The trina,valid, and test test path, .csv file name\n :param size_of_dictionary: a int\n :return: structured title, tag, description, list type, each with a lenth of dictionary,\n category as integer, publish_time as time\n Type: np.array\n \"\"\"\n title, trending_date, publish_time, category, tags, description, duration = kent.get_feature(csv_path)\n glove_title = glove_embedding(title, dictionary)\n glove_description = glove_embedding(description, dictionary)\n glove_tags = glove_embedding(tags, dictionary)\n time = lyp.get_time_gap(publish_time, trending_date)\n category = util.add_intercept_fn(np.reshape(category, (len(category), 1)))\n time = time.reshape((len(time), 1))\n duration = duration.reshape((len(duration), 1))\n return glove_title, time, category, glove_tags, glove_description, duration\n\n\ndef word_embedding_test(train_path, test_path, size_of_dictionary, size_of_dictionary_description):\n train_title, train_trending_date, train_publish_time, train_category, train_tags, train_description = kent.get_feature(train_path)\n test_title, test_trending_date, test_publish_time, test_category, test_tags, test_descriotion = kent.get_feature(test_path)\n one_hot_title = util.add_intercept_fn(one_hot_test(train_title, test_title,size_of_dictionary))\n one_hot_description = util.add_intercept_fn(one_hot_test(train_description, test_descriotion, size_of_dictionary_description))\n one_hot_tags = util.add_intercept_fn(one_hot_test(train_tags, test_tags, size_of_dictionary))\n time = lyp.get_time_gap(test_publish_time, test_trending_date)\n time = util.add_intercept_fn(np.reshape(time, (len(time), 1)))\n category = util.add_intercept_fn(np.reshape(test_category, (len(test_category), 1)))\n return one_hot_title, time, category, one_hot_tags, one_hot_description\n\n\ndef separa_test(csv):\n \"\"\"\n Seprarte the test data by publish date\n :return: three set, containing the index of the video in test set\n first set: videos trended in the train or valid set\n third set: videos published and trended in the test set\n second set: rest of the videos\n \"\"\"\n new1 = []\n new3 = []\n publish_time = kent.get_time(csv)\n test_title = lyp.get_string_header(csv, 'title')\n train_title = lyp.get_string_header(csv, 'title')\n valid_title = lyp.get_string_header(csv, 'title')\n title = train_title + valid_title\n for i in range(len(publish_time)):\n pt_year = int(publish_time[i][0:4])\n pt_month = int(publish_time[i][5:7])\n pt_date = int(publish_time[i][8:10])\n if 
pt_year < 2018 and test_title[i] in title:\n new1 += [i]\n elif pt_year == 2018 and pt_month < 4 and test_title[i] in title:\n new1 += [i]\n elif pt_year == 2018 and pt_month == 4 and pt_date < 14 and test_title[i] in title:\n new1 += [i]\n elif pt_year == 2018 and pt_month > 4:\n new3 += [i]\n elif pt_year == 2018 and pt_month == 4 and pt_date >= 14:\n new3 += [i]\n return new1, new3\n\n\ndef accurancy(y_label, prediction):\n \"\"\"\n Calculate the accurancy\n :param y_label: a list of true label\n :param prediction: a list of predicted label\n :return: the accurancy, float\n \"\"\"\n n = len(y_label)\n result = 0\n new = np.zeros((4, ))\n for i in range(n):\n if y_label[i] == prediction[i]:\n result += 1\n t = int(y_label[i])\n new[t] += 1\n print('The accurancy count in each type', new)\n print('The count of each type:', collections.Counter(prediction))\n return result / n\n\n\ndef first_layer(fit_type, train_label, valid_type):\n \"\"\"\n :param fit_type: Description, Title, Tags etc. a list\n :param train_label: a list of train label\n :param valid_type: a list of valid label\n :return: an array of the probability\n \"\"\"\n y_train = train_label\n clf = SGDClassifier(alpha=0.2, loss=\"modified_huber\", penalty='l2', tol=1e-6, max_iter=10000, fit_intercept=False)\n clf.fit(fit_type, y_train)\n predict = clf._predict_proba(valid_type)\n train_probability = clf._predict_proba(fit_type)\n return predict, train_probability\n\n\ndef GBM_model(train, train_label, test, test_label):\n \"\"\"\n\n :param train: n x factor array, representing all factors in array\n :param test: n x factor array, representing all factors in array\n :param label_train: n x 1 array, representing the label of train\n :param label_test: n x 1 array, representing the label of test\n :return: the prediction result of GBM model\n \"\"\"\n model = GradientBoostingClassifier(max_depth=5, tol=0.0001, n_estimators=100)\n eval_set = [(train, train_label), (test, test_label)]\n model.fit(train, train_label, eval_metric=[\"merror\", \"mlogloss\"], eval_set=eval_set, verbose=True)\n print('Finish GBM fit')\n prediction = model.predict(test)\n print('Finish GBM prediction')\n return prediction\n\n\ndef GBM_multi_model(train, train_label, test):\n \"\"\"\n\n :param train: n x factor array, representing all factors in array\n :param test: n x factor array, representing all factors in array\n :param label_train: n x 1 array, representing the label of train\n :param label_test: n x 1 array, representing the label of test\n :return: the prediction result of GBM model\n \"\"\"\n # w_array = np.array([0.7] * train_label.shape[0])\n # w_array[train_label == 0] = 0.9\n # w_array[train_label == 1] = 8\n # w_array[train_label == 3] = 1.7\n model = GradientBoostingClassifier(max_depth=8, tol=0.0001, n_estimators=100)\n model.fit(train, train_label)\n print('Finish GBM fit')\n prediction = model.predict(test)\n print('Finish GBM prediction')\n return prediction\n\ndef random_forest(train, train_label, test):\n clf = RandomForestClassifier(random_state=27 ,max_features=None, n_estimators=300,\n class_weight={0:2.92, 1:65, 2:1, 3:7.4})\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\ndef random_forest_multi(train, train_label, test):\n clf = RandomForestClassifier(random_state=27 ,max_features=None, n_estimators=300)\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\n\ndef neuron_network(train, label_train, test):\n clf = MLPClassifier(solver='adam', 
activation='logistic', alpha=0.4, tol=1e-5,\n hidden_layer_sizes=(100, 20), max_iter=500)\n clf.fit(train, label_train)\n prediction = clf.predict(test)\n return prediction\n\n\ndef vote(fun1, fun2, fun3, train, train_label, valid):\n clf = VotingClassifier(estimators=[('fun1', fun1), ('fun2', fun2), ('fun3', fun3)], voting='hard')\n clf.fit(train, train_label)\n prediction = clf.predict(valid)\n return prediction\n\n\ndef svm_prediction(train, train_label, test):\n clf = svm.SVC(C=1.0, cache_size=200, coef0=1.0,\n decision_function_shape='ovo', degree=5, gamma='scale', kernel='poly',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=True)\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\n#\n# def mord_predict(train, train_label, test):\n# clf = mord.MulticlassLogistic()\n# clf.fit(train, train_label)\n# prediction = clf.predict(test)\n# return prediction\n#\n# def xgb_prediction(train, train_label, test):\n# clf = XGBClassifier(booster = \"gbtree\") #objective = reg:squaredlogerror\n# clf.fit(train, train_label)\n# return clf.predict(test)\n\ndef tree(train, train_label, test, i):\n clf = DecisionTreeClassifier(random_state=i, class_weight={0:5, 1:5, 2:0.05, 3:1}) #, class_weight={0:1, 1:1, 2:1, 3:1}\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\ndef tree_multi(train, train_label, test):\n clf = DecisionTreeClassifier() #, class_weight={0:1, 1:1, 2:1, 3:1}\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\n\ndef relable(label, target_label):\n \"\"\"\n change the multiple class into binary class\n :param label: the array of the original label\n :param target_label:\n :return: an array of the label, 1 means label is the targeted one and 0 is other labels\n \"\"\"\n return np.array([int(i == target_label) for i in label])\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('Model Performance')\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n return accuracy\n\n\ndef sgdc(train, train_label, test, random):\n clf = SGDClassifier(random_state=random, alpha=0.2, loss=\"modified_huber\", penalty='l2', tol=1e-6, max_iter=10000, fit_intercept=False)\n clf.fit(train, train_label)\n predict = clf.predict(test)\n return predict\n\ndef sgdc_multi(train, train_label, test):\n clf = SGDClassifier(alpha=7.5, loss=\"modified_huber\", penalty='l2', tol=1e-6, fit_intercept=False)\n clf.fit(train, train_label)\n predict = clf.predict(test)\n return predict\n\n\ndef delete_feature(train, function, train_label, test, test_label, name, random):\n \"\"\"\n :param list: list of separate feature\n :param function: the training model\n :return:\n \"\"\"\n def g(train, test, name):\n # Get the f1 score\n n = len(train)\n f1_score = np.zeros((n,))\n temp_name = name\n c = []\n if n == 1:\n # print('The last class:', name[0])\n return None\n for i in range(n):\n temp_train, temp_test = train.copy(), test.copy()\n temp_train.pop(i)\n temp_test.pop(i)\n new_train = temp_train[0]\n new_test = temp_test[0]\n if n - 2 > 0:\n for j in range(n - 2):\n new_train = np.hstack((new_train, temp_train[j + 1]))\n new_test = np.hstack((new_test, temp_test[j + 1]))\n prediction = function(new_train, train_label, new_test, random)\n c += 
[collections.Counter(prediction)]\n warnings.filterwarnings('ignore')\n f1_score[i] = f1(test_label, prediction, average='weighted')\n # print(\"the f1 score with class\", name[i], \"excluded:\", f1_score[i])\n remain_class = np.argmax(f1_score)\n del name[remain_class]\n train.pop(remain_class)\n test.pop(remain_class)\n print('The remaining class is:', temp_name)\n print('the class predicted is:', c[remain_class])\n return delete_feature(train, function, train_label, test, test_label, name, random)\n\n return g(train, test, name)\n","repo_name":"No21-lqz/CS229AAA","sub_path":"LIQIAN.py","file_name":"LIQIAN.py","file_ext":"py","file_size_in_byte":15849,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
+{"seq_id":"71397246970","text":"from django.contrib.auth import authenticate\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom api.models import Advisor, Booking, User\nfrom api.serializer import AdvisorSerializer, AdviserViewSerializer\n\n\nclass AdvisorView(viewsets.ModelViewSet):\n queryset = Advisor.objects.all()\n serializer_class = AdvisorSerializer\n\n\n@api_view(['GET'])\ndef advisor_list(request, user_id):\n try:\n User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return Response(\"User doesn't exist\", status=status.HTTP_404_NOT_FOUND)\n\n adv_serializer = AdviserViewSerializer(Advisor.objects.all(), many=True)\n print(adv_serializer.data)\n return Response(adv_serializer.data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef book_advisor(request, user_id, advisor_id):\n try:\n User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return Response(\"User doesn't exist\", status=status.HTTP_404_NOT_FOUND)\n\n try:\n adv = Advisor.objects.get(pk=advisor_id)\n except Advisor.DoesNotExist:\n return Response('Advisor not found', status=status.HTTP_404_NOT_FOUND)\n\n booking = Booking.objects.create(user_id=user_id, advisor_id=advisor_id, date=request.POST.get('date'))\n booking.save()\n\n return Response(status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef get_bookings(request, user_id):\n try:\n user = User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return Response(\"User doesn't exist\", status=status.HTTP_404_NOT_FOUND)\n\n bookings = Booking.objects.filter(user=user)\n data = []\n for booking in bookings:\n adv = Advisor.objects.get(id=booking.id)\n data.append(({\n 'advisor_name': adv.name,\n 'advisor_profile_pic': adv.photo,\n 'advisor_id': adv.id,\n 'booking_time': booking.date,\n 'booking_id': booking.id\n }))\n return Response(data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef register(request):\n try:\n user = User.objects.create_user(username=request.POST.get('email'), name=request.POST.get('name'),\n password=request.POST.get('password'), email=request.POST.get('email'))\n except Exception as e:\n return Response(\"Fields missing\", status=status.HTTP_400_BAD_REQUEST)\n\n token, id = user.save()\n data = {\n \"token\": token,\n \"id\": id\n }\n return Response(data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef login(request):\n user = authenticate(username=request.POST.get('email'),\n password=request.POST.get('password'))\n if user is None:\n return Response(\"Invalid Login\", status=status.HTTP_400_BAD_REQUEST)\n\n token = user.jwt_token\n id = user.id\n data = {\n \"token\": token,\n \"id\": id\n }\n return Response(data, status=status.HTTP_200_OK)\n","repo_name":"ayanshaikh18/AdvisoryNetwork","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27761132837","text":"# import RobotRaconteur as RR\n# RRN=RR.RobotRaconteurNode.s\n# RRN.SetLogLevel(RR.LogLevel_Trace)\n# node_setup=RR.CommandLineConfigParser(0)\n# #node_setup=RR.ClientNodeSetup(argv=[\"--robotraconteur-tcp-enable=false\"])\n# #browser_transport = RR.BrowserWebSocketTransport()\n# #RRN.RegisterTransport(browser_transport)\n# print(\"done\")\n\n\nfrom js import print_div\nfrom RobotRaconteur.Client import *\n\nprint_div(\"Begin test_transport\")\n\nc1 = None\n\ndef i32_huge_cb(i32_huge, err):\n print_div (\"i32_huge: \" + str(i32_huge))\n print_div (\"i32_huge error: \" + str(err))\n\ndef d1_cb(d1, err):\n print_div (\"d1: \" + str(d1))\n print_div (\"d1 error: \" + str(err))\n c1.async_get_i32_huge(i32_huge_cb)\n\ndef connect_cb(c, err):\n global c1\n c1 = c\n print_div(\"connect error: \" + str(err))\n c.async_get_d1(d1_cb)\n\nRRN.SetLogLevel(RR.LogLevel_Debug)\n\nRRN.AsyncConnectService(\"rr+ws://localhost:22222?service=RobotRaconteurTestService\", None, None, None, connect_cb)\n\n\n","repo_name":"robotraconteur/robotraconteur_pyodide","sub_path":"testing/pyodide_test/test/test_transport.py","file_name":"test_transport.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"26371110316","text":"#!/usr/bin/python3\nimport scripts\nimport subprocess\nimport datetime\n\nDEBUG=1\nFATAL=2\n\n# Host class to turn config host files into actual data structures\nclass _host:\n def __init__(self,hostname):\n self.hostname=hostname\n self.online=False\n\n# logging, Duh\ndef log(priority,message):\n if (priority == DEBUG):\n priority = \"[DEBUG, %s] \" % datetime.datetime.now()\n elif (priority == FATAL):\n priority = \"[FATAL, %s] \" % datetime.datetime.now()\n logfile.write(priority + message+\"\\n\")\n logfile.flush()\n\n# takes host object and does operations testing network connectivity\ndef ping(h):\n log(DEBUG, \"Pinging host: \" +h.hostname)\n cmd={ \n \"ping\" : [\"ping\",\"-c 2\",h.hostname]\n }\n try:\n subprocess.check_output(cmd[\"ping\"])\n h.online=True\n log(DEBUG,\"Ping Success!\")\n return True\n except:\n h.online=False\n log(DEBUG,\"Ping failed!\")\n return False\n\n# Parses host files and return _host objects\ndef host_parse(hostfile):\n log(DEBUG, \"Parsing Hosts\")\n lines=hostfile.read().split(\"\\n\")\n lines=lines[:len(lines)-1] # cleans excess ''\n hosts=[]\n\n #Create host objects from config files\n for host in lines:\n hostname=host.split(\",\")[0]\n hosts.append(_host(hostname))\n # Then below interact with objects\n\n # And return all hosts\n return hosts\n\n# Get all User defined functions from \"scripts\" dir and execute them\n# h is a _host object \ndef execute_functions(h):\n global log\n global logfile\n global DEBUG\n global FATAL\n for i in dir(scripts):\n if \"__\" not in i :\n\n # Get pointers to functions included in module inbound\n script=getattr(scripts,i)\n\n # Set logging pointers for scripts plugin\n setattr(script,\"logfile\",logfile) \n setattr(script,\"log\",log) \n setattr(script,\"DEBUG\",DEBUG)\n setattr(script,\"FATAL\",FATAL)\n script.execute(h)\n\ndef main():\n\n identity_file=open(\"hosts\",\"r\")\n log(DEBUG, \"Logging initialized\")\n\n hosts=host_parse(identity_file) #returns list of host objects\n \n # For every host execute all anon funcs\n # (Which hosts that functions are executed for\n # are defined in the anonymous functions themselves)\n for Object in hosts:\n if ping(Object):\n execute_functions(Object)\n log(DEBUG, \"Execution Completed\")\n\n\nlogfile=open(\"logs/log.log\",\"w+\")\nmain()\n","repo_name":"flareriderdash/TransparentSync","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"10558539803","text":"from __future__ import annotations\n\nfrom collections.abc import Iterator\nfrom datetime import timedelta\nimport hashlib\nfrom typing import Any\nfrom urllib.parse import urlsplit, urlunsplit\n\nimport boto3\nfrom botocore.config import Config\nfrom botocore.exceptions import ClientError\nfrom dandischema.digests.dandietag import PartGenerator\nfrom django.conf import settings\nfrom django.core.files.storage import Storage, get_storage_class\nfrom minio.error import NoSuchKey\nfrom minio_storage.policy import Policy\nfrom minio_storage.storage import MinioStorage, create_minio_client_from_settings\nfrom s3_file_field._multipart_boto3 import Boto3MultipartManager\nfrom s3_file_field._multipart_minio import MinioMultipartManager\nfrom storages.backends.s3 import S3Storage\n\n\nclass ChecksumCalculatorFile:\n \"\"\"File-like object that calculates the checksum of everything written to it.\"\"\"\n\n def __init__(self):\n self.h = hashlib.sha256()\n\n def write(self, bytes):\n self.h.update(bytes)\n\n @property\n def checksum(self):\n return self.h.hexdigest()\n\n\nclass DandiMultipartMixin:\n @staticmethod\n def _iter_part_sizes(file_size: int) -> Iterator[tuple[int, int]]:\n generator = PartGenerator.for_file_size(file_size)\n for part in generator:\n yield part.number, part.size\n\n _url_expiration = timedelta(days=7)\n\n\nclass DandiBoto3MultipartManager(DandiMultipartMixin, Boto3MultipartManager):\n \"\"\"A custom multipart manager for passing ACL information.\"\"\"\n\n def _create_upload_id(self, object_key: str, content_type: str | None = None) -> str:\n kwargs = {\n 'Bucket': self._bucket_name,\n 'Key': object_key,\n 'ACL': 'bucket-owner-full-control',\n }\n\n if content_type is not None:\n kwargs['Content-Type'] = content_type\n\n resp = self._client.create_multipart_upload(**kwargs)\n return resp['UploadId']\n\n\nclass DandiMinioMultipartManager(DandiMultipartMixin, MinioMultipartManager):\n \"\"\"A custom multipart manager for passing ACL information.\"\"\"\n\n def _create_upload_id(self, object_key: str, content_type: str | None = None) -> str:\n metadata = {'x-amz-acl': 'bucket-owner-full-control'}\n\n if content_type is not None:\n metadata['Content-Type'] = content_type\n\n return self._client._new_multipart_upload(\n bucket_name=self._bucket_name,\n object_name=object_key,\n metadata=metadata,\n )\n\n\nclass DeconstructableMinioStorage(MinioStorage):\n \"\"\"\n A MinioStorage which is deconstructable by Django.\n\n This does not require a minio_client argument to the constructor.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # A minio.api.Minio instance cannot be serialized by Django. 
Since all constructor\n # arguments are serialized by the @deconstructible decorator, passing a Minio client as a\n # constructor argument causes makemigrations to fail.\n kwargs['minio_client'] = create_minio_client_from_settings()\n super().__init__(*args, **kwargs)\n\n\nclass VerbatimNameStorageMixin:\n \"\"\"A Storage mixin, storing files without transforming their original filename.\"\"\"\n\n # The basic S3Storage does not implement generate_filename or get_valid_name,\n # so upon FileField save, the following call stack normally occurs:\n # FieldFile.save\n # FileField.generate_filename\n # Storage.generate_filename\n # Storage.get_valid_name\n # Storage.generate_filename attempts to normalize the filename as a path.\n # Storage.get_valid_name uses django.utils.text.get_valid_filename,\n # which cleans spaces and other characters.\n # Since these are designed around filesystem safety, not S3 key safety, it's\n # simpler to do sanitization before saving.\n def generate_filename(self, filename: str) -> str:\n return filename\n\n\nclass TimeoutS3Storage(S3Storage):\n \"\"\"Override boto3 default timeout values.\"\"\"\n\n def __init__(self, **settings):\n super().__init__(**settings)\n\n self.config = self.config.merge(\n Config(connect_timeout=5, read_timeout=5, retries={'max_attempts': 2})\n )\n\n\nclass VerbatimNameS3Storage(VerbatimNameStorageMixin, TimeoutS3Storage):\n @property\n def multipart_manager(self):\n return DandiBoto3MultipartManager(self)\n\n def etag_from_blob_name(self, blob_name) -> str | None:\n client = self.connection.meta.client\n\n try:\n response = client.head_object(\n Bucket=self.bucket_name,\n Key=blob_name,\n )\n except ClientError:\n return None\n else:\n etag = response['ETag']\n # S3 wraps the ETag in double quotes, so we need to strip them\n if etag[0] == '\"' and etag[-1] == '\"':\n return etag[1:-1]\n return etag\n\n def generate_presigned_put_object_url(self, blob_name: str, base64md5: str) -> str:\n return self.connection.meta.client.generate_presigned_url(\n ClientMethod='put_object',\n Params={\n 'Bucket': self.bucket_name,\n 'Key': blob_name,\n 'ACL': 'bucket-owner-full-control',\n 'ContentMD5': base64md5,\n },\n ExpiresIn=600, # TODO proper expiration\n )\n\n def generate_presigned_head_object_url(self, key: str) -> str:\n return self.bucket.meta.client.generate_presigned_url(\n 'head_object',\n Params={'Bucket': self.bucket.name, 'Key': key},\n )\n\n def generate_presigned_download_url(self, key: str, path: str) -> str:\n return self.connection.meta.client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': self.bucket_name,\n 'Key': key,\n 'ResponseContentDisposition': f'attachment; filename=\"{path}\"',\n },\n )\n\n def generate_presigned_inline_url(self, key: str, path: str, content_type: str) -> str:\n return self.connection.meta.client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': self.bucket_name,\n 'Key': key,\n 'ResponseContentDisposition': f'inline; filename=\"{path}\"',\n 'ResponseContentType': content_type,\n },\n )\n\n def sha256_checksum(self, key: str) -> str:\n calculator = ChecksumCalculatorFile()\n obj = self.bucket.Object(key)\n obj.download_fileobj(calculator)\n return calculator.checksum\n\n\nclass VerbatimNameMinioStorage(VerbatimNameStorageMixin, DeconstructableMinioStorage):\n @property\n def multipart_manager(self):\n return DandiMinioMultipartManager(self)\n\n def etag_from_blob_name(self, blob_name) -> str | None:\n try:\n response = self.client.stat_object(self.bucket_name, blob_name)\n 
except NoSuchKey:\n return None\n else:\n return response.etag\n\n def generate_presigned_put_object_url(self, blob_name: str, _: str) -> str:\n # Note: minio-py doesn't support using Content-MD5 headers\n\n # storage.client will generate URLs like `http://minio:9000/...` when running in\n # docker. To avoid this, use the secondary base_url_client which is configured to\n # generate URLs like `http://localhost:9000/...`.\n return self.base_url_client.presigned_put_object(\n bucket_name=self.bucket_name,\n object_name=blob_name,\n expires=timedelta(seconds=600), # TODO proper expiration\n )\n\n def generate_presigned_head_object_url(self, key: str) -> str:\n return self.base_url_client.presigned_url('HEAD', self.bucket_name, key)\n\n def generate_presigned_download_url(self, key: str, path: str) -> str:\n return self.base_url_client.presigned_get_object(\n self.bucket_name,\n key,\n response_headers={'response-content-disposition': f'attachment; filename=\"{path}\"'},\n )\n\n def generate_presigned_inline_url(self, key: str, path: str, content_type: str) -> str:\n return self.base_url_client.presigned_get_object(\n self.bucket_name,\n key,\n response_headers={\n 'response-content-disposition': f'inline; filename=\"{path}\"',\n 'response-content-type': content_type,\n },\n )\n\n def sha256_checksum(self, key: str) -> str:\n calculator = ChecksumCalculatorFile()\n obj = self.client.get_object(self.bucket_name, key)\n for chunk in obj.stream(amt=1024 * 1024 * 16):\n calculator.write(chunk)\n return calculator.checksum\n\n\ndef create_s3_storage(bucket_name: str) -> Storage:\n \"\"\"\n Return a new Storage instance, compatible with the default Storage class.\n\n This abstracts over differences between S3Storage and MinioStorage,\n allowing either to be used as an additional non-default Storage.\n \"\"\"\n # For production, calling django.core.files.storage.get_storage_class is fine\n # to return the storage class of S3Storage.\n default_storage_class = get_storage_class()\n\n if issubclass(default_storage_class, S3Storage):\n storage = VerbatimNameS3Storage(bucket_name=bucket_name)\n # Required to upload to the sponsored bucket\n storage.default_acl = 'bucket-owner-full-control'\n elif issubclass(default_storage_class, MinioStorage):\n base_url = None\n if getattr(settings, 'MINIO_STORAGE_MEDIA_URL', None):\n # If a new base_url is set for the media storage, it's safe to assume one should be\n # set for this storage too.\n base_url_parts = urlsplit(settings.MINIO_STORAGE_MEDIA_URL)\n # Reconstruct the URL with an updated path\n base_url = urlunsplit(\n (\n base_url_parts.scheme,\n base_url_parts.netloc,\n f'/{bucket_name}',\n base_url_parts.query,\n base_url_parts.fragment,\n )\n )\n\n # The MinioMediaStorage used as the default storage is cannot be used\n # as an ad-hoc non-default storage, as it does not allow bucket_name to be\n # explicitly set.\n storage = VerbatimNameMinioStorage(\n bucket_name=bucket_name,\n base_url=base_url,\n # All S3Storage URLs are presigned, and the bucket typically is not public\n presign_urls=True,\n auto_create_bucket=True,\n auto_create_policy=True,\n policy_type=Policy.read,\n # Required to upload to the sponsored bucket\n object_metadata={'x-amz-acl': 'bucket-owner-full-control'},\n )\n # TODO: generalize policy_type?\n # TODO: filename transforming?\n # TODO: content_type\n else:\n raise Exception(f'Unknown storage: {default_storage_class}')\n\n return storage\n\n\ndef get_boto_client(storage: Storage | None = None):\n \"\"\"Return an s3 client from the 
current storage.\"\"\"\n storage = storage if storage else get_storage()\n if isinstance(storage, MinioStorage):\n return boto3.client(\n 's3',\n endpoint_url=storage.client._endpoint_url,\n aws_access_key_id=storage.client._access_key,\n aws_secret_access_key=storage.client._secret_key,\n region_name='us-east-1',\n )\n\n return storage.connection.meta.client\n\n\ndef get_storage_params(storage: Storage):\n if isinstance(storage, MinioStorage):\n return {\n 'endpoint_url': storage.client._endpoint_url,\n 'access_key': storage.client._access_key,\n 'secret_key': storage.client._secret_key,\n }\n\n return {\n 'endpoint_url': storage.endpoint_url,\n 'access_key': storage.access_key,\n 'secret_key': storage.secret_key,\n }\n\n\ndef get_storage() -> Storage:\n return create_s3_storage(settings.DANDI_DANDISETS_BUCKET_NAME)\n\n\ndef get_storage_prefix(instance: Any, filename: str) -> str:\n return f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}{filename}'\n\n\ndef get_embargo_storage() -> Storage:\n return create_s3_storage(settings.DANDI_DANDISETS_EMBARGO_BUCKET_NAME)\n\n\ndef get_embargo_storage_prefix(instance: Any, filename: str) -> str:\n return f'{settings.DANDI_DANDISETS_EMBARGO_BUCKET_PREFIX}{filename}'\n","repo_name":"dandi/dandi-archive","sub_path":"dandiapi/api/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":12444,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"}
+{"seq_id":"34722148061","text":"#!/usr/bin/python3\n\ndef roman_to_int(roman_string):\n '''\n roman_to_int - function that convert roman string to integre\n '''\n roman_to_decimal = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n }\n number = 0\n previos_value = 0\n if type(roman_string) != str or roman_string is None:\n return (0)\n else:\n for i in roman_string:\n value = roman_to_decimal[i]\n if value > previos_value:\n number += value - (2 * previos_value)\n else:\n number += value\n previos_value = value\n return (number)\n","repo_name":"OuYa01/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/12-roman_to_int.py","file_name":"12-roman_to_int.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11097763868","text":"import os\nimport shutil\n\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\n\n@contextmanager\ndef copy_work(working_dir, text_to_replace, replacement_text):\n \"\"\"\n Recursive function that iterates down through source directory until a file is reached. If file is newer than same\n file in the target directory then replaces target file with source version. If source doesn't exist in target\n directory then copies source file into target directory.\n :param replacement_text: replacement text to put into source path i.e /a/b//file\n :param text_to_replace: text that needs to be replaced in source path i.e /a/b//file\n :param working_dir: the source directory that contains the newest files.\n :return: copied file\n \"\"\"\n os.chdir(working_dir)\n for file in Path.cwd().iterdir():\n if file.is_file():\n try:\n p1, p2 = os.path.getmtime(Path(file.as_posix())), os.path.getmtime(Path(\n f'{os.path.split(file.as_posix())[0].replace(text_to_replace, replacement_text)}/{os.path.split(file.as_posix())[1]}').as_posix())\n if p1 > p2:\n shutil.copy(Path(file).as_posix(), Path(\n f'{os.path.split(file.as_posix())[0].replace(text_to_replace, replacement_text)}/{os.path.split(file.as_posix())[1]}'))\n print(f'{Path(file).name} replaced.')\n except:\n shutil.copy(Path(file).as_posix(), Path(\n f'{os.path.split(file.as_posix())[0].replace(text_to_replace, replacement_text)}/{os.path.split(file.as_posix())[1]}'))\n print(f'{Path(file).name} added.')\n else:\n copy_work(file, text_to_replace, replacement_text)\n\n","repo_name":"larymak/Python-project-Scripts","sub_path":"AUTOMATION/FileReplaceWithNewer/replace_with_newer.py","file_name":"replace_with_newer.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":929,"dataset":"github-code","pt":"77"}
+{"seq_id":"38331748775","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport streamlit as st\nimport plotly.graph_objects as go\nfrom plotly import tools\nimport plotly.offline as py\nimport plotly.express as px\nimport cufflinks as cf\nfrom plotly.offline import plot\nimport chart_studio.plotly as py\n\n\n# In[2]:\n\n\ncf.go_offline()\n\n\n# In[3]:\n\n\ndf = pd.read_csv('../notebooks/summary.csv')\npc = pd.read_csv('../notebooks/pc_comp_scrap/pc.csv')\nworten = pd.read_csv('../notebooks/worten_scrap/worten.csv')\n\n\n# In[ ]:\n\n\n\n\n\n# In[4]:\n\n\nst.title(\"** :desktop_computer:** **TV Marketplace Price Evolution** **:desktop_computer:**\")\nst.header(\"This is an App created to visualize the price Evolution of Ultra HD 4K TVs in 2 Manufactures: LG and Samsung, in 2 different marketplaces: Pc Componentes and Worten.\")\nst.subheader(\"The Dashboards will show Price evolution since October 3rd.\")\n\n\n# In[5]:\n\n\nimage = ('/Users/juandediegosuanzes/desktop/Ironhack-Final-Project/streamlit/samsung_vs_lg_')\n\n\n# In[6]:\n\n\nst.image(image, width=None)\n\n\n# In[7]:\n\n\npc_ok = pc[['PC LG', 'PC SS']]\nworten_ok = worten[['Worten LG', 'Worten SS']]\ndf_ok = df[['PC LG', 'PC SS', 'Worten LG', 'Worten SS']]\n\n\n# In[8]:\n\n\nst.markdown(\"#### \" +\"Pc Componentes & Worten Price Evolution in LG and Samsung\")\n\n\n# In[9]:\n\n\nst.line_chart(data=df_ok, width=0, height=0, use_container_width=True)\n\n\n# In[10]:\n\n\nst.markdown(\"#### \" +\"Select the manufacturer and the marketplace you would like to see the metrics in detail\")\n\nselected_metrics = st.selectbox(\n label=\"Choose...\", options=['PC LG','PC SS','Worten LG','Worten SS']\n)\n\n\n# In[11]:\n\n\nfig = go.Figure()\nif selected_metrics == 'PC LG':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['PC LG'],\n mode='lines+markers', name='PC LG'))\nif selected_metrics == 'PC SS':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['PC SS'],\n\t mode='lines+markers', name='PC SS'))\nif selected_metrics == 'Worten LG':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['Worten LG'],\n\t mode='lines+markers',name='Worten LG'))\nif selected_metrics == 'Worten SS':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['Worten SS'],\n\t mode='lines+markers',name='Worten SS'))\nst.plotly_chart(fig, use_container_width=True)\n\n\n# In[12]:\n\n\nif st.checkbox('Show dataframe'):\n st.dataframe(df.style.highlight_max(axis=0))\n\n\n# In[13]:\n\n\nst.markdown(\"#### \" +\"Pc Componentes Price Evolution by Manufacturer\")\n\n\n# In[14]:\n\n\nimage_pc = ('/Users/juandediegosuanzes/desktop/Ironhack-Final-Project/streamlit/PcComponentes.png')\n\n\n# In[15]:\n\n\nst.image(image_pc, width=None)\n\n\n# In[16]:\n\n\nst.area_chart(data=pc_ok, width=0, height=0, use_container_width=True)\n\n\n# In[17]:\n\n\nif st.checkbox('Show PC Componentes dataframe'):\n st.dataframe(pc.style.highlight_max(axis=0))\n\n\n# In[18]:\n\n\nst.markdown(\"#### \" +\"Worten Price Evolution by Manufacturer\")\n\n\n# In[19]:\n\n\nimage_worten = ('/Users/juandediegosuanzes/desktop/Ironhack-Final-Project/streamlit/worten_im.webp')\n\n\n# In[20]:\n\n\nst.image(image_worten, width=None)\n\n\n# In[21]:\n\n\nst.area_chart(data=worten_ok, width=0, height=0, use_container_width=True)\n\n\n# In[22]:\n\n\nif st.checkbox('Show Worten dataframe'):\n st.dataframe(worten.style.highlight_max(axis=0))\n\n\n# In[23]:\n\n\n#st.title(\"** :champagne:** **¡¡GRACIAS A TODOS!!: Lead Teachers, TA y enhorabuena compañeros!!** **:champagne:**\")\n\n\n# 
In[24]:\n\n\n#video_file = open('/Users/juandediegosuanzes/desktop/video.mp4', 'rb')\n#video_bytes = video_file.read()\n#st.video(video_bytes)\n\n\n# In[25]:\n\n\n#audio_file = open('/Users/juandediegosuanzes/desktop/champ.mp3', 'rb')\n#audio_bytes = audio_file.read()\n#st.audio(audio_bytes, format='audio/ogg', start_time=34)\n\n\n# In[26]:\n\n\n#fig = df.iplot(kind='box', \n# histnorm='percent', \n # xTitle='October Scraping', \n # yTitle='Price €', \n # title='Summary Price by Brand and Marketplace',\n # subplots=True)\n\n#st.pyplot(fig)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"juanema74/Ironhack-Final-Project","sub_path":"streamlit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27620432502","text":"import os\nimport cv2\nfrom pairs import Pairs\nfrom hog import getHog\nfrom contrast import getImageContrast\nfrom classes import class_filter\nfrom filters import calculateMetricsForImages\nfrom show import showContinuously, showFromClassContinuously\nfrom Report import Report\nimport argparse\nfrom dataset_specific_api import getDatasetSpecificApi\n\n# parsing command line args\n\nparser = argparse.ArgumentParser(description='Calculate objects metrics')\nparser.add_argument('--dataset', type=str,\n help='dataset name', default=None)\nparser.add_argument('--api', type=str,\n help='dataset specific api name', default=None)\nparser.add_argument('--threads', type=str,\n help='threads number', default='1')\nparser.add_argument('--overwrite', type=str,\n help='overwrite existing pairs or not', default='0')\nparser.add_argument('--continue_calc', type=str,\n help='continue first calculatinon', default='1')\nparser.add_argument('--metrics_file', type=str,\n help='metrics file name (without extension! must be in observer\\'s folder)', default='default_metrics')\nargs = parser.parse_args()\n\ndataset_name = args.dataset\ndataset_specific_api_name = args.api or dataset_name\nthreads = int(args.threads)\noverwrite = int(args.overwrite)\ncontinue_calc = int(args.continue_calc)\nmetrics_file_path = args.metrics_file\n\n# importing metrics\nmetrics = __import__(metrics_file_path).metrics\n\n# creating report object\nreport_file_path = 'report_' + dataset_name + '.json'\nif overwrite or (not os.path.exists(report_file_path)):\n\treport = Report(report_file_path)\n\n# geting dataset specific api\ndataset_specific_api = getDatasetSpecificApi(dataset_specific_api_name)\n\n# geting pairs from directory\ndirectory = 'pairs_' + dataset_name + '_new'\nif overwrite or (not os.path.exists(directory)) or continue_calc:\n\tdirectory = directory.replace('_new', '')\npairs = Pairs(directory, get_classes_function=dataset_specific_api.getClasses)\n\n\n\n# using this function you can see and list (press q) images with from class\n# showFromClassContinuously(pairs, 'Unknown', dataset_specific_api.getClasses)\n\n\n\n# counting objects in classes\nif overwrite or (not os.path.exists(report_file_path)):\n\tobjects_number_by_class = pairs.countObjectsInClasses()\n\treport.write('objects number by class', objects_number_by_class)\n\n\n\n# counting videos in classes\n# videos_number_by_class = countVideosInClasses(pairs, dataset_specific_api.getClasses)\n# report.write('videos number by class', videos_number_by_class)\n\n# available metrics\n\n\n# calculating metrics\nnew_pairs_folder_path = 'pairs_' + dataset_name + '_new'\ncalculateMetricsForImages(pairs, metrics, new_pairs_folder_path, threads=threads, overwrite=overwrite)\n# if overwrite:\n# \tpairs.dumpClasses(os.path.normcase(new_pairs_folder_path + '/' + 'classes_list.json'))","repo_name":"MentalBlood/observer","sub_path":"get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30907857249","text":"\n\"\"\"\nmake model tutorial\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\ndef make_model() -> keras.Model:\n x0 = keras.layers.Input(shape=(28, 28, 3))\n x = keras.layers.Conv2D(32, 3, activation='relu')(x0)\n x = keras.layers.Conv2D(64, 3, activation='relu')(x)\n x = keras.layers.Flatten()(x)\n x = keras.layers.Dense(128, activation='relu')(x)\n x = keras.layers.Dense(10, activation='softmax')(x)\n model = keras.Model(inputs=(x0), outputs=(x))\n return model\n\nif __name__ == \"__main__\":\n model = make_model()\n model.summary()\n","repo_name":"torigara603/tensorflowtips","sub_path":"tips/tutorials/N03_SaveModel/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22282641293","text":"# Python code to find the co-ordinates of\n# the contours detected in an image.\nimport cv2\n\n\ndef parse_image(image: str):\n # file_path: str = \"./xray_file.png\"\n # Reading image\n font = cv2.FONT_HERSHEY_COMPLEX\n img2 = cv2.imread(image, cv2.IMREAD_COLOR)\n\n # Reading same image in another\n # variable and converting to gray scale.\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n # edged = cv2.Canny(img, 20, 300)\n\n # Converting image to a binary image\n # ( black and white only image).\n _, threshold = cv2.threshold(img, 200, 455, cv2.THRESH_BINARY)\n\n # Detecting contours in image.\n # contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,\n # cv2.CHAIN_APPROX_SIMPLE)\n contours, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Going through every contours found in the image.\n for cnt in contours:\n\n approx = cv2.approxPolyDP(cnt, 0.020 * cv2.arcLength(cnt, True), True)\n\n # draws boundary of contours.\n # cv2.drawContours(img2, 0, (0, 0, 255), 5)\n # cv2.drawContours(img2, contours, -1, (10, 355, 100), 3)\n cv2.drawContours(img2, contours, 0, (0,255, 0), 3)\n\n # Used to flatten the array containing\n # the co-ordinates of the vertices.\n values = approx.ravel()\n i = 0\n\n for _ in values:\n if i % 2 == 0:\n x = values[i]\n y = values[i + 1]\n\n # String containing the co-ordinates.\n string = f\"{str(x)} {str(y)}\"\n\n if i != 0:\n # text on remaining co-ordinates.\n cv2.putText(img2, string, (x, y), font, 0.5, (0, 255, 0))\n i = i + 1\n\n # Saving the image\n cv2.imwrite(\"./output_image/image.jpg\", img2)\n\n","repo_name":"Nakul21/fastapiImage","sub_path":"process_image.py","file_name":"process_image.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"815965119","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom pymongo import MongoClient\n\n\nclass Spot:\n collection_old = 'latestattractions'\n\n collection_new = 'spot'\n\n params_map = {}\n\n def __init__(self):\n pass\n\n @staticmethod\n def create_spot(address_old, port_old, address_new, port_new, collection_old, collection_new,\n params_map):\n\n # old database connection\n client = MongoClient(address_old, port_old)\n travel1 = client.travel1\n\n # new database connection\n client = MongoClient(address_new, port_new)\n travel2 = client.travel2\n\n # get old collection and create new collection\n db_old = travel1[collection_old]\n db_new = travel2[collection_new]\n\n # clean former data\n db_new.remove()\n\n # 临时数组\n temp = [''] * len(params_map.keys())\n\n # 判断当前文档是否含有某个字段,若有则取出后赋值给临时数组,否则为 None\n for document in db_old.find():\n for i in range(len(params_map.keys())):\n if params_map.keys()[i] in document:\n temp[i] = document[params_map.keys()[i]]\n\n image_url = 'http://weegotest.b0.upaiyun.com/attractions/iosimgs/'\n post = {}\n\n if 'spot' in document:\n spot = document['spot']\n if spot is not None:\n for i in range(len(spot)):\n if 'cover_image' in spot[i]:\n if spot[i]['cover_image'] != '':\n cover_image = image_url + spot[i]['cover_image']\n if 'title' in spot[i]:\n title = spot[i]['title']\n if 'desc' in spot[i]:\n desc = spot[i]['desc']\n if 'advice' in spot[i]:\n advice = spot[i]['advice']\n \n num = db_new.find({'cover_image': cover_image, 'title': title,\n 'desc': desc, 'advice': advice}).count() \n if num > 1:\n print('重复项')\n print(document['_id'])\n else:\n temp_spot = {}\n temp_spot.update({'cover_image': cover_image, 'title': title,\n 'desc': desc, 'advice': advice, 'tag': ''})\n db_new.insert(temp_spot)\n print(temp_spot)\n","repo_name":"hezhensong/MongoConvertor","sub_path":"mongodb/Spot.py","file_name":"Spot.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36978017098","text":"import argparse\nimport time\nimport csv\nimport socket\nimport os, shutil\nfrom datetime import timedelta\nfrom multiprocessing import Process, Manager, Value, Lock\nfrom subprocess import Popen, PIPE, TimeoutExpired\nfrom ipaddress import ip_network\nfrom datetime import datetime\n\n\nclass Counter(object):\n def __init__(self, initval=0):\n self.val = Value('i', initval)\n self.lock = Lock()\n\n def increment(self):\n with self.lock:\n self.val.value += 1\n\n def value(self):\n with self.lock:\n return self.val.value\n\n\ndef generate_args():\n \"\"\" Create main parser \"\"\"\n parser = argparse.ArgumentParser(prog='ping.py')\n # Create global arguments\n parser.add_argument('--hosts', dest='hosts', type =str, help=\"Specify network to ping using CIDR notation.\"\n \"Example: 10.0.0.0/24\",\n required=True)\n args = parser.parse_args()\n return args\n\ndef subnet_ping(ip, counter, ip_results):\n \"\"\" Run ping subprocess and keep track of ping result\n Append results to a list of dictionaries \"\"\"\n # Linux/mac\n if os.name == 'posix':\n sub_p = Popen(['ping', '-c', '4', str(ip)], stdout=PIPE, stderr=PIPE, stdin=PIPE)\n # Windows\n elif os.name == 'nt':\n sub_p = Popen(['ping', '-n', '4', str(ip)], stdout=PIPE, stderr=PIPE, stdin=PIPE)\n # grab output and errors from subprocess\n # sleep a bit (mainly for windows because ping return output is rather slow\n # FIX THIS - use more elegant way of checking if output is finished\n time.sleep(10)\n try:\n output, errors = sub_p.communicate(timeout=15)\n except TimeoutExpired:\n sub_p.kill()\n output, errors = sub_p.communicate()\n # differences in output of poxis vs nt\n if os.name == 'posix':\n # if you don't see 0 packets in the output, then you must have received packets from the host\n if not '0 packets received' in str(output):\n #print(ip, 'is up!', \"\\n\")\n log_out = \"{} is up! \\n\".format(ip)\n log_file(log_out)\n counter.increment()\n ip_results.append({'ip': ip, 'status': 'up'})\n else:\n #print(ip, \"is down or can't be pinged!\", \"\\n\")\n log_out = \"{} is down or can't be pinged! \\n\".format(ip)\n log_file(log_out)\n ip_results.append({'ip': ip, 'status': 'down'})\n elif os.name == 'nt':\n if not 'Received = 0' in str(output):\n #print(ip, 'is up!', \"\\n\")\n log_out = \"{} is up! \\n\".format(ip)\n log_file(log_out)\n counter.increment()\n ip_results.append({'ip': ip, 'status': 'up'})\n else:\n #print(ip, \"is down or can't be pinged!\", \"\\n\")\n log_out = \"{} is down or can't be pinged! \\n\".format(ip)\n log_file(log_out)\n ip_results.append({'ip': ip, 'status': 'down'})\n\ndef log_file(info):\n \"\"\" Write to a log file \"\"\"\n ## FIX - Windows seems to have a problem using the global reference log_filename ##\n with open('ping_log.txt', 'a+') as f:\n f.write(str(info))\n\ndef export_hosts_to_csv(hosts):\n with open('ping_results.csv', 'w+', newline='') as csvfile:\n fieldnames = ['ip', 'status']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for host in hosts:\n writer.writerow({'ip': host['ip'], 'status': host['status']})\n\n\n\nif __name__ == '__main__':\n start_time = time.time()\n args = generate_args()\n # use manager for sharing the list between processes\n manager = Manager()\n ip_results = manager.list()\n # Mac limits resources by default - this sets the number of open files from default 256 to 10240\n # for this parent process and all subs. 
Don't think this is a Linux problem, but this sets it\n # for all posix compliant machines \n if os.name == 'posix':\n import resource\n resource.setrlimit(resource.RLIMIT_NOFILE, (10240, 10240))\n hosts = args.hosts\n # shared counter for all processes to have access to increment\n counter = Counter(0)\n dt = datetime.now()\n log_filename = \"ping_log.txt\"\n archive_log_filename = \"ping_log_{}_{}_{}_{}_{}_{}.txt\".format(dt.month, dt.day, dt.year, dt.hour,\n dt.minute, dt.second,)\n archive_logfile_path = \"Archive/{}\".format(archive_log_filename)\n # remove old log file if it exists, create new archive folder if one doesn't exist, move old to archive\n if not os.path.exists('Archive'):\n os.mkdir('Archive')\n if os.path.exists(log_filename):\n os.rename(log_filename, archive_log_filename)\n shutil.move(archive_log_filename, archive_logfile_path)\n\n # build ips\n hosts = list(ip_network(hosts).hosts())\n hosts = [str(host) for host in hosts]\n # grab total number of hosts within the subnet to ping (length of list)\n total_hosts = len(hosts)\n # create process queue for each ip to be pinged. Prob need to look into better management of this\n processes = []\n workers = [0 for x in range(100)]\n # increment on index of ip_addr because a list is returned\n idx = 0\n # grab number of IPs - later count down to 0\n hosts_len = len(hosts)\n try:\n while hosts_len > 0:\n if 0 not in workers:\n workers = [0 for x in range(100)]\n for w in range(len(workers)):\n p = Process(target=subnet_ping, args=(hosts[idx], counter, ip_results))\n # start the process\n p.start()\n # add to list of workers available to run processes\n processes.append(p)\n workers.remove(0)\n idx += 1\n hosts_len -= 1\n # calling process blocked until process who's join method is called terminates.\n # used more or less for queuing. If join is not used all processes join immediately\n # you can also specify an optional timeout in case waiting is too long\n for p in processes:\n p.join()\n except IndexError:\n pass\n\n # continually check if process is still alive, when done provide results\n process_running = True\n while process_running:\n if not processes[-1].is_alive():\n print(\"--> {} of {} hosts could be pinged.\".format(counter.value(), total_hosts))\n host_result_summary = \"\\n{} of {} hosts could be pinged.\".format(counter.value(), total_hosts)\n datetime_completed = \"\\nCompleted on {}/{}/{} @ {}:{}:{}\".format(dt.month,dt.day, dt.year,dt.hour,\n dt.minute, dt.second)\n log_file(host_result_summary)\n log_file(datetime_completed)\n # sort the results from first ip to last by using socket's builtin inet_aton\n ip_results = sorted(ip_results, key=lambda host: socket.inet_aton(host['ip']))\n export_hosts_to_csv(ip_results)\n process_running = False\n else:\n continue\n end_time = time.time() - start_time\n end_time = str(timedelta(seconds=end_time))\n print(\"--> Process running time: {} (Hours:Minutes:Seconds.Microseconds)\".format(end_time))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kirbocannon/network_tools","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"8736132165","text":"import logging\nimport threading\nimport time\n\nfrom peregrine_mail.data.models import Email\nfrom peregrine_mail.data.database import db\nfrom peregrine_mail.sending_emails import send_email, find_mail_to_send, find_mail_to_delete\n\nlogger = logging.getLogger('peregrine')\n\n\nclass Threading:\n \"\"\"Run emails in the background\"\"\"\n\n def __init__(self, email_queue, app, sleep_time=10):\n self.app = app\n self.email_queue = email_queue\n self.sleep_time = sleep_time\n thread = threading.Thread(target=self.send_emails)\n thread.daemon = True\n thread.start()\n\n def send_emails(self):\n db.app = self.app\n\n while True:\n # Send NEW emails\n try:\n self.sending_emails_from_queue()\n except Exception as err:\n logger.exception(f'Unexpected error while sending new mail: {err}')\n\n emails = db.session.query(Email).all()\n\n # Resend FAILED emails\n try:\n for email in find_mail_to_send(self.app, emails):\n send_email(self.app, **email)\n except Exception as err:\n logger.exception(f'Unexpected error while finding failed mail to send: {err}')\n\n # Delete old emails\n try:\n find_mail_to_delete(self.app, emails)\n except Exception as err:\n logger.exception(f'Unexpected error while executing retention policy deletion: {err}')\n\n time.sleep(self.sleep_time)\n\n def sending_emails_from_queue(self):\n while not self.email_queue.empty():\n send_email(self.app, **self.email_queue.get())\n","repo_name":"beautiousmax/peregrine_mail","sub_path":"peregrine_mail/background_thread.py","file_name":"background_thread.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25198082124","text":"import adafruit_dht\nimport board\nimport time\n\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(17, GPIO.OUT)\n\ndht_pin = board.D4\ndht_sensor = adafruit_dht.DHT11(dht_pin, use_pulseio=False)\n\ndef callback_func(*args):\n print(\"Button was pushed!\")\n while True:\n try:\n GPIO.output( 17, GPIO.HIGH )\n temp_c = dht_sensor.temperature\n temp_f = temp_c * (9 / 5) + 32\n hum = dht_sensor.humidity\n print(\"Temperature =\", temp_c, 'C,', temp_f, 'F')\n print(\"Humidity =\", hum, '%')\n time.sleep( 0.5 )\n GPIO.output( 17, GPIO.LOW )\n break\n except:\n print('error reading, trying again...')\n continue\n\nGPIO.add_event_detect(10, edge=GPIO.FALLING, callback=callback_func, bouncetime=200)\n\ninput(\"press enter 2 quit\\n\") # block program from exiting\n\n\n\n\n\n","repo_name":"ucsd-ece196/ucsd-ece196.github.io","sub_path":"examples/pi/combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"20496960003","text":"import pickle\nfrom typing import List\nfrom fastapi import FastAPI, File, Form, UploadFile\nfrom starlette.middleware.cors import CORSMiddleware\nimport io\nimport face_recognition\nimport numpy as np\nfrom fastapi.encoders import jsonable_encoder\nfrom PIL import Image, ImageDraw\nimport cv2\nfrom Encode_face import EncodeFace\n\n#encode available image on start server\nEncodeFace().load_encoding_images(\"./images\")\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware, allow_origins=[\"*\"], allow_methods=[\"*\"], allow_headers=[\"*\"]\n)\n\n@app.post(\"/api/Identify\")\nasync def faces_recognition(image_upload: UploadFile = File(...)):\n data = await image_upload.read()\n known_face_names =[]\n known_face_encodings=[]\n \n image = face_recognition.load_image_file(io.BytesIO(data))\n #img = Image.open(io.BytesIO(data))\n #draw = ImageDraw.Draw(img)\n\n \n\n with open('know_face_names.p','rb') as f:\n while 1:\n try:\n known_face_names.append(pickle.load(f))\n except EOFError:\n break\n with open('know_face_encodes.p','rb') as f:\n while 1:\n try:\n known_face_encodings.append(pickle.load(f))\n except EOFError:\n break\n #print(known_face_names)\n\n # Detect face(s) and encode them\n face_locations = face_recognition.face_locations(image)\n face_encodings = face_recognition.face_encodings(image, face_locations)\n\n \n face_names = []\n face_loc=[]\n\n # Recognize face(s)\n for face_encoding, face_location in zip(face_encodings, face_locations):\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding,tolerance=0.4)\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n #print(face_distances)\n best_match_index = np.argmin(face_distances)\n #print(best_match_index)\n if matches[best_match_index]: \n name = known_face_names[best_match_index]\n else:\n name = \"Unknown\"\n #top, right, bottom, left = face_location\n #draw.rectangle([left, top, right, bottom],width = 4)\n #draw.text((left, top), name)\n face_names.append(name)\n face_loc.append(face_location)\n #img.show()\n return {\"Face name \": face_names,\"Face location \": face_loc}\n\n\n\n@app.post(\"/api/AddImg\")\nasync def faces_recognition(image_upload: UploadFile = File(...),name :str =Form()):\n data = await image_upload.read()\n img = Image.open(io.BytesIO(data))\n img.save(\"./images/{}.png\".format(name))\n image = face_recognition.load_image_file(io.BytesIO(data))\n face_locations = face_recognition.face_locations(image)\n face_encodings = face_recognition.face_encodings(image, face_locations)[0]\n \n with open('know_face_names.p','ab') as f:\n pickle.dump((name), f)\n with open('know_face_encodes.p','ab') as f:\n pickle.dump((face_encodings), f)\n\n return {\"message\" : \"add success\"}\n\n\n\n@app.post(\"/api/AddMultiImg\")\nasync def create_upload_files(files: List[UploadFile],name :str=Form()):\n for data in files:\n data = await data.read()\n image = face_recognition.load_image_file(io.BytesIO(data))\n face_locations = face_recognition.face_locations(image)\n face_encodings = face_recognition.face_encodings(image, face_locations)[0]\n with open('know_face_names.p','ab') as f:\n pickle.dump((name), f)\n with open('know_face_encodes.p','ab') as f:\n pickle.dump((face_encodings), f)\n \n return {\"message\":\"add 
success\"}\n\n\n\n\n\n","repo_name":"numan9199/face-ocr","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29484266259","text":"class Twitter:\n def __init__(self):\n self.trending_topics = []\n\n def tweet(self, mensaje):\n if len(mensaje) > 140:\n print(\"El mensaje excede el límite de 140 caracteres.\")\n return\n\n hashtags = self.obtener_hashtags(mensaje)\n self.actualizar_trending_topics(hashtags)\n\n def obtener_hashtags(self, mensaje):\n palabras = mensaje.split()\n hashtags = [palabra[1:] for palabra in palabras if palabra.startswith(\"#\")]\n return hashtags\n\n def actualizar_trending_topics(self, hashtags):\n for hashtag in hashtags:\n encontrado = False\n for i, trending_topic in enumerate(self.trending_topics):\n if hashtag == trending_topic[0]:\n self.trending_topics[i] = (hashtag, trending_topic[1] + 1)\n encontrado = True\n break\n if not encontrado:\n self.trending_topics.append((hashtag, 1))\n \n self.trending_topics.sort(key=lambda x: x[1], reverse=True)\n self.trending_topics = self.trending_topics[:3]\n\n\n# Ejemplo de uso\ntwitter = Twitter()\n\n# Primer tweet\ntwitter.tweet(\"Hola, estoy probando mi prototipo de Twitter. #twitter #prototipo #prueba\")\nprint(twitter.trending_topics) # [('twitter', 1), ('prototipo', 1), ('prueba', 1)]\n\n# Segundo tweet\ntwitter.tweet(\"Me encanta el desarrollo web. #web #desarrollo #programación\")\nprint(twitter.trending_topics) # [('web', 1), ('desarrollo', 1), ('programación', 1)]\n\n# Tercer tweet\ntwitter.tweet(\"Hoy es un día soleado. #clima #sol #verano\")\nprint(twitter.trending_topics) # [('sol', 2), ('web', 1), ('desarrollo', 1)]\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema9_ej1/tema9_ej1_db6ce14501fafe028282235b78618db2.py","file_name":"tema9_ej1_db6ce14501fafe028282235b78618db2.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"8117935151","text":"\"\"\"Tests for helper functions.\"\"\"\n\nimport rudra.utils.helper as helper\nimport requests\nimport pytest\n\n\ndef test_get_github_repo_info():\n gh_repo1 = 'https://github.com/fabric8-analytics/f8a-hpf-insights'\n gh_repo2 = 'https://github.com/fabric8-analytics/f8a-hpf-insights.git'\n gh_repo3 = 'git+https://github.com/fabric8-analytics/f8a-hpf-insights'\n gh_repo4 = 'fabric8-analytics/f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo1)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo2)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo3)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo4)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n\n\ndef test_get_training_file_url():\n user = 'fabric8-analytics'\n repo = 'f8a-hpf-insights'\n file_url = helper.get_training_file_url(user, repo)\n resp = requests.get(file_url)\n assert resp.status_code == 200\n\n file_url = helper.get_training_file_url(user, repo, branch='training-code')\n resp = requests.get(file_url)\n assert resp.status_code == 200\n\n file_url = helper.get_training_file_url(\n user, repo, training_file_path='src/flask_endpoint.py')\n resp = requests.get(file_url)\n assert resp.status_code == 200\n\n\ndef test_load_hyper_params():\n # mock command line args\n helper.argv = ['helper.py', '{\"a\": 111, \"b\": \"some text\"}']\n hyper_params = helper.load_hyper_params()\n assert hyper_params.get('a') == 111\n assert hyper_params.get('b') == \"some text\"\n\n\ndef test_cache_dict_with_zero_max_size():\n cache_dict = helper.CacheDict(0)\n with pytest.raises(KeyError):\n cache_dict['key1'] = 'value1'\n assert len(cache_dict) == 0\n\n\ndef test_cache_dict_with_one_max_size():\n cache_dict = helper.CacheDict(1)\n cache_dict['key1'] = 'value1'\n cache_dict['key2'] = 'value2'\n assert len(cache_dict) == 1\n assert 'key2' in cache_dict\n assert 'key1' not in cache_dict\n\n\ndef test_cache_dict():\n # default max_len = 1024\n cache_dict = helper.CacheDict()\n for i in range(2000):\n cache_dict[i] = i * i\n assert len(cache_dict) == cache_dict.max_len\n assert cache_dict[i] == i * i\n del cache_dict[i]\n assert len(cache_dict) == cache_dict.max_len - 1\n assert cache_dict[cache_dict.max_len - 2] == pow(cache_dict.max_len - 2, 2)\n assert len(list(cache_dict)) == cache_dict.max_len - 1\n assert str(cache_dict.max_len - 2) in str(cache_dict)\n","repo_name":"fabric8-analytics/fabric8-analytics-rudra","sub_path":"tests/utils/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"30866850939","text":"#! /usr/bin/env python3\r\n#\r\ndef blowup_deriv ( t, y ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_deriv() evaluates the right hand side of blowup_ode().\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 10 November 2020\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Reference:\r\n#\r\n# John D Cook,\r\n# Approximating a solution that doesn't exist,\r\n# https://www.johndcook.com/blog/2009/08/11/approximating-a-solution-that-doesnt-exist/\r\n# 11 August 2009.\r\n#\r\n# Input:\r\n#\r\n# real T, Y: the time and solution value.\r\n#\r\n# Output:\r\n#\r\n# real DYDT: the derivative value.\r\n#\r\n dydt = y**2\r\n\r\n return dydt\r\n\r\ndef blowup_euler ( n ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_euler() solves blowup_ode() using euler.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 10 November 2020\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# integer N: the number of steps to take.\r\n#\r\n import matplotlib.pyplot as plt\r\n import numpy as np\r\n\r\n print ( '' )\r\n print ( 'blowup_euler():' )\r\n print ( ' Use euler() to solve blowup_ode().' )\r\n#\r\n# Get the parameters.\r\n#\r\n t0, y0, tstop = blowup_parameters ( )\r\n\r\n f = blowup_deriv\r\n tspan = np.array ( [ t0, tstop ] )\r\n\r\n t, y = euler ( f, tspan, y0, n )\r\n\r\n print ( '' )\r\n print ( ' Number of equal steps is %d\\n', n );\r\n\r\n ye = blowup_exact ( t )\r\n#\r\n# Plot the solution curve.\r\n#\r\n plt.clf ( )\r\n plt.plot ( t, y, 'ro', linewidth = 3 )\r\n plt.plot ( t, ye, 'b-', linewidth = 3 )\r\n plt.grid ( True )\r\n plt.xlabel ( '<--- T --->' )\r\n plt.ylabel ( '<--- X(T) --->' )\r\n plt.title ( 'blowup_ode(): euler()' )\r\n plt.legend ( ( 'Computed', 'Exact' ) )\r\n filename = 'blowup_euler.png'\r\n plt.savefig ( filename )\r\n print ( ' Graphics saved as \"%s\"' % ( filename ) )\r\n plt.show ( block = False )\r\n plt.close ( )\r\n\r\n return\r\n\r\ndef blowup_exact ( t ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_exact() evaluates the exact solution of blowup_ode().\r\n#\r\n# Discussion:\r\n#\r\n# y' = y^2\r\n# dy/y^2 = dt (Separation of variables)\r\n# -1/y = t + C (Antiderivatives)\r\n# y = - 1 / ( t + C )\r\n# C = - t0 - 1/y0\r\n# y = - 1 / ( t - t0 - 1/y0 ) (Exact formula)\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 29 April 2021\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# real T(:): the evaluation times.\r\n#\r\n# Output:\r\n#\r\n# real Y(:): the exact solution values.\r\n#\r\n import numpy as np\r\n\r\n t0, y0, tstop = blowup_parameters ( )\r\n\r\n if ( y0 == 0.0 ):\r\n value = np.zeros ( t.shape )\r\n else:\r\n value = - 1.0 / ( t - t0 - 1.0 / y0 )\r\n\r\n return value\r\n\r\ndef blowup_ode_test ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_ode_test() tests blowup_ode().\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 10 November 2020\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n import platform\r\n\r\n print ( '' )\r\n print ( 'blowup_ode_test():' )\r\n print ( ' Python version: %s' % ( 
platform.python_version ( ) ) )\r\n print ( ' Test blowup_ode().' )\r\n\r\n t0, y0, tstop = blowup_parameters ( )\r\n print ( '' )\r\n print ( ' parameters:' )\r\n print ( ' t0 = ', t0 )\r\n print ( ' y0 = ', y0 )\r\n print ( ' tstop = ', tstop )\r\n\r\n n = 40\r\n blowup_euler ( n )\r\n#\r\n# Terminate.\r\n#\r\n print ( '' )\r\n print ( 'blowup_ode_test():' )\r\n print ( ' Normal end of execution.' )\r\n return\r\n\r\ndef blowup_parameters ( t0_user = None, y0_user = None, \\\r\n tstop_user = None ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_parameters() returns the parameters of blowup_ode().\r\n#\r\n# Discussion:\r\n#\r\n# If input values are specified, this resets the default parameters.\r\n# Otherwise, the output will be the current defaults.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 28 January 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# real T0_USER: the initial time.\r\n#\r\n# real Y0_USER(4): the initial condition.\r\n#\r\n# real TSTOP_USER: the final time.\r\n#\r\n# Output:\r\n#\r\n# real T0: the initial time.\r\n#\r\n# real Y0(1): the initial condition.\r\n#\r\n# real TSTOP: the final time.\r\n#\r\n import numpy as np\r\n#\r\n# Initialize defaults.\r\n#\r\n if not hasattr ( blowup_parameters, \"t0_default\" ):\r\n blowup_parameters.t0_default = 0.0\r\n\r\n if not hasattr ( blowup_parameters, \"y0_default\" ):\r\n blowup_parameters.y0_default = 1.0\r\n\r\n if not hasattr ( blowup_parameters, \"tstop_default\" ):\r\n blowup_parameters.tstop_default = 0.95\r\n#\r\n# Update defaults if input was supplied.\r\n#\r\n if ( t0_user is not None ):\r\n blowup_parameters.t0_default = t0_user\r\n\r\n if ( y0_user is not None ):\r\n blowup_parameters.y0_default = y0_user\r\n\r\n if ( tstop_user is not None ):\r\n blowup_parameters.tstop_default = tstop_user\r\n#\r\n# Return values.\r\n#\r\n t0 = blowup_parameters.t0_default\r\n y0 = blowup_parameters.y0_default\r\n tstop = blowup_parameters.tstop_default\r\n \r\n return t0, y0, tstop\r\n\r\ndef euler ( dydt, tspan, y0, n ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## euler() approximates the solution to an ODE using Euler's method.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 April 2020\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# function dydt: points to a function that evaluates the right\r\n# hand side of the ODE.\r\n#\r\n# real tspan[2]: contains the initial and final times.\r\n#\r\n# real y0[m]: an array containing the initial condition.\r\n#\r\n# integer n: the number of steps to take.\r\n#\r\n# Output:\r\n#\r\n# real t[n+1], y[n+1,m]: the times and solution values.\r\n#\r\n import numpy as np\r\n\r\n if ( np.ndim ( y0 ) == 0 ):\r\n m = 1\r\n else:\r\n m = len ( y0 )\r\n\r\n tfirst = tspan[0]\r\n tlast = tspan[1]\r\n dt = ( tlast - tfirst ) / n\r\n t = np.zeros ( n + 1 )\r\n y = np.zeros ( [ n + 1, m ] )\r\n t[0] = tspan[0]\r\n y[0,:] = y0\r\n\r\n for i in range ( 0, n ):\r\n t[i+1] = t[i] + dt\r\n y[i+1,:] = y[i,:] + dt * ( dydt ( t[i], y[i,:] ) )\r\n\r\n return t, y\r\n\r\ndef timestamp ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## timestamp() prints the date as a timestamp.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license. 
\r\n#\r\n# Modified:\r\n#\r\n# 21 August 2019\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n import time\r\n\r\n t = time.time ( )\r\n print ( time.ctime ( t ) )\r\n\r\n return\r\n\r\nif ( __name__ == '__main__' ):\r\n timestamp ( )\r\n blowup_ode_test ( )\r\n timestamp ( )\r\n\r\n","repo_name":"jjeongGrp/MathSubroutines","sub_path":"Python3/blowup_ode.py","file_name":"blowup_ode.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"15088421796","text":"#!/usr/bin/python3\n\nwith open('24.in') as f:\n parts = [i.splitlines() for i in f.read().split('inp w')[1:]]\n\nstack = []\nfor i, part in enumerate(parts):\n add = int(part[5][6:])\n if add > 0:\n stack.append((i, int(part[-3][6:])))\n continue\n show = stack.pop()\n add += show[1]\n print('decimal %d + %d = decimal %d' % (i, -1 * add, show[0]))\n","repo_name":"fridokus/advent-of-code","sub_path":"2021/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
+{"seq_id":"34168519165","text":"import argparse\nimport cpe_utils\nimport json\nimport os\nimport re\nfrom tqdm import tqdm\nimport vm_automation\n\n\ndef get_vm_server(config_file):\n if os.path.isfile(config_file):\n with open(config_file) as config_file_handle:\n config_map = json.load(config_file_handle)\n if config_map['HYPERVISOR_TYPE'].lower() == \"esxi\":\n vmServer = vm_automation.esxiServer.createFromConfig(config_map, 'esxi_automation.log')\n vmServer.connect()\n if config_map['HYPERVISOR_TYPE'].lower() == \"workstation\":\n vmServer = vm_automation.workstationServer(config_map, 'workstation_automation.log')\n return vmServer\n return None\n\n\ndef vm_as_cpe_string(vm_name):\n cpe_parts = {\n \"ubuntu\" : {\n \"vendor\" : \"canonical\",\n \"product\" : \"ubuntu_linux\",\n \"version_pattern\" : \".*ubuntu(\\d+).*\",\n \"update\" : \"\"\n },\n \"fedora\" : {\n \"vendor\" : \"fedoraproject\",\n \"product\" : \"fedora\",\n \"version_pattern\" : \".*fedora(\\d+).*\",\n \"update\" : \"\"\n },\n \"centos\" : {\n \"vendor\" : \"centos\",\n \"product\" : \"centos\",\n \"version_pattern\" : \".*centos(\\d+).*\",\n \"update\" : \"\"\n }\n }\n\n if \"x64\" in vm_name:\n arch = \"x64\"\n else:\n arch = \"x86\"\n \n vm_name = vm_name[vm_name.index(\"linux\") + len(\"linux\"):]\n os_pattern = re.compile(\"[a-z]+\")\n os_name = os_pattern.match(vm_name)\n if os_name:\n os_name = os_name.group(0)\n else: exit\n\n if os_name in cpe_parts:\n version_pattern = re.compile(cpe_parts[os_name]['version_pattern'])\n v = version_pattern.match(vm_name)\n version = v.group(1)\n\n if \"ubuntu\" in os_name:\n version = version[:2] + \".\" + version[2:]\n\n cpe_str = \":\".join([\"cpe:/o\", cpe_parts[os_name]['vendor'], cpe_parts[os_name]['product'],\n version, cpe_parts[os_name]['update'], arch])\n\n return cpe_str\n else: exit\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", \"--keyword\", help=\"VM search parameter\")\n parser.add_argument(\"-o\", \"--output\", help=\"output file location [defaults to catalog.json]\")\n parser.add_argument(\"hypervisorConfig\", help=\"json hypervisor config\")\n\n args = parser.parse_args()\n\n prefix = args.keyword\n\n catalog_file = \"catalog.json\"\n if args.output is not None:\n catalog_file = args.output\n\n vm_server = get_vm_server(config_file=args.hypervisorConfig)\n if vm_server is None:\n print (\"Failed to connect to VM environment\")\n exit(1)\n\n vm_list = []\n vm_server.enumerateVms()\n for vm in vm_server.vmList:\n if prefix in vm.vmName:\n vm_list.append(vm.vmName)\n cpe_catalog = {}\n\n if os.path.isfile(catalog_file):\n with open(catalog_file) as catalog_handle:\n cpe_catalog = json.load(catalog_handle)\n\n for name in tqdm(vm_list):\n if \"linux\" in name.lower(): \n cpe_str = vm_as_cpe_string(name.lower())\n if cpe_str:\n cpe = cpe_utils.CPE(cpe_str)\n vm_entry = {\n 'NAME': name,\n 'CPE': cpe_str,\n 'USERNAME': \"vagrant\",\n 'PASSWORD': \"vagrant\",\n 'OS': cpe.human()\n }\n cpe_catalog[vm_server.hostname + \"_\" + name] = vm_entry\n\n with open(catalog_file, \"w\") as catalog_handle:\n json.dump(cpe_catalog, catalog_handle, indent=2, sort_keys=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rapid7/metasploit-baseline-builder","sub_path":"helpers/generateLinuxCatalog.py","file_name":"generateLinuxCatalog.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"}
+{"seq_id":"11710887377","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 13:18:53 2021\n\n@author: arun\n\"\"\"\n\nimport time\nimport datetime\nimport h5py\nimport numpy as np\nfrom random import randint\n\nfrom os import listdir\nfrom os.path import isfile, join\n# import matplotlib.pyplot as plt\n# import scipy.io as sio\nimport os\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\nst_0 = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') \nstart_time_0=time.time()\n\n#works for mat file version 7.3 which is the new default.\n\n\n\nDataPath='/home/arun/Documents/MATLAB/ImageDB/PrintoutDB/DB33/'\n\nonlyfiles = [f for f in listdir(DataPath) if isfile(join(DataPath, f))]\nonlyfiles.sort()\nonlyfileslenrem=len(onlyfiles)-round(len(onlyfiles)*0.7)\nonlyfiles = onlyfiles[0:-onlyfileslenrem]\nmatfiles=[join(DataPath,f) for f in onlyfiles]\nmat_fname_ind=np.random.choice(len(matfiles),replace=False)\n\nmat_contents=h5py.File(matfiles[mat_fname_ind])\nmat_contents_list=list(mat_contents.keys())\n\nPlanCTCellRef=mat_contents['CTInfoCell']\nCTLen=np.shape(PlanCTCellRef)\nCTsl=np.zeros([CTLen[1],1])\nfor cti in range(CTLen[1]):\n CTmatsizref=mat_contents['CTInfoCell'][1,cti]\n CTLocR=mat_contents[CTmatsizref]\n CTLoc=CTLocR.value\n CTsiz=np.shape(CTLoc)\n if CTsiz[1]>300:\n CTsl[cti]=1\n else:\n CTsl[cti]=0\nCTindex=np.where(CTsl==1)\nCTindex=CTindex[0]\nCTindex=int(CTindex)\nPlanCTLocRef=mat_contents['CTInfoCell'][1, CTindex]\nPlanCTLocRef=mat_contents[PlanCTLocRef]\nPlanCTLoc=PlanCTLocRef.value\nPlanCTCellRef=mat_contents['CTInfoCell'][2, CTindex]\nPlanCTCellRef=mat_contents[PlanCTCellRef]\nPlanCT=PlanCTCellRef.value\nPlanCT=np.transpose(PlanCT,(2,1,0))\nbatch_size=10\nCTsiz1=PlanCT.shape\n# CT_rand_index=np.random.choice(CTsiz1[2],size=batch_size,replace=False)\n# batch_CT_img=np.zeros((CTsiz1[0],CTsiz1[1],len(CT_rand_index)))\n# for ri in range(len(CT_rand_index)):\n# batch_CT_img[:,:,ri]=PlanCT[:,:,CT_rand_index[ri]]\nPlanCTCellRef=mat_contents['CTInfoCell'][3, CTindex]\nPlanCTCellRef=mat_contents[PlanCTCellRef]\nPlanCTvoxel=PlanCTCellRef.value\nCBCTCellRef=mat_contents['CBCTInfocell']\nCBCLen=np.shape(CBCTCellRef)\n#Random CBCT scan selection\nCBCTi=randint(0,CBCLen[1]-1)\nCBCellRef=mat_contents['CBCTInfocell'][2, CBCTi]\nCBCellRef=mat_contents[CBCellRef]\nCBCT=CBCellRef.value\nCBCT=np.transpose(CBCT,(2,1,0))\nCBLocRef=mat_contents['CBCTInfocell'][1, CBCTi]\nCBLocRef=mat_contents[CBLocRef]\nCBCTLoc=CBLocRef.value\n#%%\n#Sequential CBCT scan selection\n# CBCTs=[]\n# for CBCTi in range(CBCLen[1]):\n# # print(CBCTi)\n# CBCellRef=mat_contents['CBCTInfocell'][4, CBCTi]\n# CBCellRef=mat_contents[CBCellRef]\n# CBCT=CBCellRef.value\n# CBCT=np.transpose(CBCT,(2,1,0))\n# CBCTs.append(CBCT)\n# CBLocRef=mat_contents['CBCTInfocell'][1, CBCTi]\n# CBLocRef=mat_contents[CBLocRef]\n# CBCTLoc=CBLocRef.value\n# CBCellRef=mat_contents['CBCTInfocell'][3, CBCTi]\n# CBCellRef=mat_contents[CBCellRef]\n# CBCTvoxel=CBCellRef.value\n# CBsiz=CBCT.shape\n# # CB_rand_pat_index=np.random.choice(CBCLen[1],size=batch_size,replace=True)\n# # batch_CB_img=np.zeros((CTsiz1[0],CTsiz1[1],len(CB_rand_pat_index)))\n# batch_CB_img=np.zeros((CTsiz1[0],CTsiz1[1],batch_size))\n# for cbi in range(batch_size):\n# CB_rand_sl_index=np.random.choice(CBsiz[2])\n# CB_rand_pat_index=np.random.choice(CBCLen[1],replace=False)\n# print(CB_rand_pat_index)\n# print(CB_rand_sl_index)\n# batch_CB_img[:,:,cbi]=CBCTs[CB_rand_pat_index][:,:,CB_rand_sl_index]\n\n\n#%% \nprint('Script started 
at')\nprint(st_0)\nruntimeN0=(time.time()-start_time_0)/60\n# runtimeN0=(time.time()-start_time_0)\nprint('Script Total Time = %s min'%(runtimeN0))\nprint('Script ended at')\nst_0 = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\nprint(st_0)","repo_name":"duraiarun-p/cycleGAN","sub_path":"cyclegan3D_1.py","file_name":"cyclegan3D_1.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71220321849","text":"# Já para o caso do sufixo ...more , poderíamos utilizar fatiamento para removê-lo. Mas, antes, é importante verificarmos se o conteúdo possui o sufixo, evitando assim perda de conteúdo de forma acidental. Vamos ver como isso funciona no arquivo teste.py .\n\nfrom parsel import Selector\nimport requests\n\n\nresponse = requests.get(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\nselector = Selector(text=response.text)\n\n# Extrai a descrição\ndescription = selector.css(\"#product_description ~ p::text\").get()\nprint(description)\n\n# \"Fatiamos\" a descrição removendo o sufixo\nsuffix = \"...more\"\nif description.endswith(suffix):\n description = description[:-len(suffix)]\nprint(description)\n","repo_name":"gusttavocaruso/trybeExercises","sub_path":"MODULO.04_computerScience/BLOCO.35_WEB & CRAWLER/35.3 - SCRAPING/DATACLEANING/teste_02.py","file_name":"teste_02.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"71275102970","text":"from dotenv import load_dotenv\nimport json\nimport os\nfrom requests_oauthlib import OAuth1Session\n\ndotenv_path = os.path.join(os.path.dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\nCONSUMER_KEY = os.environ.get('CONSUMER_KEY')\nCONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')\nACCESS_TOKEN = os.environ.get('ACCESS_TOKEN')\nACCESS_TOKEN_SECRET = os.environ.get('ACCESS_TOKEN_SECRET')\n\ntwitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\ntweet = input('Tweet: ')\nparams = {'status': tweet}\nreq = twitter.post('https://api.twitter.com/1.1/statuses/update.json', params = params)\n\nif req.status_code != 200:\n print('Tweet was failed...')\nelse:\n print('Tweet was successfull!')","repo_name":"koluku/twitwi","sub_path":"twitwi.py","file_name":"twitwi.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"26887480018","text":"from leetcode.tree.binary_tree_traversals import TreeNode\nfrom typing import Optional\n\n\nclass Solution:\n def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:\n range_sum = 0\n\n def helper(node):\n nonlocal range_sum\n if node:\n if low <= node.val <= high:\n range_sum += node.val\n if node.val > low:\n helper(node.left)\n if node.val < high:\n helper(node.right)\n\n helper(root)\n return range_sum\n\n\nif __name__ == '__main__':\n root_node1 = TreeNode(10)\n root_node1.left = TreeNode(5)\n root_node1.right = TreeNode(15)\n root_node1.left.left = TreeNode(3)\n root_node1.left.right = TreeNode(7)\n root_node1.right.left = TreeNode(13)\n root_node1.right.right = TreeNode(18)\n root_node1.left.left.left = TreeNode(1)\n root_node1.left.right.left = TreeNode(6)\n print(Solution().rangeSumBST(root_node1, 6, 10))\n","repo_name":"pk0912/ProgrammingPractice","sub_path":"leetcode/tree/binary_search_tree/range_sum.py","file_name":"range_sum.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6660269926","text":"from PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tqdm import tqdm\n\ndata_set_path = r'C:/Users/babymon/Desktop/데이터셋/사람얼굴/archive/img_align_celeba/img_align_celeba'\n\nimages = list()\n\nfor i in os.listdir(data_set_path)[0:50000]:\n old_image = Image.open(f'{data_set_path}/{i}').crop((20, 30, 160, 180)).convert('L').resize((64, 64))\n images.append(np.array(old_image))\n\n# plt.imshow(images[0])\n# plt.show()\n\n# print(images.shape)\n\n# 이미지 전처리\nimages = np.divide(images, 255)\nimages = images.reshape(50000, 64, 64, 1) # 흑백 이미지 4차원으로 증강\n# images.reshape( 5 ,)\n\nprint(images.shape)\n\n# discriminator 모델 생성\ndiscriminator = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same', input_shape=[64,64,1]),\n tf.keras.layers.LeakyReLU(alpha=0.2),\n tf.keras.layers.Dropout(0.4),\n tf.keras.layers.Conv2D(64, (3,3), strides=(2, 2), padding='same'),\n tf.keras.layers.LeakyReLU(alpha=0.2),\n tf.keras.layers.Dropout(0.4),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nnoise_shape = 100\n\n# generator 모델 생성\ngenerator = tf.keras.models.Sequential([\n tf.keras.layers.Dense(4 * 4 * 256, input_shape=(noise_shape,)),\n tf.keras.layers.Reshape((4, 4, 256)),\n tf.keras.layers.Conv2DTranspose(256, 3, strides=2, padding='same'), # upsampling2D도 찾아볼것\n tf.keras.layers.LeakyReLU(alpha=0.2),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Conv2DTranspose(128, 3, strides=2, padding='same'),\n tf.keras.layers.LeakyReLU(alpha=0.2),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Conv2DTranspose(64, 3, strides=2, padding='same'),\n tf.keras.layers.LeakyReLU(alpha=0.2),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Conv2DTranspose(1, 3, strides=2, padding='same', activation='sigmoid')\n])\n\ngenerator.summary()\n\nGAN = tf.keras.models.Sequential([generator, discriminator])\n\ndiscriminator.compile(optimizer='adam', loss='binary_crossentropy')\ndiscriminator.trainable = False\n\nGAN.compile(optimizer='adam', loss='binary_crossentropy')\n\n\ndef predict_pic(time: int, cycle: int):\n\n show_img = plt\n show_img.figure(f'{str(cycle+1)} 회차 결과')\n predict_value = generator.predict((lambda x, y : np.random.uniform(x, y, size=(20, 100)))(-1, 1))\n # print(predict_value.shape)\n for i in range(20):\n show_img.subplot(4, 5, i+1)\n show_img.imshow(predict_value[i].reshape(64, 64), cmap='gray') # 컬러면 64, 64, 3\n show_img.axis('off')\n\n show_img.tight_layout()\n show_img.show(block=False)\n show_img.pause(time)\n show_img.close()\n\n\nx_data = images\n\n\nfor i in tqdm(range(300)):\n print(f'현재 epoch {i+1}회차')\n predict_pic(5, i)\n\n for j in range(50000//128):\n if j % 100 == 0:\n print(f'현재 batch {j+1}회차')\n\n # discriminator 트레이닝\n real_images = x_data[j*128:(j+1)*128]\n real_markings = np.ones(shape=(128, 1))\n loss1 = discriminator.train_on_batch(real_images, real_markings) # 진짜 사진\n\n random_value = np.random.uniform(-1, 1, size=(128, 100))\n fake_images = generator.predict(random_value, verbose=0)\n fake_markings = np.zeros(shape=(128, 1))\n\n loss2 = discriminator.train_on_batch(fake_images, fake_markings) # 가짜 사진\n \n # real_images 와 fake_images 셔플해서 학습해보기\n\n # generator 트레이닝\n loss3 = GAN.train_on_batch(random_value, real_markings)\n\n print(f'이번 epoch 의 최종 loss discriminator loss : {loss1+2}, GAN loss : {loss3}')\n\n\n'''\n더 해봐야할 것 들\nGAN 네트워크의 layer들을 수정하고 더해보기 \n이미지를 더 사용하거나 살짝 비틀어서 집어넣어��기\nlabel 
smoothing 같은 잡기술 넣어보기 \nnoise (랜덤숫자) 다르게 설정해보기 \n요즘 GAN은 어떻게 만드나 살펴보기\n'''","repo_name":"surplusboy/machine_learning_ex","sub_path":"GAN_model_ex/tensor_a.py","file_name":"tensor_a.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18127392915","text":"from re import search\nimport csv\nimport time\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nimport json\nfrom webdriver_manager.chrome import ChromeDriverManager\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--headless\")\ndriver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options)\noutputfile = open('xyz.csv', 'w')\ncsvwriter = csv.writer(outputfile)\nwith open('amfoss.json') as f :\n data = json.loads(f.read())\nfor i in range(len(data)):\n link = []\n time.sleep(2)\n query = data[i][\"School_Name_EN\"]\n url = f\"https://www.google.com/search?q={query}\"\n driver.get(url)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n search = soup.find('div', class_=\"yuRUbf\")\n z = search.a.get('href')\n z = str(z)\n link.append(z)\n print(query)\n csvwriter.writerow(link)\n","repo_name":"Arindam200/Python_Projects","sub_path":"Projects/API projects/Google_Selenium_Searcher/Google_Search.py","file_name":"Google_Search.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"}
+{"seq_id":"7623061233","text":"n =int(input())\nlis =list(map(int,input().split()))\np =0\nv =0\nd =[]\nfor i in lis:\n if lis.count(i)==i and i not in d:\n p+=1\n v+=i\n d.append(i)\nif p==0:\n print(\"-1\")\nelse:\n s =v/p\n print(\"%.2f\"%(s))\n","repo_name":"Happy-76/codemind-python","sub_path":"Average_of_super_elements.py","file_name":"Average_of_super_elements.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"42634124013","text":"\ndef main():\n filename = input('Введите имя файла: ')\n with open(filename, 'w') as f:\n data = None\n while data != '':\n data = input('Введите строку для записи в файл или пустую строку для выхода: ')\n f.write(data + '\\n')\n\n\nmain()\n","repo_name":"AlexanderMaslikhin/python","sub_path":"lesson5/lesson5_dz1.py","file_name":"lesson5_dz1.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29442130099","text":"def suma_divisores(a):\n suma = 0\n\n for i in range(1, a):\n if a % i == 0:\n suma += i\n\n if suma == 1:\n es_primo = True\n else:\n es_primo = False\n\n return suma, es_primo\n\nif __name__ == \"__main__\":\n a = int(input(\"Ingresa un número entero positivo: \"))\n resultado, primo = suma_divisores(a)\n\n print(\"La suma de los divisores de {a} es: {resultado}\")\n print(\"El número {a} {'es primo' if primo else 'no es primo'}\")\n\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema3_ej1/tema3_ej1_dd724f9fce4a2e00b679294dc181be55.py","file_name":"tema3_ej1_dd724f9fce4a2e00b679294dc181be55.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"69824676410","text":"from logging import getLogger\nfrom typing import Any\n\nfrom aiohttp import ClientSession\n\nlog = getLogger(__name__)\n\n\nclass Aeza:\n def __init__(\n self,\n token: str | None = None,\n session: ClientSession = ClientSession(),\n http_proxy: str | None = None,\n ) -> None:\n self.session = session\n self.base_url = \"https://my.aeza.net/api/\"\n self.http_proxy = http_proxy\n\n self.headers = {}\n if token is not None:\n self.headers[\"X-API-Key\"] = token\n\n async def _request(self, method: str, url: str, **kwargs: Any) -> dict[str, Any]:\n if self.http_proxy is not None:\n kwargs[\"proxy\"] = self.http_proxy\n async with self.session.request(\n method, self.base_url + url, headers=self.headers, **kwargs\n ) as resp:\n resp.raise_for_status()\n return await resp.json()\n\n async def get_product_group_statuses(self) -> dict[int, bool]:\n out = {}\n resp = await self._request(\"GET\", \"services/products\")\n for group in resp[\"data\"][\"items\"]:\n try:\n id_ = group[\"id\"]\n status = group[\"group\"][\"payload\"].get(\"isDisabled\", False) in [\n \"true\",\n True,\n ]\n out[id_] = False if status else True\n except (KeyError, TypeError) as e:\n if group is None:\n continue\n log.debug(\n f\"Error in get_product_group_statuses, id: {group.get('id', 'ID not defined')}: {str(e)}\"\n )\n return out\n","repo_name":"cofob/aeza-assistant","sub_path":"aeza_assistant/aeza.py","file_name":"aeza.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"}
+{"seq_id":"21920846946","text":"#!/usr/bin/env python\n\n# stdlib imports\nimport re\nimport os.path\n\n# third party imports\nimport pandas as pd\nimport numpy as np\n\n# local imports\nfrom losspager.utils.exception import PagerException\nfrom losspager.utils.country import Country\n\nDEFAULT_RATE = 1.17 / 100.0\n\n\ndef adjust_pop(population, tpop, tevent, rate):\n \"\"\"Adjust input population between two input years given growth rate.\n\n :param population:\n Population starting value at time *tpop*.\n :param tpop:\n Year in which input population data was collected.\n :param tevent:\n Year to which population data should be adjusted.\n :param rate:\n Population growth rate value.\n :returns:\n Adjusted population value at time *tevent*.\n \"\"\"\n T = tpop - tevent\n adjpop = np.round(population * np.power((1 + rate), (-1 * T)))\n return adjpop\n\n\nclass PopulationGrowth(object):\n def __init__(self, ratedict, default_rate=DEFAULT_RATE):\n \"\"\"Initialize Population growth with dictionary containing rates over given time \n spans, per country. \n\n :param ratedict:\n dictionary like: {841: {'end': [1955, 1960, 1965],\n 'rate': [0.01, 0.02, 0.03],\n 'start': [1950, 1955, 1960]},\n 124: {'end': [1955, 1960, 1965],\n 'rate': [0.02, 0.03, 0.04],\n 'start': [1950, 1955, 1960]}}\n Where 841 and 842 in this case are country codes (US and Canada), and the three \"columns\" for each \n country are the year start of each time interval, the year end of each time interval, and the growth \n rates for those time intervals.\n :param default_rate:\n Value to be used for growth rate when input country codes are not found in ratedict.\n \"\"\"\n # check the fields in the ratedict\n for key, value in ratedict.items():\n if 'start' not in value or 'end' not in value or 'rate' not in value:\n raise PagerException(\n 'All country rate dictionaries must contain keys \"start\",\"end\",\"rate\"')\n if not (len(value['start']) == len(value['end']) == len(value['rate'])):\n raise PagerException(\n 'Length of start/end year arrays must match length of rate arrays.')\n self._dataframe = pd.DataFrame(ratedict)\n self._default = default_rate\n\n @classmethod\n def fromDefault(cls):\n homedir = os.path.dirname(os.path.abspath(\n __file__)) # where is this module?\n excelfile = os.path.join(\n homedir, '..', 'data', 'WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')\n return cls.fromUNSpreadsheet(excelfile)\n\n @classmethod\n def fromUNSpreadsheet(cls, excelfile, default_rate=DEFAULT_RATE):\n \"\"\"Instantiate population growth rates from UN global spreadsheet.\n http://esa.un.org/unpd/wpp/Download/Standard/Population/\n\n :param excelfile:\n Path to Excel file containing UN population growth rate data per country.\n :param default_rate:\n Value to be used for growth rate when input country codes are not found in ratedict.\n :returns:\n PopulationGrowth instance.\n \"\"\"\n re_year = '[0-9]*'\n df = pd.read_excel(excelfile, header=16)\n ratedict = {}\n starts = []\n ends = []\n for col in df.columns:\n matches = re.findall(re_year, col)\n if len(matches) and len(matches[0]):\n starts.append(int(matches[0]))\n ends.append(int(matches[2]))\n\n ccode_idx = df.columns.get_loc('Country code')\n uscode = 840\n usrates = None\n country = Country()\n for idx, row in df.iterrows():\n key = row['Country code']\n rates = row.iloc[ccode_idx + 1:].values / 100.0\n if key == uscode:\n usrates = rates.copy()\n if country.getCountry(key) is None:\n continue\n ratedict[key] = {'start': starts[:], 'end': ends[:], 'rate': rates}\n\n # 
we have three non-standard \"country\" codes for California, eastern US, and western US.\n ratedict[902] = {'start': starts[:], 'end': ends[:], 'rate': usrates}\n ratedict[903] = {'start': starts[:], 'end': ends[:], 'rate': usrates}\n ratedict[904] = {'start': starts[:], 'end': ends[:], 'rate': usrates}\n\n return cls(ratedict, default_rate=default_rate)\n\n def getRate(self, ccode, year):\n \"\"\"Return population growth rate(s) for a given country code and year.\n\n :param ccode:\n Numeric country code.\n :param year:\n Integer year to be used to find growth rate (will be between start and end years,\n or before first start year or after last end year).\n :returns:\n Scalar growth rate.\n \"\"\"\n ccode = int(ccode)\n if ccode not in self._dataframe.columns:\n return self._default\n starts = np.array(self._dataframe[ccode]['start'])\n ends = np.array(self._dataframe[ccode]['end'])\n rates = np.array(self._dataframe[ccode]['rate'])\n if year is None:\n return dict(list(zip(starts, rates)))\n if year < starts.min():\n rate = rates[0]\n elif year > ends.max():\n rate = rates[-1]\n else:\n idx = (np.abs(year - ends)).argmin()\n rate = rates[idx]\n return rate\n\n def getRates(self, ccode):\n \"\"\"Return population growth rates for a given country code.\n\n :param ccode:\n Numeric country code.\n :param year:\n Integer year to be used to find growth rate (will be between start and end years,\n or before first start year or after last end year).\n :returns:\n Tuple of two lists of (start_years,rates).\n \"\"\"\n if ccode not in self._dataframe.columns:\n raise PagerException(\n 'Country %s not found in PopulationGrowth data structure.' % ccode)\n starts = np.array(self._dataframe[ccode]['start'])\n rates = np.array(self._dataframe[ccode]['rate'])\n return (starts, rates)\n\n def adjustPopulation(self, population, ccode, tpop, tevent):\n \"\"\"Adjust population based on growth rates.\n\n :param population:\n Number of people.\n :param ccode:\n Numeric country code.\n :param tpop:\n Year of population data collection.\n :param tevent:\n Year to which population data should be adjusted from tpop.\n :returns:\n Population adjusted for growth rates in years between tpop and tevent. \n \"\"\"\n if tpop == tevent:\n return population\n if tpop < tevent:\n interval = 1\n else:\n interval = -1\n newpop = population\n for startpop in np.arange(tpop, tevent, interval):\n endpop = startpop + interval\n rate = self.getRate(ccode, startpop)\n newpop = adjust_pop(newpop, startpop, endpop, rate)\n\n return newpop\n","repo_name":"mhearne-usgs/pager","sub_path":"losspager/models/growth.py","file_name":"growth.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
+{"seq_id":"30124623447","text":"# While we can manually send and receive data over HTTP using the socket library,\n# there is a much simpler way to perform this common task in Python by using the\n# urllib library.\n\n# Using urllib, you can treat a web page much like a file. You simply indicate\n# which web page you would like to retrieve and urllib handles all of the HTTP\n# protocol and header details. The following is equivalent to 12.2:\n\nimport urllib.request\n\nfhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')\nfor line in fhand:\n print(line.decode().strip())\n\n# As an example, we can write a program to retrieve the data for romeo.txt and\n# compute the frequency of each word in the file as follows:\n\nfileOpen = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')\n\ncounts = dict()\nfor line in fileOpen:\n words = line.decode().split()\n for word in words:\n counts[word] = counts.get(word, 0) + 1\nprint(counts)\n\n# Refer to urllib documentation for more functionality:\n# https://docs.python.org/3/library/urllib.html","repo_name":"kylev114/PY4E","sub_path":"Chapter 12 Network Programs/12.4_urllibLibrary.py","file_name":"12.4_urllibLibrary.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"72101227768","text":"from gi.overrides import GLib\n\nimport src.globals\nfrom src.api.api import send_p2p_message, send_p2g_message, get_group_name, get_group_member_num, get_contact_list, \\\n is_contact_group, get_nickname_by_id, get_p2p_messages_after_time, get_p2g_messages_after_time\n\nfrom src.utils import get_cached_user_id, append_cached_group_list, is_id_in_group_cache, \\\n append_to_cached_contact_list, get_cached_contact_list, get_cached_selected_contact_id, get_text_buffer_cache\nfrom src.utils.common_utils import write_log\nfrom src.utils.message_utils import *\n\n\ndef send_p2p_message_worker(receiver_id, content):\n send_p2p_message(receiver_id, content)\n\n\ndef send_group_message_worker(group_id, content):\n send_p2g_message(group_id, content)\n\n\ndef init_local_storage():\n contact_id_list = get_contact_list(get_cached_user_id())\n write_log(\"contact_list: \" + str(contact_id_list))\n for contact_id in contact_id_list:\n is_group_id = is_contact_group(contact_id)\n if is_group_id:\n write_log(\"group_id: \" + str(contact_id))\n append_cached_group_list(contact_id)\n for contact_id in contact_id_list:\n local_latest_message_time = get_local_latest_message_time(contact_id)\n if not is_id_in_group_cache(contact_id):\n p2p_messages_after_time = get_p2p_messages_after_time(contact_id, local_latest_message_time)\n for message in p2p_messages_after_time:\n append_message_storage(contact_id, False, message[\"content\"], message[\"create_time\"])\n latest_message = get_local_latest_message(contact_id)\n if latest_message is not None:\n local_latest_message_time = get_local_latest_message_time(contact_id)\n else:\n latest_message = {'message_content': \"\", 'is_sender': False, 'sent_time': int(round(time.time()*1000))}\n local_latest_message_time = latest_message['sent_time']\n nickname = get_nickname_by_id(contact_id)\n append_to_cached_contact_list(contact_id, nickname, latest_message['message_content'], local_latest_message_time)\n else:\n p2g_message_after_time = get_p2g_messages_after_time(contact_id, local_latest_message_time)\n for message in p2g_message_after_time:\n append_group_message_storage(contact_id, False, message[\"content\"], message[\"create_time\"], message[\"sender_name\"])\n latest_message = get_local_latest_message(contact_id)\n if latest_message is not None:\n local_latest_message_time = get_local_latest_message_time(contact_id)\n else:\n latest_message = {'message_content': \"\", 'is_sender': False, 'sent_time': int(round(time.time()*1000))}\n local_latest_message_time = latest_message['sent_time']\n group_name = get_group_name(contact_id)\n append_to_cached_contact_list(contact_id, group_name, latest_message['message_content'], local_latest_message_time)\n write_log(\"拉取消息成功 return\")\n\n\ndef init_chat_window(chat_window):\n write_log(\"开始初始化聊天窗口\")\n \"\"\"\n chat窗口的初始化工作\n 1.读取cache里的联系人列表\n 2.读取cache里的消息记录\n 3.生成联系人ContactItem和对应聊天记录列表的映射关系\n 4.将联系人列表和聊天记录列表添加到chat窗口的对应容器中\n \"\"\"\n contact_list = get_cached_contact_list()\n \"\"\"从本地cache中取出所有的本地联系人列表\"\"\"\n for contact in contact_list:\n is_selected = False\n contact_nickname = contact['nickname']\n contact_id = contact['contact_id']\n contact_sent_time = contact['sent_time']\n contact_last_message = contact['last_message']\n message_list = get_stored_messages(contact_id)\n\n \"\"\"如果用户上一次使用过程中选中的是该联系人,则在打开chat窗口时,将该联系人的聊天记录列表显示出来,并将字体small化,以凸显选中\"\"\"\n if get_cached_selected_contact_id() == contact_id:\n is_selected = True\n \"\"\"将本地消息记录列表填入聊天记录列表容器中\"\"\"\n if 
is_id_in_group_cache(contact_id):\n for message in message_list:\n chat_window.insert_group_message(message['message_content'], message['is_sender'], message['sender_name'])\n group_name = get_group_name(contact_id)\n member_num = get_group_member_num(contact_id)\n chat_window.message_header_bar.set_title(group_name + \" (\" + str(member_num) + \")\")\n else:\n for message in message_list:\n chat_window.insert_message(message[\"message_content\"], message[\"is_sender\"])\n \"\"\"将联系人昵称填入Header Bar里\"\"\"\n chat_window.message_header_bar.set_title(contact_nickname)\n\n \"\"\"读入上次退出程序,text buffer中的内容\"\"\"\n text = get_text_buffer_cache(contact_id)\n chat_window.text_box.get_buffer().set_text(text)\n chat_window.insert_contact(contact_nickname, contact_last_message, contact_sent_time, contact_id, is_selected)\n write_log(\"拉取消息成功,show窗口\")\n\n\ndef __insert_message_from_contact(chat_window, contact_id, sent_time, message_content):\n \"\"\"\n 本函数用于接收到消息后,将消息插入到聊天记录json中\n :param chat_window: Gtk.Window\n :param contact_id: 向当前用户发消息的联系人\n :param sent_time: 消息��送的时间戳,13位毫秒级UNIX时间戳\n :param message_content: 消息内容\n \"\"\"\n append_message_storage(contact_id, False, message_content, sent_time)\n is_selected = False\n if src.globals.LAST_SELECTED_CONTACT.contact_id == contact_id:\n is_selected = True\n GLib.idle_add(chat_window.insert_message, message_content, False)\n GLib.idle_add(src.globals.LAST_SELECTED_CONTACT.update_contact, message_content, sent_time, is_selected)\n chat_window.scroll_flag = not chat_window.scroll_flag\n\n\ndef __insert_message_from_group(chat_window, group_id, sender_id, sender_name, sent_time, message_content):\n \"\"\"\n 本函数用于接收到消息后,将消息插入到聊天记录json中\n :param chat_window: Gtk.Window\n :param group_id: 向当前用户发消息的群组\n :param sender_id: 消息发送者\n :param sender_name: 消息发送者昵称\n :param sent_time: 消息发送的时间戳,13位毫秒级UNIX时间戳\n :param message_content: 消息内容\n \"\"\"\n GLib.idle_add()\n print(\"insert message from group\")\n append_group_message_storage(group_id, False, message_content, sent_time, sender_name)\n write_log(\"appended to cache\")\n is_selected = False\n if src.globals.LAST_SELECTED_CONTACT.contact_id == group_id:\n is_selected = True\n GLib.idle_add(chat_window.insert_group_message, message_content, False, sender_name)\n GLib.idle_add(src.globals.LAST_SELECTED_CONTACT.update_contact, message_content, sent_time, is_selected)\n chat_window.scroll_flag = not chat_window.scroll_flag\n\n\ndef parse_p2p_msg_api_result(chat_window, msg_list):\n write_log(\"parse_p2p_msg_api_result\"+str(msg_list))\n for message in msg_list:\n __insert_message_from_contact(chat_window,\n message[\"senderID\"],\n message[\"create_time\"],\n message[\"content\"])\n\n\ndef parse_p2g_msg_api_result(chat_window, msg_list):\n write_log(\"parse_p2g_msg_api_result\")\n write_log(str(isinstance(msg_list, list)))\n for message in msg_list:\n __insert_message_from_group(chat_window,\n message[\"groupID\"],\n message[\"senderID\"],\n message[\"sender_name\"],\n message[\"create_time\"],\n message[\"content\"])\n","repo_name":"xiaoheng86/avo-chat-client","sub_path":"src/controllers/chat_controller.py","file_name":"chat_controller.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27444089510","text":"from collections import deque\n\ndq = deque()\ndq.append(0)\ndq.append(1)\ndq.append(2)\ndq.appendleft(-1)\ndq.appendleft(-2)\n\nfor i in dq:\n print(i, end=\" \")\nprint()\n\ndq.pop()\nfor i in dq:\n print(i, end=\" \")\nprint()\n\ndq.popleft()\nfor i in dq:\n print(i, end=\" \")","repo_name":"rkskek1226/Algorithm","sub_path":"Data_Structure/Linear_DS/DoubleEndedQueue.py","file_name":"DoubleEndedQueue.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"35347522062","text":"from unittest import TestCase\n\nfrom IntersectionOfTwoLinkedLists import IntersectionOfTwoLinkedLists, ListNode\n\n\nclass TestIntersectionOfTwoLinkedLists(TestCase):\n def test_getIntersectionNode(self):\n i = IntersectionOfTwoLinkedLists()\n\n self.assertIsNone(i.getIntersectionNode(None, None))\n\n node345 = ListNode(3)\n node345.next = ListNode(4)\n node345.next.next = ListNode(5)\n\n self.assertIsNone(i.getIntersectionNode(node345, ListNode(6)))\n\n node12345 = ListNode(1)\n node12345.next = ListNode(2)\n node12345.next.next = node345\n\n self.assertEqual(i.getIntersectionNode(node12345, node345), node345)\n","repo_name":"TonnyL/Windary","sub_path":"Python/IntersectionOfTwoLinkedListsTest.py","file_name":"IntersectionOfTwoLinkedListsTest.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"77"}
+{"seq_id":"2506353047","text":"'''\n19943:图的拉普拉斯矩阵(matrix)\nhttp://cs101.openjudge.cn/practice/19943/\n\n'''\nnode, edge = [int(i) for i in input().split()]\nmatrix = []\nfor i in range(node):\n matrix.append([0] * node)\nfor fake_i in range(edge):\n i, j = [int(i) for i in input().split()]\n matrix[i][i] += 1\n matrix[j][j] += 1\n matrix[i][j] = -1\n matrix[j][i] = -1\n\nfor i in range(node):\n print(*matrix[i], sep=' ')","repo_name":"forxhunter/ComputingIntro","sub_path":"solutions/cs101_openjudge/19943.py","file_name":"19943.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"5545848540","text":"# Name of csv file for storing data\nCSV_FILE = \"phonebook.csv\"\n\n# Names of table columns\nHEADER_FIELDS = [\n \"Last name\",\n \"First Name\",\n \"Middle Name\",\n \"Company\",\n \"Phone (work)\",\n \"Phone (cell)\",\n]\n\n# Widths of colums (for print formating)\nTOTAL_WIDTH = 130\n\n# Minimum number of contacts to initiate paged output\nPAGED_OUT_THRESHOLD = 10\n","repo_name":"kgdpete2022/phonebook","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29333421819","text":"#Conversión de Decimal a Binario\n# Entrada\nnumero_decimal = float(input(\"Ingrese un numero: \"))\n\nnumero_binario = 0\nmultiplicador = 1\n\n# Procesamiento \nwhile numero_decimal != 0: # paso 3\n # pasos 1, 4 y 5 se multiplica el módulo por su multiplicador\n numero_binario = numero_binario + numero_decimal % 2 * multiplicador\n numero_decimal //= 2 # paso 1\n multiplicador *= 10 # paso 5\n\n# Salida\nprint(\"Resultado =\", numero_binario) ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej4/hito1_ej4_128398e4fa0d6b009b3a8f8b495f8dc2.py","file_name":"hito1_ej4_128398e4fa0d6b009b3a8f8b495f8dc2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"34499609805","text":"import pickle, time\nfrom sys import stdin, stdout, stderr\nfrom collections import OrderedDict\nimport numpy as np\nimport theano as th\nimport theano.tensor as T\n\n\nclass ModelParams:\n \"\"\"Base class for RNN variants.\n NOTE: Not intended to be instantiated!\n \"\"\"\n # Parameter matrix names and ordering\n # Defined by model subclass\n pnames = []\n\n def __init__(self, hyper, epoch=0, pos=0, pvalues=None):\n self.hyper = hyper\n self.epoch = epoch\n self.pos = pos\n\n if not pvalues:\n pvalues = self._build_p()\n\n # Initialize shared variables\n\n # Create parameter dicts\n # OrderedDict used to keep paramater access deterministic throughout\n self.params = OrderedDict()\n self.mparams = OrderedDict()\n\n # Load parameter matrices and create rmsprop caches\n for n in self.pnames:\n self.params[n] = th.shared(name=n, value=pvalues[n].astype(th.config.floatX))\n self.mparams['m'+n] = th.shared(name='m'+n, value=np.zeros_like(pvalues[n]).astype(th.config.floatX))\n\n # Build Theano generation functions\n self._built_g = False\n self._built_t = False\n self._build_g()\n\n # Model-specific definitions of parameters, forward propagation, regularization, state initialization\n def _build_p(self):\n pass\n def _forward_step(self, x_t, s_t):\n pass\n def _weight_cost(self, reg_lambda):\n pass\n def freshstate(self, batchsize):\n pass\n\n # Theano-generated model-dependent functions\n def gen_chars(self, *args, **kwargs):\n pass\n def gen_chars_max(self, *args, **kwargs):\n pass\n def train_step_bat(self, *args, **kwargs):\n pass\n def errs_bat(self, *args, **kwargs):\n pass\n def err_bat(self, *args, **kwargs):\n pass\n def grad_bat(self, *args, **kwargs):\n pass\n\n # Cross-model definitions of generation functions\n def _build_g(self):\n \"\"\"Build Theano graph and define generation functions.\"\"\"\n\n stdout.write(\"Compiling generation functions...\")\n stdout.flush()\n time1 = time.time()\n\n # Local binding for convenience\n forward_step = self._forward_step\n\n ### SEQUENCE GENERATION ###\n\n x_in = T.vector('x_in')\n x_seq = T.matrix('x_seq')\n s_in = T.matrix('s_in')\n k = T.iscalar('k')\n temperature = T.scalar('temperature')\n\n rng = T.shared_randomstreams.RandomStreams(seed=(int(time.time()) % 1000000000))\n\n # Generate output sequence based on input single onehot and given state.\n\n # Main version:\n # Chooses output char by multinomial, and feeds back in for next step.\n # Scaled by temperature parameter before softmax (temperature 1.0 leaves\n # softmax output unchanged).\n # Returns matrix of one-hot vectors.\n def generate_step(x_t, s_t, temp):\n # Do next step\n o_t1, s_t1 = forward_step(x_t, s_t)\n\n # Get softmax\n o_ts = T.nnet.softmax(o_t1 / temp)[-1]\n\n # Randomly choose by multinomial distribution\n o_rand = rng.multinomial(n=1, pvals=o_ts, dtype=th.config.floatX)\n\n return o_rand, s_t1\n\n [o_chs, s_chs], genupdate = th.scan(\n fn=generate_step,\n outputs_info=[dict(initial=x_in), dict(initial=s_in)],\n non_sequences=temperature,\n n_steps=k)\n s_ch = s_chs[-1]\n\n self.gen_chars = th.function(\n inputs=[k, x_in, s_in, th.Param(temperature, default=0.5)], \n outputs=[o_chs, s_ch], \n name='gen_chars', \n updates=genupdate)\n\n # Alternate version:\n # As above, but chooses output char by argmax, and feeds back in.\n def generate_step_max(x_t, s_t):\n # Do next step\n o_t1, s_t1 = forward_step(x_t, s_t)\n\n # Get softmax\n o_ts = T.nnet.softmax(o_t1)[-1]\n\n # Now find selected index\n o_idx = T.argmax(o_ts)\n\n # Create 
one-hot\n o_ret = T.zeros_like(o_ts)\n o_ret = T.set_subtensor(o_ret[o_idx], 1.0)\n\n return o_ret, s_t1\n\n [o_chms, s_chms], _ = th.scan(\n fn=generate_step_max,\n outputs_info=[dict(initial=x_in), dict(initial=s_in)],\n n_steps=k)\n s_chm = s_chms[-1]\n\n self.gen_chars_max = th.function(\n inputs=[k, x_in, s_in], \n outputs=[o_chms, s_chm], \n name='gen_chars_max')\n\n # Sequence processing without generation:\n # Input is onehot-encoded string, output is sequence\n # of predictions and states at each step. Useful for\n # direct comparisons of output probabilities and \n # per-neuron activations\n def process_step(x_t, s_t, temp):\n # Do next step\n o_t1, s_t1 = forward_step(x_t, s_t)\n\n # Get softmax\n o_ts = T.nnet.softmax(o_t1 / temp)[-1]\n\n return o_ts, s_t1\n\n [o_seq, s_seq], _ = th.scan(\n fn=process_step,\n outputs_info=[None, dict(initial=s_in)],\n sequences=x_seq,\n non_sequences=temperature)\n\n self.seq_process = th.function(\n inputs=[x_seq, s_in, th.Param(temperature, default=0.5)],\n outputs=[o_seq, s_seq],\n name='seq_process')\n\n # And done!\n time2 = time.time()\n stdout.write(\"done!\\nCompilation took {0:.3f} s.\\n\\n\".format(time2 - time1))\n stdout.flush()\n self._built_g = True\n\n # Cross-model definitions of training functions\n def _build_t(self):\n \"\"\"Build Theano graph and define training functions.\"\"\"\n\n stdout.write(\"Compiling training functions...\")\n stdout.flush()\n time1 = time.time()\n\n # Local bindings for convenience\n forward_step = self._forward_step\n reg_cost = self._reg_cost\n\n # Scalar training parameters\n learnrate = T.scalar('learnrate')\n decayrate = T.scalar('decayrate')\n reg_lambda = T.scalar('reg_lambda')\n\n ### BATCH-SEQUENCE TRAINING ###\n\n # Batch inputs\n x_bat = T.tensor3('x_bat')\n y_bat = T.tensor3('y_bat')\n s_in_bat = T.tensor3('s_in_bat')\n\n # Step function\n def batch_step(x_t, y_t, s_t):\n o_t1, s_t = forward_step(x_t, s_t)\n # We can use the whole matrix from softmax for batches\n o_ts = T.nnet.softmax(o_t1)\n return o_ts, s_t\n\n [o_bat, s_seq_bat], _ = th.scan(\n batch_step, \n sequences=[x_bat, y_bat], \n truncate_gradient=self.hyper.bptt_truncate,\n outputs_info=[None, dict(initial=s_in_bat)])\n s_out_bat = s_seq_bat[-1]\n\n # We have to reshape the outputs, since Theano's categorical cross-entropy\n # function will only work with matrices or vectors, not tensor3s.\n # Thus we flatten along the sequence/batch axes, leaving the prediction\n # vectors as-is, compute cross-entropy, then reshape the errors back to \n # their proper dimensions.\n o_bat_flat = T.reshape(o_bat, (o_bat.shape[0] * o_bat.shape[1], -1))\n y_bat_flat = T.reshape(y_bat, (y_bat.shape[0] * y_bat.shape[1], -1))\n o_errs_bat = T.nnet.categorical_crossentropy(o_bat_flat, y_bat_flat)\n o_errs_res = T.reshape(o_errs_bat, (o_bat.shape[0], o_bat.shape[1]))\n\n # Next, we reshuffle to group sequences together instead\n # of batches, then sum the individual sequence errors.\n # (Hopefully Theano's auto-differentials follow this...)\n o_errs_shuf = o_errs_res.dimshuffle(1, 0)\n o_errs_sums = T.sum(o_errs_shuf, axis=1)\n # Regularization term (without averaging over samples (done outside Theano)).\n # reg_cost() defined per-model.\n reg_sum = reg_cost(reg_lambda)\n # Final cost (with regularization):\n cost_bat = T.sum(o_errs_sums) + reg_sum\n\n # Gradients\n dparams_bat = [ T.grad(cost_bat, p) for p in self.params.values() ]\n\n # rmsprop parameter updates\n uparams_bat = [ decayrate * mp + (1 - decayrate) * dp ** 2 for mp, dp in 
zip(self.mparams.values(), dparams_bat) ]\n\n # Gather updates\n train_updates_bat = OrderedDict()\n # Apply rmsprop updates to parameters\n for p, dp, up in zip(self.params.values(), dparams_bat, uparams_bat):\n train_updates_bat[p] = p - learnrate * dp / T.sqrt(up + 1e-6)\n # Update rmsprop caches\n for mp, up in zip(self.mparams.values(), uparams_bat):\n train_updates_bat[mp] = up\n\n # Batch training step function\n self.train_step_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, \n th.Param(learnrate, default=0.001), \n th.Param(decayrate, default=0.95),\n th.Param(reg_lambda, default=0.0)],\n outputs=s_out_bat,\n updates=train_updates_bat,\n name='train_step_bat')\n\n ### ERROR CHECKING ###\n\n # Mostly for internal debug, returns unsummed error tensor and regularization cost\n self.errs_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, th.Param(reg_lambda, default=0.0)], \n outputs=[o_errs_res, reg_sum, s_out_bat])\n\n # Full error sum, not averaged over sample size (done in outer non-Theano func)\n self.err_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, th.Param(reg_lambda, default=0.0)], \n outputs=[cost_bat, s_out_bat])\n\n # Gradient calculations\n # We'll use this at some point for gradient checking\n self.grad_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, th.Param(reg_lambda, default=0.0)], \n outputs=dparams_bat)\n\n ### Whew, I think we're done! ###\n time2 = time.time()\n stdout.write(\"done!\\nCompilation took {0:.3f} s.\\n\\n\".format(time2 - time1))\n stdout.flush()\n self._built_t = True\n\n @classmethod\n def loadfromfile(cls, infile):\n \"\"\"Load model parameters from file and rebuild model.\"\"\"\n\n with np.load(infile) as f:\n # Extract hyperparams and position\n p = f['p']\n hparams = pickle.loads(p.tobytes())\n hyper, epoch, pos = hparams['hyper'], hparams['epoch'], hparams['pos']\n\n # Load matrices\n pvalues = { n:f[n] for n in cls.pnames }\n\n # Create instance\n if isinstance(infile, str):\n stdout.write(\"Loaded model parameters from {0}\\n\".format(infile))\n stdout.write(\"Rebuilding model...\\n\")\n model = cls(hyper, epoch, pos, pvalues)\n\n return model\n\n def savetofile(self, outfile):\n \"\"\"Save model parameters to file.\"\"\"\n\n # Pickle non-matrix params into bytestring, then convert to numpy byte array\n pklbytes = pickle.dumps({'hyper': self.hyper, 'epoch': self.epoch, 'pos': self.pos}, \n protocol=pickle.HIGHEST_PROTOCOL)\n p = np.fromstring(pklbytes, dtype=np.uint8)\n\n # Gather parameter matrices and names\n pvalues = { n:m.get_value() for n, m in self.params.items() }\n\n # Now save params and matrices to file\n try:\n np.savez_compressed(outfile, p=p, **pvalues)\n except OSError as e:\n raise e\n else:\n if isinstance(outfile, str):\n stdout.write(\"Saved model parameters to {0}\\n\".format(outfile))\n\n def calc_loss(self, dataset, startpos=0, batchsize=16, num_examples=0, init_state=None):\n \"\"\"Calculates average cross-entropy loss over given batchsize.\"\"\"\n\n # First build training functions if not already done\n if not self._built_t:\n self._build_t()\n\n step_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(batchsize)\n\n if batchsize < 1:\n raise NotImplementedError(\"Single-sequence training is no longer available.\")\n\n data_len = dataset.batchepoch(batchsize)\n valid_len = num_examples if num_examples else data_len\n errors = np.zeros(valid_len)\n\n # Use explicit indexing instead of fancy slicing so we can \n # roll over properly\n data_pos = startpos\n for valid_pos in 
range(valid_len):\n xbatch, ybatch = dataset.batch(data_pos, batchsize)\n errors[valid_pos], step_state = self.err_bat(xbatch, ybatch, step_state, self.hyper.regcost)\n data_pos += 1\n # Advance position and overflow\n if data_pos >= data_len:\n data_pos = 0\n # Roll state vector on batch axis, to keep continuity\n step_state = np.roll(step_state, 1, axis=1)\n\n # Return total loss divided by number of characters in sample\n return np.sum(errors).item() / float(valid_len * batchsize * dataset.seq_len)\n\n def train(self, dataset, batchsize=16, num_examples=0, callback_every=1000, callback=None, init_state=None):\n \"\"\"Train model on given dataset for num_examples, with optional \n batch size.\n\n Optional callback function called after callback_every, with \n model and current state as arguments.\n\n If num_examples is 0, will train for full epoch.\n \"\"\"\n\n # Batched training only\n if batchsize < 1:\n raise NotImplementedError(\"Single-sequence training is no longer available.\")\n\n # First build training functions if not already done\n if not self._built_t:\n self._build_t()\n\n input_len = dataset.batchepoch(batchsize)\n train_len = num_examples if num_examples else input_len\n\n # Start with fresh state if none provided\n step_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(batchsize)\n\n # Debug\n # print(\"Training with batchsize {0:d}, state shape {1}\".format(batchsize, repr(step_state.shape)))\n\n # Use explicit indexing instead of fancy slicing so we can \n # keep track, both for model status and checkpoint purposes\n for train_pos in range(train_len):\n # Learning step\n xbatch, ybatch = dataset.batch(self.pos, batchsize)\n step_state = self.train_step_bat(xbatch, ybatch, step_state, \n self.hyper.learnrate, self.hyper.decay, self.hyper.regcost)\n\n # Advance position and overflow\n self.pos += 1\n if self.pos >= input_len:\n self.epoch += 1\n self.pos = 0\n # Roll state vector on batch axis, to keep continuity\n step_state = np.roll(step_state, 1, axis=1)\n\n # Optional callback\n if callback and callback_every and (train_pos + 1) % callback_every == 0:\n # Make sure to only pass a slice of state if batched\n callback(self, step_state[:,0,:])\n\n # Return final state\n return step_state\n\n def traintime(self, dataset, batchsize=16, pos=0, init_state=None):\n \"\"\"Prints time for batch training step (default size 16).\"\"\"\n\n # First build training functions if not already done\n if not self._built_t:\n self._build_t()\n\n # Fresh state\n start_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(batchsize)\n\n # Get slice\n xbatch, ybatch = dataset.batch(pos, batchsize)\n\n # Time training step\n time1 = time.time()\n self.train_step_bat(xbatch, ybatch, start_state, \n self.hyper.learnrate, self.hyper.decay, self.hyper.regcost)\n time2 = time.time()\n\n stdout.write(\n \"Time for SGD/RMS learning batch of {0:d} sequences, {1:d} chars each: {2:.4f} ms\\n\".format(\n xbatch.shape[1], xbatch.shape[0], (time2 - time1) * 1000.0))\n\n # Time loss calc step\n time1 = time.time()\n self.err_bat(xbatch, ybatch, start_state, self.hyper.regcost)\n time2 = time.time()\n\n stdout.write(\"Time for loss calculation step of {0:d} chars: {1:.4f} ms\\n\".format(\n xbatch.shape[0], (time2 - time1) * 1000.0))\n\n def genchars(self, charset, numchars, init_state=None, seedch=None, \n print_seed=True, use_max=False, temperature=0.5):\n \"\"\"Generate string of characters from current model parameters.\n\n If use_max is True, will 
select most-likely character at each step.\n\n Probabilities can be optionally scaled by temperature during generation\n if use_max=False. \n \"\"\"\n\n # Fresh state\n start_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(0)\n\n # Seed given or random character to start (as one-hot)\n if seedch:\n seedidx = charset.idxofchar(seedch)\n else:\n try:\n seedidx = charset.semirandomidx()\n except AttributeError:\n seedidx = charset.randomidx()\n\n seedvec = charset.onehot(seedidx)\n\n # Get generated sequence\n if use_max:\n idxs, end_state = self.gen_chars_max(numchars, seedvec, start_state)\n else:\n idxs, end_state = self.gen_chars(numchars, seedvec, start_state, temperature)\n\n # Convert to characters\n chars = [ charset.charatidx(np.argmax(i)) for i in idxs ]\n\n # Now construct string\n if print_seed:\n retstr = charset.charatidx(np.argmax(seedvec))\n else:\n retstr = ''\n return retstr + \"\".join(chars), end_state\n\n\n","repo_name":"rneilson/rngru","sub_path":"rn_rnn_model.py","file_name":"rn_rnn_model.py","file_ext":"py","file_size_in_byte":17624,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"}
+{"seq_id":"73872087607","text":"import numpy as np\r\nimport array\r\nclass myArray(array.array):\r\n arraymember1 = np.array([1,3,4,5])\r\n arraymember2 = np.array([2,4,5,6])\r\n def array_addition(self):\r\n resultarray = self.arraymember1 + self.arraymember2\r\n print(\"This array addition function returns the result array \\n\")\r\n return resultarray\r\n\r\narrayObj = myArray('u')\r\narrayObj.arraymember1 = np.array([[1,2,3,4],[34,54,36,67]])\r\narrayObj.arraymember2 = np.array([[2,38,95,26],[32,23,89,75]])\r\nresultarray = arrayObj.array_addition()\r\nprint(resultarray)","repo_name":"PelluriDeepthi/PelluriDeepthi","sub_path":"ArrayWrapper.py","file_name":"ArrayWrapper.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"4621951901","text":"# coding:utf-8\nfrom enum import Enum\n\nfrom qfluentwidgets import (qconfig, QConfig, ConfigItem, OptionsConfigItem, BoolValidator,\n OptionsValidator, RangeConfigItem, RangeValidator,\n FolderListValidator, EnumSerializer, FolderValidator)\n\n\n\nclass Language(Enum):\n \"\"\" Language enumeration \"\"\"\n\n CHINESE_SIMPLIFIED = \"zh\"\n CHINESE_TRADITIONAL = \"hk\"\n ENGLISH = \"en\"\n AUTO = \"Auto\"\n\n\nclass Config(QConfig):\n \"\"\" Config of application \"\"\"\n\n # folders\n musicFolders = ConfigItem(\n \"Folders\", \"LocalMusic\", [], FolderListValidator())\n downloadFolder = ConfigItem(\n \"Folders\", \"Download\", \"app/download\", FolderValidator())\n\n # main window\n dpiScale = OptionsConfigItem(\n \"MainWindow\", \"DpiScale\", \"Auto\", OptionsValidator([1, 1.25, 1.5, 1.75, 2, \"Auto\"]), restart=True)\n language = OptionsConfigItem(\n \"MainWindow\", \"Language\", Language.AUTO, OptionsValidator(Language), EnumSerializer(Language), restart=True)\n\n # software update\n checkUpdateAtStartUp = ConfigItem(\"Update\", \"CheckUpdateAtStartUp\", True, BoolValidator())\n\n\nYEAR = 2023\nAUTHOR = \"软盘驱动程序\"\nVERSION = \"v0.1.1\"\nHELP_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml\"\nREPO_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml\"\nFEEDBACK_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml/issues\"\nRELEASE_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml/releases/latest\"\n\n\ncfg = Config()\nqconfig.load('app/config/config.json', cfg)","repo_name":"clean-master/stable-diffusion-webui-launcher-directml","sub_path":"app/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
+{"seq_id":"22084417882","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, HttpResponseRedirect, HttpResponse, redirect\nfrom django.contrib import messages\nfrom ..users.models import User\nfrom .models import Book, Review, Author\nimport bcrypt\n\n# Create your views here.\ndef add(request):\n authors = Author.objects.all()\n context = {\n 'authors': authors,\n }\n return render(request, 'books/new.html', context)\ndef create(request):\n errors = Book.objects.book_validator(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error, extra_tags=tag)\n return redirect('/books/add')\n else:\n if request.POST['author'] > 0:\n author = Author.objects.get(id = request.POST['author'])\n else:\n name = request.POST['new_author']\n author = Author.objects.create(name = name)\n title = request.POST['title']\n review = request.POST['review']\n rating = request.POST['rating']\n id = request.session['id']\n reviewer = User.objects.get(id = id)\n book = Book.objects.create(title = title, author = author)\n r = Review.objects.create(stars = rating, review = review, reviewer = reviewer, book = book)\n return redirect('/dashboard')\ndef book(request, book_id):\n book = Book.objects.get(id = book_id)\n context = {\n 'id': request.session['id'],\n 'book': book,\n 'reviews': Review.objects.filter(book = book),\n }\n return render(request, 'books/book.html', context)\ndef review(request):\n book_id = request.POST['book_id']\n review = request.POST['review']\n rating = request.POST['rating']\n id = request.session['id']\n reviewer = User.objects.get(id = id)\n book = Book.objects.get(id = book_id)\n Review.objects.create(stars = rating, review = review, reviewer = reviewer, book = book)\n return redirect('/dashboard')\ndef delete(request, review_id):\n review = Review.objects.get(id = review_id)\n review.delete()\n return redirect('/dashboard')","repo_name":"marmegh/enigma","sub_path":"beltreviewer/apps/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"270301356","text":"# Silver 5_1436\n\n# 종말의 숫자란 어떤 수에 6이 적어도 3개이상 연속으로 들어가는 수를 말한다.\n# 제일 작은 종말의 숫자는 666이고, 그 다음으로 큰 수는 1666, 2666, 3666, .... 과 같다.\n\n# 따라서, 숌은 첫 번째 영화의 제��은 세상의 종말 666,\n# 두 번째 영화의 제목은 세상의 종말 1666 이렇게 이름을 지을 것이다.\n# 일반화해서 생각하면, N번째 영화의 제목은 세상의 종말 (N번째로 작은 종말의 숫자) 와 같다.\n# 숌이 만든 N번째 영화의 제목에 들어간 숫자를 출력하는 프로그램을 작성하시오.\n# 숌은 이 시리즈를 항상 차례대로 만들고, 다른 영화는 만들지 않는다.\n\nn = int(input())\nc = 0\nstart = 666\nwhile True:\n if '666' in str(start):\n c += 1\n if c == n:\n print(start)\n break\n start += 1","repo_name":"chaerui7967/Today_I_Learned","sub_path":"Baekjoon/movie_director_shom_210717.py","file_name":"movie_director_shom_210717.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"28905014989","text":"# coding: utf-8\nimport datetime\nfrom flask import Flask, redirect\n\napp = Flask(__name__)\n\n@app.route('/today')\ndef today():\n return redirect(\n 'http://show-time.xyz/{}.html'.format(datetime.date.today().strftime('%Y%m%d')))\n\n@app.route('/tommorow')\ndef tommorow():\n date = datetime.date.today() + datetime.timedelta(days=1)\n return redirect(\n 'http://show-time.xyz/{}.html'.format(date.strftime('%Y%m%d')))\n\nif __name__ == '__main__':\n app.run(port=9997)\n","repo_name":"maruchanman/__band_app","sub_path":"back/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"16022419445","text":"#input de que pedem para o usuário informar um número inteiro\nnumber1 = input(\"Informe um primeiro número inteiro: \")\nnumber2 = input(\"Informe o segundo número inteiro: \")\nnumber3 = input(\"Informe o terceiro número inteiro: \")\n\n# execução da primeiro cálculo pedido na questão\nproduct = ((int(number1) * 2) * (int(number2) / 2)) + int(number3)\nprint(int(product))\n\n# execução do segundo cálculo pedido na questão\nsoma = (int(number1) * 3 + int(number3)) * int(number2) \nprint(soma)","repo_name":"kaynann/PYTHON","sub_path":"aula10.18/desafio07.py","file_name":"desafio07.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"17551942735","text":"# coding=utf-8\nimport time\nimport numpy as np\nimport logging\nimport os\nimport tensorflow as tf\nfrom tensorflow.contrib import slim\n\nfrom db_config import cfg\n\nimport lib.networks.model as model\nfrom lib.networks.losses import compute_loss, compute_acc\nfrom lib.dataset.dataloader import get_batch\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef make_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\ndef tower_loss(images, gt_score_maps, gt_threshold_map, gt_score_mask,\n gt_thresh_mask, reuse_variables):\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):\n binarize_map, threshold_map, thresh_binary = model.model(images, is_training=True)\n\n model_loss = compute_loss(binarize_map, threshold_map, thresh_binary,\n gt_score_maps, gt_threshold_map, gt_score_mask, gt_thresh_mask)\n\n total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n\n # add summary\n if reuse_variables is None:\n tf.summary.image('gt/input_imgs', images)\n tf.summary.image('gt/score_map', gt_score_maps)\n tf.summary.image('gt/threshold_map', gt_threshold_map * 255)\n tf.summary.image('gt/score_mask', gt_score_mask)\n tf.summary.image('gt/thresh_mask', gt_thresh_mask)\n\n tf.summary.image('pred/binarize_map', binarize_map)\n tf.summary.image('pred/threshold_map', threshold_map * 255)\n tf.summary.image('pred/thresh_binary', thresh_binary)\n\n tf.summary.scalar('model_loss', model_loss)\n tf.summary.scalar('total_loss', total_loss)\n\n return total_loss, model_loss, binarize_map, threshold_map, thresh_binary\n\n\ndef average_gradients(tower_grads):\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n grads = []\n for g, _ in grad_and_vars:\n expanded_g = tf.expand_dims(g, 0)\n grads.append(expanded_g)\n\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n\n return average_grads\n\n\ndef _train_logger_init():\n \"\"\"\n 初始化log日志\n :return:\n \"\"\"\n train_logger = logging.getLogger('train')\n train_logger.setLevel(logging.DEBUG)\n\n # 添加文件输出\n log_file = os.path.join(cfg[\"TRAIN\"][\"TRAIN_LOGS\"], time.strftime('%Y%m%d%H%M', time.localtime(time.time())) + '.logs')\n file_handler = logging.FileHandler(log_file, mode='w')\n file_handler.setLevel(logging.DEBUG)\n file_formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n file_handler.setFormatter(file_formatter)\n train_logger.addHandler(file_handler)\n\n # 添加控制台输出\n consol_handler = logging.StreamHandler()\n consol_handler.setLevel(logging.DEBUG)\n consol_formatter = logging.Formatter('%(message)s')\n consol_handler.setFormatter(consol_formatter)\n train_logger.addHandler(consol_handler)\n return train_logger\n\n\ndef main():\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = cfg.TRAIN.VIS_GPU\n if not tf.gfile.Exists(cfg[\"TRAIN\"][\"CHECKPOINTS_OUTPUT_DIR\"]):\n tf.gfile.MkDir(cfg[\"TRAIN\"][\"CHECKPOINTS_OUTPUT_DIR\"])\n\n train_logger = _train_logger_init()\n\n input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')\n input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')\n input_threshold_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_threshold_maps')\n\n input_score_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_masks')\n 
input_threshold_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_threshold_masks')\n\n global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)\n\n learning_rate = tf.train.exponential_decay(cfg[\"TRAIN\"][\"LEARNING_RATE\"], global_step, decay_steps=10000,\n decay_rate=0.94, staircase=True)\n\n if cfg.TRAIN.OPT == 'adam':\n # learning_rate = tf.constant(cfg[\"TRAIN\"][\"LEARNING_RATE\"], tf.float32)\n opt = tf.train.AdamOptimizer(learning_rate)\n elif cfg.TRAIN.OPT == 'momentum':\n opt = tf.train.MomentumOptimizer(learning_rate, 0.9)\n else:\n assert 0, 'error optimzer'\n print('use ', cfg.TRAIN.OPT)\n\n # add summary\n tf.summary.scalar('learning_rate', learning_rate)\n\n gpus = [str(i) for i in range(len(cfg.TRAIN.VIS_GPU.split(',')))]\n input_images_split = tf.split(input_images, len(gpus))\n input_score_maps_split = tf.split(input_score_maps, len(gpus))\n input_threshold_maps_split = tf.split(input_threshold_maps, len(gpus))\n input_score_masks_split = tf.split(input_score_masks, len(gpus))\n input_threshold_masks_split = tf.split(input_threshold_masks, len(gpus))\n\n\n tower_grads = []\n reuse_variables = None\n total_binarize_acc = 0\n total_thresh_binary_acc = 0\n for i, gpu_id in enumerate(gpus):\n print('gpu_id', gpu_id)\n with tf.device('/gpu:' + gpu_id):\n with tf.name_scope('model_' + gpu_id) as scope:\n gt_imgs = input_images_split[i]\n gt_scores = input_score_maps_split[i]\n gt_thresholds = input_threshold_maps_split[i]\n gt_score_masks = input_score_masks_split[i]\n gt_threshold_masks = input_threshold_masks_split[i]\n total_loss, model_loss, binarize_map, threshold_map, thresh_binary = \\\n tower_loss(gt_imgs, gt_scores, gt_thresholds, gt_score_masks, gt_threshold_masks, reuse_variables)\n binarize_acc, thresh_binary_acc = compute_acc(binarize_map, threshold_map, thresh_binary,\n gt_scores, gt_thresholds, gt_score_masks, gt_threshold_masks)\n total_binarize_acc += binarize_acc\n total_thresh_binary_acc += thresh_binary_acc\n reuse_variables = True\n\n batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))\n\n grads = opt.compute_gradients(total_loss)\n tower_grads.append(grads)\n\n grads = average_gradients(tower_grads)\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n avg_binarize_acc = total_binarize_acc / 2.0\n avg_thresh_binary_acc = total_thresh_binary_acc / 2.0\n\n summary_op = tf.summary.merge_all()\n\n variable_averages = tf.train.ExponentialMovingAverage(cfg[\"TRAIN\"][\"MOVING_AVERAGE_DECAY\"], global_step)\n\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):\n train_op = tf.no_op(name='train_op')\n\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=cfg.TRAIN.SAVE_MAX)\n\n\n train_logs_dir = os.path.join(cfg.TRAIN.TRAIN_LOGS, 'train')\n val_logs_dir = os.path.join(cfg.TRAIN.TRAIN_LOGS, 'val')\n\n make_dir(train_logs_dir)\n make_dir(val_logs_dir)\n\n train_summary_writer = tf.summary.FileWriter(train_logs_dir, tf.get_default_graph())\n val_summary_writer = tf.summary.FileWriter(val_logs_dir, tf.get_default_graph())\n\n\n init = tf.global_variables_initializer()\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n try:\n\n if cfg[\"TRAIN\"][\"RESTORE\"]:\n train_logger.info('continue training from previous checkpoint')\n ckpt = 
tf.train.get_checkpoint_state(cfg[\"TRAIN\"][\"RESTORE_CKPT_PATH\"])\n train_logger.info('restore model path:', ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n train_logger.info(\"done\")\n elif cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"] is not None:\n sess.run(init)\n print(cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"])\n train_logger.info('load pretrain model:{}', str(cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"]))\n variable_restore_op = slim.assign_from_checkpoint_fn(cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"],\n slim.get_trainable_variables(),\n ignore_missing_vars=True)\n variable_restore_op(sess)\n train_logger.info(\"done\")\n\n else:\n sess.run(init)\n except:\n assert 0, 'load error'\n\n train_data_generator = get_batch(num_workers=cfg.TRAIN.NUM_READERS,\n img_dir=cfg.TRAIN.IMG_DIR,\n label_dir=cfg.TRAIN.LABEL_DIR,\n batchsize=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(gpus))\n\n val_data_generator = get_batch(num_workers=10,\n img_dir=cfg.EVAL.IMG_DIR,\n label_dir=cfg.EVAL.LABEL_DIR,\n batchsize=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(gpus))\n\n test_data_generator = get_batch(num_workers=1,\n img_dir=cfg.EVAL.IMG_DIR,\n label_dir=cfg.EVAL.LABEL_DIR,\n batchsize=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),\n is_eval=True)\n\n test_epoch = 0\n\n start = time.time()\n for step in range(cfg[\"TRAIN\"][\"MAX_STEPS\"]):\n train_data = next(train_data_generator)\n\n train_feed_dict = {input_images: train_data[0],\n input_score_maps: train_data[1],\n input_threshold_maps: train_data[3],\n input_score_masks: train_data[2],\n input_threshold_masks: train_data[4]}\n\n ml, tl, _ = sess.run([model_loss, total_loss, train_op], feed_dict=train_feed_dict)\n if np.isnan(tl):\n train_logger.info('Loss diverged, stop training')\n break\n\n if step % 10 == 0:\n avg_time_per_step = (time.time() - start) / 10\n avg_examples_per_second = (10 * cfg[\"TRAIN\"][\"BATCH_SIZE_PER_GPU\"] * len(gpus)) / (time.time() - start)\n start = time.time()\n train_logger.info(\n '{}->Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'.format(\n cfg.TRAIN.VERSION, step, ml, tl, avg_time_per_step, avg_examples_per_second))\n\n if step % cfg[\"TRAIN\"][\"SAVE_CHECKPOINT_STEPS\"] == 0:\n saver.save(sess, os.path.join(cfg[\"TRAIN\"][\"CHECKPOINTS_OUTPUT_DIR\"],\n 'DB_' + cfg.BACKBONE + '_' + cfg.TRAIN.VERSION + '_model.ckpt'),\n global_step=global_step)\n\n if step % cfg[\"TRAIN\"][\"SAVE_SUMMARY_STEPS\"] == 0:\n _, tl, train_summary_str = sess.run([train_op, total_loss, summary_op], feed_dict=train_feed_dict)\n train_summary_writer.add_summary(train_summary_str, global_step=step)\n\n val_data = next(val_data_generator)\n val_feed_dict = {input_images: val_data[0],\n input_score_maps: val_data[1],\n input_threshold_maps: val_data[3],\n input_score_masks: val_data[2],\n input_threshold_masks: val_data[4]}\n eval_summary_str = sess.run(summary_op, feed_dict=val_feed_dict)\n\n val_summary_writer.add_summary(eval_summary_str, global_step=step)\n\n if step % cfg.EVAL.TEST_STEP == 0 and step != 0:\n temp_epoch = test_epoch\n train_logger.info('~~~~~~~~~~~~~~~~~~start to test~~~~~~~~~~~~~~~~~~~~~')\n avg_bc = []\n avg_tbc = []\n while temp_epoch==test_epoch:\n test_data = next(test_data_generator)\n test_feed_dict = {input_images: test_data[0],\n input_score_maps: test_data[1],\n input_threshold_maps: test_data[3],\n input_score_masks: test_data[2],\n input_threshold_masks: test_data[4]}\n test_epoch = test_data[5]\n bc, tbc = sess.run([avg_binarize_acc, avg_thresh_binary_acc],\n 
feed_dict=test_feed_dict)\n\n avg_bc.append(bc)\n avg_tbc.append(tbc)\n\n train_logger.info('avg binarize acc is :{}'.format(sum(avg_bc)/len(avg_bc)))\n train_logger.info('avg thresh binary acc is :{}'.format(sum(avg_tbc)/len(avg_tbc)))\n\n\nif __name__ == '__main__':\n\n main()\n\n","repo_name":"iamrishab/DB-tf","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13145,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"}
+{"seq_id":"20727963815","text":"\"\"\"\nRelated paras for concepts collector\nmainly used for pre-processing\n\"\"\"\nfrom typing import Callable\nimport attr\nfrom HistoMIL import logger\nfrom HistoMIL.DATA.Slide.concepts.WholeSlideImage import WholeSlideImage \n\nfrom HistoMIL.DATA.Database.data_aug import naive_transforms,only_naive_transforms\n\n\n##############################################################################\n# para for slide\n##############################################################################\n\n@attr.s(auto_attribs=True)\nclass SlideParas(object):\n \n folder:str=None\n fname:str = None\n\n##############################################################################\n# para for tissue\n##############################################################################\n@attr.s(auto_attribs=True)\nclass TissueParas(object):\n \"\"\"\n include all paras for tissue concepts in pre-processing and usage\n \"\"\"\n seg_level:int = 0 # level for segment tissue mask\n min_seg_level:int = None # min level for segment tissue mask if chose fast mode\n\n ref_patch_size:int = 256 # reference patch size for tissue mask\n\n #------> parameters for blurring\n mthresh:int = 7 # paras for Apply median blurring\n\n #------> parameters for otsu\n use_otsu:bool = True\n sthresh:int = 20 \n sthresh_up:int = 255\n\n \n #------> Morphological closing\n close:int = 0\n\n #------> parameters for contours in mask2contours()\n filter_params:dict = {'a_t':100,'a_h': 16, 'max_n_holes':8}\n\n # if there is more than one contours, exclude option:default empty list\n to_contours:bool = True\n exclude_ids:list = []\n keep_ids:list = []\n \n #------> create a name for instance\n name:str = f\"tissue_{seg_level}_otsu_{use_otsu}_contours_{to_contours}\"\n\ndef set_min_seg_level(tissue_para:TissueParas,slide:WholeSlideImage,\n min_seg_level:int=None):\n \"\"\"\n get minimum seg level for tissue mask\n \"\"\"\n if min_seg_level is None:\n tissue_para.seg_level = len(slide.meta.level_dims)-1\n else:\n tissue_para.seg_level = min(len(slide.meta.level_dims)-1,min_seg_level)\n logger.info(f\"TissuePara:: set min_seg_level to {tissue_para.seg_level},in {slide.meta.level_dims} \")\n return tissue_para\n\n##############################################################################\n# para for patch\n##############################################################################\n@attr.s(auto_attribs=True)\nclass PatchParas(object):\n \"\"\"\n include all paras for patch concepts in pre-processing and usage\n \"\"\"\n #------> parameters for patch\n patch_level:int = 0 # level for patch\n patch_size = (512,512) # patch size\n step_size:int = 512 # step size for patch\n\n #------> parameters for patch extraction\n from_contours:bool = True # extract patches from contours otherwise from tissue mask\n # debug: set mp to 1 to avoid not solved error \n mp_processor:int = 1 # number of processors for multiprocessing\n #------> parameters for patch extraction function \n contour_fn_name:str = \"four_pt\" # function name for contour extraction\n use_padding:bool = True # whether padding\n top_left = None # top left point for patch extraction area\n bot_right = None # bot right point for patch extraction area\n\n #------> name for instance\n name:str = f\"patch({patch_level})_size({patch_size[0]})_step({step_size})_contours({contour_fn_name})\"\n\n\n##############################################################################\n# para for 
feature\n##############################################################################\n@attr.s(auto_attribs=True)\nclass FeatureParas(object):\n    \"\"\"\n    include all paras for feature concepts in pre-processing and usage\n    \"\"\"\n    #------> parameters for feature encoder\n    model_name:str = \"resnet18\"\n\n    model_instance = None\n    img_size = None\n    out_dim = None\n    #-----> for inference part \n\n    device:str = \"cuda\"\n    trans:Callable = only_naive_transforms\n    \n    batch_size:int = 32\n\n    #------> parameters for cluster\n    cluster_nb:int = 200\n    with_semantic_shifts:bool = False\n\n##############################################################################\n# para for collector\n##############################################################################\n@attr.s(auto_attribs=True)\nclass CollectorParas(object):\n    \"\"\"\n    include all paras for collector concepts in pre-processing and usage\n    \"\"\"\n    #------> parameters for collector\n    slide:SlideParas = SlideParas() # get instance of slide paras\n    tissue:TissueParas = TissueParas() # get instance of tissue paras\n    patch:PatchParas = PatchParas() # get instance of patch paras\n    feature:FeatureParas = FeatureParas()\n\nDEFAULT_CONCEPT_PARAS = CollectorParas()\n\n","repo_name":"secrierlab/HistoMIL","sub_path":"EXP/paras/slides.py","file_name":"slides.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"}
+{"seq_id":"26906770008","text":"from pathlib import Path\nimport toml\nimport cv2\nfrom utils import CameraParam, LensUndistorter, ImageSaver, LensUndistorterWithKroi\n\n# Load Pathes\nBASE_DIR = Path(__file__).resolve().parent.parent\nDATA_DIR = Path(BASE_DIR, \"data\")\nCFG_PARAM_PATH = str(Path(DATA_DIR, \"camera_param.toml\"))\nRGB_IMAGE_PATH = str(Path(DATA_DIR, \"rgb_img.png\"))\nRESULT_SAVE_DIR = str(Path(BASE_DIR, \"results\"))\nresult_saver = ImageSaver(RESULT_SAVE_DIR)\n\n# Get config file and rgb image\ndict_param = toml.load(open(CFG_PARAM_PATH))\nrgb_img = cv2.imread(RGB_IMAGE_PATH)\n\n# Get camera parameter\ncamera_param = CameraParam.from_dict(dict_param[\"Rgb\"])\nK_rgb = camera_param.intrinsic_matrix\nD_rgb = camera_param.distortion\nimage_height, image_width = camera_param.size\n\n# Image Correction\nlens_undistorter = LensUndistorter(K_rgb, D_rgb, image_width, image_height)\nlens_undistorter_roi = LensUndistorterWithKroi(K_rgb, D_rgb, image_width, image_height)\nrgb_img_undistorted = lens_undistorter.correction(rgb_img)\nrgb_img_undistorted_roi = lens_undistorter_roi.correction(rgb_img)\n\n\nresult_saver.save_image(\"raw_image.png\", rgb_img)\nresult_saver.save_image(\"rgb_img_undistorted.png\", rgb_img_undistorted)\nresult_saver.save_image(\"rgb_img_undistorted_roi.png\", rgb_img_undistorted_roi)\n","repo_name":"yuki-inaho/test_getOptimalNewCameraMatrix","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"3492854108","text":"import json\nimport logging\nimport os\nimport tarfile\nfrom io import TextIOWrapper\nfrom typing import IO\nfrom typing import Iterable\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nfrom . import convert\nfrom . import siteinfo as si\n\nlog = logging.getLogger(__name__)\n\n\ndef replace_extensions(path: str, new_exts: Iterable = ()) -> str:\n \"\"\"\n >>> replace_extensions(\"/a/b/c/dump.njson.tar.gz\")\n '/a/b/c/dump'\n >>> replace_extensions(\"dump.njson.tar.gz\")\n 'dump'\n >>> replace_extensions(\"dump.njson.tar.gz\", new_exts=[\"slob\"])\n 'dump.slob'\n >>> replace_extensions(\"/a/b/c/dump.njson.tar.gz\", new_exts=[\"siteinfo\", \"json\"])\n '/a/b/c/dump.siteinfo.json'\n \"\"\"\n basename = os.path.basename(path)\n dirname = os.path.dirname(path)\n noext, *_ = basename.split(os.path.extsep)\n return os.path.join(dirname, os.path.extsep.join((noext, *new_exts)))\n\n\ndef get_outname(args):\n outname = args.output_file\n if outname is None:\n basename = os.path.basename(args.dump_file[0])\n outname = replace_extensions(basename, [\"slob\"])\n return outname\n\n\ndef get_siteinfo(args):\n siteinfo_path = args.siteinfo\n if not siteinfo_path:\n siteinfo_path = replace_extensions(args.dump_file, [\"siteinfo\", \"json\"])\n\n with open(siteinfo_path) as siteinfo_file:\n siteinfo_dict = json.load(siteinfo_file)\n\n return siteinfo_dict\n\n\ndef parse_loc_spec(s: str) -> Tuple[int, int]:\n if \":\" in s:\n fileno, lineno = s.split(\":\")\n return int(fileno), int(lineno)\n return 1, int(s)\n\n\ndef articles(\n dump_files: Sequence[str],\n info: si.Info,\n start_line_spec: str = \"1:1\",\n end_line_spec: Optional[str] = None,\n html_encoding=\"utf-8\",\n remove_embedded_bg=\"\",\n ensure_ext_image_urls=True,\n) -> Iterable[convert.ConvertParams]:\n\n start_file, start_line = parse_loc_spec(start_line_spec)\n if end_line_spec:\n end_file, end_line = parse_loc_spec(end_line_spec)\n else:\n end_file, end_line = None, None\n\n for dump_file in dump_files:\n dump_file = os.path.expanduser(dump_file)\n print(f\"Reading articles from ${dump_file}\")\n files: Iterable[Union[TextIOWrapper, IO[bytes]]] = []\n\n if dump_file.endswith(\".tar.gz\") or dump_file.endswith(\".tar\"):\n if dump_file.endswith(\".tar.gz\"):\n tar = tarfile.open(dump_file, \"r:gz\")\n else:\n tar = tarfile.open(dump_file, \"r\")\n ctx_manager = tar\n files = (\n f for f in (tar.extractfile(member) for member in tar) if f is not None\n )\n else:\n ctx_manager = open(dump_file)\n files = [ctx_manager]\n\n with ctx_manager:\n for k, f in enumerate(files):\n file_number = k + 1\n if file_number < start_file:\n continue\n if end_file and file_number > end_file:\n break\n for i, line in enumerate(f):\n line_number = i + 1\n j = 0\n if line_number < start_line:\n if i % 1000 == 0:\n print(\".\", end=\"\", flush=True)\n j += 1\n if j % 50 == 0:\n print(flush=True)\n j = 0\n continue\n if end_line and line_number > end_line:\n break\n try:\n data = json.loads(line)\n html = data[\"article_body\"][\"html\"]\n title = data[\"name\"]\n redirects = data.get(\"redirects\", ())\n aliases = [r[\"name\"] for r in redirects]\n print(f\"{file_number}:{line_number} {title} ({len(html)})\")\n yield convert.ConvertParams(\n title=title,\n aliases=aliases,\n text=html,\n rtl=info.rtl,\n server=info.server,\n articlepath=\"./\", # TODO needs to be arg?\n site_articlepath=info.articlepath,\n encoding=html_encoding,\n 
remove_embedded_bg=remove_embedded_bg,\n ensure_ext_image_urls=ensure_ext_image_urls,\n )\n except:\n log.exception(f\"Failed to read line {i}\")\n","repo_name":"itkach/mw2slob","sub_path":"mw2slob/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"77"}
+{"seq_id":"26132631056","text":"# Create a string and save im a variable\noriginal_string = input(\"Please, insert a sentence: \")\n\n# Use loop to extract and alternate case in the string\nnew_alt_char_string = \"\"\n\n# Use enumerate to access the indexes and control better the item/case alternation\nfor index, item in enumerate(original_string):\n if index % 2 == 0:\n new_alt_char_string = new_alt_char_string + item.lower()\n else:\n new_alt_char_string = new_alt_char_string + item.upper()\n\nprint(new_alt_char_string)\n\n# With the same string but making each alternative word lower and upper case\nsplit_string = original_string.split(\" \")\nnew_alt_word_string = [] # Split converts a string into an array\n\nfor index, item in enumerate(split_string):\n if index % 2 == 0:\n new_alt_word_string.append(item.lower()) # Use .append to manipulate the array\n else:\n new_alt_word_string.append(item.upper())\n\nprint(\" \".join(new_alt_word_string)) # Use .join to include the empty spaces","repo_name":"tmitidieri/python-projects-hyperion-training","sub_path":"T17/alternative.py","file_name":"alternative.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74027833529","text":"import re\nimport numpy as np\nimport gensim\nimport requests\nimport json\nfrom scipy import spatial\n\ndata = []\nwith open('./avas_list.txt') as inputfile:\n for line in inputfile:\n data.append(line)\n\nprint(\"Loaded function data\")\n\nmodel = gensim.models.KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True, limit=500000)\nprint(\"Loaded model\")\nindex2word_set = set(model.wv.index2word)\n\ndef avg_feature_vector(sentence, model, num_features, index2word_set):\n words = sentence.split()\n feature_vec = np.zeros((num_features, ), dtype='float32')\n n_words = 0\n for word in words:\n if word in index2word_set:\n n_words += 1\n feature_vec = np.add(feature_vec, model[word])\n if (n_words > 0):\n feature_vec = np.divide(feature_vec, n_words)\n return feature_vec\n\ndef make_list(name):\n words = []\n if('_' in name): #if snake case\n name = name.lower()\n words = name.split('_')\n else: #identify if camel case\n word = \"\"\n for c in name:\n if(c.islower()):\n word +=c\n else:\n words.append(word)\n word = \"\"\n word += c.lower()\n words.append(word)\n return words\n\ndef make_sentence(words):\n sentence = \"\"\n for w in words:\n sentence += w\n sentence += \" \"\n return sentence[:-1]\n\ndef similarity_sentences(s1, s2):\n s1_afv = avg_feature_vector(s1, model=model, num_features=300, index2word_set=index2word_set)\n s2_afv = avg_feature_vector(s2, model=model, num_features=300, index2word_set=index2word_set)\n sim = 1 - spatial.distance.cosine(s1_afv, s2_afv)\n return sim\n\n#s1 = make_sentence(make_list('remove'))\n#s2 = make_sentence(make_list('delete'))\n#print(similarity_sentences(s1,s2))\n\n\ndef camel_to_snake(name):\n list = make_list(name)\n new_name = \"\"\n for w in list:\n new_name += w\n new_name += \"_\"\n return new_name[:-1]\n\ndef snake_to_camel(name):\n list = make_list(name)\n new_name = \"\"\n pp = False\n for w in list:\n c = w[0].upper() if pp else w[0].lower()\n pp = True\n new_name += c\n new_name += w[1:]\n return new_name\n\ndef change_case(name):\n if('_' in name): #this is snake\n return snake_to_camel(name)\n return camel_to_snake(name)\n\n\ndef find_synonyms(word):\n\n #dev\n return [\"sum\",\"total\",\"append\"]\n\n p = make_sentence(make_list(word))\n s_list = []\n\n\n #r = requests.get('https://wordsapiv1.p.mashape.com/words/'+word+'/synonyms'\n # , headers={\"x-rapidapi-host\": \"wordsapiv1.p.rapidapi.com\",\n # \t\"x-rapidapi-key\": \"\"} )\n\n #print(json.loads(r.content))\n #synonym_list = json.loads(r.content)['synonyms']\n\n r = requests.get('https://words.bighugelabs.com/api/2/3d61b2dab0e22df66fd693006de7a367/'+word+'/json')\n\n j = json.loads(r.content)\n synonym_list = []\n for (key,val) in j.items():\n if('syn' in val.keys()):\n synonym_list += val['syn']\n #synonym_list = j['noun']['syn'] + j['verb']['syn']\n\n for s in synonym_list:\n if(s.count(' ')>0):\n continue\n p1 = make_sentence(make_list(s))\n sim = similarity_sentences(p,p1)\n obj = ( s, sim)\n if(not np.isnan(sim)):\n s_list.append(obj)\n #print(s_list)\n\n s_list.sort(key = lambda synonym: synonym[1] )\n firsts = [t[0] for t in s_list]\n return firsts[-3:]\n\ndef getReplacementsName(name):\n #for each word in the name, get the replacements\n words = make_list(name)\n replace_dict = []\n for w in words:\n w_replacements = find_synonyms(w)\n w_replacements.append(w)\n w_replacements = list(set(w_replacements))\n replace_dict.append(w_replacements)\n\n a = replace_dict[0]\n for b in replace_dict[1:]:\n o = []\n 
for ia in a:\n for ib in b:\n o.append(ia+\"_\"+ib)\n a = o\n ca = a.copy()\n for poss in a:\n ca.append(change_case(poss))\n return ca\n\n\ndef extractName(regex):\n i = regex.find(\"def\")\n before = regex[:(i+4)]\n\n i += 4\n name = \"\"\n while True:\n name+=regex[i]\n i+=1\n if(i>=len(regex)):\n break\n if(i 0.7):\n fast_regex += '|('+d_before+d_def+d_after+')'\n found = True\n #if(not found):\n # print(\"Nothing good and fast\")\n #else:\n # print(\"GOOD:\",fast_regex)\n #look for the synonyms\n r = replaceFunctionNames(regex) + '|'+fast_regex\n return r\n\n#print(extractName(\"def delete_selected\"))\nprint(lookup('def removeSelected'))\n#print(lookup('somestuff def base64ToInt\\([a-z]*\\): func'))\n#print(lookup('somestuff def checkErr: func'))\n#print(lookup('somestuff def add_one[a-z]*: func'))\n#print(replaceFunctionNames('somestuff def addOne\\(\\): func'))\n","repo_name":"avaspataru/hackcambridge101","sub_path":"phrase_similarity.py","file_name":"phrase_similarity.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"32748885174","text":"import os, csv\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lab1.settings')\n\nimport django\n\ndjango.setup()\n\nfrom films.models import Movie, Genre, Tag, Rating\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nMOVIES_DIR = os.path.join(BASE_DIR, 'lab1/data/movies.csv')\nTAGS_DIR = os.path.join(BASE_DIR, 'lab1/data/tags.csv')\nRATINGS_DIR = os.path.join(BASE_DIR, 'lab1/data/ratings.csv')\nLINKS_DIR = os.path.join(BASE_DIR, 'lab1/data/links.csv') \n\n\nmovies = csv.reader(open(MOVIES_DIR), delimiter=',')\ntags = csv.reader(open(TAGS_DIR), delimiter=',')\nratings = csv.reader(open(RATINGS_DIR), delimiter=',')\nlinks = csv.reader(open(LINKS_DIR), delimiter=',')\n\n\n# for n in range(1, 100): # movieId,title,genres\n# # movie = Movie.objects.create(\n# # movieID=movies[n][0],\n# # title=movies[n][1],\n# # )\n# movie = Movie()\n# movie.movieID = movies[n][0]\n# movie.title = movies[n][1]\n# movie.save()\n# genres = movies[n][2].split('|')\n# for g in genres:\n# genre, created = Genre.objects.get_or_create(name=g)\n# if not created:\n# genre.save()\n# movie.genres.add(genre)\n\n# for n in range(1, 100):\n# movie = Movie.objects.get(movieID=links[n][0])\n# # print links[n][1], links[n][2]\n# movie.imdbId = links[n][1]\n# movie.tmdbId = links[n][2]\n# movie.save()\n \n\n\nfor row in movies: # movieId,title,genres\n if row[0] != 'movieId':\n movie = Movie()\n movie.movieID = row[0]\n movie.title = row[1]\n movie.save()\n \n genres = row[2].split('|')\n# for g in genres:\n# genre = addGenre(g)\n# movie.genres.add(genre)\n for g in genres:\n genre, created = Genre.objects.get_or_create(name=g)\n if not created:\n genre.save()\n movie.genres.add(genre)\n\nfor row in links: # movieId,imdbId,tmdbId\n if row[0] != 'movieId':\n movie = Movie.objects.get(movieID=row[0])\n if row[1] != '':\n movie.imdbId = row[1]\n if row[2] != '':\n movie.tmdbId = row[2]\n movie.save()\n\n\nfor row in tags: # userId,movieId,tag,timestamp\n if row[0] != 'userId':\n tag = Tag()\n tag.content = row[2]\n tag.movie = Movie.objects.get(movieID=row[1])\n tag.save()\n\nfor row in ratings: # userId,movieId,rating,timestamp\n if row[0] != 'userId':\n rating = Rating()\n rating.rate = row[2]\n rating.movie = Movie.objects.get(movieID=row[1])\n rating.save()\n\n\n\n\n \n \n\n","repo_name":"vicrosa25/AII","sub_path":"populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"4170946500","text":"from typing import List\nfrom fastapi import Depends, HTTPException,Request\nfrom sqlalchemy.orm import Session \nfrom Database import get_db\nfrom Database.models import models\nfrom responsables.Schemas.Create import EstanciaCreate\nfrom uuid import uuid4\nfrom fastapi_jwt_auth import AuthJWT\n\nclass EstanciaController:\n\n def __init__(self, db:Session = Depends(get_db),AuthJWT:AuthJWT = Depends()):\n self.db = db\n self.auth_jwt = AuthJWT\n\n async def get_estancias(self):\n self.auth_jwt.jwt_required()\n user_data = self.auth_jwt.get_raw_jwt()\n data = self.db.query(models.Estancia) \\\n .filter(models.Estancia.re.any(models.Responsable.id_responsable == user_data.get(\"id_responsable\"))) \\\n .order_by(models.Estancia.fecha_ingreso.desc()).all()\n if not data:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n return data\n\n \n async def get_estancia(self,id_estancia:int):\n self.auth_jwt.jwt_required()\n #self.auth_jwtjwt_optional()\n user_data = self.auth_jwt.get_raw_jwt()\n \n data = self.db.query(models.Estancia) \\\n .filter(models.Estancia.re.any(models.Responsable.id_responsable == user_data.get(\"id_responsable\"))) \\\n .filter(models.Estancia.id_estancia == id_estancia) \\\n .first()\n if not data:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n return data\n\n async def create_estancia(self,estancia: EstanciaCreate):\n identificador = uuid4()\n data = estancia.dict()\n data['identificador'] = identificador.hex\n db_item = models.Estancia(**data)\n self.db.add(db_item)\n self.db.commit()\n\n \n \n\n\n","repo_name":"devlfx/SalaBackend","sub_path":"responsables/Controllers/EstanciaController.py","file_name":"EstanciaController.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"9526591241","text":"import socket\r\nimport select\r\n\r\n# function section\r\n\r\n\r\ndef reliable_send(message, ip):\r\n global received, sock_send, sock_receive\r\n sock_receive.bind((UDP_IP_r_proxy, UDP_PORT_r_proxy))\r\n sock_receive.setblocking(0)\r\n sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n received = 2 # 0 just send 1 receive ok 2 time out/send\r\n callSend = 1\r\n fragment = 0\r\n if len(message) > 6500:\r\n callSend = int(len(message) / 6500) + 1\r\n fragment = 1 # 1 moreFragment 0 o.w\r\n for x in range(0, callSend):\r\n start = x * 6500\r\n end = (x + 1) * 6500\r\n print(callSend)\r\n if x == callSend - 1:\r\n fragment = 0\r\n FragmentedMESSAGE = str(x) + '*' + str(fragment) + '*' + MESSAGE[start: end] + '*' + str(\r\n ip) + \"*\" + make_parity(MESSAGE[start: end])\r\n print(\"send packet : \" + FragmentedMESSAGE)\r\n if reliable_send_fragmented(FragmentedMESSAGE):\r\n print(\"send succsecfully packet : \" + str(x))\r\n print(\"\\n\")\r\n x += 1\r\n received = 2\r\n else:\r\n print(\"can not send packet number : \" + str(x))\r\n # parity ip/port/split dns\r\n return False\r\n sock_send.close()\r\n sock_receive.close()\r\n return True\r\n\r\n\r\ndef reliable_send_fragmented(message):\r\n counter = 0\r\n global received\r\n while counter < 15:\r\n if received == 0:\r\n result = receive_http()\r\n if received == 1:\r\n counter = 15\r\n return True\r\n if received == 2:\r\n send_http(message)\r\n counter += 1\r\n\r\n if counter == 15 and received == 2:\r\n print(\"proxy is not ready to answer\")\r\n return False\r\n\r\n\r\ndef check_parity(message):\r\n # m[2] data - m[4] parity\r\n temp = str(message)\r\n m = temp[2:-1].split('*')\r\n p = 0\r\n for i in m[2]:\r\n p += ord(i)\r\n parity = bin(p)\r\n parity = parity.split('b')\r\n if m[4] == parity[1]:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef make_parity(message):\r\n print(message)\r\n m = bytes(message, \"utf-8\")\r\n message = str(m)\r\n print(message)\r\n parity = 0\r\n p = 0\r\n for i in message[2:-1]:\r\n p += ord(i)\r\n parity = bin(p)\r\n parity = parity.split('b')\r\n return parity[1]\r\n\r\n\r\ndef send_http(message):\r\n global received\r\n # print(\"send packet\")\r\n # print(\"UDP target IP:\", UDP_IP_s)\r\n # print(\"UDP target port:\", UDP_PORT_s)\r\n # print(\"message:\", message)\r\n sock_send.sendto(bytes(message, \"utf-8\"), (UDP_IP_s_proxy, UDP_PORT_s_proxy))\r\n received = 0\r\n\r\n\r\ndef receive_http():\r\n global received\r\n print(\"client waiting for answer ...\")\r\n ready = select.select([sock_receive], [], [], 1)\r\n if ready[0]:\r\n receive_data, addr = sock_receive.recvfrom(1024) # buffer size is 1024 bytes\r\n # print(\"client receive message \")\r\n if check_parity(receive_data):\r\n received = 1\r\n assert isinstance(receive_data, object)\r\n show_result(receive_data)\r\n return receive_data\r\n else:\r\n received = 2\r\n print(\"parity error\")\r\n return 0\r\n\r\n else:\r\n received = 2\r\n print(\"time out \")\r\n return 0\r\n\r\n\r\ndef show_result(message):\r\n assert isinstance(message, object)\r\n print(\"received message:\", message)\r\n\r\n\r\ndef receive_http_proxy():\r\n global TCP_IP_s_server, sock_receive, sock_send\r\n sock_receive = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n sock_receive.bind((UDP_IP_r_proxy, UDP_PORT_r_proxy))\r\n sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n hope = 1\r\n temp = receive_http_fragmented()\r\n if temp != str(-1):\r\n # print(temp)\r\n TCP_IP_s_server = 
str(temp[3])\r\n myMessage = str(temp[2])\r\n while temp[1] == str(1):\r\n temp = receive_http_fragmented()\r\n if temp[0] == str(hope):\r\n myMessage += temp[2]\r\n hope += 1\r\n print(\"defragment finish\")\r\n return myMessage\r\n else:\r\n print(\"parity error , remove the packet from buffer...\")\r\n sock_receive.close()\r\n sock_send.close()\r\n\r\n\r\ndef receive_http_fragmented():\r\n print(\"client is waiting for response packet ...\")\r\n notReceive = True\r\n while notReceive:\r\n data, addr = sock_receive.recvfrom(6500) # buffer size is 6500 bytes\r\n print(\"receive packet\")\r\n assert isinstance(data, object)\r\n print(\"received message:\", data)\r\n notReceive = False\r\n\r\n if check_parity(data):\r\n print(data)\r\n temp = str(data)\r\n m = temp[2:-1].split('*')\r\n send_ack_http_proxy(data)\r\n return m\r\n else:\r\n return -1\r\n\r\n\r\ndef send_ack_http_proxy(data):\r\n print(\"send ack to proxy\")\r\n global sock_send\r\n sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n print(\"UDP target IP:\", UDP_IP_s_proxy)\r\n print(\"UDP target port:\", UDP_PORT_s_proxy)\r\n print(\"message:\", data)\r\n print(\"\\n\")\r\n sock_send.sendto(data, (UDP_IP_s_proxy, UDP_PORT_s_proxy))\r\n sock_send.close()\r\n\r\n\r\n# send part initiation\r\nUDP_IP_s_proxy = \"127.0.0.1\" # \"185.211.88.22\"\r\nUDP_PORT_s_proxy = 5005\r\nsock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n\r\n# receive part initiation\r\nUDP_IP_r_proxy = \"127.0.0.1\" # \"185.211.88.22\"\r\nUDP_PORT_r_proxy = 5006\r\nsock_receive = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n\r\nTCP_IP_s_server = \"\"\r\n# code section\r\nreceived = 2 # 0 just send 1 receive ok 2 time out/send\r\n# MESSAGE = \"GET / HTTP/1.0\\r\\n\\r\\n\"\r\n# DES_IP = input(\"enter destionation IP : \")\r\n# MESSAGE = input(\"enter your http message : \")\r\nDES_IP = \"www.aut.ac.ir\"\r\nMESSAGE = \"GET / HTTP/1.0\\r\\n\\r\\n\"\r\nreliable_send(MESSAGE, DES_IP)\r\nprint(\"send with no problem\")\r\nresult = receive_http_proxy()\r\nprint(result)\r\n# parity ip/port/split dns\r\n\r\n# http type setting numberOfPacke * moreFragment * message * IPDestination * parity\r\n","repo_name":"Yasaman1997/Computer_Networks","sub_path":"Python/new_test_pkg/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29328649759","text":"#Aprobación de créditos\ningreso=int(input(\"¿cúal es tu ingreso?:\"))\nnacimiento=int(input(\"¿qué año naciste?:\"))\nhijos=int(input(\"¿cuántos hijos tienes?:\"))\npertenencia=int(input(\"¿hace cuántos años estás en este banco?:\"))\nestadocivil=input(\"¿cuál es tu estado civil?:\") \nC= estadocivil\nS= estadocivil\nvive=input(\"¿dónde vives? (si es en campo escriba R, si es en ciudad escriba U):\")\nR = vive\nU = vive\nif pertenencia > 10 and hijos >= 2:\n print(\"APROBADO\")\nelif estadocivil == C and hijos > 3 and ((2018 - nacimiento)> 45 or (2018 - nacimiento)< 55):\n print(\"APROBADO\")\nelif ingreso >2500000 and estadocivil==S and vive == U:\n print(\"APROBADO\")\nelif ingreso > 3500000 and pertenencia <5:\n print(\"APROBADO\")\nelif vive== R and estadocivil==C and hijos < 2:\n print(\"APROBADO\")\nelse:\n print(\"RECHAZADO\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_731250392801519201fcd3e41f9cb6ee.py","file_name":"hito1_ej3_731250392801519201fcd3e41f9cb6ee.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"8961094132","text":"from twilio.rest import TwilioRestClient\nimport os\n\ndef send_text_message(message, phone_num):\n\t\"\"\"Sends a text message to the given phone number.\n\n\tIs called when Ronnie's 'text address' link is clicked.\n\t\"\"\"\n\t\n\tACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')\n\tAUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')\n\tTWILIO_NUMBER = os.environ.get('TWILIO_NUMBER')\n\n\tclient = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)\n\n\tm = client.messages.create(\n\t\tto=phone_num,\n\t\tfrom_=TWILIO_NUMBER,\n\t\tbody=message,\n\t\t)\n\n\treturn m.sid","repo_name":"mfbalder/ChatappFeedmeBot-HB","sub_path":"send_message.py","file_name":"send_message.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"77"}
+{"seq_id":"602438345","text":"from flojoy import flojoy, OrderedPair\nfrom time import sleep\nfrom typing import Optional\nimport serial\nimport numpy as np\nfrom datetime import datetime\n\n\n@flojoy(deps={\"pyserial\": \"3.5\"})\ndef SERIAL_TIMESERIES(\n default: Optional[OrderedPair] = None,\n comport: str = \"/dev/ttyUSB0\",\n baudrate: int = 9600,\n num_readings: int = 100,\n record_period: int = 1,\n) -> OrderedPair:\n \"\"\"The SERIAL_TIMESERIES node extracts simple time-dependent 1D data from an Arduino or a similar serial device.\n\n Parameters\n ----------\n num_readings : int\n Number of points to record.\n record_period : float\n Length between two recordings in seconds.\n baudrate : int\n Baud rate for the serial device.\n comport : string\n COM port of the serial device.\n\n num_readings * record_period :\n Is roughly the run length in seconds.\n \"\"\"\n\n ser = serial.Serial(comport, timeout=1, baudrate=baudrate)\n readings = []\n times = []\n # The first reading is commonly empty.\n s = ser.readline().decode()\n\n for i in range(num_readings):\n ts = datetime.now()\n s = ser.readline().decode()\n # Some readings may be empty.\n if s != \"\":\n reading = s[:-2].split(\",\")\n if len(reading) == 1:\n reading = reading[0]\n readings.append(reading)\n\n ts = datetime.now()\n seconds = float(\n ts.hour * 3600 + ts.minute * 60 + ts.second + ts.microsecond / 10**6\n )\n\n times.append(seconds)\n\n if len(times) > 0:\n time1 = seconds - times[i]\n else:\n # Estimate execution time.\n time1 = 0.1\n\n if time1 < record_period:\n sleep(record_period - time1)\n\n times = np.array(times)\n try:\n times -= times[0]\n except IndexError:\n raise IndexError(\"No data detected from the Arduino\")\n\n readings = np.array(readings)\n readings = readings.astype(\"float64\")\n\n return OrderedPair(x=times, y=readings)\n","repo_name":"flojoy-io/nodes","sub_path":"IO/PROTOCOLS/SERIAL/BASIC/SERIAL_TIMESERIES/SERIAL_TIMESERIES.py","file_name":"SERIAL_TIMESERIES.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"4375416156","text":"# \n\nfrom __future__ import nested_scopes\n\n\ndef interpret(formula, dictionary):\n \"\"\" Interpretation einer Formel in Postfix-Form\n Erlaubte Operatoren: AND, OR, NOT\n Das dictionary enth�lt die auszuf�hrenden Funktionen \"\"\"\n\n stack = []\n for token in formula.split():\n if token == \"AND\":\n p = stack.pop()\n q = stack.pop()\n stack.append(lambda x: q(x) & p(x))\n elif token == \"OR\":\n p = stack.pop()\n q = stack.pop()\n stack.append(lambda x: q(x) | p(x))\n elif token == \"NOT\":\n p = stack.pop()\n stack.append(lambda x: not p(x))\n else:\n stack.append(dictionary[token])\n return stack.pop()\n","repo_name":"johsieders/potpourri","sub_path":"fttp/src/interpreters/formula.py","file_name":"formula.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"13504100674","text":"\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\nimport pathlib\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\n# long_description = (here / 'README.md').read_text(encoding='utf-8')\n\n# Arguments marked as \"Required\" below must be included for upload to PyPI.\n# Fields marked as \"Optional\" may be commented out.\n\nsetup(\n name='supermarket',\n version='1.0.0',\n description='A Python project to demonstrate APM-Logs correlation',\n author='Emanuil Tolev',\n author_email='etolev@elastic.co',\n\n # You can just specify package directories manually here if your project is\n # simple. Or you can use find_packages().\n #\n # Alternatively, if you just want to distribute a single Python file, use\n # the `py_modules` argument instead as follows, which will expect a file\n # called `my_module.py` to exist:\n #\n # py_modules=[\"my_module\"],\n #\n packages=find_packages(where='.'), # Required\n python_requires='>=3.5, <4'\n)\n","repo_name":"emanuil-tolev/logs-traces-correlation","sub_path":"supermarket/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"9554769058","text":"import codecs\nimport json\nimport re\nimport logging\nfrom datetime import datetime\nfrom urllib.request import urlopen\nfrom typing import Optional, Tuple\n\nfrom ulauncher.config import API_VERSION\nfrom ulauncher.utils.version import satisfies\nfrom ulauncher.modes.extensions.ExtensionManifest import ExtensionManifest\n\nlogger = logging.getLogger()\n\nCommit = Tuple[str, str]\n\n\nclass ExtensionRemoteError(Exception):\n pass\n\n\nclass InvalidExtensionUrlWarning(Exception):\n pass\n\n\nclass ExtensionNetworkError(Exception):\n pass\n\n\nclass ExtensionIncompatibleWarning(Exception):\n pass\n\n\ndef json_fetch(url):\n try:\n return json.loads(urlopen(url).read())\n except Exception as e:\n # If json.loads fails, treat it as a network error too.\n # It should never happen as all these API endpoint are exclusively JSON\n raise ExtensionNetworkError(f'Could not access repository resource \"{url}\"') from e\n\n\nclass ExtensionRemote:\n url_match_pattern = r\"^(?:git@|https:\\/\\/)(?P[^\\/]+)\\/(?P[^\\/]+)\\/(?P[^\\/]+)\"\n date_format = '%Y-%m-%dT%H:%M:%S%z'\n\n def __init__(self, url):\n self.url = url.lower()\n match = re.match(self.url_match_pattern, self.url, re.I)\n if not match:\n raise InvalidExtensionUrlWarning(f'Invalid URL: {url}')\n\n self.user = match.group(\"user\")\n self.repo = match.group(\"repo\")\n self.host = match.group(\"host\")\n\n if \".\" not in self.host:\n self.extension_id = f\"{self.host}.{self.user}.{self.repo}\"\n else:\n domain, tld = self.host.rsplit(\".\", 1)\n self.extension_id = f\"{tld}.{domain}.{self.user}.{self.repo}\"\n\n if self.host == \"github.com\":\n self.host_api = \"https://api.github.com\"\n self.date_format = '%Y-%m-%dT%H:%M:%SZ'\n elif self.host == \"gitlab.com\":\n host_api = \"https://gitlab.com/api/v4\"\n projects = json_fetch(f\"{host_api}/users/{self.user}/projects?search={self.repo}\")\n project = next((p for p in projects if p[\"name\"] == self.repo), None)\n\n self.host_api = f\"{host_api}/projects/{project['id']}/repository\"\n self.date_format = '%Y-%m-%dT%H:%M:%S.%f%z'\n else:\n self.host_api = f\"https://{self.host}/api/v1\"\n\n def get_download_url(self, commit: str) -> str:\n if self.host == \"gitlab.com\":\n return f'https://{self.host}/{self.user}/{self.repo}/-/archive/{commit}/{self.repo}-{commit}.tar.gz'\n return f'https://{self.host}/{self.user}/{self.repo}/archive/{commit}.tar.gz'\n\n def fetch_file(self, file_path) -> Optional[str]:\n # This saves us a request compared to using the \"raw\" file API that needs to know the branch\n file_api_url = f\"{self.host_api}/repos/{self.user}/{self.repo}/contents/{file_path}\"\n if self.host == \"gitlab.com\":\n file_api_url = f\"{self.host_api}/files/{file_path}?ref=HEAD\"\n\n file_data = json_fetch(file_api_url)\n\n if file_data and file_data.get(\"content\") and file_data.get(\"encoding\"):\n return codecs.decode(file_data[\"content\"].encode(), file_data[\"encoding\"]).decode()\n\n return None\n\n def get_compatible_commit_from_tags(self) -> Optional[Commit]:\n \"\"\"\n This method is new for v6, but intentionally undocumented because we still want extension\n devs to use the old way until Ulauncher 5/apiv2 is fully phased out\n \"\"\"\n tags = {}\n # pagination is only implemented for GitHub (default 30, max 100)\n tags_url = f\"{self.host_api}/repos/{self.user}/{self.repo}/tags?per_page=100\"\n if self.host == \"gitlab.com\":\n # GitLab's API allows to filter out tags starting with our prefix\n tags_url = 
f\"{self.host_api}/tags?search=^apiv\"\n\n try:\n tags_data = json_fetch(tags_url)\n\n for tag in tags_data or []:\n if tag[\"name\"].startswith(\"apiv\") and satisfies(API_VERSION, tag[\"name\"][4:]):\n commit = tag[\"commit\"]\n version = tag[\"name\"][4:]\n id = commit.get(\"sha\", commit.get(\"id\")) # id fallback is needed for GitLab\n commit_time = commit.get(\"created\", commit.get(\"created_at\"))\n tags[version] = (id, commit_time)\n\n if tags:\n id, commit_time = tags[max(tags)]\n if id and self.host == \"github.com\": # GitHub's tag API doesn't give any dates\n commit_data = json_fetch(f\"{self.host_api}/repos/{self.user}/{self.repo}/commits/{id}\")\n commit_time = commit_data[\"commit\"][\"committer\"][\"date\"]\n if id and commit_time:\n date = datetime.strptime(commit_time, self.date_format)\n return id, date.isoformat()\n\n except Exception as e:\n logger.warning(\"Unexpected error retrieving version from tags '%s' (%s: %s)\", self.url, type(e).__name__, e)\n\n return None\n\n def get_commit(self, ref: str = \"HEAD\") -> Commit:\n if self.host == \"gitlab.com\":\n url = f\"{self.host_api}/commits/{ref}\"\n elif self.host == \"github.com\":\n url = f\"{self.host_api}/repos/{self.user}/{self.repo}/commits/{ref}\"\n else:\n # Gitea/Codeberg API differs from GitHub here, but has the same API\n url = f\"{self.host_api}/repos/{self.user}/{self.repo}/git/commits/{ref}\"\n\n try:\n response = json_fetch(url)\n id = response.get(\"sha\") or response.get(\"id\")\n commit_time = response.get(\"created_at\") or response[\"commit\"][\"committer\"][\"date\"]\n date = datetime.strptime(commit_time, self.date_format)\n return id, date.isoformat()\n except (KeyError, TypeError) as e:\n raise ExtensionRemoteError(f'Could not fetch reference \"{ref}\" for {self.url}.') from e\n\n def get_latest_compatible_commit(self) -> Commit:\n \"\"\"\n Finds first version that is compatible with users Ulauncher version.\n Returns a commit hash and datetime.\n \"\"\"\n manifest = ExtensionManifest(json.loads(self.fetch_file(\"manifest.json\") or \"{}\"))\n\n if satisfies(API_VERSION, manifest.api_version):\n return self.get_commit()\n\n tag = self.get_compatible_commit_from_tags()\n if tag:\n return tag\n\n if satisfies(\"2.0\", manifest.api_version):\n logger.warning(\"Falling back on using API 2.0 version for %s.\", self.repo)\n return self.get_commit()\n\n raise ExtensionIncompatibleWarning(f\"{manifest.name} does not support Ulauncher API v{API_VERSION}.\")\n","repo_name":"otisdog8/Ulauncher","sub_path":"ulauncher/modes/extensions/ExtensionRemote.py","file_name":"ExtensionRemote.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
+{"seq_id":"29313262844","text":"import math\nimport json\nimport requests\nimport itertools\nimport numpy as np\nimport time\nimport pickle\nimport tqdm\n\nfrom datetime import datetime, timedelta\nprint('import complete')\n\ndef make_request(uri, max_retries = 5):\n\n def fire_away(uri):\n response = requests.get(uri)\n assert response.status_code == 200\n return json.loads(response.content)\n current_tries = 1\n while current_tries < max_retries:\n try:\n time.sleep(1)\n response = fire_away(uri)\n return response\n except:\n time.sleep(1)\n current_tries += 1\n return fire_away(uri)\n\n\ndef pull_posts_for(subreddit, start_at, end_at):\n\n def map_posts(posts):\n return list(map(lambda post: {\n 'id': post['id'],\n 'created_utc': post['created_utc'],\n 'permalink': post['permalink'],\n }, posts))\n\n SIZE = 100 # maximum request amount to pushshift.io at once\n URI_TEMPLATE = r'https://api.pushshift.io/reddit/search/submission/?subreddit={}&after={}&before={}&limit={}&fields=id,created_utc,permalink'\n\n post_collections = map_posts( \\\n make_request( \\\n URI_TEMPLATE.format( \\\n subreddit, start_at, end_at, SIZE))['data'])\n n = len(post_collections)\n while n == SIZE:\n time.sleep(1)\n last = post_collections[-1]\n new_start_at = last['created_utc'] - (10)\n\n more_posts = map_posts( \\\n make_request( \\\n URI_TEMPLATE.format( \\\n subreddit, new_start_at, end_at, SIZE))['data'])\n\n n = len(more_posts)\n post_collections.extend(more_posts)\n\n # remove duplicates\n res = []\n [res.append(x) for x in post_collections if x not in res]\n\n return res\n\n############################################################################################################\n\ndays = 3\nsubreddit = 'citiesskylines'\nend_at = math.ceil(datetime.utcnow().timestamp())\nstart_at = math.floor((datetime.utcnow() - \\\n timedelta(days=days)).timestamp())\nprint(f'from {start_at} to {end_at}, {days} days @ r/{subreddit}')\n\nposts = pull_posts_for(subreddit, start_at, end_at)\n\nprint(len(posts))\n\nf = open(\"./data/post_filtered_pickle\", \"wb\")\npickle.dump(posts, f)\nf.close()\n","repo_name":"maxjo020418/OKBHscraper","sub_path":"pushshiftio_post.py","file_name":"pushshiftio_post.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"43507790285","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.optim import SGD, Adam\r\nimport torch.utils.data as Data\r\nimport torchvision.transforms as transforms\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nfilename = \"G:\\data\\spambase.csv\" # 读取文件位置\r\nspam = pd.read_csv(filename) # (4600,58) 4600个样本,每个样本有58个特征\r\n# print(spam.head())\r\nX = spam.iloc[:, 0:57].values # 去掉最后一列标签列\r\ny = spam.spam.values\r\n\r\n# 数据归一化\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=123) # 将数据分为训练集和测试集\r\nscales = MinMaxScaler(feature_range=(0, 1)) # 将数据缩放到0,1\r\nX_train_s = scales.fit_transform(X_train) # 对X_train_s 缩放,下同\r\nX_test_s = scales.transform(X_test) #\r\n\r\n# 使用箱线图对比邮件的每个特征分布\r\ncolname = spam.columns.values[:-1]\r\nplt.figure(figsize=(20, 14))\r\nfor ii in range(len(colname)):\r\n plt.subplot(7, 9, ii+1)\r\n sns.boxplot(x=y_train, y=X_train[:,ii])\r\n plt.title(colname[ii])\r\nplt.subplots_adjust(hspace=0.4)\r\nplt.savefig('box.png')\r\nplt.show()\r\n\r\n\r\n# 搭建MLP网络\r\nclass MLPclassifica(nn.Module):\r\n def __init__(self):\r\n super(MLPclassifica, self).__init__() #构造方法必须���\r\n\r\n # Sequential()表示将括号里的层链接起来,下面nn.Linear表示输入有57个神经元,输出有30个神经元,存在偏置神经元(默认开启)\r\n # 然后将输出结果带入ReLu函数,Linear与Relu合在一起起名为hidden1,上层的输出为下层的输入\r\n self.hidden1 = nn.Sequential(\r\n nn.Linear(\r\n in_features=57,\r\n out_features=30,\r\n bias=True,\r\n ),\r\n nn.ReLU()\r\n )\r\n\r\n self.hidden2 = nn.Sequential(\r\n nn.Linear(30, 10),\r\n nn.ReLU()\r\n )\r\n\r\n self.classifica = nn.Sequential(\r\n nn.Linear(10, 2),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x): # 定义前向传播函数\r\n fc1 = self.hidden1(x)\r\n fc2 = self.hidden2(fc1)\r\n output = self.classifica(fc2)\r\n\r\n return fc1, fc2, output\r\n\r\n\r\n# 数据转为张量\r\nX_train_t = torch.from_numpy(X_train_s.astype(np.float32))\r\ny_train_t = torch.from_numpy(y_train.astype(np.int64))\r\n\r\nX_test_t = torch.from_numpy(X_test_s.astype(np.float32))\r\ny_test_t = torch.from_numpy(y_test.astype(np.int64))\r\n\r\ntrain_data = Data.TensorDataset(X_train_t, y_train_t)\r\n# 定义一个数据加载器,会将数据分批次喂给神经网络,这里定义的一批为64个样本\r\ntrain_loader = Data.DataLoader(\r\n dataset=train_data, # 数据是什么\r\n batch_size=64, # 每批多少个\r\n shuffle=True, # 是否打乱数据\r\n #num_workers=2\r\n)\r\n\r\n# 我们的网络结构是个类,将其实例化一下\r\nmlpc = MLPclassifica()\r\n\r\n# 定义优化器,使用Adam优化算法,可自动调节学习率\r\noptimizer = torch.optim.Adam(mlpc.parameters(), lr=0.01)\r\n\r\nloss_func = nn.CrossEntropyLoss() # 定义损失函数为二分类损失函数\r\n\r\nmax_epoch = 15 # 训练轮次\r\ntrain_loss_list = [] # 定义一个空列表,等下来存储训练的损失\r\naccuracy_list = [] #同上,来存储精度\r\n\r\nfor epoch in range(max_epoch):\r\n\r\n for step,(b_x,b_y) in enumerate(train_loader):\r\n _, _, output = mlpc(b_x) # 将b_x喂给神经网络,得到输出\r\n train_loss = loss_func(output, b_y) # 根据输出计算损失函数\r\n optimizer.zero_grad() # torch中每次求导梯度会叠加,所以我们在反向传播的过程中先将梯度清零再求导\r\n train_loss.backward() # 求导\r\n optimizer.step() # 更新参数\r\n print(train_loss)\r\n\r\n niter = epoch * len(train_loader)+step+1\r\n\r\n if niter % 25 == 0:\r\n train_loss_list.append(train_loss.detach().numpy()) # 没经过25次迭代记录一次损失值\r\n _, _, output = mlpc(X_test_t)\r\n _, pre_index = torch.max(output, 1)\r\n test_accuracy = accuracy_score(y_test, pre_index) # 计算精度\r\n 
accuracy_list.append(test_accuracy)\r\n\r\nplt.subplot(2,1,1) #画loss\r\nplt.plot(train_loss_list)\r\nplt.title('loss')\r\n\r\nplt.subplot(2,1,2) #画精度表\r\nplt.title('accracy')\r\nplt.plot(accuracy_list)\r\nplt.savefig('train.png')\r\nplt.show()\r\n\r\n#torch.save(mlpc, \"spam_model.pkl\") #保存模型的网络结构与参数\r\n#torch.save(mlpc.state_dict(), \"spam_state_dict.pkl\") # 仅保存所有的参数\r\n","repo_name":"saber805/spam_classify","sub_path":"trian_spam_classifica.py","file_name":"trian_spam_classifica.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5875091163","text":"from modules.db import db\nfrom flask import session, flash\nfrom os import abort\n\ndef get():\n sql = \"SELECT * FROM schools WHERE id=:school_id\"\n result = db.session.execute(sql, {\"school_id\": session[\"school\"]})\n school = result.fetchone()\n return school\n\ndef create(form):\n if session[\"csrf_token\"] != form[\"csrf_token\"]:\n abort(403)\n schoolname = form[\"schoolname\"]\n info = form[\"info\"]\n address = form[\"address\"]\n phone = form[\"phone\"]\n www = form[\"www\"]\n if len(schoolname) < 3 or len(info) < 10 or len(address) < 10 or len(phone) < 4 or len(www) < 3:\n flash(\"Tarkista, että kaikki kentät ovat oikein täytetty\", \"error\")\n return False\n sql = \"INSERT INTO schools (schoolname, info, address, phone, www, visible) VALUES (:schoolname, :info, :address, :phone, :www, 'true') RETURNING id\"\n result = db.session.execute(sql, {\"schoolname\":schoolname, \"info\": info, \"address\": address, \"phone\": phone, \"www\": www})\n school_id = result.fetchone()[0]\n sql = \"INSERT INTO schooladmins (user_id, school_id) VALUES (:user_id, :school_id)\"\n db.session.execute(sql, {\"user_id\":session[\"user_id\"], \"school_id\": school_id})\n db.session.commit()\n session[\"school\"] = school_id\n return True\n\ndef edit(form):\n if session[\"csrf_token\"] != form[\"csrf_token\"]:\n abort(403)\n schoolname = form[\"schoolname\"]\n info = form[\"info\"]\n address = form[\"address\"]\n phone = form[\"phone\"]\n www = form[\"www\"]\n if len(schoolname) < 3 or len(info) < 10 or len(address) < 10 or len(phone) < 4 or len(www) < 3:\n flash(\"Tarkista, että kaikki kentät ovat oikein täytetty\", \"error\")\n return False\n sql = \"SELECT schoolname FROM schools WHERE id=:id\"\n result = db.session.execute(sql, {\"id\": session[\"school\"]})\n oldname = result.fetchone()[0]\n if oldname != schoolname: # someone wants to change the name of the school\n sql = \"SELECT * FROM schools WHERE schoolname=:schoolname\"\n result = db.session.execute(sql, {\"schoolname\": schoolname})\n if result.fetchone():\n flash(\"Tämä koulunimi on jo käytössä muualla\", \"error\")\n return False\n sql = \"UPDATE schools SET schoolname=:schoolname, info=:info, address=:address, phone=:phone, www=:www WHERE id=:id\"\n db.session.execute(sql, {\"schoolname\": schoolname, \"info\": info, \"address\": address, \"phone\": phone, \"www\": www, \"id\": session[\"school\"]})\n db.session.commit()\n return True\n","repo_name":"rundtjan/kielipelisovellus","sub_path":"modules/schoolz.py","file_name":"schoolz.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"19006237386","text":"from collections import deque\nclass Solution:\n \n #Function to return list containing vertices in Topological order.\n def topoSort(self, V, adj):\n # Code here\n indeg = [0]*V\n ans = []\n for i in range(V):\n for x in adj[i]:\n indeg[x] += 1\n \n pq = deque()\n for i in range(V):\n if indeg[i] == 0:\n pq.append(i)\n \n while(len(pq)) > 0:\n t = pq.popleft()\n ans.append(t)\n for x in adj[t]:\n indeg[x] -= 1\n if indeg[x] == 0:\n pq.append(x)\n \n return ans","repo_name":"godspell/Data_Structure_and_Algorithms","sub_path":"Graphs/Topological sort/ans2.py","file_name":"ans2.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"3149402862","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport time\nimport multiprocessing as mp\nimport os, sys\nfrom itertools import repeat\n\nimport LGTp as lgt\n\nprint(\"default_rng:\",np.random.default_rng())\n\ncpu_count = os.cpu_count()\nprint(\"os cpu_count:\",cpu_count)\n\n\n# calculate equilibrating phase\n# Change only here\n#N = 6\n#N_t = 12\n#run_n = 4\n#beta_id = \"b050to200s40\"\n#n_conf = 200\n\nif __name__ == '__main__':\n\n\targs = sys.argv # N, N_t, run_n, beta_id, n_conf, data_dir\n\n\tprint('Argument : {}'.format(args))\n\t\n\tif len(args) != 8:\n\t\traise SyntaxError(\"Check args : N, N_t, run_n, beta_id, prec, data_dir, fig_dir\")\n\n\tN = int(args[1]) # Spatial lattice point number \n\tN_t = int(args[2]) # Temporal lattice point number\n\trun_n = int(args[3]) # run id\n\tbeta_id = str(args[4]) # beta set id\n\tprec = float(args[5]) # target precision\n\tdata_dir = str(args[6]) # data save directory\n\tfig_dir = str(args[7]) # figure save directory\n\n\tstart_b = float(beta_id[1:4])*0.01\n\tend_b = float(beta_id[6:9])*0.01\n\tsteps = int(beta_id[-2:])\n\n\tmax_steps = 500\n\t\n\tbeta_list = np.linspace(start_b,end_b,steps)\n\tprint(\"generating U1-%d \"%(N)+beta_id)\n\n\tnt = len(beta_list)\n\n\tensem = []\n\n\t# for b in range(nt):\n\tdef simulate(b):\n\t# start = time.time()\n\t\t\t#seed = int(beta_list[b]*1000)\n\t\t\tseed = int((time.time() % 1)*1000)\n\n\t\t\tu1 = lgt.Lattice([N,N,N,N_t])\n\t\t\tu1.init_fields('U1','Cold',seed)\n\t\t\t\n\t\t\tbare_parameters = u1.bare_parameter_generator()\n\t\t\tbare_parameters['beta'] = beta_list[b]\n\t\t\t\n\t\t\tg = lgt.action(u1,bare_parameters)\n\t\t\tO = g.polyakovLoopR_nb # Target observable\n\n\t\t\tt_eq, t_ac, _, _ = lgt.calc_teq_tac(bare_parameters,\n\t\t\t\t\tO, \n\t\t\t\t\tu1, \n\t\t\t\t\ttol=prec, \n\t\t\t\t\tmax_steps=max_steps, \n\t\t\t\t\tverbose=True, \n\t\t\t\t\tfig_dir=fig_dir, \n\t\t\t\t\tuse_lat=True)\n\t\t\t\n\t\t\tt_eq = int(np.round(t_eq+0.5))\n\t\t\tt_ac = int(np.round(t_ac+0.5))\n\t\t\t\n\t\t\tprint(\"beta\",beta_list[b],\" teq : \",t_eq,\" tac : \",t_ac)\n\t\t\t\n\t\t\tif t_ac > max_steps:\n\t\t\t\t\treturn\n\n\t\t\t# Finish thermalizing if t_eq > max_steps\n\t\t\tif t_eq > max_steps*3:\n\t\t\t\trem_eq = max_steps*2\n\t\t\telse:\n\t\t\t\trem_eq = t_eq - max_steps\n\n\t\t\tfor i in range(rem_eq):\n\t\t\t\tlgt.metropolis(u1,bare_parameters)\n\t\t\t\n\t\t\tconf = []\n\t\t\t\t\t\n\t\t\t# Generate minimum number of configurations\n\t\t\tO_mean = O(u1.field)\n\t\t\tO_hist = []\n\t\t\tO_diff_hist = []\n\t\t\tfor i in range(100):\n\t\t\t\tO_mean_old = O_mean\n\t\t\t\t\n\t\t\t\tfor t in range(2*t_ac):\n\t\t\t\t#for t in range(t_ac):\n\t\t\t\t\tlgt.metropolis(u1,bare_parameters)\n\t\t\t\tconf.append(u1.field)\n\n\t\t\t\tO_hist.append(O(u1.field))\n\t\t\t\tO_mean = np.mean(O_hist)\n\t\t\t\tO_diff = np.abs(O_mean - O_mean_old)\n\t\t\t\tO_diff_hist.append(O_diff)\n\n\t\t\t# Generate conf of target precision\n\t\t\twhile np.mean(O_diff_hist[-100:]) > prec and len(O_diff_hist) < max_steps*3:\n\t\t\t\t\n\t\t\t\tO_mean_old = O_mean\n\t\t\t\t\n\t\t\t\tfor t in range(2*t_ac):\n\t\t\t\t#for t in range(t_ac):\n\t\t\t\t\tlgt.metropolis(u1,bare_parameters)\n\t\t\t\tconf.append(u1.field)\n\n\t\t\t\tO_hist.append(O(u1.field))\n\t\t\t\tO_mean = np.mean(O_hist)\n\t\t\t\tO_diff = np.abs(O_mean - O_mean_old)\n\t\t\t\tO_diff_hist.append(O_diff)\n\t\t\t\n\t\t\tbeta = beta_list[b]\n\t\t\tconf_name = data_dir+'/U1_b%0.3fN%dtac%dS%d.npy' %(beta,N,t_ac,seed)\n\t\t\tnp.save(conf_name, 
conf)\n\n\t# Test run\n\tprint(\"starting test run\")\n\tstart = time.time()\n\tsimulate(0)\n\tdur = time.time() - start\n\n\tn_ensem = len(beta_list)\n\tn_core = cpu_count\n\texpected_dur = n_ensem*dur/n_core\n\n\tprint(\"test run duration : %.5f sec\"%(dur))\n\tprint(\"for %d ensemble ~ %d sec ~ %0.3f hour\"%(n_ensem,n_ensem*dur,n_ensem*dur/3600.))\n\tprint(\"with %d core, expecting : %0.3f hour\"%(n_core, expected_dur/3600))\n\n\n\tnow = time.ctime(time.time())\n\texpected_end = time.ctime(time.time() + expected_dur)\n\n\tprint(\"starting at \"+now)\n\tprint(\"expected end time : \"+expected_end)\n\n\tstart = time.time()\n\n\tp = mp.Pool(n_core)\n\tres = p.map(simulate, range(nt)[1:])\n\tp.close()\n\tp.join()\n\n\tdue = time.time() - start\n\tprint(\"time span:\",due)\n\n\tprint(due/3600)\n\n","repo_name":"chanjure/LGTp","sub_path":"scripts/U1_auto_conf_gen.v5.py","file_name":"U1_auto_conf_gen.v5.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73573130487","text":"from django.db import models\nfrom common.models import CommonModel\n\n# Create your models here.\n\n\nclass Review(CommonModel):\n user = models.ForeignKey(\n \"users.User\",\n on_delete=models.CASCADE,\n )\n # boarder = models.ForeignKey(\n # \"boarders.Boarder\",\n # null=True,\n # blank=True,\n # on_delete=models.SET_NULL,\n # related_name=\"reviews\",\n # )\n sitter = models.ForeignKey(\n \"sitters.Sitter\",\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"reviews\",\n )\n payload = models.TextField()\n rating = models.PositiveIntegerField()\n\n def __str__(self):\n return f\"{self.user} / {self.rating}тнР\"\n","repo_name":"bellakim0843/pawfect_match_backend","sub_path":"reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29311608609","text":"#Juego adivina mi número\nprint(\"Intenta adivinar mi numero, esta entre el 1 y el 20\")\na=int(input(\"En que numero estoy pensando :\"))\nimport random\nb = (random.randrange(20))\nx = 1\nwhile x < 5:\n if b > a:\n print(\"Mi numero es mayor\")\n a=int(input(\"En que numero estoy pensando :\"))\n elif b < a:\n print(\"Mi numero es menor\")\n a=int(input(\"En que numero estoy pensando :\"))\n elif a == b:\n print(\"Adivinaste, mi numero era\",(b))\n break\n x = x + 1\nprint(\"No adivinaste, mi número era\",(b))\n\n \n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_b42ebbf40e5d7a3362012473908552d4.py","file_name":"hito1_ej12_b42ebbf40e5d7a3362012473908552d4.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18543968961","text":"import socket\nimport threading\n\nhost = socket.gethostname()\nport = 6666\nbuff = 1024\n\nclient_sock = socket.socket()\nclient_sock.connect((host, port))\n\ndef recieve():\n while True:\n rMsg = client_sock.recv(buff).decode()\n if not rMsg:\n print('Ending connection')\n break\n print()\n print(\"revd:\", rMsg)\n\ndef send():\n while True:\n sMsg = input()\n client_sock.send(sMsg.encode())\n\nt1 = threading.Thread(target=send, name=1)\nt2 = threading.Thread(target=recieve, name=2)\n\nt1.start()\nt2.start()","repo_name":"mihirs16/Computer-Networks","sub_path":"Full Duplex/full_dup_client.py","file_name":"full_dup_client.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"3924529049","text":"\n\nfrom gensim.test.utils import common_dictionary, common_corpus\nfrom gensim.models import LsiModel\n\n\n\n\n\nimport jieba\nimport jieba.posseg as pseg\nimport gensim\nimport json\nfrom gensim import corpora\nimport time\nfrom algorithm.base import dbs\n\ndef keywords_save():\n # 把所有keyword写入文件\n keywords = open('keywords.txt', encoding='utf-8', mode='w')\n\n sql = \"\"\"select keyword_paper from doclda\"\"\"\n result = dbs.getTuples(sql)\n for i in range(0, len(result)):\n if (result[i][0]):\n keywords.write(result[i][0] + ',')\n\ndef userdict_extract():\n \"\"\"\n 抽取关键字作为用户字典\n :return: 存储在 userdict.txt里面\n \"\"\"\n keywords_save()\n\n # 把keyword读出来, 并且统计词频写入userdict.txt里面\n wordDict = {}\n keywordsLst = open('keywords.txt', encoding='utf-8', mode='r').read().split(',')\n userdict = open('userdict.txt', encoding='utf-8', mode='w')\n\n # 统计词频放入词典\n for word in keywordsLst:\n if(word in wordDict):\n wordDict[word] += 1\n else:\n wordDict[word] = 1\n # 把词典写入文件\n for word in wordDict:\n userdict.write(word + ' ' + str(wordDict[word]) + ' n' + '\\n')\n\nprint('查询教师院系')\nsql='select id,institution from teacher'\nlist=dbs.getTuples(sql)\ninstitution_dict={}\nfor institution in list:\n if institution[1] not in institution_dict.keys():\n institution_dict[institution[1]]=[]\n institution_dict[institution[1]].append(institution[0])\n else :\n institution_dict[institution[1]].append(institution[0])\n\nmax=0\nmin=100\nfor v in institution_dict:\n l=len(institution_dict[v])\n if l>max:\n max=l\n if l=20:\n num_topics=10\n num_words=(num_topics-2)*2+10\n print('本院系文章总数为%d,即将分为主题数%d个,关键字%d个......' % (len(corpus),num_topics,num_words))\n # ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=50)\n # result = ldamodel.print_topics(num_topics=num_topics, num_words=num_words)\n # doc_lda = ldamodel[corpus]\n model = LsiModel(corpus, id2word=dictionary,num_topics=num_topics,)\n doc_lda = model[corpus]\n result = model.print_topics(num_topics=num_topics, num_words=num_words)\n time2 = time.time()\n print('模型训练用时:', time2 - time1)\n print('LDA模型训练完成。插入数据库......')\n\n\n for n in range(len(doc_lda)):\n Topic=doc_lda[n]\n if len(Topic)==0:\n prams = (institution_paper_list[n][0], institution + \"其他\", json.dumps({}, ensure_ascii=False),\n json.dumps({}, ensure_ascii=False))\n sql = 'insert into lda2 values(%s,%s,%s,%s)'\n list = dbs.exe_sql(sql, prams)\n continue\n c1 = sorted(Topic, key=lambda x: x[1], reverse=True)\n\n wordTopic = [i[1] for i in result if int(c1[0][0]) == i[0]]\n\n d=strToMap(wordTopic[0])\n t={}\n for key in DocWord[n]:\n if key in d.keys():\n t[key]=d[key]\n topic=c1[0][0]\n prams=(institution_paper_list[n][0],institution+str(topic),json.dumps(d,ensure_ascii=False),json.dumps(t,ensure_ascii=False))\n sql='insert into lda2 values(%s,%s,%s,%s)'\n list = dbs.exe_sql(sql, prams)\n\n\n\n\n","repo_name":"ischenrui/eds","sub_path":"algorithm/lsi.py","file_name":"lsi.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6621816263","text":"import asyncio\nfrom .db import model\nfrom .db import create_session\nfrom sqlalchemy import or_,and_, desc, asc\nimport queue\nimport sys\nimport logging\nimport datetime\nimport weakref\nfrom concurrent.futures import ThreadPoolExecutor\nimport concurrent\nfrom . import webserver\n#asyncio.tasks._DEBUG = True\n\n\nclass Job(object):\n def __init__(self, entry):\n self.entry = entry\n\n def __lt__(self, other):\n if self.entry and getattr(other, 'entry', None):\n return not self.entry.priority.__lt__(other.entry.priority)\n return False\n\n def __repr__(self):\n return \"\" %(self.entry and self.entry.id or self.entry.name)\n\n\n\nclass Daemon(object):\n log = logging.getLogger(\"daemon\")\n\n def __init__(self, manager, check_interval = 10, queue_size=20):\n self.manager = manager\n self.jobs = asyncio.PriorityQueue(queue_size)\n self.check_interval = check_interval\n self.in_check = set()\n self.workpool = ThreadPoolExecutor(5)\n self.loop = manager.loop\n self.manager.loop = self.loop\n self.blacklist = set()\n self.first_run = True\n\n @asyncio.coroutine\n def do_job(self):\n while True:\n try:\n job = yield from self.jobs.get()\n #yield from asyncio.sleep(1000)\n entry = job.entry\n entry.state = model.EntryState.started\n session = create_session()\n session.add(entry)\n self.log.info(\"check entry: %s\" %entry.full_path)\n if entry.plugin is None:\n self.log.debug(\"detect plugin for entry: %s\" %entry.id)\n (plugin, prio) = self.manager.get_backend_for_entry(entry)\n if not plugin:\n self.log.info(\"can't find plugin to handle url %s\" %(entry))\n entry.set_error(\"can't find plugin to handle url\", unhandled=True)\n continue\n entry.plugin = plugin.name\n session.commit()\n self.log.debug(\"use plugin for entry %s: %s (prio=%s)\" %(entry.id, plugin.name, prio))\n else:\n plugin = self.manager.get_backend(entry.plugin)\n if not plugin:\n self.log.error(\"entry has plugin that does not exist\")\n self.blacklist.add(entry.id)\n # FIXME, blacklist entry until restart\n return\n\n rv = plugin.do_entry(entry)\n def call_done(future):\n asyncio.Task(self.job_done(future))\n #rv.add_done_callback(self.job_done)\n rv.add_done_callback(call_done)\n yield from rv\n except Exception as e:\n self.log.exception(e)\n #raise asyncio.tasks.Return(job)\n\n def job_done(self, future):\n entry, rv = future.result()\n if not rv:\n self.log.error(\"job failed: %s\", str(entry))\n try:\n self.in_check.remove(entry.id)\n except KeyError:\n self.log.debug(\"entry should have been in in_check\")\n else:\n dm = yield from self.manager.get_download_manager(entry.collection)\n yield from dm.entry_done(entry)\n try:\n self.in_check.remove(entry.id)\n except KeyError:\n self.log.debug(\"entry should have been in in_check\")\n\n\n @asyncio.coroutine\n def got_entries(self, entries):\n if not entries:\n return\n try:\n for entry in entries:\n if entry.id in self.in_check:\n self.log.debug(\"entry still processed: %s\" %entry.full_path)\n continue\n\n self.in_check.add(entry.id)\n #self.in_check.add(entry)\n #embed()\n #print(\"qlen\", self.jobs.qsize())\n #asyncio.Task(self.do_job())\n yield from self.jobs.put(Job(entry))\n #print(\"%%%%%%\")\n #print(rv)\n\n #entry.next_check = next_check\n #session.add(entry)\n\n except Exception as e:\n self.log.exception(e)\n #for i in session.query(model.Entry).filter(or_(model.Entry.next_check==None,\n #model.Entry.next_check= DATE('{}') \" \\\n \"AND f_mensaje <= DATE('{}')) \".format(p_clave, f_ini, f_fin)\n\n df = 
pd.read_sql_query(query, conexion)\n if not df.empty:\n print(df)\n return df\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")\n\ndef consultar_comentarios_cantidad(conexion, p_clave):\n query = \"SELECT usuario.nick_usuario, count(mensaje.text_mensaje) as cantidad \" \\\n \"FROM mensaje \" \\\n \"INNER JOIN usuario ON usuario.id_usuario = mensaje.id_usuario \" \\\n \"GROUP BY mensaje.id_usuario \" \\\n \"HAVING text_mensaje like '%{}%' \" \\\n \"ORDER BY cantidad DESC\".format(p_clave)\n\n df = pd.read_sql_query(query, conexion)\n if not df.empty:\n return df\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")\n\ndef consultar_media_mensajes(conexion, f_ini, f_fin):\n query = \"SELECT red_social.nom_red_social, mensaje.f_mensaje \" \\\n \"FROM mensaje \" \\\n \"INNER JOIN red_social ON red_social.id_red_social = mensaje.id_red_social \" \\\n \"WHERE f_mensaje >= DATE('{}') \" \\\n \"AND f_mensaje <= DATE('{}') \".format(f_ini, f_fin)\n\n df = pd.read_sql_query(query, conexion)\n if not df.empty:\n df[\"f_mensaje\"] = pd.to_datetime(df[\"f_mensaje\"])\n df[\"dia\"] = df[\"f_mensaje\"].dt.date\n df = df.loc[:, [\"nom_red_social\", \"dia\"]]\n m_dia = df.groupby([\"nom_red_social\", \"dia\"])[\"dia\"].count().reset_index(name='Mensajes')\n total_mensajes = m_dia[\"Mensajes\"].sum()\n m_dia['media_mensajes'] = m_dia['Mensajes']/total_mensajes\n print(m_dia)\n m_dia.plot(x='dia', y=\"media_mensajes\", kind='bar', figsize=(12, 8))\n plt.xticks(rotation=30)\n plt.xlabel('Días')\n plt.ylabel('Porcentaje')\n plt.title('Media de mensajes por día', size=18)\n plt.show()\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")\n\ndef stadisticas_mensaje(conexion, word):\n query= \"SELECT (red_social.nom_red_social) as Red_Social, count(mensaje.text_mensaje) as Cantidad \" \\\n \"FROM mensaje \" \\\n \"INNER JOIN red_social ON red_social.id_red_social = mensaje.id_red_social \" \\\n \"WHERE mensaje.text_mensaje like '%{}%' \" \\\n \"GROUP BY red_social.nom_red_social\".format(word)\n\n df= pd.read_sql_query(query, conexion)\n if not df.empty:\n return df\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")","repo_name":"villa85/curso_python_2","sub_path":"Proyecto_Final_23E_Yuniel_Villalon/obtener_datos/consultar_datos.py","file_name":"consultar_datos.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29255508539","text":"\nimport argparse\n\ndef count_lines(line):\n n_lines = 1\n return(n_lines)\n \ndef count_words(line):\n n_words = 0\n while \" \" in line:\n line = line.replace(\" \",\" \")\n words = line.strip().split(\" \")\n if words != ['']:\n n_words += len(words)\n return(n_words)\n \ndef count_chars(line):\n n_chars = len(line) + 1\n return(n_chars)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Count lines, words and characters.')\n parser.add_argument('file_path', type=str, help='name of the file to be counted')\n parser.add_argument('-l', dest= \"lines\", action=\"store_true\", help='count lines')\n parser.add_argument('-c', dest= \"characters\", action=\"store_true\", help='count chars')\n parser.add_argument('-w', dest= \"words\", action=\"store_true\", help='count words')\n\n args = parser.parse_args()\n return(args)\n\ndef open_file(file_path):\n try:\n data_file = open(file_path, 'r')\n return(True, data_file)\n except OSError:\n return(False, 'File not found')\n\nif __name__ == '__main__':\n args = parse_args()\n data_file = open_file(args.file_path)\n if data_file[0] == True:\n lines = 0\n words = 0\n chars = 0\n for data_line in data_file[1]:\n if args.lines:\n lines += count_lines(data_line)\n if args.words:\n words += count_words(data_line)\n if args.characters:\n chars += count_chars(data_line)\n data_file[1].close()\n if args.lines:\n print(\"Number of lines: \" + str(lines))\n if args.words:\n print(\"Number of words: \" + str(words))\n if args.characters:\n print(\"Number of characters: \" + str(chars))\n else:\n print(data_file[1])\n\n","repo_name":"janusz-krauze/word_counter","sub_path":"word_counter.py","file_name":"word_counter.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36518508609","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, Group\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom datetime import datetime\ntry:\n from django.contrib.sites.shortcuts import get_current_site\nexcept ImportError:\n from django.contrib.sites.models import get_current_site\nfrom django.core import signing\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.core.validators import validate_email\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import App_Course\nfrom instructor.models import Instructor, Course, Student\nfrom tutor_admin.models import Term\nfrom ta_tutor.models import Session\nfrom survey.models import Survey\nfrom student.models import Student as StudentAccount\n\nfrom pusher import Pusher, pusher\nimport codecs, json, sys, pyexcel as pe\nfrom collections import defaultdict\nfrom xlrd import XLRDError\n\n# LOAD HOME PAGE\ndef index(request):\n return render(request, 'home/home.html')\n\n# CONTACT US PAGE\ndef contact(request):\n context = {\n 'contact': ['Email: UtsaTutorLab@gmail.com'],\n 'title': \"Contact Us\",\n }\n return render(request, 'home/contact.html', context)\n\n# LOGIN USER, REDIRECT TO THEIR PROFILE\ndef submit_login(request):\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n if user.groups.filter(name='Student').exists():\n return HttpResponse(\n json.dumps(\"/student\"),\n content_type=\"application/json\"\n )\n if user.groups.filter(name='Tutor').exists():\n return HttpResponse(\n json.dumps(\"/ta_tutor\"),\n content_type=\"application/json\"\n )\n if user.groups.filter(name='Tutor_Admin').exists():\n return HttpResponse(\n json.dumps(\"/tutor_admin\"),\n content_type=\"application/json\"\n )\n if user.groups.filter(name='Instructor').exists():\n return HttpResponse(\n json.dumps(\"/instructor\"),\n content_type=\"application/json\"\n )\n if username == 'admin' or username == 'bifrost_larry':\n return HttpResponse(\n json.dumps(\"/admin\"),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps(\"false-1\"),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps(\"false-2\"),\n content_type=\"application/json\"\n )\n\n# LOGOUT USER\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n# REDIRECT TO USER PROFILE\ndef profile(request):\n user = request.user\n if user is not None:\n if user.is_active:\n if user.groups.filter(name='Student').exists():\n return HttpResponseRedirect('/../student/')\n if user.groups.filter(name='Tutor').exists():\n return HttpResponseRedirect('/../ta_tutor/')\n if user.groups.filter(name='Instructor').exists():\n return HttpResponseRedirect('/../instructor/')\n if user.groups.filter(name='Tutor_Admin').exists():\n return HttpResponseRedirect('/../instructor/')\n if user.username == 'admin' or user.username == 'bifrost_larry':\n return HttpResponseRedirect('/../admin/')\n else:\n return HttpResponseRedirect('/')\n else:\n return 
HttpResponseRedirect('/')\n\n@csrf_exempt\ndef pusher_authentication(request):\n\tpusher_client = pusher.Pusher(app_id=settings.PUSHER_APP_ID,key=settings.PUSHER_KEY,secret=settings.PUSHER_SECRET)\n\tpusher_client.trigger(u'ch1',u'enqueue',{})\t\n\n\treturn HttpResponse(\"Ooh secret\")\n\n# SHOWS ALL TUTORS SCHEDULES\ndef ta_schedule(request):\n context = {\n 'title': \"Tutor Schedule\",\n }\n return render(request, 'home/schedule.html', context)\n\n\n\n\n@login_required(login_url='/admin/')\ndef admin_import(request):\n if request.user.is_active:\n if not request.user.is_superuser:\n return HttpResponseRedirect('/profile')\n if request.method == \"GET\":\n return render(request, \"home/admin_import.html\")\n if request.method == \"POST\" and request.FILES['file']:\n xlsFile = request.FILES['file']\n i_first_name = i_last_name = i_user_name = i_email = class_name = class_num = s_first_name = s_last_name = s_user_name = ' '\n num_i = num_c = num_s = 0\n try:\n fs = FileSystemStorage()\n filename = fs.save(xlsFile.name, xlsFile)\n print(\"File name =\", xlsFile)\n sheet = pe.get_sheet(file_name=fs.path(xlsFile.name), name_columns_by_row=0)\n records = sheet.to_records()\n for record in records:\n keys = sorted(record.keys())\n for key in keys:\n if key == \"Instructor First Name\":\n print(str(record[key]))\n i_first_name = record[key]\n elif key == \"Instructor Last Name\": \n print(str(record[key]))\n i_last_name = record[key]\n elif key == \"Instructor Username\": \n print(str(record[key]))\n i_user_name = record[key]\n elif key == \"Instructor Email\":\n print(str(record[key]))\n i_email = record[key]\n elif key == \"Class Name\":\n print(str(record[key]))\n class_name = record[key]\n elif key == \"Class Number\":\n print(str(record[key]))\n class_num = record[key]\n elif key == \"Student abc123\":\n print(str(record[key]))\n s_user_name = record[key]\n elif key == \"Student First Name\":\n print(str(record[key]))\n s_first_name = record[key]\n elif key == \"Student Last Name\":\n print(str(record[key]))\n s_last_name = record[key]\n\n # Get or create user\n user, user_created = User.objects.get_or_create(username=i_user_name, first_name=i_first_name, last_name=i_last_name, email=i_email)\n group = Group.objects.get(name='Instructor')\n group.user_set.add(user)\n # Get or create current instructor\n cur_instructor,created = Instructor.objects.get_or_create(user=user, first_name=i_first_name, last_name=i_last_name, email=i_email)\n cur_instructor.save()\n\n if(user_created):\n # send email to setup password\n send_activation(request, user.username, user.email)\n num_i+=1\n\n # Get or create current course and associate with instructor\n cur_course, course_created = Course.objects.get_or_create(course_num=class_num, course_name=class_name)\n cur_course.save()\n cur_course.Instructor = cur_instructor\n cur_course.save()\n if(course_created):\n num_c+=1 \n \n # Get or create current student and associate with course\n cur_student, student_created = Student.objects.get_or_create(first_name=s_first_name, last_name=s_last_name, studentID=s_user_name)\n cur_student.save()\n cur_student.courses.add(cur_course)\n cur_student.save()\n if(student_created):\n num_s+=1\n \n fs.delete(xlsFile.name)\n data = {\n \"bool\":\"true\",\n\t\t \"i_created\":num_i,\n \"c_created\":num_c,\n \"s_created\":num_s\n }\n return HttpResponse(\n json.dumps(data),\n content_type=\"application/json\"\n )\n\n\n except XLRDError:\n print(\"xlrd error\")\n lastCol = firstCol = userCol = 0\n i_last_name = i_first_name = 
i_email = class_name = class_num = s_first_name = s_last_name = s_user_name = \"\"\n fs = FileSystemStorage()\n filename = fs.save(xlsFile.name, xlsFile)\n with codecs.open(fs.path(xlsFile.name), encoding='UTF-16') as f:\n for rowx, row in enumerate(f):\n if row.endswith(u'\\r\\n'): row = row[:-2]\n data = row.split(u'\\t ,')\n for colx, datum in enumerate(data):\n info = datum.strip(\"'\\\"\")\n if(rowx == 0):\n if( info == 'Instructor First Name'):\n print(info)\n iFirstCol = colx\n elif( info == 'Instructor Last Name'):\n print(info + str(colx))\n iLastCol = colx\n elif( info == 'Instructor Email'):\n print(info + str(colx))\n iEmailCol = colx\n elif( info == 'Class Name'):\n print(info + str(colx))\n cNameCol = colx\n elif( info == 'Class Number'):\n print(info + str(colx))\n cNumCol = colx\n elif( info == 'Student First Name'):\n print(info + str(colx))\n sFirstCol = colx\n elif( info == 'Student Last Name'):\n print(info + str(colx))\n sLastCol = colx\n elif( info == 'Student abc123'):\n print(info + str(colx))\n sUserCol = colx\n else:\n if(colx == iLastCol):\n # print(\"Instructor last name = col[\" + str(colx) +\"]\", info)\n i_last_name = info\n elif(colx == iFirstCol):\n # print(\"Instructor first name = col[\" + str(colx) +\"]\", info)\n i_first_name = info\n elif(colx == iEmailCol):\n # print(\"Instructor Email = col[\" + str(colx) +\"]\", info)\n i_email = info\n elif(colx == cNameCol):\n # print(\"Class name = col[\" + str(colx) +\"]\", info)\n class_name = info\n elif(colx == cNumCol):\n # print(\"Class num = col[\" + str(colx) +\"]\", info)\n class_num = info\n elif(colx == sUserCol):\n # print(\"username = col[\" + str(colx) +\"]\", info)\n s_user_name = info\n elif(colx == sFirstCol):\n # print(\"Student first name = col[\" + str(colx) +\"]\", info)\n s_first_name = info\n elif(colx == sLastCol):\n # print(\"Student last name = col[\" + str(colx) +\"]\", info)\n s_last_name = info\n\n if(rowx > 0):\n # Get or create user\n user, user_created = User.objects.get_or_create(username=i_user_name, first_name=i_first_name, last_name=i_last_name, email=i_email)\n group = Group.objects.get(name='Instructor')\n group.user_set.add(user)\n # Get or create current instructor\n cur_instructor,created = Instructor.objects.get_or_create(first_name=i_first_name, last_name=i_last_name, email=i_email)\n cur_instructor.save()\n # Get or create current course and associate with instructor\n cur_course, created = Course.objects.get_or_create(course_num=class_num, course_name=class_name)\n cur_course.save()\n cur_course.Instructor = cur_instructor\n cur_course.save()\n # Get or create current student and associate with course\n cur_student,created = Student.objects.get_or_create(first_name=s_first_name, last_name=s_last_name, studentID=s_user_name)\n cur_student.save()\n cur_student.courses.add(cur_course)\n cur_student.save()\n \n except Exception as e:\n print(\"Error in upload:\", e)\n\n if(fs.exists(filename)):\n # print(\"deleting file 2: \", xlsFile.name)\n fs.delete(xlsFile.name)\n if(fs.exists(filename)):\n # print(\"deleting file 1: \", filename)\n fs.delete(filename)\n \n data = {\n 'bool': 'false'\n }\n\n return HttpResponse(\n json.dumps(data),\n content_type = \"application/json\"\n )\n \ndef admin_purge(request):\n if request.method == \"GET\":\n terms = Term.objects.all()\n context = {\n 'terms':terms\n }\n return render(request, \"home/admin_purge.html\", context)\n \n if request.method == \"POST\":\n \n ######### DELETE TERMS, SESSIONS, SURVEYS #########\n\n termList = 
request.POST.getlist('selectedTerms[]')\n terms = []\n data = {}\n\n if \"None\" in termList:\n if len(termList) > 1:\n data['term-issue'] = \"None selected in term selection list\"\n data['bool-term'] = \"false\"\n else:\n data['term-issue'] = \"No surveys or student-tutor sessions deleted\"\n data['bool-term'] = \"true\"\n else: \n for term in termList:\n terms.append(Term.objects.get(name=term))\n surveys = Survey.objects.all()\n sessions = Session.objects.all()\n surveysToDelete = []\n sessionsToDelete = []\n for term in terms:\n for survey in surveys:\n if term.inTerm(survey.date_completed.date()):\n surveysToDelete.append(survey)\n for session in sessions:\n if term.inTerm(session.sessionID.date()):\n sessionsToDelete.append(session)\n \n # Delete surveys and minus count from tutor\n for survey in surveysToDelete:\n survey.tutor.survey_count -= 1\n survey.tutor.save()\n survey.delete()\n # Delete sessions\n for session in sessionsToDelete:\n session.delete()\n # Delete terms\n for term in terms:\n term.delete()\n \n ######## DELETE COURSES AND STUDENTS AND STUDENT ACCOUNTS ###########\n Course.objects.all().delete()\n Student.objects.all().delete()\n for student in StudentAccount.objects.all():\n if student.student.last_login.date() < datetime.today().date().replace(year = datetime.today().year - 1):\n student.user.delete()\n\n data['bool-term'] = 'true'\n\n return HttpResponse(\n json.dumps(data),\n content_type = \"application/json\"\n )\n\n@login_required(login_url='/admin/')\ndef admin_manage(request):\n if request.user.is_active:\n if not request.user.is_superuser:\n return HttpResponseRedirect('/profile')\n if request.method == \"GET\":\n instructors = Instructor.objects.all()\n tutor_admins = []\n for instructor in instructors:\n if instructor.user:\n if instructor.user.groups.filter(name=\"Tutor_Admin\"):\n tutor_admins.append(instructor)\n context = {\n \"instructors\": instructors,\n \"tutor_admins\": tutor_admins\n }\n return render(request, \"home/admin_manage.html\", context)\n if request.method == \"POST\":\n action = request.POST.get(\"action\")\n instructors = request.POST.getlist(\"selectedInstructors[]\")\n \n if \"None\" not in instructors:\n if action == \"delete\":\n try:\n for instructor in instructors:\n cur_instructor = Instructor.objects.get(email=instructor)\n cur_instructor.user.delete()\n data = {\n \"bool\":\"true\",\n \"msg\":\"Instructor(s) deleted\"\n }\n except ObjectDoesNotExist:\n data = {\n \"bool\":\"false\",\n \"msg\":\"Could not delete instructor (Does Not Exist)\"\n }\n elif action == \"addAdmin\":\n group = Group.objects.get(name='Tutor_Admin')\n for instructor in instructors:\n cur_instructor = Instructor.objects.get(email=instructor)\n group.user_set.add(cur_instructor.user)\n data = {\n \"bool\":\"true\",\n \"msg\":\"Instructor(s) given Tutor-Admin status\"\n }\n elif action == \"remAdmin\":\n group = Group.objects.get(name='Tutor_Admin')\n for instructor in instructors:\n cur_instructor = Instructor.objects.get(email=instructor)\n group.user_set.remove(cur_instructor.user)\n data = {\n \"bool\":\"true\",\n \"msg\":\"Instructor(s) revoked of Tutor-Admin status\"\n }\n else:\n data = {\n \"bool\":\"false\",\n \"msg\": \"None selected in instructor selection\"\n }\n\n return HttpResponse(\n json.dumps(data),\n content_type = \"application/json\"\n )\n \ndef send_activation(request, username, email):\n try:\n # GET EMAIL TEMPLETS\n email_body = 'home/email_temps/activation_body.txt'\n email_subject = 'home/email_temps/activation_subject.txt'\n 
user = User.objects.get(username=username)\n instructor = Instructor.objects.get(user = user)\n token = signing.dumps(username, salt=settings.SECRET_KEY)\n instructor.token = token\n instructor.save()\n \n # CONTEXT FOR EMAIL\n context = {\n 'site': get_current_site(request),\n 'username': user.get_full_name(),\n 'token': token,\n 'secure': request.is_secure(),\n }\n body = loader.render_to_string(email_body, context).strip()\n subject = loader.render_to_string(email_subject, context).strip()\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email])\n return True\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return False\n","repo_name":"UtsaTutorLab/TutorLabProject","sub_path":"tutorlab/tutorlab/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71638317690","text":"#!C:\\Program Files\\Python310\\python.exe\nprint(\"content-type: text/html\\n\\n\")\n\nimport sys\n\nsys.path.append(\"C:\\\\Users\\\\tyree\\\\AppData\\\\Roaming\\\\Python\\\\Python310\\\\site-packages\")\nimport speech_recognition as sr\n\n\ndef main():\n # obtain audio from the microphone\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Say something!\")\n audio = r.listen(source)\n # recognize speech using Google Speech Recognition\n try:\n # the default google API (no keys needed)\n speech = r.recognize_google(audio)\n # print(speech)\n return speech\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\n \"Could not request results from Google Speech Recognition service; {0}\".format(\n e\n )\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Alx-nder/virtualTourWebsite","sub_path":"chatbot_module/speech_module.py","file_name":"speech_module.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27321065049","text":"class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n if len(nums) == 1:\n return [nums]\n \n elif len(nums) == 2:\n if nums[0] != nums[1]: \n return [nums, list(reversed(nums))] \n return [nums]\n \n all_perms = []\n for index, num in enumerate(nums):\n nums_without_current_num = nums[:index]\n if index + 1 <= len(nums):\n nums_without_current_num.extend(nums[index + 1:])\n \n permutuation_without_current_num = self.permuteUnique(nums_without_current_num) \n \n for perm in permutuation_without_current_num:\n all_perms.append([num] + perm) \n \n unique_perms_set = set()\n for perm in all_perms:\n unique_perms_set.add(tuple(perm)) \n \n unique_perms = []\n for perm in unique_perms_set:\n unique_perms.append(list(perm))\n \n return unique_perms","repo_name":"meraf00/Competitive-Programming","sub_path":"0047-permutations-ii/0047-permutations-ii.py","file_name":"0047-permutations-ii.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29308076592","text":"print(\"数字,日期和时间5\")\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom dateutil.rrule import *\n#创建一周的列表\nweekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday']\nweekends = ['Saturday', 'Sunday']\n\n#初始化\ndef get_previous_byday(dayname, start_date=None):\n if start_date is None:\n start_date = datetime.today()\n day_num = start_date.weekday()\n day_num_target = weekdays.index(dayname)\n days_ago = (7 + day_num - day_num_target) % 7\n if days_ago == 0:\n days_ago = 7\n target_date = start_date - timedelta(days=days_ago)\n return target_date\n\n\ndef last_friday():\n print(datetime.today())\n print(get_previous_byday('Monday'))\n print(get_previous_byday('Tuesday'))\n print(get_previous_byday('Friday'))\n print(get_previous_byday('Saturday'))\n # 显式的传递开始日期\n print(get_previous_byday('Sunday', datetime(2012, 12, 21)))\n\n # 使用dateutil模块\n d = datetime.now()\n # 下一个周五\n print(d + relativedelta(weekday=FR))\n # 上一个周五\n print(d + relativedelta(weekday=FR(-1)))\n # 下一个周六, 为什么如果今天是周六,下一个/上一个都返回今天的日期??\n print(d + relativedelta(weekday=SA))\n # 上一个周六\n print(d + relativedelta(weekday=SA(-1)))\n\n\nif __name__ == '__main__':\n last_friday()\n\nfrom datetime import datetime, date, timedelta\nimport calendar\n\ndef get_month_range(start_date=None):\n if start_date is None:\n start_date = date.today().replace(day=1)\n _, days_in_month = calendar.monthrange(start_date.year, start_date.month)\n end_date = start_date + timedelta(days=days_in_month)\n return (start_date, end_date)\n def date_range(start, stop, step):\n while start < stop:\n yield start\n start += step\n def month_range():\n a_day = timedelta(days=1)\n first_day, last_day = get_month_range()\n while first_day < last_day:\n print(first_day)\n first_day += a_day\n # 使用生成器\n for d in date_range(datetime(2012, 9, 1), datetime(2012, 10, 1),\n timedelta(hours=6)):\n print(d)\n if __name__ == '__main__':\n month_range()\n\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\nimport pytz\n\n\ndef tz_local():\n d = datetime(2012, 12, 21, 9, 30, 0)\n print(d)\n\n # Localize the date for Chicago\n central = timezone('US/Central')\n loc_d = central.localize(d)\n print(loc_d)\n\n # Convert to Bangalore time\n bang_d = loc_d.astimezone(timezone('Asia/Kolkata'))\n print(bang_d)\n\n\n # 夏令时\n d = datetime(2013, 3, 10, 1, 45)\n loc_d = central.localize(d)\n print(loc_d)\n later = loc_d + timedelta(minutes=30)\n print(later)\n # 使用normalize修正这个问题\n later = central.normalize(loc_d + timedelta(minutes=30))\n print(later)\n\n # 一个普遍策略是先转换为UTC时间,使用UTC时间来进行计算\n print(loc_d)\n utc_d = loc_d.astimezone(pytz.utc)\n print(utc_d)\n\n later_utc = utc_d + timedelta(minutes=30)\n # 转回到本地时间\n print(later_utc.astimezone(central))\n\n # 根据ISO 3166国家代码查找时区名称\n print(pytz.country_timezones['IN'])\n\nif __name__ == '__main__':\n tz_local()","repo_name":"TheRealMilesLee/Computer-Science-Learning","sub_path":"Python相关/Python_CookBook/数字,日期和时间/数字,日期和时间 5.py","file_name":"数字,日期和时间 5.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"10343926285","text":"from django.shortcuts import render\nfrom django.template import Context, Template\nfrom django.template import loader\nfrom django.http import HttpResponse\n\n\ndef index(request):\n t = loader.get_template('start_page.html')\n context = {\n 'variable':'var',\n 'gbimg':'gbcolor.jpg'\n }\n return HttpResponse(t.render(context, request))\n\ndef map(request):\n t= loader.get_template('map.html')\n context = {\n 'gbimg':'map.png'\n }\n return HttpResponse(t.render(context, request))\n\n# Create your views here.\n","repo_name":"nidzik/PythonDjango","sub_path":"rush00/rush00/rush00/moviemon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74746638327","text":"elemento = int(input('Insira o valor do elemento a ser buscado: '))\n\nindice = 0\n\nlista = [5,8,3,1,0,2]\n\nfor i in range(len(lista)):\n if elemento == lista[i]:\n print(i)\n\n \n","repo_name":"bihellzin/monitoria-p1","sub_path":"aulas-monitoria/07-10/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"3723093846","text":"import sys\n\nsys.stdin = open('specialsort.txt')\n\nfor testcase in range(int(input())):\n n = int(input())\n nums = list(map(int, input().split()))\n\n print(f'#{testcase + 1}', end=' ')\n for _ in range(5):\n maxnum = nums[0]\n minnum = nums[0]\n \n for num in nums:\n if num > maxnum:\n maxnum = num\n if num < minnum:\n minnum = num\n print(f'{maxnum} {minnum}', end=' ')\n \n trash = nums.pop(nums.index(maxnum))\n trash = nums.pop(nums.index(minnum))\n print()\n","repo_name":"hani2057/algorithm","sub_path":"swea/8월/0811/specialsort.py","file_name":"specialsort.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22381092606","text":"import pandas as pd\nimport numpy as np\n\n\n## data source : https://www.eia.gov/tools/faqs/faq.php?id=74&t=11\ndataElectric = pd.read_excel('data/annual_generation_state.xls')\ndataCarbon = pd.read_excel('data/emission_annual.xls')\n\n### preprocessing data:\ndataCarbon= dataCarbon[dataCarbon['Year']==2018]\ndataCarbon = dataCarbon[dataCarbon['State']!='DC']\ndataCarbon = dataCarbon[dataCarbon['State']!= 'US-TOTAL']\ndataCarbon = dataCarbon[dataCarbon['Energy Source'] == 'All Sources']\ndataCarbon = np.asarray(dataCarbon)\n\n## for electricity data :\ndataElectric.reindex(['a','b','c','d','e','f'])\nb=dataElectric.columns\ncolumns = ['year','state','type','resource','generation']\ndic,i = {},0\nfor j in range(len(columns)):\n dic[b[j]] = columns[j]\nE = dataElectric.rename(columns=dic)\nE = E[E['year']==2018]\nE\nE = E[E['resource'] == 'Total']\nE = E[E['state'] != 'DC']\nE = E[E['state'] != 'US-Total']\nE = E[E['type'] == 'Total Electric Power Industry']\nE = np.asarray(E)\ndataElectric = E\n#################\n\n\ndef emissionDict(dataCarbon) :\n \"\"\"\n @dataCarbon : Carbon emission data\n @return : a dictionary with key = state name, value = CO2 emission\n \"\"\"\n assert isinstance(dataCarbon, pd.DataFrame)\n prev = 'AK'\n index,sumOfEmissions = 0,0\n emissionDict = {}\n for i in range(len(C)) :\n item = C[i]\n if item[1]!=prev :\n emissionDict[prev] = sumOfEmissions\n sumOfEmissions = item[4]\n prev = item[1]\n else :\n sumOfEmissions += item[4]\n emissionDict['Wyoming'] = sumOfEmissions\n return emissionDict\n\n\ndef ele_generation(dataElectric) :\n \"\"\"\n @dataElectric : electricity generation in each state\n @return : a dictionary with key = state name, value = electricity generation\n \"\"\"\n assert isinstance(dataElectric,pd.DataFrame)\n generationDict = {}\n for i in range(len(dataElectric)) :\n item = dataElectric[i]\n generationDict[item[1]] = item[4]\n return generationDict\n\n\ndef co2_per_mwh(generationDict,emissionDict) :\n \"\"\"\n @dataElectric : annual electricity generation in each state\n @dataCarbon : annual CO2 emission generation in each state\n @return : a dictionary with key = states name, value : CO2 emission per mwh electricity\n \"\"\"\n assert isinstance(generationDict,dict)\n assert isinstance(emissionDict,dict)\n perMPH = {}\n for name in generationDict :\n perMPH[name] = emissionDict[name]*1.0 / generationDict[name]\n return perMPH\n\n\ndef generate_csv(perMPH) :\n \"\"\"\n This function writes a csv file with state name as index and\n the value of CO2 generation per mwh electricity as column\n @perMPH : a dictionary\n \"\"\"\n my_dict = perMPH\n with open('co2_mwh.csv', 'w') as f:\n f.write('states,co2/mwh\\n')\n for key in my_dict.keys():\n f.write(\"%s,%s\\n\"%(key,my_dict[key]))\n","repo_name":"anurag1paul/electric_vehicles_analysis","sub_path":"data_analysis/environment_data_process.py","file_name":"environment_data_process.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74424183608","text":"import json \nfrom celery import shared_task \nfrom guided_redaction.jobs.models import Job\nfrom guided_redaction.job_run_summaries.api import (\n JobRunSummariesViewSet,\n JobRunSummariesGenerateViewSet,\n)\n\n@shared_task\ndef create_manual_jrs(job_uuid):\n job = Job.objects.get(pk=job_uuid)\n if job:\n job.status = 'running'\n job.save()\n worker = JobRunSummariesViewSet()\n response = worker.process_create_request(json.loads(job.request_data))\n job.response_data = json.dumps(response.data)\n job.status = 'success'\n job.save()\n\n@shared_task\ndef create_automatic_jrs(job_uuid):\n job = Job.objects.get(pk=job_uuid)\n if job:\n job.status = 'running'\n job.save()\n worker = JobRunSummariesGenerateViewSet()\n response = worker.process_create_request(json.loads(job.request_data))\n job.response_data = json.dumps(response.data)\n job.status = 'success'\n job.save()\n","repo_name":"dcaulton/guided_redaction","sub_path":"api/guided_redaction/job_run_summaries/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"40899057875","text":"from tensorflow.keras.layers import Activation, Dense, Input, Concatenate, Flatten, InputLayer, Embedding\nfrom tensorflow.keras.models import Model, Sequential\nimport tensorflow as tf\nimport os\n\n\n\ndef build_multi_input_model(shape_vec, shape_mat):\n \"\"\"Build (and compile) multi input network.\n Args: \n shape_vec: Shape of the input vector\n shape_mat: Shape of the input matrix\n shape_out: Shape of the output vector\n Returns:\n model: Keras model\n \"\"\"\n\n # first branch for the\n inp1 = Input(shape=(1,), name='Country_ID')\n model1 = Embedding(23, 2, name='Country_Embedding')(inp1)\n model1 = Flatten()(model1)\n\n # second branch for the vector input\n inp2 = Input(shape=shape_vec, name=\"Date_and_Regimes\")\n\n # third branch for the matrix input\n inp3 = Input(shape=shape_mat, name=\"Ensemble\")\n model3 = Flatten()(inp3)\n \n # concatenate the two inputs\n x = Concatenate(axis=1)([model1, inp2, model3])\n\n # add the hiddden layers\n x = Dense( 100 , activation='linear' , name=\"Combined_Hidden_Layer_1\" )( x )\n x = Dense( 100 , activation='relu' , name=\"Combined_Hidden_Layer_2\" )( x )\n x = Dense( 100 , activation='relu' , name=\"Combined_Hidden_Layer_3\" )( x )\n\n x = Dense( 2 , activation='linear' , name=\"Output_Layer\" )(x)\n\n # returns the Model\n return Model([inp1, inp2, inp3], outputs=x)\n\n\ndef printModel(model, dir='', name='my_model.png'):\n tf.keras.utils.plot_model(model, to_file=os.path.join(dir , name), show_shapes=True,\n show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96)\n\ndef reset_weights(model):\n for layer in model.layers: \n if hasattr(layer,'init'):\n input_dim = layer.input_shape[1]\n new_weights = layer.init((input_dim, layer.output_dim),name='{}_W'.format(layer.name))\n layer.trainable_weights[0].set_value(new_weights.get_value())","repo_name":"muellerelias/nnpostprocessing","sub_path":"model/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71770193208","text":"def BFS(s):\r\n queue = []\r\n queue.append(s)\r\n visited[s] = True\r\n dist[s] = 0\r\n while queue:\r\n s = queue.pop(0)\r\n for i in graph[s]:\r\n if visited[i] == False:\r\n visited[i] = True\r\n queue.append(i)\r\n dist[i] = dist[s]+1\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n q = int(input())\r\n for i in range(q):\r\n n , m = map(int,input().split())\r\n graph = [[] for x in range(n)]\r\n dist = [-1 for x in range(n)]\r\n visited = [False for x in range(n)]\r\n for _ in range(m):\r\n u,v = map(lambda x: int(x)-1,input().split())\r\n graph[u].append(v)\r\n graph[v].append(u)\r\n s = int(input()) - 1\r\n BFS(s)\r\n # print(dist)\r\n for i in range(n):\r\n if i == s:\r\n continue\r\n if dist[i] != -1:\r\n print(dist[i]*6,end=\" \")\r\n else:\r\n print(-1,end=\" \")\r\n print()\r\n","repo_name":"GenesisBlock3301/Data-Structure-and-Algorithm","sub_path":"Graph Theory/Breadth First Search Shortest Reach (hackerrank).py","file_name":"Breadth First Search Shortest Reach (hackerrank).py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"4960846173","text":"import sys\nimport json\nfrom flask import Flask, Response, request\n\nfrom ecarton_code_challenge.lib.convert import convert_chars\n\napp = Flask('code_challenge')\n\n@app.route('/convert', methods=['POST'])\ndef convert():\n\n request_data = json.loads(request.data)\n\n converted = convert_chars(request_data)\n\n resp = Response(\n response=json.dumps(converted),\n mimetype='application/json',\n status=200)\n\n return resp\n\n\ndef create_app():\n return app\n","repo_name":"evert2410/engie","sub_path":"ecarton_code_challenge/lib/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"33134410532","text":"#!/usr/bin/python\n\nimport serial\nimport serial.tools.list_ports\nimport time\nfrom modules.utils import timeit\n\n\nclass SerialComWorker():\n \"\"\"\n Class to handle the serial communication between the PC and the EDF signal generator\n\n This class will be in charge of managing the ports and sending the data to the device\n \"\"\"\n def __init__(self, config):\n self.config_params_ = config\n print(\"Serial communication worker initialized\")\n\n def listSerialPorts(self):\n \"\"\"\n Method to create a list of all corresponding EDF signal generator devices.\n\n Callback for the GUI interaction\n \"\"\"\n self.generator_devices_ = self.searchCommPortsWindows_()\n user_device_list = []\n if self.generator_devices_:\n # Create list to be displayed to user\n for device in self.generator_devices_:\n user_device_list.append(str(device.device))\n return user_device_list\n else:\n return []\n\n def selectCommPort(self, user_chosen_device):\n \"\"\"\n Method to save the selected comm port.\n\n Callback for the GUI interaction\n \"\"\"\n # Check that devices are loaded\n if self.generator_devices_:\n # Go through loaded devices and check if name is in user_chosen_device\n for device in self.generator_devices_:\n if device.name in user_chosen_device:\n print(\"Selected port: \" + device.name)\n self.chosen_device_ = device\n\n @timeit\n def beginTransmision(self, bytes_packages: list, channels_amount, sample_rate):\n \"\"\"\n Method to start the transmition to the generator.\n\n Callback for the GUI interaction\n \"\"\"\n config_sample_rate_pkg = self.createConfigPackage_(self.config_params_[\"config_sample_rate\"], sample_rate)\n config_channel_amount_pkg = self.createConfigPackage_(self.config_params_[\"config_channels_amount\"], channels_amount)\n config_reset_all_dacs_pkg = self.createConfigPackage_(self.config_params_[\"config_reset_all_dacs\"], channels_amount)\n data_pkgs = [bytes_packages[i:i+64] for i in range(0,len(bytes_packages),64)]\n\n try:\n # Start serial connection\n serial_connection = serial.Serial(self.chosen_device_.name, baudrate=115200, bytesize=serial.EIGHTBITS, write_timeout=5)\n\n # Write sample rate config\n serial_connection.write(serial.to_bytes(config_sample_rate_pkg))\n time.sleep(0.1)\n\n # Write amount of channels config\n serial_connection.write(serial.to_bytes(config_channel_amount_pkg))\n\n for byte_pkg in data_pkgs:\n #for j in range(channels_amount):\n serial_connection.write(b\"\".join(byte_pkg))\n\n\n # When simulation ended, we reset outputs and configs of DACs:\n serial_connection.write(serial.to_bytes(config_reset_all_dacs_pkg))\n \n\n # End serial connection\n serial_connection.close()\n return True\n except serial.SerialTimeoutException:\n print(\"Serial write operation timed out, try resetting the device\")\n return False\n\n\n ###### Private ######\n\n \"\"\"\n List of key-value pairs of EDF signal generators found. 
Should contain:\n Name: Identifier for the device\n Device: String used to open and close the port (COMx for Windows)\n \"\"\"\n generator_devices_ = []\n chosen_device_ = \"\" # Selected serial communication port\n\n def searchCommPortsWindows_(self):\n \"\"\"\n Method to look for connected EDF signal generator devices in Windows\n\n Returns a list of serial comm devices with key-value pairs containing information about it\n\n It uses the PID 0483 to identify the STMicroelectronics device and 5740 for the Virtual COMM port\n \"\"\"\n generator_devices = []\n ports = serial.tools.list_ports.comports()\n for port in ports:\n if (\"0483\" and \"5740\") in port.hwid:\n device = {}\n device[\"Name\"] = port.name\n device[\"Device\"] = port.device\n generator_devices.append(port)\n return generator_devices\n\n def createConfigPackage_(self, config_num: int, config_data: int):\n \"\"\"\n This method creates a custom configuration package to send config_data to the microcontroller.\n \"\"\"\n enum_pkg = int(config_num).to_bytes(2, byteorder=\"big\", signed=False)\n data_pkg = int(config_data).to_bytes(2, byteorder=\"big\", signed=False)\n return b\"\".join([enum_pkg, data_pkg])\n","repo_name":"Gonzalor95/TProfesional_EEG","sub_path":"PyEDF-APP/modules/SerialComWorker.py","file_name":"SerialComWorker.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"4681056653","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('signup-admin/', views.signup_admin, name='signup_admin'),\n path('signin-admin/', views.signin_admin, name='signin_admin'),\n path('home/', views.home, name='home' ),\n path('student-home/', views.student_home, name='student_home' ),\n path('accounts/login/', views.home, name='home' ),\n path('logout/', views.logout, name='logout'),\n\n path('add-student/', views.add_student, name='add_student' ),\n path('view-student/', views.view_students, name='view_students' ),\n path('delete//', views.delete_student, name='delete_student' ),\n path('edit//', views.edit_student, name='edit_student' ),\n\n path('add-teacher/', views.add_teacher, name='add_teacher' ),\n path('view-teacher/', views.view_teachers, name='view_teachers' ),\n path('deletet//', views.delete_teacher, name='delete_teacher' ),\n path('editt//', views.edit_teacher, name='edit_teacher' ),\n\n path('add-department/', views.add_department, name='add_department' ),\n path('view-department/', views.view_departments, name='view_departments' ),\n path('deleted//', views.delete_department, name='delete_department' ),\n\n path('v-student/', views.v_students, name='v_students' ),\n path('v-teacher/', views.v_teachers, name='v_teachers' ),\n path('v-department/', views.v_departments, name='v_departments' ),\n]\n","repo_name":"Kamlesh-KD/University-Management-System","sub_path":"system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29361989619","text":"# Algoritmo para obtener signo zodiacal.\n\ndia_nacimiento = int(input(\"Ingrese dia de nacimiento : \"))\nmes_nacimiento = int(input(\"Ingrese mes de nacimiento : \"))\n\n# Transforma valores.\nmes_dia = int((\"00\"+str(mes_nacimiento))[-2:] + (\"00\"+str(dia_nacimiento))[-2:])\n\n# Diccionario con zodiaco.\nzodiaco = {\n 1222: \"capricornio\",\n 1122: \"sagitario\",\n 1023: \"escorpio\",\n 923: \"libra\",\n 823: \"virgo\",\n 723: \"leo\",\n 621: \"cancer\",\n 521: \"geminis\",\n 420: \"tauro\",\n 321: \"aries\",\n 219: \"piscis\",\n 120: \"acuario\",\n 0: \"capricornio\"\n}\n\n# Diccionario con zodiaco.\nfor i in zodiaco:\n if mes_dia >= i:\n print(\" \"+zodiaco[i])\n break\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej7/hito1_ej7_aca352e1a4a5b448a93d844e71d52fa5.py","file_name":"hito1_ej7_aca352e1a4a5b448a93d844e71d52fa5.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18502481021","text":"\"\"\"\nGiven a string s, return true if the s can be palindrome after deleting at most one\ncharacter from it.\n\nExample 1:\nInput: s = \"aba\"\nOutput: true\n\nExample 2:\nInput: s = \"abca\"\nOutput: true\nExplanation: You could delete the character 'c'.\n\nExample 3:\nInput: s = \"abc\"\nOutput: false\n\n\nConstraints:\n\n1 <= s.length <= 105\ns consists of lowercase English letters.\n\"\"\"\n\n\n# Time: O(n)\n# Space: O(1)\ndef valid_palindrome(s):\n def verify(s, left, right, deleted):\n while left < right:\n if s[left] != s[right]:\n if deleted:\n return False\n else:\n return verify(s, left + 1, right, True) or verify(s, left, right - 1, True)\n else:\n left += 1\n right -= 1\n return True\n\n return verify(s, 0, len(s) - 1, False)\n\n\n# Another Solution ---------------------------------------------------------------------------\n# Time: O(n)\n# Space: O(1)\ndef valid_palindrome_v2(s):\n low = 0\n high = len(s) - 1\n while low < high:\n if s[low] != s[high]:\n return is_palindrome(s, low + 1, high) or is_palindrome(s, low, high - 1)\n low += 1\n high -= 1\n\n\ndef is_palindrome(string, low, high):\n while low < high:\n if string[low] != string[high]:\n return False\n low += 1\n high -= 1\n return True\n\n\nif __name__ == \"__main__\":\n print(valid_palindrome_v2(\"abcba\"))\n","repo_name":"candaceleach41/algo_ds_coding_prep","sub_path":"easy/valid_palindrome_ii.py","file_name":"valid_palindrome_ii.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31725076494","text":"# Dictionaries are unordered.\n\nmy_dict = {\n 'key1': 1,\n 'key2': None,\n 'key3': 3.14,\n 'key4': [1,2,3],\n}\n# Iterate over keys\n# for x in my_dict:\n# print(x)\n\n# Iterate over values\n# for x in my_dict.values():\n# print(x)\n\n# Unpacking values\n# a, b, c, d = my_dict.values()\n# print(a, b, c, d)\n\n# Unpacking each tuple in the dictionary\n# for t in my_dict.items():\n# print(t)\n\n# Unpacking key, value pairs\n# for k, v in my_dict.items():\n# print(k, v)\n\n# ** unpacks k/v pairs into another dictionary. Can only be used on the right hand side. Notice how 'h':5 overwrode 'h':4.\nmy_dict_1 = {'p': 1, 'y': 2}\nmy_dict_2 = {'t': 3, 'h': 4}\nmy_dict_3 = {'h': 5, 'o': 6, 'n': 7}\nmerged_dict = {**my_dict_1, **my_dict_2, **my_dict_3}\nprint(merged_dict)","repo_name":"alexdavidkim/Python3-Notes","sub_path":"iterables_sequence_types/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"73288238968","text":"# Here's an example of stuff to copy and paste into an interactive Python\n# interpreter to get a connection loaded.\n# Or you can load it with 'python -i interactive_mode.py'.\n\n# Set some variables.\n\nbmrc = \"~/.bmrc\"\nsite = \"www\"\nbmutilspath = \"./lib\"\n\n# Import everything, make a connection, and try to log in.\n\nimport json\nimport os\nimport sys\nsys.path.append(os.path.expanduser(bmutilspath).rstrip(\"/\"))\nimport bmutils\nbmconnection = bmutils.BMClientParser(os.path.expanduser(bmrc), site)\nif not bmconnection.verify_login():\n print(\"Could not login\")\n \n# At this point you can do whatever you want. Here's how to load a game,\n# and print its info in nice JSON.\n\ngamenumber = 3038\n\ngame = bmconnection.wrap_load_game_data(gamenumber)\nprint(json.dumps(game, sys.stdout, indent=1, sort_keys=True))\n","repo_name":"buttonmen-dev/buttonmen","sub_path":"tools/api-client/python/interactive_mode.py","file_name":"interactive_mode.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"}
+{"seq_id":"35429763319","text":"import pyinputplus as pyip\nfrom datetime import date\nimport calendar\nimport openpyxl\nimport glob\nimport csv\n\n\ndef get_current_date():\n \"\"\"Get current date with goal format of: dd MMM YYYY\"\"\"\n date_year = date.today().year\n date_month = date.today().month\n month_abbr = calendar.month_abbr[date_month]\n date_day = date.today().day\n return f'{date_day} {month_abbr} {date_year}'\n\ndef get_user_input(message):\n \"\"\"Get input from the user with an individualized message and return the user's input.\"\"\"\n output = \"\"\n while True:\n output = input(message)\n print(f\"You entered {output}; is this correct?\")\n verify = pyip.inputMenu([\"Yes\", \"No\"], numbered=True)\n if verify == \"Yes\":\n break\n return output\n\ndef choose_excel_file():\n \"\"\"Showing the user all of the Excel files in the current working directory and asking them to select one to\n load if they have an ongoing file they are adding to.\"\"\"\n excel_files_in_directory = glob.glob('*.xlsx')\n print(\"The following Excel workbooks are in this folder:\")\n i = 1\n for file in excel_files_in_directory:\n print(f\"{i}: {file}\")\n i += 1\n load_current_file = pyip.inputMenu(['Yes', 'No'],\n \"\\nDo you want to pick one of these files to load for the output file?\\n\",\n numbered=True)\n if load_current_file == 'Yes':\n output = pyip.inputMenu(excel_files_in_directory, numbered=True)\n return output\n else:\n return 'None'\n\ndef choose_file(message):\n \"\"\"Asking the user to clarify which csv file correlates to VAX ID and which to VAX Reports data.\"\"\"\n files = glob.glob('*.csv')\n print(message)\n output = pyip.inputMenu(files, numbered=True)\n return output\n\n# Variables for counting\ntotal_occurrences = 0\ntotal_deaths = 0\ntotal_er_visits = 0\ntotal_hospitalizations = 0\ntotal_covid_vax_occurrences = 0\ntotal_covid_vax_deaths = 0\ntotal_covid_vax_er_visits = 0\ntotal_covid_vax_hospitalizations = 0\n\n# VAX file structure: Column 0 - VAERS_ID, Column 1 - VAX_TYPE, Column 2 - VAX_MANU,\n# Column 3 - VAX_LOT, Column 4 - VAX_DOSE_SERIES, Column 5 - VAX_ROUTE, Column 6 - VAX_SITE,\n# Column 7 - VAX_NAME\nVAX_file = choose_file('Which file has the vaccine ID information (Ex: VAERSVAX)?')\nDATA_file = choose_file(\"Which file has the vaccine report data (Ex: VAERSDATA)?\")\n\n# Choose and read into a list the VAX data.\nvax_data = []\nwith open(VAX_file, 'r', encoding='windows-1252') as file:\n reader = csv.reader(file, delimiter=',')\n headers = next(reader)\n for row in reader:\n vax_data.append(row)\n\n# Setting up a dictionary to read all the VAX data into.\n# Key is VAX_NAME, value is a list of VAERS_ID\nvax_data_initial = {}\n\nvax_count_variable = 0\nwhile vax_count_variable < len(vax_data):\n vax_name = vax_data[vax_count_variable][7]\n vax_id = vax_data[vax_count_variable][0]\n if vax_name in vax_data_initial:\n vax_data_initial[vax_name].append(vax_id)\n else:\n vax_data_initial[vax_name] = [vax_id]\n vax_count_variable += 1\n\n# Setup a dictionary for each VAERS_ID entry.\n# Determining whether the report is due to death.\nvax_reports = {}\n\n# DATA file structure:\n# Column 0 - VAERS_ID\n# Column 1 - RECVDATE\n# Column 2 - STATE\n# Column 3 - AGE_YRS\n# Column 4 - CAGE_YR\n# Column 5 - CAGE_MO\n# Column 6 - SEX\n# Column 7 - RPT_DATE\n# Column 8 - SYMPTOM_TEXT\n# Column 9 - DIED\n# Column 10 - DATEDIED\n# Column 11 - L_THREAT\n# Column 12 - ER_VISIT\n# Column 13 - HOSPITAL\n# Column 14 - HOSPDAYS\n# Column 15 - X_STAY\n# Column 16 - DISABLE\n# Column 17 - 
RECOVD\n# Column 18 - VAX_DATE\n# Column 19 - ONSET_DATE\n# Column 20 - NUMDAYS\nvax_data_data = []\nwith open(DATA_file, 'r', encoding='windows-1252') as file:\n reader = csv.reader(file, delimiter=',')\n headers = next(reader)\n for row in reader:\n vax_data_data.append(row)\n\ndata_count_variable = 0\nwhile data_count_variable < len(vax_data_data):\n vaers_id = vax_data_data[data_count_variable][0]\n reported_death = 0\n reported_er_visit = 0\n reported_hospitalization = 0\n if vax_data_data[data_count_variable][9] == \"Y\":\n reported_death += 1\n if vax_data_data[data_count_variable][12] == \"Y\":\n reported_er_visit += 1\n if vax_data_data[data_count_variable][13] == \"Y\":\n reported_hospitalization += 1\n\n # Add VAERS_ID to dictionary.\n vax_reports[vaers_id] = [reported_death, reported_er_visit, reported_hospitalization]\n data_count_variable += 1\n\nvax_data_by_type = []\nfor vaccine_type in vax_data_initial:\n vaccine_name = vaccine_type\n total_reported_occurrences = 0\n total_reported_deaths = 0\n total_reported_er_visits = 0\n total_reported_hospitalizations = 0\n for report_id in vax_data_initial[vaccine_type]:\n total_reported_occurrences += 1\n # 0 - reported_death, 1 - reported_er_visit, 2 - reported_hospitalization\n total_reported_deaths += vax_reports[report_id][0]\n total_reported_er_visits += vax_reports[report_id][1]\n total_reported_hospitalizations += vax_reports[report_id][2]\n\n # Add parsed data to list.\n vax_data_by_type.append([vaccine_name, # 0\n total_reported_occurrences, # 1\n total_reported_deaths, # 2\n total_reported_er_visits, # 3\n total_reported_hospitalizations]) # 4\n\n # Update totals.\n total_occurrences += total_reported_occurrences\n total_deaths += total_reported_deaths\n total_er_visits += total_reported_er_visits\n total_hospitalizations += total_reported_hospitalizations\n\n # Update COVID19 vaccine totals.\n if vaccine_type.__contains__('COVID19'):\n total_covid_vax_occurrences += total_reported_occurrences\n total_covid_vax_deaths += total_reported_deaths\n total_covid_vax_er_visits += total_reported_er_visits\n total_covid_vax_hospitalizations += total_reported_hospitalizations\n\nsorted_vax_data_list = sorted(vax_data_by_type, key=lambda vax_deaths: vax_deaths[2], reverse=True)\n\n# A variable for the date of the current data.\ndata_date = get_user_input(\"What's the date for this data (it's in the name of the zip folder)? \")\n\n# Check to see if output Excel already exists.\n# Load sheet if exists, else create new file.\nchosen_file = choose_excel_file()\noutput_wb = \"\"\nif chosen_file == 'None':\n output_wb = openpyxl.Workbook()\n chosen_file = get_user_input(\"What would you like to name the file? 
\")\nelse:\n output_wb = openpyxl.load_workbook(chosen_file)\n\noutput_wb_sheet = output_wb.create_sheet(index=0, title=data_date)\noutput_wb_sheet.merge_cells('A1:D1')\noutput_wb_sheet['A1'] = f\"VAERS Data from: {data_date}; Parsed on: {get_current_date()}\"\noutput_wb_sheet['A2'] = \"Vaccine Type\"\noutput_wb_sheet['B2'] = \"Number of Reports\"\noutput_wb_sheet['C2'] = \"Deaths Reported\"\noutput_wb_sheet['D2'] = \"ER Visits Reported\"\noutput_wb_sheet['E2'] = \"Hospitalizations Reported\"\n\nrow_to_write_to = 3 # Starting at 3 since the date is going in 1 and headers in 2.\nfor vaccine in sorted_vax_data_list:\n # Write values to Excel.\n output_wb_sheet[f'A{row_to_write_to}'] = vaccine[0]\n output_wb_sheet[f'B{row_to_write_to}'] = vaccine[1]\n output_wb_sheet[f'C{row_to_write_to}'] = vaccine[2]\n output_wb_sheet[f'D{row_to_write_to}'] = vaccine[3]\n output_wb_sheet[f'E{row_to_write_to}'] = vaccine[4]\n row_to_write_to += 1\n\n# Writing out the totals and comparing COVID19 to everything else.\noutput_wb_sheet['G2'] = \"Total Deaths\"\noutput_wb_sheet['G3'] = total_deaths\noutput_wb_sheet['G5'] = \"COVID19 Vaccine Deaths\"\noutput_wb_sheet['G6'] = total_covid_vax_deaths\noutput_wb_sheet['G8'] = \"Non-COVID Vaccine Deaths\"\noutput_wb_sheet['G9'] = total_deaths - total_covid_vax_deaths\noutput_wb_sheet['G11'] = \"Total ER Visits\"\noutput_wb_sheet['G12'] = total_er_visits\noutput_wb_sheet['G14'] = \"COVID19 ER Visits\"\noutput_wb_sheet['G15'] = total_covid_vax_er_visits\noutput_wb_sheet['G17'] = \"Non-COVID ER Visits\"\noutput_wb_sheet['G18'] = total_er_visits - total_covid_vax_er_visits\noutput_wb_sheet['G20'] = \"Total Hospitalizations\"\noutput_wb_sheet['G21'] = total_hospitalizations\noutput_wb_sheet['G23'] = \"COVID19 Hospitalizations\"\noutput_wb_sheet['G24'] = total_covid_vax_hospitalizations\noutput_wb_sheet['G26'] = \"Non-COVID Hospitalizations\"\noutput_wb_sheet['G27'] = total_hospitalizations - total_covid_vax_hospitalizations\noutput_wb_sheet['I2'] = \"Total Reports\"\noutput_wb_sheet['I3'] = total_occurrences\noutput_wb_sheet['I5'] = \"COVID19 Reports\"\noutput_wb_sheet['I6'] = total_covid_vax_occurrences\noutput_wb_sheet['I8'] = \"Non-COVID Reports\"\noutput_wb_sheet['I9'] = total_occurrences - total_covid_vax_occurrences\n\n# Clean up the spreadsheet.\nsheets = output_wb.sheetnames\nif 'Sheet' in sheets:\n del output_wb['Sheet']\n\nif chosen_file.endswith('.xlsx'):\n output_wb.save(chosen_file)\n output_wb.close()\nelse:\n output_wb.save(f'{chosen_file}.xlsx')\n output_wb.close()","repo_name":"calebwsaunders/VAERS_verification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22309893642","text":"import os\r\nfrom PIL import Image\r\n\r\n# Set the path and change working directory to the path of the images.\r\npath = \"test\"\r\nos.chdir(path)\r\n\r\n# Set some constants for the desired size of the x axis for the image and the logo filename.\r\nX_FIT_SIZE = 800\r\nLOGO_FILENAME = \"testing.png\"\r\n\r\n# Open the logo and also set some variables for its width and height.\r\nlogoIm = Image.open(LOGO_FILENAME)\r\nlogoWidth, logoHeight = logoIm.size\r\n\r\n# Create 2 new folders in the directory, don't raise an error if the folder already exists.\r\nos.makedirs(\"With Logo\", exist_ok=True)\r\nos.makedirs(\"Without Logo\", exist_ok=True)\r\n\r\n# Loop over all files in the working directory.\r\nfor filename in os.listdir('.'):\r\n if not (filename.endswith('.png') or filename.endswith('.jpg')) or filename == LOGO_FILENAME:\r\n continue # Skip non-image files and the logo file itself.\r\n\r\n # If the file passes through the check, open the image and save its width and height\r\n im = Image.open(filename)\r\n width, height = im.size\r\n\r\n # Check if image needs to be resized.\r\n if width > X_FIT_SIZE or width < X_FIT_SIZE:\r\n\r\n # Calculate the new width and height to resize to.\r\n height = int((X_FIT_SIZE / width) * height)\r\n width = X_FIT_SIZE\r\n\r\n # Resize the image.\r\n print(\"Resizing {0}...\".format(filename))\r\n im = im.resize((width, height))\r\n\r\n # Save the changes for the image without the logo.\r\n im.save(os.path.join(\"Without Logo\", filename))\r\n\r\n # Create 4 instances of the image, so we can edit each one and paste the logo on a different\r\n # corner each time without keeping the old one. We need to do this so we don't reference\r\n # the exact im Image because then every change to imBR affects im and vice-versa.\r\n imBR = im.resize((width, height))\r\n imBL = im.resize((width, height))\r\n imTL = im.resize((width, height))\r\n imTR = im.resize((width, height))\r\n\r\n # Add the logo to the image and save the image as the name + corner of logo.\r\n # This is being done for all 4 corners.\r\n # The last line of code in the group of code for each corner, puts the\r\n # location of the logo between the name and the extension (.png or .jpg).\r\n\r\n # Add logo to bottom right corner.\r\n print('Adding logo to the bottom right corner of {0}...'.format(filename))\r\n imBR.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)\r\n imBR.save(os.path.join('With Logo', \"{0}-BottomRight{1}\".format(filename[:-4], filename[-4:])))\r\n\r\n # Add logo to bottom left corner.\r\n print('Adding logo to the bottom left corner of {0}...'.format(filename))\r\n imBL.paste(logoIm, (0, height - logoHeight), logoIm)\r\n imBL.save(os.path.join('With Logo', \"{0}-BottomLeft{1}\".format(filename[:-4], filename[-4:])))\r\n\r\n # Add logo tp top left corner.\r\n print('Adding logo to the top left corner of {0}...'.format(filename))\r\n imTL.paste(logoIm, (0, 0), logoIm)\r\n imTL.save(os.path.join('With Logo', \"{0}-TopLeft{1}\".format(filename[:-4], filename[-4:])))\r\n\r\n # Add logo to top right corner.\r\n print('Adding logo to the top right corner of {0}...'.format(filename))\r\n imTR.paste(logoIm, (width - logoWidth, 0), logoIm)\r\n imTR.save(os.path.join('With Logo', \"{0}-TopRight{1}\".format(filename[:-4], 
filename[-4:])))\r\n","repo_name":"AxillV/image-watermark-creator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"2933893624","text":"import os\nimport bs4\nimport pandas as pd\nimport time as t\nimport requests as rq\nimport webbrowser as web\n\nclass Scrape:\n def __init__(self):\n self.count = 1\n self.url = \"\"\n self.result = \"\"\n self.stage2 = \"\"\n self.e = \"\"\n self.lizt = []\n self.wait_anims = [\"Loading. < ÓwÓ <\", \n \"Loading.. ~< -w- <\", \n \"Loading... > ÒwÒ =>\", \n \"loading.. > -w- >~\"]\n \n def WelcomeAndCheck(self):\n print(\"Welcome to web scraper\")\n t.sleep(5)\n while True:\n try:\n self.url = input(\"Your url ? : \")\n self.respond = rq.get(self.url)\n self.result = self.respond.status_code\n for i in self.wait_anims:\n os.system(\"clear\")\n print(i)\n t.sleep(1.5)\n if self.result == 200:\n print(\"Success\")\n t.sleep(1)\n self.processing()\n break\n else:\n raise Exception()\n except:\n print(\"cannot connect to server, try again or check your url\")\n \n def processing(self):\n os.system(\"clear\")\n print(\"stage 1 passed\")\n self.stage2 = bs4.BeautifulSoup(self.respond.text, \"html.parser\")\n while True:\n try:\n self.stage3asktag = str(input(\"tag? : \"))\n self.stage3askclassortag = str(input(\"class or tag : \"))\n self.stage3askclassname = str(input(\"name of class/id? : \"))\n \n self.stage3 = self.stage2.find_all(self.stage3asktag, {self.stage3askclassortag : self.stage3askclassname})\n \n if self.stage3:\n self.lizt = []\n for i in self.stage3:\n self.e = i.text\n self.lizt.append(self.e)\n print(f\"Prewiew : {self.lizt}\")\n self.check_save_xl()\n else:\n raise Exception()\n except:\n print(\"cannot scrape\")\n t.sleep(2)\n os.system(\"clear\")\n \n def check_save_xl(self):\n os.system(\"clear\")\n print(\"do you want to save as xl?\")\n while True:\n try:\n os.system(\"clear\")\n self.save_check_xl = str(input(\"Excel [y/n]\")).lower()\n \n if self.save_check_xl == \"y\":\n self.excel()\n break\n elif self.save_check_xl == \"n\":\n self.check_save_txt()\n else:\n raise Exception()\n except:\n print(\"only type y or n\")\n\n def excel(self):\n os.system(\"clear\")\n self.name_content = str(input(\"the name of column? : \"))\n self.dataframe = pd.DataFrame({self.name_content : self.lizt})\n self.filename = str(input(\"file name? : \"))\n self.dataframe.to_excel(f\"{self.filename}.xlsx\", index=False)\n self.check_save_txt()\n \n def check_save_txt(self):\n os.system(\"clear\")\n print(\"do you want to save as xl or txt file?\")\n while True:\n try:\n self.save_check_txt = input(\".txt? [y/n]\").lower()\n if self.save_check_txt == \"y\":\n self.text()\n elif self.save_check_txt == \"n\":\n pass\n else:\n raise Exception()\n except:\n print(\"only type y or n\")\n \n def text(self):\n self.txt_filename = input(\"file name? : \")\n with open(f\"{self.txt_filename}.txt\", \"w\") as file:\n for i in self.stage3:\n self.e = i.text\n file.write(self.e + \"\\n\")\n \n \n def start(self):\n self.WelcomeAndCheck()\n\ntest = Scrape()\ntest.start()\n","repo_name":"Sphxre173/URLscraper-v1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31099460502","text":"import fileinput\nimport getopt\nimport sys\n\ndef fake_link():\n opts, args = getopt.getopt(sys.argv[1:], 'o:s:')\n for opt, arg in opts:\n if opt == '-o':\n out = arg\n\n with open(out, 'wb') as ofp, fileinput.input(files=args, mode='rb') as ifp:\n for line in ifp:\n if not line.startswith(b'#link'):\n ofp.write(line)\n\ndef fake_win32_link():\n args = sys.argv[1:]\n while args:\n arg = args[0]\n if arg == '-o':\n out = args[1]\n args = args[2:]\n continue\n if arg[0] not in '/-':\n break\n args = args[1:]\n if arg.lower().startswith('/out:'):\n out = arg[5:]\n with open(args[0], 'rb') as ifp, open(out, 'wb') as ofp:\n for line in ifp:\n if not line.startswith(b'#link'):\n ofp.write(line)\n\nif __name__ == '__main__':\n if sys.platform == 'win32':\n fake_win32_link()\n else:\n fake_link()\n sys.exit(0)\n","repo_name":"SCons/scons","sub_path":"test/fixture/mylink.py","file_name":"mylink.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1830,"dataset":"github-code","pt":"77"}
+{"seq_id":"15963174933","text":"from aldryn_apphooks_config.fields import AppHookConfigField\nfrom aldryn_apphooks_config.models import AppHookConfig\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\nfrom cms.models.fields import PlaceholderField\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nimport datetime\nfrom cms_appconfig import AdventCalendarConfig\nimport random\n\ndef placeholder_name(self):\n return _('Advent calendar') + ' ' + unicode(self.day)\n\n\nclass AdventCalenderDay(models.Model):\n app_config = AppHookConfigField(AdventCalendarConfig, verbose_name=_('calendar'), default=None)\n day = models.DateField(verbose_name=_('date'))\n placeholder = PlaceholderField(placeholder_name)\n order = models.IntegerField(verbose_name=_('display order'), default=0)\n\n def __str__(self):\n return _('Advent calendar') + ' ' + self.day.strftime('%Y-%m-%d')\n\n class Meta:\n verbose_name = _('Advent calendar day')\n verbose_name_plural = _('Advent calendar days')\n\n@receiver(post_save, sender=AdventCalendarConfig)\ndef create_advent_calender_days(sender, instance, created, **kwargs):\n if created:\n calendar_days = 24\n order = range(calendar_days)\n random.shuffle(order)\n for day in range(calendar_days):\n date = instance.start_date + datetime.timedelta(days=day)\n AdventCalenderDay.objects.create(\n app_config=instance,\n day=unicode(date),\n order=order[day]\n )\n","repo_name":"Maskinteknologsektionen/Website","sub_path":"advent_calendar/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"70778484409","text":"from pyqtgraph import PlotWidget,GraphicsLayoutWidget\nfrom PyQt5 import QtWidgets, QtWidgets\nimport numpy as np\nimport pyqtgraph as pg\nimport bisect\nfrom Model.streamManager import StreamManager\nfrom scipy.signal import savgol_filter as sgf\nimport scipy.integrate as igt\n\nclass AnalyseViewModel:\n def __init__(self,analyseTabView,config):\n self.config = config\n self.tabView = analyseTabView\n self.setUpHandels()\n self.initControls()\n\n def setUpHandels(self):\n\n # get handels\n # combo boxes\n self.chnComboBox = self.tabView.findChild(QtWidgets.QComboBox,\"chnNumComboBox\")\n self.orientationComboBox = self.tabView.findChild(QtWidgets.QComboBox,\"orientationComboBox\")\n # buttons\n self.resetButton = self.tabView.findChild(QtWidgets.QPushButton,\"resetButton\")\n self.selectRoiButton = self.tabView.findChild(QtWidgets.QPushButton,\"selectRoiButton\")\n self.filterButton = self.tabView.findChild(QtWidgets.QPushButton,\"filterButton\")\n self.analyseButton = self.tabView.findChild(QtWidgets.QPushButton,\"analyseButton\")\n self.massCompButton = self.tabView.findChild(QtWidgets.QPushButton,\"massCompButton\")\n # labels\n self.preBurnLabel = self.tabView.findChild(QtWidgets.QLabel,\"preBurnLabel\")\n self.postBurnLabel = self.tabView.findChild(QtWidgets.QLabel,\"postBurnLabel\")\n self.startTimeLabel = self.tabView.findChild(QtWidgets.QLabel,\"startTimeLabel\")\n self.stopTimeLabel = self.tabView.findChild(QtWidgets.QLabel,\"stopTimeLabel\")\n self.idtLabel = self.tabView.findChild(QtWidgets.QLabel,\"idtLabel\")\n self.irtLabel = self.tabView.findChild(QtWidgets.QLabel,\"irtLabel\")\n self.atLabel = self.tabView.findChild(QtWidgets.QLabel,\"atLabel\")\n self.btLabel = self.tabView.findChild(QtWidgets.QLabel,\"btLabel\")\n self.maxThrustLabel = self.tabView.findChild(QtWidgets.QLabel,\"maxThrustLabel\")\n self.spImpulsLabel = self.tabView.findChild(QtWidgets.QLabel,\"spImpulsLabel\")\n self.totImpulsLabel = self.tabView.findChild(QtWidgets.QLabel,\"totImpulsLabel\")\n # line edits\n self.windowLineEdit = self.tabView.findChild(QtWidgets.QLineEdit,\"windowLineEdit\")\n self.orderLineEdit = self.tabView.findChild(QtWidgets.QLineEdit,\"orderLineEdit\")\n self.fuelMassLineEdit = self.tabView.findChild(QtWidgets.QLineEdit,\"fuelMassLineEdit\")\n # check boxes\n self.massCompCheckBox = self.tabView.findChild(QtWidgets.QCheckBox,\"massCompCheckBox\")\n self.calcMassCheckBox = self.tabView.findChild(QtWidgets.QCheckBox,\"calcMassCheckBox\")\n # graph view\n self.graphView = self.tabView.findChild(GraphicsLayoutWidget,\"analyseGraphView\")\n self.roi = None\n self.inf1 = None\n\n def initControls(self):\n self.chnComboBox.addItems([\"Channel {}\".format(num) for num in range(1,9,1)])\n self.orientationComboBox.addItems([\"upwards\",\"downwards\",\"horizontal\"])\n self.initGraph()\n self.selectRoiButton.state = \"selectRoi\"\n # connections\n self.resetButton.clicked.connect(self.resetGraphView)\n self.selectRoiButton.clicked.connect(self.selectRegions)\n self.analyseButton.clicked.connect(self.analyse)\n self.filterButton.clicked.connect(self.applyFilter)\n self.massCompButton.clicked.connect(self.computeMassCompensation)\n\n def resetGraphView(self):\n chnNum = self.chnComboBox.currentIndex()+1\n with StreamManager.numDataLock:\n if len(StreamManager.numData[chnNum])>=50:\n self.x = np.array(StreamManager.numData[0])\n self.y = np.array(StreamManager.numData[chnNum])\n\n scale = float(self.config.chnConfigs[chnNum-1].scale)\n offset = 
float(self.config.chnConfigs[chnNum-1].offset)\n self.y_ = self.y * scale + offset\n self.curve.setData(y=self.y_,x=self.x)\n if self.roi is None:\n self.roi = pg.LinearRegionItem([min(self.x),max(self.x)])\n self.Plt.addItem(self.roi)\n else:\n self.roi.setRegion([min(self.x),max(self.x)])\n self.roi.show()\n self.selectRoiButton.setText(\"Select Region of Interest\")\n self.selectRoiButton.state = \"selectRoi\"\n self.selectRoiButton.show()\n if self.inf1 is not None:\n self.inf1.hide()\n self.inf2.hide()\n\n def initGraph(self):\n win: GraphicsLayoutWidget = self.graphView\n self.Plt = win.addPlot(title=\"\",col=0,row=0)\n self.curve = self.Plt.plot(pen=(1,2*1.3))\n\n def selectRegions(self):\n if self.selectRoiButton.state == \"selectRoi\":\n self.cropDataToRegion()\n self.updateGraph()\n self.selectRoiButton.state = \"selectPreBurnData\"\n self.selectRoiButton.setText(\"Select Pre Burn Values\")\n elif self.selectRoiButton.state == \"selectPreBurnData\":\n self.getPreBurnValues()\n self.selectRoiButton.state = \"selectPostBurnData\"\n self.selectRoiButton.setText(\"Select Post Burn Values\")\n elif self.selectRoiButton.state == \"selectPostBurnData\":\n self.getPostBurnValues()\n self.calculateStartStopTime()\n\n def cropDataToRegion(self):\n x1, x2 = self.roi.getRegion()\n idx1 = max(bisect.bisect_left(self.x,x1),0)\n idx2 = min(bisect.bisect_right(self.x,x2),len(self.x)-1)\n self.x = self.x[idx1:idx2]\n self.y_ = self.y_[idx1:idx2]\n print(self.roi.getRegion())\n\n def getPreBurnValues(self):\n x1, x2 = self.roi.getRegion()\n idx1 = max(bisect.bisect_left(self.x,x1),0)\n idx2 = min(bisect.bisect_right(self.x,x2),len(self.x)-1)\n self.preBurnData = self.y_[idx1:idx2]\n self.preBurnValue = self.preBurnData.mean()\n self.preBurnStd = self.preBurnData.std()\n self.preBurnLabel.setText(\"{:.2f}\".format(self.preBurnValue))\n\n def getPostBurnValues(self):\n x1, x2 = self.roi.getRegion()\n idx1 = max(bisect.bisect_left(self.x,x1),0)\n idx2 = min(bisect.bisect_right(self.x,x2),len(self.x)-1)\n self.postBurnData = self.y_[idx1:idx2]\n self.postBurnValue = self.postBurnData.mean()\n self.postBurnStd = self.postBurnData.std()\n self.postBurnLabel.setText(\"{:.2f}\".format(self.postBurnValue))\n\n def calculateStartStopTime(self):\n # try to find the start value\n for i in range(len(self.x)):\n value = self.y_[i]\n if value > (max(self.preBurnData) + 2*self.preBurnStd):\n self.startTime = self.x[i]\n self.startTimeLabel.setText(\"{:.2f}\".format(self.startTime))\n break\n\n # try to find the stop value\n for i in range(len(self.x)):\n value = self.y_[(i+1)*-1] # inverse the search direction\n if value > (max(self.postBurnData) + 2*self.postBurnStd):\n self.stopTime = self.x[(i+1)*-1]\n self.stopTimeLabel.setText(\"{:.2f}\".format(self.stopTime))\n break\n if self.inf1 is None:\n self.inf1 = pg.InfiniteLine(angle=90, label='start time={:1.2f}'.format(self.startTime),\n labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,50), 'movable': True})\n self.inf2 = pg.InfiniteLine(angle=90, label='stop time={:1.2f}'.format(self.stopTime),\n labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,50), 'movable': True})\n self.Plt.addItem(self.inf1)\n self.Plt.addItem(self.inf2)\n self.inf1.setPos([self.startTime,0])\n self.inf2.setPos([self.stopTime,0])\n else:\n self.inf1.setPos([self.startTime,0])\n self.inf2.setPos([self.stopTime,0])\n self.inf1.show()\n self.inf2.show()\n self.roi.hide()\n #self.curve.setData(fillLevel = min(self.y_))\n\n def updateGraph(self):\n 
self.curve.setData(x=self.x,y=self.y_)\n\n def analyse(self):\n # 1. get max thrust value\n self.maxThrust = max(self.y_)\n self.maxThrust_Newton = self.maxThrust * 9.81\n print(\"max thrust:{:0.2f}\".format(self.maxThrust))\n # 2. get left 10% thrust time\n firstFound = False\n for i in range(len(self.x)):\n value = self.y_[i]\n if value > (self.maxThrust*0.1) and not firstFound:\n self.burnStartTime = self.x[i]\n print(\"burn time:{:0.2f}\".format(self.burnStartTime))\n firstFound =True\n elif value > (self.maxThrust*0.75):\n self.riseTime = self.x[i]\n print(\"rise time:{:0.2f}\".format(self.riseTime))\n break\n\n # 3. get right 10% thrust time\n firstFound = False\n for i in range(len(self.x)):\n value = self.y_[(i+1)*-1]\n if value > (self.maxThrust*0.1) and not firstFound:\n self.burnStopTime = self.x[(i+1)*-1]\n print(\"burn out time:{:0.2f}\".format(self.burnStopTime))\n firstFound =True\n elif value > (self.maxThrust*0.75):\n self.fallTime = self.x[(i+1)*-1]\n print(\"fall time:{:0.2f}\".format(self.fallTime))\n break\n\n # 4. get total impuls in Ns\n idx1 = max(bisect.bisect_left(self.x,self.startTime),0)\n idx2 = min(bisect.bisect_right(self.x,self.stopTime),len(self.x)-1)\n y_corr = (self.y_[idx1:idx2]-self.preBurnValue) * 9.81\n self.totImpuls = np.trapz(y= y_corr,x=self.x[idx1:idx2])\n print(\"Impuls:{:0.2f} Ns\".format(self.totImpuls))\n # 5. get specific impuls\n m_tot = self.preBurnValue - self.postBurnValue\n self.spImpuls = self.totImpuls / (m_tot * 9.81)\n print(\"spezific Impuls:{:0.2f} s\".format(self.spImpuls))\n # 6. update interface\n self.idtLabel.setText(\"{:0.2f} s\".format(self.burnStartTime - self.startTime))\n self.irtLabel.setText(\"{:0.2f} s\".format(self.riseTime - self.burnStartTime))\n self.btLabel.setText(\"{:0.2f} s\".format(self.fallTime-self.burnStartTime))\n self.atLabel.setText(\"{:0.2f} s\".format(self.burnStopTime-self.burnStartTime))\n self.maxThrustLabel.setText(\"{:0.2f} N\".format(self.maxThrust_Newton))\n self.totImpulsLabel.setText(\"{:0.2f} Ns\".format(self.totImpuls))\n self.spImpulsLabel.setText(\"{:0.2f} s\".format(self.spImpuls))\n\n def applyFilter(self):\n try:\n windowSize = int(self.windowLineEdit.text())\n order = int(self.orderLineEdit.text())\n self.y_ = sgf(self.y_,windowSize,order,mode=\"nearest\")\n self.updateGraph()\n except Exception as err:\n print(\"Ein Fehler ist aufgetreten!\")\n print(err)\n\n def computeMassCompensation(self):\n # iterativly compute the mass flow and correct the sensor data\n # algorithm by David Madlener\n m_tot = self.preBurnValue - self.postBurnValue\n t0 = self.startTime\n t1 = self.stopTime\n idx0 = bisect.bisect_left(self.x,t0)\n idx1 = bisect.bisect_right(self.x,t1)\n S = self.y_[idx0:idx1] # get sensor data (kg)\n t = self.x[idx0:idx1] # get time (s)\n P_old = 0\n m = np.ones(len(S)) * self.preBurnValue # initialize m(t) with constant pre burn values\n delta_P = 1\n e = 0.00001\n\n while delta_P > e:\n F = S - m # compute thrust F (kg)\n P_new = np.trapz(y=F,x=t) # integrate thrust (kg s)\n m_dot = -1 * F * (m_tot/P_new)\n m = igt.cumtrapz(m_dot,t,initial=m[0])\n delta_P = abs(P_new-P_old)\n P_old = P_new\n #print(\"delta_P:{}\".format(delta_P))\n\n self.y_[:idx0] = self.y_[:idx0] - self.preBurnValue\n self.y_[idx0:idx1] = S - m - self.preBurnValue\n self.y_[idx1:] = self.y_[idx1:] - self.postBurnValue\n 
self.updateGraph()\n","repo_name":"deets/unifhy-rocket-engine-test-stand","sub_path":"modul3/ViewModel/analyseViewModel.py","file_name":"analyseViewModel.py","file_ext":"py","file_size_in_byte":12002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"39970117596","text":"import importlib\nfrom contracting.execution import runtime\nfrom contractdb.driver import ContractDBDriver\nfrom contracting.execution.module import install_database_loader\nfrom contracting.db.encoder import encode\n\nimport ecdsa\nimport logging\nimport hashlib\n\n## Create new executor that takes a transaction JSON thing and executes it. It also enforces the stamps, etc.\n# if that is set in the environment variables\n\nexpected_tx_keys = {'sender', 'signature', 'payload'}\nexpected_tx_batch_keys = {'sender', 'signature', 'payload', 'index'}\nexpected_payload_keys = {'contract', 'function', 'arguments'}\n\nMALFORMED_TX = 1\nINVALID_SIG = 2\nPY_EXCEPTION = 3\n\n\nclass Engine:\n def __init__(self, stamps_enabled=False, timestamps_enabled=False, driver=ContractDBDriver()):\n install_database_loader()\n\n self.driver = driver\n\n self.log = logging.getLogger('Engine')\n self.stamps_enabled = stamps_enabled\n self.timestamps_enabled = timestamps_enabled\n\n def verify_tx_structure(self, tx: dict, part_of_batch=False):\n expected_keys = expected_tx_keys if not part_of_batch else expected_tx_batch_keys\n if tx.keys() ^ expected_keys != set():\n return False\n\n if tx['payload'].keys() ^ expected_payload_keys != set():\n return False\n\n if self.stamps_enabled and not tx['payload'].get('stamps'):\n return False\n\n if self.timestamps_enabled and not tx['payload'].get('timestamp'):\n return False\n\n return True\n\n @staticmethod\n def verify_tx_signature(tx: dict):\n tx_payload = encode(tx['payload'])\n tx_payload_bytes = tx_payload.encode()\n\n signature = bytes.fromhex(tx['signature'])\n pk = bytes.fromhex(tx['sender'])\n\n vk = ecdsa.VerifyingKey.from_string(pk, curve=ecdsa.NIST256p, hashfunc=hashlib.sha256)\n try:\n vk.verify(signature, tx_payload_bytes)\n except ecdsa.BadSignatureError:\n return False\n return True\n\n # key = nacl.signing.VerifyKey(pk)\n # try:\n # key.verify(tx_payload_bytes, signature)\n # except nacl.exceptions.BadSignatureError:\n # return False\n # return True\n\n def run(self, tx: dict, environment={}, part_of_batch=False):\n tx_output = {\n 'status': 0,\n 'updates': {},\n 'result': None,\n }\n\n # Add additional KV pair if stamps are enabled\n if self.stamps_enabled:\n tx_output['cost'] = 0\n\n # Verify the structure of the tx\n if not self.verify_tx_structure(tx, part_of_batch):\n self.log.error(\"Malformed transaction {}\".format(tx))\n tx_output['status'] = MALFORMED_TX\n return tx_output\n\n # Verify the signature of the tx\n if not self.verify_tx_signature(tx):\n self.log.error(\"Invalid signature for the transaction {}\".format(tx))\n tx_output['status'] = INVALID_SIG\n return tx_output\n\n # Extract the payload to pass as execution arguments\n payload = tx.get('payload')\n\n # Set the runtime driver (we might be able to remove this)\n runtime.rt.env.update({'__Driver': self.driver})\n runtime.rt.env.update(environment)\n\n runtime.rt.context._base_state = {\n 'signer': tx['sender'],\n 'caller': tx['sender'],\n 'this': tx['payload']['contract'],\n 'owner': self.driver.get_owner(tx['payload']['contract'])\n }\n\n try:\n # Access the payload values and load them from the database\n module = importlib.import_module(payload.get('contract'))\n func = getattr(module, payload.get('function'))\n tx_output['result'] = func(**payload.get('arguments'))\n\n except Exception as e:\n tx_output['result'] = str(e)\n tx_output['status'] = PY_EXCEPTION\n\n # Get the current cache of sets for the tx output\n\n _driver = 
runtime.rt.env.get('__Driver')\n\n tx_output['updates'] = _driver.sets\n\n # Clear them for the next execution\n _driver.clear_sets()\n\n runtime.rt.clean_up()\n\n return tx_output\n","repo_name":"Lamden/contractdb","sub_path":"contractdb/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"19384957109","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('search', '0001_initial'),\n ('feedback', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ImageComment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comment', models.TextField(verbose_name='Comment')),\n ('tag_friend', models.CharField(max_length=1024, null=True, verbose_name='Tag Friends', blank=True)),\n ('like_count', models.IntegerField(default=0, max_length=100, verbose_name='like count')),\n ('is_deleted', models.BooleanField(default=False, verbose_name='Deleted Comment')),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'ImageComment',\n 'verbose_name_plural': 'ImageComments',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ImageCommentLike',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('image_comment', models.ForeignKey(related_name=b'like_image_comment', to='uploadimages.ImageComment')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ImageLike',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UploadImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', models.ImageField(upload_to=b'upload_images', null=True, verbose_name='Image')),\n ('google_images', models.TextField(null=True, verbose_name='Google Images')),\n ('review_images', models.ImageField(upload_to=b'upload_images', null=True, verbose_name='Review Image')),\n ('tag_friend', models.CharField(max_length=1024, null=True, verbose_name='Tag Friends', blank=True)),\n ('special_feature', models.TextField(max_length=1024, null=True, verbose_name='Special Feature', blank=True)),\n ('location', django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, verbose_name='Review Location', geography=True)),\n ('is_verified', models.BooleanField(default=False, verbose_name='Upload Image Verified')),\n ('is_credited', models.BooleanField(default=False, verbose_name='Credit on Uploaded Image')),\n ('comment_count', models.IntegerField(default=0, max_length=100, verbose_name='comment count')),\n ('like_count', models.IntegerField(default=0, max_length=100, verbose_name='like count')),\n ('with_whom', models.CharField(max_length=1024, null=True, 
verbose_name='With Friend', blank=True)),\n ('is_deleted', models.BooleanField(default=False, verbose_name='Deleted Image')),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('owner', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ('place', models.ForeignKey(to='search.PlaceDetail', db_column=b'place_id')),\n ('review', models.ForeignKey(to='feedback.ReviewRating', null=True)),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'UploadImage',\n 'verbose_name_plural': 'UploadImages',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='imagelike',\n name='upload_image',\n field=models.ForeignKey(related_name=b'like_image', to='uploadimages.UploadImage'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='imagecomment',\n name='upload_image',\n field=models.ForeignKey(related_name=b'image_comment', to='uploadimages.UploadImage'),\n preserve_default=True,\n ),\n ]\n","repo_name":"bharat-gera/Nautlus","sub_path":"uploadimages/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"70559851129","text":"from typing import Any, Dict, List, Tuple, Type\nimport ctypes\nimport numpy as np\n\nfrom vxpy.core import logger\nimport vxpy.core.devices.camera as vxcamera\nimport vxpy.core.ipc as vxipc\nfrom vxpy.core.devices.camera import CameraDevice\nfrom vxpy.definitions import *\nfrom vxpy.ext_lib.tis_windows import tisgrabber as tis\n\nlog = logger.getLogger(__name__)\n\nic = ctypes.cdll.LoadLibrary('tisgrabber_x64.dll')\ntis.declareFunctions(ic)\nic.IC_InitLibrary(0)\n\n\nclass CallbackUserdata(ctypes.Structure):\n def __init__(self):\n ctypes.Structure.__init__(self)\n\n\nclass TISCamera(vxcamera.CameraDevice):\n \"\"\"TheImagingSource camera using the tisgrabber.dll for Windows OS\n \"\"\"\n\n def __repr__(self):\n return f'{TISCamera.__name__} {self.properties[\"model\"]} {self.properties[\"serial\"]}'\n\n manufacturer = 'TIS'\n\n # NOTE: TIS MAY only support 8-bit images for now?\n sink_formats = {'Y800': (1, np.uint8), # (Y8) 8-bit monochrome\n 'RGB24': (3, np.uint8), # 8-bit RGB\n 'RGB32': (4, np.uint8), # 8-bit RGBA\n # 'UYVY': (2, np.uint16),\n 'Y16': (1, np.uint16)} # 16-bit monochrome\n\n def __init__(self, *args, **kwargs):\n vxcamera.CameraDevice.__init__(self, *args, **kwargs)\n\n self._frame: np.ndarray = None\n\n self.metadata = {}\n self.settings = {}\n\n self.last_snap = vxipc.get_time()\n self.new_image = False\n\n def get_settings(self) -> Dict[str, Any]:\n if len(self.settings) == 0:\n settings = {**self.properties, 'exposure': 0.01, 'gain': 1.0}\n return settings\n return self.settings\n\n @property\n def frame_rate(self) -> float:\n return self.properties['frame_rate']\n\n @property\n def width(self) -> float:\n return self.properties['width']\n\n @property\n def height(self) -> float:\n return self.properties['height']\n\n @classmethod\n def get_camera_list(cls) -> List[CameraDevice]:\n camera_list = []\n devicecount = ic.IC_GetDeviceCount()\n for i in range(0, devicecount):\n model = tis.D(ic.IC_GetDevice(i))\n uniquename = tis.D(ic.IC_GetUniqueNamefromList(i))\n serial = uniquename.replace(model, '').strip(' ')\n props = {'serial': serial, 'model': model,\n 'width': 640, 'height': 480, 'frame_rate': 60.0}\n cam = TISCamera(**props)\n camera_list.append(cam)\n\n return camera_list\n\n def _open(self) -> bool:\n\n # Open (empty) device\n self.h_grabber = ic.IC_CreateGrabber()\n\n # Set callback\n self.userdata = CallbackUserdata()\n self._frame_ready_callback = ic.FRAMEREADYCALLBACK(self._fetch_and_convert_buffer)\n ic.IC_SetFrameReadyCallback(self.h_grabber, self._frame_ready_callback, self.userdata)\n\n return True\n\n def _fetch_and_convert_buffer(self, h_grabber, p_buffer, frame_number, p_data):\n width = ctypes.c_long()\n height = ctypes.c_long()\n bits_per_pixel = ctypes.c_int()\n color_format = ctypes.c_int()\n\n # Query the image description values\n ic.IC_GetImageDescription(h_grabber, width, height, bits_per_pixel, color_format)\n\n # Calculate the buffer size\n bytes_per_pixel = int(bits_per_pixel.value / 8.0)\n buffer_size = width.value * height.value * bytes_per_pixel\n\n source_format = self.properties['format']\n if buffer_size > 0:\n image = ctypes.cast(p_buffer, ctypes.POINTER(ctypes.c_ubyte * buffer_size))\n _dtype = self.sink_formats[source_format][1]\n _shape = (height.value, width.value, bytes_per_pixel // _dtype().nbytes)\n self._frame = np.ndarray(buffer=image.contents,\n dtype=_dtype,\n shape=_shape)\n\n self.new_image = True\n\n def _get_property_value_range(self, property_name):\n value_min = ctypes.c_float()\n 
value_max = ctypes.c_float()\n ic.IC_GetPropertyAbsoluteValueRange(self.h_grabber, tis.T(property_name), tis.T('Value'), value_min, value_max)\n\n return value_min.value, value_max.value\n\n def _set_property(self, property_name, value):\n limits = self._get_property_value_range(property_name)\n if not limits[0] <= value <= limits[1]:\n log.warning(f'Cannot set value of property {property_name} to {value} '\n f'on camera device {self}. Out of range {limits}')\n return\n\n # Set\n log.debug(f'Set property value of property {property_name} to {value} on device {self}')\n ic.IC_SetPropertyAbsoluteValue(self.h_grabber, tis.T(property_name), tis.T('Value'), ctypes.c_float(value))\n\n # Verify\n new_value = ctypes.c_float()\n ic.IC_GetPropertyAbsoluteValue(self.h_grabber, tis.T(property_name), tis.T('Value'), new_value)\n value_min, value_max = self._get_property_value_range(property_name)\n log.debug(f'New property value for {property_name} is {new_value.value:.5f} '\n f'({value_min:.5f} - {value_max:.5f}) on device {self}')\n\n def _set_property_switch(self, property_name, switch_name, value):\n # Set\n ic.IC_SetPropertySwitch(self.h_grabber, tis.T(property_name), tis.T(switch_name), value)\n log.debug(f'Set property switch {switch_name} of property {property_name} to {value} on device {self}')\n\n # Verify\n new_value = ctypes.c_long()\n ic.IC_GetPropertySwitch(self.h_grabber, tis.T(property_name), tis.T(switch_name), new_value)\n log.debug(f'New property switch value {property_name}:{switch_name} '\n f'is {new_value.value} on device {self}')\n\n def _start_stream(self) -> bool:\n # Open device by model and serial\n model = self.properties['model']\n serial = self.properties['serial']\n ic.IC_OpenDevByUniqueName(self.h_grabber, tis.T(f'{model} {serial}'))\n\n # Setting\n source_format = self.properties['format']\n format_str = f'{source_format} ({self.width}x{self.height})'\n ic.IC_SetVideoFormat(self.h_grabber, tis.T(format_str))\n ic.IC_SetFrameRate(self.h_grabber, ctypes.c_float(self.frame_rate))\n\n # Set to continuous mode\n ic.IC_SetContinuousMode(self.h_grabber, 0)\n\n # Set trigger enable\n ic.IC_SetPropertySwitch(self.h_grabber, tis.T('Trigger'), tis.T('Enable'), 1)\n\n # Set properties\n self._set_property_switch('Gain', 'Auto', 0)\n self._set_property_switch('Exposure', 'Auto', 0)\n self._set_property('Exposure', self.properties['exposure'])\n self._set_property('Gain', self.properties['gain'])\n\n # Start\n ic.IC_StartLive(self.h_grabber, 0)\n\n return True\n\n def next_snap(self) -> bool:\n current_time = vxipc.get_time()\n\n do_next = current_time >= self.last_snap + 1. / self.frame_rate\n\n if do_next:\n self.last_snap = current_time\n\n return do_next\n\n def snap_image(self) -> None:\n ic.IC_PropertyOnePush(self.h_grabber, tis.T('Trigger'), tis.T('Software Trigger'))\n\n def next_image(self) -> bool:\n return self.new_image\n\n def get_image(self) -> np.ndarray:\n self.new_image = False\n return self._frame\n\n def _end_stream(self) -> bool:\n ic.IC_StopLive(self.h_grabber)\n return True\n\n def _close(self) -> bool:\n pass\n\nif __name__ == '__main__':\n pass\n","repo_name":"thladnik/vxPy","sub_path":"vxpy/devices/camera/tis_windows_tisgrabber.py","file_name":"tis_windows_tisgrabber.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
+{"seq_id":"71597758328","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2 as cv2\nimport csv as csv\nimport tensorflow as tf\nfrom keras.models import Sequential, Model\nfrom keras.layers import Lambda, Cropping2D\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout \nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\n\n### D A T A G E N E R A T I O N \n### 1.) Load the data from the driving log file\n# The csv file is structured like this: \n# center - left - right - steering - throttle - brake - speed\nlogFileLines = []\nwith open (\"./data/driving_log.csv\") as log: \n reader = csv.reader(log)\n next(reader)\n for line in reader: \n logFileLines.append(line) \n### 2.) Split the data into training and validation set\ntrainingData, validationData = train_test_split(logFileLines, test_size=0.2) \n### 3.) Define a generator which provides data batches more (memory) efficiently than just loading and storing the entire data set\ndef dataGenerator(data, batchSize=32): \n numDataSamples = len(data)\n while True:\n # Randomize data\n np.random.shuffle(data)\n # Return (i.e. yield) a batch every time the dataGenerator gets called\n for offset in range(0, numDataSamples, batchSize):\n batchData = data[offset:offset+batchSize]\n # Extract image links for center, left and right images\n # Extract steering values \n centerImgLinks = []\n leftImgLinks = []\n rightImgLinks = []\n steeringCenter = []\n steeringLeft = []\n steeringRight = [] \n for line in batchData: \n centerImgLinks.append(\"./data/\" + line[0])\n leftImgLinks.append(\"./data/\" + (line[1])[1:])\n rightImgLinks.append(\"./data/\" + (line[2])[1:])\n # Use left and right camera images to pretend the AV is swerved to either left or right\n # Adapt the steering by a correction factor of 0.2 in order to get the AV back to the center\n steeringCenterValue = float(line[3])\n steeringLeftValue = steeringCenterValue + 0.2\n steeringRightValue = steeringCenterValue - 0.2\n steeringCenter.append(steeringCenterValue)\n steeringLeft.append(steeringLeftValue)\n steeringRight.append(steeringRightValue)\n # Load actual images\n centerImages = []\n leftImages = []\n rightImages = []\n for centerImgLink, leftImgLink, rightImgLink in zip(centerImgLinks, leftImgLinks, rightImgLinks): \n centerImages.append(plt.imread(centerImgLink))\n leftImages.append(plt.imread(leftImgLink))\n rightImages.append(plt.imread(rightImgLink))\n # Stack images and steering values together respectively\n images = centerImages + leftImages + rightImages\n steerings = steeringCenter + steeringLeft + steeringRight\n # Augment the data by flipping the image and inverse the corresponding steering \n augmentedImages = []\n augmentedSteerings = []\n for img, steerVal in zip(images, steerings): \n flippedImg = np.fliplr(img)\n flippedSteerVal = - steerVal\n augmentedImages.append(img)\n augmentedImages.append(flippedImg)\n augmentedSteerings.append(steerVal)\n augmentedSteerings.append(flippedSteerVal) \n # Return (yield) the training batch \n X_train = np.array(augmentedImages) \n y_train = np.array(augmentedSteerings)\n yield sklearn.utils.shuffle(X_train, y_train) \n\n \n### B U I L D T H E M O D E L A R C H I T E C T U R E \nmodel = Sequential()\n# L a y e r 0 (P R E P R O C E S S I N G) \n# Lambda layer as preprocessing unit (normalization and mean centering)\n# Cropping layer to remove the above part of the images (which might be 
rather noise for the NN) \nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((60,20), (0,0))))\n# L a y e r 1\n# Convolution and MaxPool --> Input: 80x320x3 --> Layer 1 --> Output: 40x160x24 \nmodel.add(Conv2D(kernel_size=(5,5), filters=24, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 2\n# Convolution and MaxPool --> Input: 40x160x24 --> Layer 2 --> Output: 20x80x36 \nmodel.add(Conv2D(kernel_size=(5,5), filters=36, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 3\n# Convolution and MaxPool --> Input: 20x80x36 --> Layer 3 --> Output: 10x40x48\nmodel.add(Conv2D(kernel_size=(5,5), filters=48, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 4\n# Convolution and MaxPool --> Input: 10x40x48 --> Layer 4 --> Output: 5x20x64\nmodel.add(Conv2D(kernel_size=(3,3), filters=64, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 5\n# Convolution and MaxPool --> Input: 5x20x64 --> Layer 5 --> Output: 2x10x64\nmodel.add(Conv2D(kernel_size=(3,3), filters=64, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 6\n# Flatten Layer --> Input: 2x10x64 --> Layer 4 Output: 1280\nmodel.add(Flatten())\n# L a y e r 7\n# Dense (Fully Connected) and Relu --> Input 1280 --> Layer 7 --> Output: 320\nmodel.add(Dense(320))\nmodel.add(Activation('relu'))\n# L a y e r 8\n# Dense (Fully Connected) and Relu --> Input 320 --> Layer 8 --> Output: 160\nmodel.add(Dense(160))\nmodel.add(Activation('relu'))\n# L a y e r 9 \n# Dense (Fully Connected) --> Input 160 --> Layer 9 --> Output: 16\nmodel.add(Dense(16))\nmodel.add(Activation('relu'))\n# L a y e r 10 (O u t p u t)\n# Dense (Fully Connected) --> Input 16 --> Layer 10 --> Output: 1\nmodel.add(Dense(1))\n\n\n### T R A I N T H E M O D E L\n# Define data generator for training and validation batches\nbatchSize = 32\ntrainingDataGenerator = dataGenerator(trainingData, batchSize)\nvalidationDataGenerator = dataGenerator(validationData, batchSize)\n# Use mean squared error function as loss and the adam optimizer (stochastic gradient descent)\nmodel.compile(loss=\"mse\", optimizer=\"adam\")\n# Training\nbehavioralCloningModel = model.fit_generator(trainingDataGenerator, steps_per_epoch=np.ceil(len(trainingData)/batchSize), \\\n validation_data=validationDataGenerator, validation_steps=np.ceil(len(validationData)/batchSize), \\\n epochs=10, verbose=1)\n# Save the model\nmodel.save(\"model.h5\")\n\n\n\n","repo_name":"dschmoeller/BehavioralCloningDeepNNsKeras","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1779391306","text":"# importing Libraries\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport seaborn as sns\r\n\r\n# Importing the dataset\r\nos.chdir('E:\\\\Programing\\\\UdemyML\\\\Machine Learning A-Z Template Folder\\\\Part 4 - Clustering\\\\Section 24 - K-Means Clustering')\r\ndf = pd.read_csv('Mall_Customers.csv')\r\nprint(df)\r\nx = df.iloc[:,[3,4]].values\r\n\r\n# Using Elbow Method\r\nfrom sklearn.cluster import KMeans\r\nwcss = []\r\nfor i in range(1,11):\r\n k = KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10,random_state=0)\r\n k.fit(x)\r\n wcss.append(k.inertia_)\r\n\r\nsns.set()\r\nplt.plot(range(1,11),wcss)\r\nplt.title('Elbow Method ')\r\nplt.xlabel('No.of.Clusters')\r\nplt.ylabel('WCSS Score')\r\nplt.show()\r\n\r\n# Fitting The Model To 5 Clusters\r\nk = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)\r\ny_k = k.fit_predict(x)\r\nprint(y_k)\r\n\r\n# Scatter Plot The Clusters\r\nplt.scatter(x[y_k==0,0],x[y_k==0,1],c='red',label = 'Cluster 1')\r\nplt.scatter(x[y_k==1,0],x[y_k==1,1],c='blue',label = 'Cluster 2')\r\nplt.scatter(x[y_k==2,0],x[y_k==2,1],c='green',label = 'Cluster 3')\r\nplt.scatter(x[y_k==3,0],x[y_k==3,1],c='yellow',label = 'Cluster 4')\r\nplt.scatter(x[y_k==4,0],x[y_k==4,1],c='cyan',label = 'Cluster 5')\r\nplt.xlabel('Annual Income')\r\nplt.ylabel('Spending Score')\r\nplt.title('Clustering Of Mall Clients')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n","repo_name":"gemyhamed/Udemy_ML_C-MyownWork","sub_path":"K-means Clustring/KMeans Clustring.py","file_name":"KMeans Clustring.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"42423650345","text":"text = []\r\nwith open('final.txt', 'r', encoding='utf-8') as f:\r\n\tfor line in f:\r\n\t\ttext.append(line.split('|')[2])\r\nword_dic = {}\r\n\r\nfor line in text:\r\n\tline_split = line.translate(str.maketrans('','','!(),-.[]_،؟!@#$\\n')).split(' ')\r\n\tfor word in line_split:\r\n\t\tif word in word_dic:\r\n\t\t\tword_dic[word] += 1\r\n\t\telse:\r\n\t\t\tword_dic[word] = 1\r\nword_dic_sorted = {k: v for k, v in sorted(word_dic.items(), key=lambda item: item[1], reverse=True)}\r\nwith open('word_count.txt', 'w', encoding='utf-8') as w:\r\n\tfor rank ,(word, count) in enumerate(word_dic_sorted.items()):\r\n\t\tw.write('{}-word: {}, count: {}\\n'.format(rank+1, word, count))\r\n\r\n","repo_name":"shenasa-ai/persian-tts","sub_path":"top-words.py","file_name":"top-words.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"}
+{"seq_id":"42836011316","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 13 11:08:16 2020\r\n\r\n@author: prnvb\r\n\"\"\"\r\n\r\nfrom model import build_encoder, build_decoder_densenet, build_decoder_efnb2,\\\r\n build_decoder_efnb3, build_decoder_efnb4\r\nfrom keras.layers import Dense, Input, Dropout, Multiply, Add, Concatenate\r\nfrom keras.models import Model\r\n\r\nfrom utils import LATENT_DIM,NUM_CLASSES,INPUT_SHAPE\r\n\r\ndef build_classifier(encoder,dropout_rate=0.4):\r\n input_image = Input(shape=INPUT_SHAPE)\r\n embedding = encoder(input_image)\r\n #out = Dense(int(LATENT_DIM/2),activation='relu')(embedding)\r\n if dropout_rate>0:\r\n\t embedding = Dropout(0.3)(embedding)\r\n out = Dense(NUM_CLASSES,activation='softmax')(embedding)\r\n classifier = Model(input_image,out)\r\n classifier.name = 'Classifier'\r\n return classifier\r\n\r\ndef build_classifier_v2(encoder,input_shape):\r\n input_image = Input(shape=input_shape)\r\n embedding = encoder(input_image)\r\n #out = Dense(int(LATENT_DIM/2),activation='relu')(embedding)\r\n #out = Dropout(0.3)(out)\r\n out = Dense(NUM_CLASSES,activation='softmax')(embedding)\r\n classifier = Model(input_image,out)\r\n classifier.name = 'Classifier'\r\n return classifier\r\n\r\n\r\ndef build_conditioner():\r\n input_label_condition_vector = Input(shape=(NUM_CLASSES,))\r\n x = Dense(256,activation='relu')(input_label_condition_vector)\r\n #x = Dropout(0.2)(x)\r\n x = Dense(LATENT_DIM,activation='relu')(x)\r\n model = Model(input_label_condition_vector,x)\r\n return model\r\n\r\ndef build_c2ae(model_name): #encoder\r\n \r\n H_gamma = build_conditioner()\r\n H_gamma.name = 'H_gamma'\r\n H_beta = build_conditioner()\r\n H_beta.name = 'H_beta'\r\n \r\n #input_image = Input(shape=INPUT_SHAPE)\r\n #z = encoder(input_image)\r\n \r\n #condition_type_input = Input(shape=(1,))\r\n \r\n z = Input(shape=(LATENT_DIM,))\r\n \r\n l_m = Input(shape=(NUM_CLASSES,))\r\n gamma_m = H_gamma(l_m)\r\n beta_m = H_beta(l_m)\r\n z_l_m = Multiply()([z,gamma_m])\r\n z_l_m = Add()([z_l_m,beta_m])\r\n \r\n \r\n l_nm = Input(shape=(NUM_CLASSES,))\r\n gamma_nm = H_gamma(l_nm)\r\n beta_nm = H_beta(l_nm)\r\n z_l_nm = Multiply()([z,gamma_nm])\r\n z_l_nm = Add()([z_l_nm,beta_nm])\r\n \r\n if model_name == 'densenet121':\r\n decoder = build_decoder_densenet(LATENT_DIM)\r\n \r\n if model_name == 'efnb2':\r\n decoder = build_decoder_efnb2(LATENT_DIM)\r\n \r\n if model_name == 'efnb3':\r\n decoder = build_decoder_efnb3(LATENT_DIM)\r\n \r\n if model_name == 'efnb4':\r\n decoder = build_decoder_efnb4(LATENT_DIM)\r\n \r\n match_recon = decoder(z_l_m)\r\n nonmatch_recon = decoder(z_l_nm)\r\n \r\n out = Concatenate(axis=-1)([match_recon,nonmatch_recon])\r\n \r\n #c2ae = Model(inputs=[input_image,l_j],outputs=reconstruction)\r\n #c2ae = Model(inputs=[z,l_j,condition_type_input],outputs=reconstruction)\r\n \r\n c2ae = Model(inputs=[z,l_m,l_nm],outputs=out)\r\n \r\n return c2ae, decoder, H_gamma, H_beta#, condition_type_input #, encoder\r\n\r\nif __name__ == '__main__':\r\n encoder = build_encoder(LATENT_DIM)\r\n classifier = build_classifier(encoder)\r\n c2ae, _, decoder, H_gamma, H_beta = build_c2ae(encoder)\r\n \r\n encoder.summary()\r\n decoder.summary()\r\n H_gamma.summary()\r\n H_beta.summary()\r\n c2ae.summary()\r\n","repo_name":"pranavbudhwant/ISIC","sub_path":"c2ae/c2ae.py","file_name":"c2ae.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7229180673","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n\npage_dist = dict()\nresponse = requests.get('http://old.iachina.cn/upload/product/20091207050241328.html')\nresponse.encoding = 'gbk'\nresponse = response.text\np_list = BeautifulSoup(response,\"lxml\").find_all('p')\nlevel = 0\nfor p in p_list:\n\n try :\n if p['align'] == \"center\":\n title = p.get_text()\n print(\"title : \"+title)\n except:\n print(p.get_text())\n\n\n\n\n","repo_name":"xiaoweiab/learn1","sub_path":"translate/dealbaoxian/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"4962613615","text":"# author:lzt\n# date: 2019/12/12 10:50\n# file_name: lock_test\n# 有100张票 3个窗口同时开卖 每卖出一张 票数-1 直到100张票卖完为止\nfrom threading import Thread\nimport time\nimport threading\n\ntickets = 100\n\n# 获取一把锁\nlock1 = threading.Lock()\n\n\ndef window1():\n global tickets\n while tickets > 0:\n lock1.acquire()\n # 二次判断:检测数据有没有在等待期间发生变化\n if tickets > 0:\n # 打印票面\n print(\"window1卖出票号:\", tickets)\n # time.sleep(0.02)\n # 票数-1\n tickets -= 1\n lock1.release()\n\n\ndef window2():\n global tickets\n while tickets > 0:\n lock1.acquire()\n if tickets > 0:\n # 打印票面\n print(\"window2卖出票号:\", tickets)\n # time.sleep(0.1)\n # 票数-1\n tickets -= 1\n lock1.release()\n\n\ndef window3():\n global tickets\n while tickets > 0:\n lock1.acquire()\n if tickets > 0:\n # 打印票面\n print(\"window3卖出票号:\", tickets)\n # time.sleep(0.05)\n # 票数-1\n tickets -= 1\n lock1.release()\n\n\nt1 = Thread(target=window1)\nt2 = Thread(target=window2)\nt3 = Thread(target=window3)\n\nt1.start()\nt2.start()\nt3.start()\n","repo_name":"1987617587/lsh_py","sub_path":"basics/day29/lzt/lock_test.py","file_name":"lock_test.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"40273269722","text":"import os\nfrom setuptools import setup\n\n\ntry:\n descr = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()\nexcept IOError:\n descr = ''\n\ntry:\n from pypandoc import convert\n descr = convert(descr, 'rst', format='md')\nexcept ImportError:\n pass\n\nsetup_parameters = dict(\n name=\"pims_nd2\",\n version=\"1.1\",\n description=\"An image reader for nd2 (NIS Elements) multidimensional images\",\n author=\"Casper van der Wel\",\n install_requires=['pims>=0.3'],\n author_email=\"caspervdw@gmail.com\",\n url=\"https://github.com/soft-matter/pims_nd2\",\n packages=['pims_nd2'],\n include_package_data=True,\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: C\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS\"],\n platforms=['MacOS X', 'Windows', 'Linux CentOs 6.5/7', 'Linux Debian 7/8'],\n long_description=descr)\n\nsetup(**setup_parameters)\n","repo_name":"soft-matter/pims_nd2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"}
+{"seq_id":"73471847288","text":"import setuptools\n\nREQUIRED = [\n \"numpy\",\n \"pandas\",\n \"scikit-learn\"\n]\n\nsetuptools.setup(\n name=\"lambdata-isaacgrove\",\n version=\"0.8\",\n packages=setuptools.find_packages(),\n # Project uses reStructuredText, so ensure that the docutils get\n # installed or upgraded on the target machine\n install_requires=REQUIRED,\n # metadata to display on PyPI\n author=\"isaacgrove\",\n author_email=\"isaacgrove333@gmail.com\",\n description=\"Lambda DS Unit 3 lambdata - helper functions\",\n keywords=\"\",\n url=\"\", # project home page, if any\n classifiers=[\n \"License :: OSI Approved :: MIT License\"\n ]\n # could also include long_description, download_url, etc.\n)","repo_name":"isaacgrove/unit3-day1-lambdata","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5430453039","text":"from firebase_admin import db\nfrom rest_framework.response import Response\nfrom apps.metrics.helpers.combine_metrics_helper.combine_metrics import SearchNode\n\ndef handleEditName(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n old_name = data['old_name']\n new_name = data['new_name']\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n nodes = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes']\n try:\n for t in list_t:\n if t['name'] == old_name:\n t.update({\n 'name': str(new_name).upper()\n })\n for node in nodes:\n if(node['data']['id'] in t['composite_component']):\n print(node['data']['id'])\n node['data'].update({\n 'composite': str(new_name).upper()\n })\n break\n\n\n # Se actualiza la lista t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes'] = nodes\n # Se actualiza la bd\n # arch_arr[int(arch_index)]['versions'][int(version_index)]['elements'] = elements\n project_ref = db.reference(url)\n project_ref.update({\n 'architectures': arch_arr\n })\n\n return Response(data={\"ok\": True})\n except Exception as e:\n print('Error:', e)\n return Response({\"ok\":False})\n\n\n# Permite editar el componente compuesto al que pertenece un nodo\ndef handleEditNodeCompositeComponent(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n nodeData = data['node']\n composite_component = data['new_name']\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n nodes = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes']\n\n try:\n fullNode = SearchNode(nodeData, nodes) # me quede sin nombres jeje\n\n aux = False\n # Si el nodo pertenece con anterioridad a otro componente compuesto entonces lo saco de esa lista t\n if 'composite' in fullNode['data']:\n print('pertenecia a otro componente')\n for lt in list_t:\n for index, cc in enumerate(lt['composite_component']):\n if cc == fullNode['data']['name']:\n lt['composite_component'].pop(index)\n aux = True\n break\n\n if aux:\n print('break')\n break\n\n for t in list_t:\n if t['name'] == composite_component:\n t['composite_component'].append(nodeData)\n\n for node in nodes :\n if(node['data']['id'] == nodeData):\n node['data'].update({\n 'composite': t['name'],\n 'bg': t['bg']\n })\n\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes'] = nodes\n\n\n project_ref = db.reference(url)\n project_ref.update({\n 'architectures': arch_arr\n })\n return Response(data={'ok': True})\n except Exception as e:\n print(e)\n return Response(data={'ok': False})\n\n# Genera la tabla de los componentes compuestos\ndef handleCompositeComponentBoard(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = 
'/users/' + uid + '/projects/' + str(project_index)\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n edges = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['edges']\n nodes = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes']\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n\n # print(len(edges))\n # print(len(nodes))\n # print(len(list_t))\n try:\n for item in list_t:\n # Required interfaces\n ca = []\n # Provided interfaces\n ce = []\n\n for component in item['composite_component']:\n for edge in edges:\n sourceNode = SearchNode(edge['data']['source'], nodes)\n targetNode = SearchNode(edge['data']['target'], nodes)\n\n if component == sourceNode['data']['id']:\n if 'composite' not in targetNode['data']:\n composite = ''\n else:\n composite = targetNode['data']['composite']\n if sourceNode['data']['composite'] != composite:\n if edge['scratch']['index'] not in ce and edge['scratch']['index'] not in ca:\n ce.append(edge['scratch']['index'])\n\n\n if component == targetNode['data']['id']:\n if 'composite' not in sourceNode['data']:\n composite = ''\n else:\n composite = sourceNode['data']['composite']\n\n if targetNode['data']['composite'] != composite:\n if edge['scratch']['index'] not in ca and edge['scratch']['index'] not in ce:\n ca.append(edge['scratch']['index'])\n\n # print('--------NEXT---------')\n item.update({\n 'required_interfaces': ca,\n 'provided_interfaces': ce,\n 'description': ''\n })\n\n # Actualizo la lista t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n project_ref = db.reference(url)\n # Actualizo los datos en la base de datos\n project_ref.update({\n 'architectures': arch_arr\n })\n\n return Response(data={'ok': True})\n except Exception as e:\n print(e)\n return Response(data={'ok': False})\n\n# TODO\n# ? Hace falta limpiar las tablas\n# Edita la descripción de los componentes compuestos\ndef handleEditCompositeComponentDescription(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n cc_name = data['name']\n description = data['description']\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n\n try:\n for item in list_t:\n if item['name'] == cc_name:\n item.update({\n 'description': description\n })\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n project_ref = db.reference(url)\n # Actualizo los datos en la base de datos\n project_ref.update({\n 'architectures': arch_arr\n })\n return Response(data={'ok': True})\n except Exception as e:\n print(e)\n return Response(data={'ok': False})\n","repo_name":"Leopgf/tesis-back","sub_path":"apps/metrics/helpers/combine_metrics_helper/composite_component_handler.py","file_name":"composite_component_handler.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27528022819","text":"\"\"\"These are the actions primary related to DialogDomain, but any module can use them. Basically it provides ways for visually inserting, editting and\r\nupdating the db:domain table.\"\"\"\r\nimport output\r\nimport wx\r\nimport session\r\nfrom errors import *\r\nimport DialogEditDomain\r\nfrom table_domain import t_domain\r\n \r\ndef insert():\r\n \"\"\"Calls the edit dialog in insert mode. Returns True of False whether a record has been inserted or not.\"\"\"\r\n result = False\r\n dlg = DialogEditDomain.create(None)\r\n dlg.set_mode(\"insert\")\r\n try:\r\n dlg.ShowModal()\r\n if dlg.result == wx.ID_OK:\r\n result = True\r\n else:\r\n raise error_abort(\"Insert canceled.\")\r\n finally:\r\n dlg.Destroy()\r\n return result\r\n\r\n\r\ndef edit(id):\r\n \"\"\"Calls the edit dialog in edit mode. Returns True of False whether a record has been edited or not.\"\"\"\r\n result = False\r\n dlg = DialogEditDomain.create(None)\r\n dlg.set_mode(\"edit\")\r\n dlg.set_id(id)\r\n try:\r\n dlg.ShowModal()\r\n if dlg.result == wx.ID_OK:\r\n result = True\r\n else:\r\n raise error_abort(\"Edit canceled.\")\r\n finally:\r\n dlg.Destroy()\r\n return result\r\n\r\ndef delete(id):\r\n if wx.MessageBox(\"Are you sure, mate?\", \"Confirm delete\", wx.YES_NO, None) == wx.YES:\r\n # raise error_x(\"Sorry, this action is too dangerous to be performed!\")\r\n t_domain.delete(id)\r\n else:\r\n raise error_abort(\"Delete not confirmed.\")\r\n\r\n","repo_name":"trevisanj/sheware","sub_path":"act_domain.py","file_name":"act_domain.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25015054524","text":"from database.models import Command, Result, Request, db, CharField\nfrom datetime import datetime\nimport psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\nfrom loguru import logger\nfrom loader import bot\nfrom telebot.types import Message\n\n\ndef check_database() -> None:\n \"\"\"\n Функция проверяет наличие базы данных, если БД не существует, то создает её.\n В конце создает таблицы.\n \"\"\"\n logger.add('debug_in_database.log', level='DEBUG', format=\"{time} {level} {message}\", rotation=\"10 KB\",\n compression=\"zip\")\n con = psycopg2.connect(\"user='postgres' host='localhost' password='12345'\")\n dbname = 'history'\n\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n try:\n cur.execute('CREATE DATABASE ' + dbname)\n logger.info('DATABASE created')\n\n except psycopg2.ProgrammingError as err:\n logger.exception(err)\n logger.error('DATABASE already exists')\n\n finally:\n with db:\n db.create_tables([Request, Command, Result])\n\n\ndef insert_in_requests(user_id: int, time: datetime) -> int:\n \"\"\"\n Cоздаёт запись в таблице requests\n :param user_id: id пользователя\n :param time: время когда пользователь сделал запрос\n :return: айди записи для создания связи между таблицами\n \"\"\"\n with db:\n request = Request.create(user_id=user_id, time=time)\n logger.info('INSERT in requests')\n return request.id\n\n\ndef insert_in_commands(request_id, command_name: str, city_name: str,\n data_in: str, data_out: str, quantity: str,\n min_price: CharField = None, max_price: CharField = None, min_distance: CharField = None,\n max_distance: CharField = None) -> int:\n \"\"\"\n Cоздаёт запись в таблице commands\n :param request_id: айди прошлого запроса(requests)\n :param command_name: имя команды\n :param city_name: названия города\n :param data_in: дата заезда\n :param data_out: дата выезда\n :param quantity: кол-во отелей\n :param min_price: мин. цена (optional)\n :param max_price: макс. цена (optional)\n :param min_distance: мин. дистанция до центра (optional)\n :param max_distance: макс. 
дистанция до центра (optional)\n :return: айди записи для создания связи между таблицами\n \"\"\"\n with db:\n command = Command.create(request_id=request_id, command_name=command_name, city_name=city_name,\n min_price=min_price, max_price=max_price, min_distance=min_distance,\n max_distance=max_distance, data_in=data_in, data_out=data_out, quantity=quantity)\n logger.info('INSERT in commands')\n return command.id\n\n\ndef insert_in_results(command_id, hotel: str, address: str, price: str,\n distance: str, total_price: str, url: str) -> None:\n \"\"\"\n Cоздаёт запись в таблице results\n :param command_id: айди прошлого запроса(commands)\n :param hotel: название отеля\n :param address: адрес\n :param price: цена за ночь\n :param distance: расстояние до центра\n :param total_price: общая сумма денег\n :param url: ссылка на отель\n :return: None\n \"\"\"\n with db:\n Result.insert(command_id=command_id, hotel=hotel, address=address, price=price,\n distance=distance, total_price=total_price, url=url).execute()\n logger.info('INSERT in results')\n\n\n@logger.catch()\ndef select_user_history(message: Message):\n \"\"\"\n Получает из базы данных историю всех запросов пользователя лимит(5),\n после этого обрабатывает их и приводит в тип текста.\n И после всего этого выводит пользователю его команду и отели которые он нашел, с помощью этой команды.\n :param message: сообщение пользователя(с помощью него мы получаем id,\n и имеем возможность отправить текст из функции)\n \"\"\"\n with db:\n keys = Request.select().where(Request.user_id == message.from_user.id).limit(5).order_by(Request.time.desc())\n for key in keys:\n command = Command.select().where(Command.request_id == key).get()\n text1 = (f'Время: {str(key.time)[0:19]} Команда: {command.command_name}\\n'\n f'Город: {command.city_name}, с {command.data_in} по {command.data_out}')\n if command.command_name == 'beastdeal':\n text1 += (f'параметры поиска:\\n'\n f'минимальная цена: {command.min_price} и максимальная цена: {command.max_price}\\n'\n f'минимальное расстояние: {command.min_distance}'\n f' и максимальное расстояние: {command.max_distance}\\n')\n bot.send_message(message.chat.id, text1)\n\n history = Result.select().where(Result.command_id == key)\n for one_story in history:\n text2 = (f'Название отеля: {one_story.hotel}, цена за ночь: {one_story.price}\\n'\n f'Расстояние до центра {one_story.distance}\\n'\n f'Полная стоимость проживания {one_story.total_price}\\n'\n f'Адресс: {one_story.address}\\nСсылка на страницу отеля: {one_story.url}')\n\n bot.send_message(message.chat.id, text2, disable_web_page_preview=True)\n\n\n\n","repo_name":"banrj/telegram_travel_bot","sub_path":"database/database_commands.py","file_name":"database_commands.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"43798845662","text":"import itertools as it\nimport ubelt as ub\nimport pathlib\nimport time\nimport os\nimport stat\n\n\ndef ensure_selenium_chromedriver():\n \"\"\"\n os.environ['webdriver.chrome.driver'] = ensure_selenium_chromedriver()\n \"\"\"\n import requests\n import zipfile\n timeout = 5.0\n\n def latest_version():\n rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE', timeout=timeout)\n if rsp.status_code != 200:\n raise Exception\n version = rsp.text.strip()\n return version\n\n # version = latest_version()\n # version = '91.0.4472.19'\n # version = '90.0.4430.24'\n version = '92.0.4515.107'\n\n known_hashs = {\n '91.0.4472.19': '49622b740b1c7e66b87179a2642f6c57f21a97fc844c84b30a48',\n '90.0.4430.24': 'b85313de6abc1b44f26a0e12e20cb66657b840417f5ac6018946',\n '92.0.4515.107': '844c0e04bbbfd286617af2d7facd3d6cf7d3491b1e78120f8e0',\n }\n url = 'http://chromedriver.storage.googleapis.com/{}/chromedriver_linux64.zip'.format(version)\n bin_dpath = pathlib.Path(ub.expandpath('~/.local/bin'))\n download_dpath = bin_dpath / f'chromedriver_{version}'\n download_dpath.mkdir(exist_ok=True, parents=True)\n\n zip_fpath = ub.grabdata(\n url, hash_prefix=known_hashs.get(version, 'unknown-version'),\n dpath=download_dpath,\n )\n zip_fpath = pathlib.Path(zip_fpath)\n # dpath = zip_fpath.parent\n\n # TODO: version the binary\n chromedriver_fpath_real = download_dpath / 'chromedriver'\n chromedriver_fpath_link = bin_dpath / 'chromedriver'\n\n if not chromedriver_fpath_real.exists() or not chromedriver_fpath_link.exists():\n # Also check hash?\n\n zfile = zipfile.ZipFile(str(zip_fpath))\n try:\n fpath = zfile.extract(\n 'chromedriver', path=chromedriver_fpath_real.parent)\n finally:\n zfile.close()\n\n chromedriver_fpath_real_ = pathlib.Path(fpath)\n assert chromedriver_fpath_real_.exists()\n ub.symlink(chromedriver_fpath_real_, chromedriver_fpath_link,\n overwrite=True)\n\n if not ub.WIN32:\n print('add permission chromedriver_fpath_real_ = {!r}'.format(chromedriver_fpath_real_))\n st = os.stat(chromedriver_fpath_real_)\n os.chmod(chromedriver_fpath_real_, st.st_mode | stat.S_IEXEC)\n\n os.environ['PATH'] = os.pathsep.join(\n ub.oset(os.environ['PATH'].split(os.pathsep)) |\n ub.oset([str(chromedriver_fpath_link.parent)]))\n return chromedriver_fpath_link\n\n\ndef run_pvpoke_simulation(mons, league='auto'):\n \"\"\"\n Args:\n mons (List[pypogo.Pokemon]): pokemon to simulate.\n Must have IVS, movesets, level, etc... 
fields populated.\n \"\"\"\n from selenium import webdriver\n from selenium.webdriver.common.keys import Keys\n # from selenium.webdriver.support.ui import Select\n import pandas as pd\n # import pypogo\n\n if league == 'auto':\n for mon in mons:\n if mon.cp <= 1500:\n league = 'great'\n elif mon.cp <= 2500:\n league = 'ultra'\n elif mon.level <= 41:\n league = 'master-classic'\n elif mon.level <= 51:\n league = 'master'\n else:\n raise AssertionError\n break\n # for mon in mons:\n # mon.populate_all\n mon_cachers = {}\n have_results = {}\n to_check_mons = []\n for mon in mons:\n mon._slug = mon.slug()\n mon_cachers[mon._slug] = cacher = ub.Cacher(\n 'pvpoke_sim', depends=[mon._slug, league], appname='pypogo')\n mon_results = cacher.tryload()\n if mon_results is None:\n to_check_mons.append(mon)\n else:\n have_results[mon._slug] = mon_results\n\n if to_check_mons:\n # Requires the driver be in the PATH\n ensure_selenium_chromedriver()\n\n url = 'https://pvpoke.com/battle/matrix/'\n driver = webdriver.Chrome()\n driver.get(url)\n time.sleep(2.0)\n\n if league == 'great':\n league_box_target = 'Great League (CP 1500)'\n meta_text = 'Great League Meta'\n elif league == 'ultra':\n league_box_target = 'Ultra League (Level 50)'\n meta_text = 'Ultra League Meta'\n # meta_text = 'Premier Cup Meta'\n # meta_text = 'Remix Cup Meta'\n # meta_text = 'Premier Classic Cup Meta'\n elif league == 'master-classic':\n league_box_target = 'Master League (Level 40)'\n meta_text = 'Master League Meta'\n elif league == 'master':\n league_box_target = 'Master League (Level 50)'\n meta_text = 'Master League Meta'\n else:\n raise NotImplementedError\n\n leage_select = driver.find_elements_by_class_name('league-select')[0]\n leage_select.click()\n leage_select.send_keys(league_box_target)\n leage_select.click()\n leage_select.send_keys(Keys.ENTER)\n\n # leage_select.text.split('\\n')\n # leage_select.send_keys('\\n')\n # leage_select.send_keys('\\n')\n\n def add_pokemon(mon):\n add_poke1_button = driver.find_elements_by_class_name('add-poke-btn')[0]\n add_poke1_button.click()\n\n select_drop = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/select')\n\n if 1:\n import xdev\n all_names = select_drop.text.split('\\n')\n distances = xdev.edit_distance(mon.display_name(), all_names)\n chosen_name = all_names[ub.argmin(distances)]\n else:\n chosen_name = mon.name\n\n search_box = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/input')\n search_box.send_keys(chosen_name)\n\n advanced_ivs_arrow = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/a/span[1]')\n advanced_ivs_arrow.click()\n\n level40_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[2]')\n level41_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[3]')\n level50_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[4]')\n level51_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[5]')\n\n if mon.level >= 51:\n level51_cap.click()\n elif mon.level >= 50:\n level50_cap.click()\n elif mon.level >= 41:\n level41_cap.click()\n elif mon.level >= 40:\n level40_cap.click()\n\n level_box = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/input')\n level_box.click()\n level_box.clear()\n level_box.clear()\n 
level_box.send_keys(str(mon.level))\n\n iv_a = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[1]')\n iv_d = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[2]')\n iv_s = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[3]')\n\n # TODO\n # driver.find_elements_by_class_name('move-select')\n\n iv_a.clear()\n iv_a.send_keys(str(mon.ivs[0]))\n\n iv_d.clear()\n iv_d.send_keys(str(mon.ivs[1]))\n\n iv_s.clear()\n iv_s.send_keys(str(mon.ivs[2]))\n\n # USE_MOVES = 1\n if mon.moves is not None:\n # mon.populate_all()\n\n fast_select = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[1]')\n fast_select.click()\n fast_select.send_keys(mon.pvp_fast_move['name'])\n fast_select.send_keys(Keys.ENTER)\n\n charge1_select = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[2]')\n charge1_select.click()\n charge1_select.send_keys(mon.pvp_charge_moves[0]['name'])\n charge1_select.send_keys(Keys.ENTER)\n\n charge2_select = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[3]')\n charge2_select.click()\n charge2_select.send_keys(mon.pvp_charge_moves[1]['name'])\n charge2_select.send_keys(Keys.ENTER)\n\n save_button = driver.find_elements_by_class_name('save-poke')[0]\n save_button.click()\n\n quickfills = driver.find_elements_by_class_name('quick-fill-select')\n quickfill = quickfills[1]\n quickfill.text.split('\\n')\n quickfill.click()\n quickfill.send_keys(meta_text)\n quickfill.click()\n\n for mon in to_check_mons:\n add_pokemon(mon)\n\n shield_num_to_text = {\n 0: 'No shields',\n 1: '1 shield',\n 2: '2 shields',\n }\n\n shield_case_to_data = {}\n\n for atk_num_shields, def_num_sheids in it.product(shield_num_to_text, shield_num_to_text):\n shield_selectors = driver.find_elements_by_class_name('shield-select')\n shield_selectors[2].click()\n shield_selectors[2].send_keys(shield_num_to_text[atk_num_shields])\n shield_selectors[2].send_keys(Keys.ENTER)\n\n shield_selectors[3].click()\n shield_selectors[3].send_keys(shield_num_to_text[def_num_sheids])\n shield_selectors[3].send_keys(Keys.ENTER)\n\n #shield_selectors[0].click()\n\n battle_btn = driver.find_elements_by_class_name('battle-btn')[0]\n battle_btn.click()\n\n # Clear previous downloaded files\n dlfolder = pathlib.Path(ub.expandpath('$HOME/Downloads'))\n for old_fpath in list(dlfolder.glob('_vs*.csv')):\n old_fpath.unlink()\n\n time.sleep(2.0)\n\n # Download new data\n dl_btn = driver.find_element_by_xpath('//*[@id=\"main\"]/div[4]/div[9]/div/a')\n dl_btn.click()\n\n while len(list(dlfolder.glob('_vs*.csv'))) < 1:\n pass\n\n new_fpaths = list(dlfolder.glob('_vs*.csv'))\n assert len(new_fpaths) == 1\n fpath = new_fpaths[0]\n\n data = pd.read_csv(fpath, header=0, index_col=0)\n shield_case_to_data[(atk_num_shields, def_num_sheids)] = data\n\n for idx, mon in enumerate(to_check_mons):\n mon_results = {ss: scores.iloc[idx] for ss, scores in shield_case_to_data.items()}\n cacher = mon_cachers[mon._slug]\n cacher.save(mon_results)\n have_results[mon._slug] = mon_results\n\n _tojoin = ub.ddict(list)\n _joined = ub.ddict(list)\n for mon_results in have_results.values():\n for ss, scores in mon_results.items():\n _tojoin[ss].append(scores)\n\n for ss, vals in _tojoin.items():\n _joined[ss] = pd.concat([v.to_frame().T for v in vals])\n _joined.default_factory = None\n results = _joined\n return 
results\n","repo_name":"Erotemic/pypogo","sub_path":"pypogo/pvpoke_driver.py","file_name":"pvpoke_driver.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"}
+{"seq_id":"5294664414","text":"import unittest\nimport stats as s\n\nclass TestStatsMethods(unittest.TestCase):\n\n\tdef test_compute_avg(self):\n\t\texpected = 2.5\n\t\ttest = s.compute_avg([1,2,3,4])\n\t\tself.assertEqual(test, expected)\n\n\tdef test_compute_min(self):\n\t\texpected = 1\n\t\ttest = s.compute_min([1,2,3,4])\n\t\tself.assertEqual(test, expected)\n\n\tdef test_compute_max(self):\n\t\texpected = 4\n\t\ttest = s.compute_max([1,2,3,4])\n\t\tself.assertEqual(test, expected)\n\nif __name__ == '__main__':\n\tunittest.main()\n\nunittest.main()","repo_name":"cmoussa1/Travis-CI-for-Python","sub_path":"unittest_stats.py","file_name":"unittest_stats.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"23944934821","text":"#!/usr/bin/env python\n# import pytest\n\n\nclass Virus(object):\n '''Properties and attributes of the virus used in Simulation.'''\n\n def __init__(self, name, repro_rate, mortality_rate):\n self.name = name\n self.repro_rate = repro_rate\n self.mortality_rate = mortality_rate\n\n# ERIK's test\ndef test_virus_instantiation():\n '''Check to make sure that the virus instantiator is working.'''\n virus = Virus(\"Ebola\", 0.22, 0.7)\n assert virus.name == \"Ebola\"\n assert virus.repro_rate == 0.22\n assert virus.mortality_rate == 0.7\n\n# MAKHMUD's test\ndef test_virus_tuberculosis():\n virus = Virus(\"Tuberculosis\", 0.55, 0.67)\n assert virus.name == \"Tuberculosis\"\n assert virus.repro_rate == 0.55\n assert virus.mortality_rate == 0.67\n","repo_name":"makhmudislamov/HerdImmunityMakeSchool-Refactored","sub_path":"virus.py","file_name":"virus.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"25204048428","text":"\"\"\" Another chatgpt stab at geodesics in de Sitter Space \n\n\n\"\"\"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Define parameters\nH = 1.0 # Hubble constant\nL = 1.0 # de Sitter radius\nm = 0.1 # mass of particle\ntmax = 5.0 # maximum time\nN = 1000 # number of time steps\ndt = tmax / N # time step size\n\n# Define initial conditions\nx0 = 0.0\ny0 = L\npx0 = m * np.sqrt((H*L)**2 - 1.0) # ho-hum this is zero.\npy0 = 0.0\n\n# results\nresults = None\n\n# Define the differential equations for x, y, px, and py\ndef f(t, X):\n x, y, px, py = X\n \n r = np.sqrt(x**2 + y**2)\n f_x = px / (m * r)\n f_y = py / (m * r)\n f_px = -m * H**2 * x / r**3\n f_py = -m * H**2 * y / r**3\n return np.array([f_x, f_y, f_px, f_py])\n\ndef main():\n # Solve the differential equations using the Runge-Kutta method\n\n t = 0.0\n X = np.array([x0, y0, px0, py0])\n xvals = [x0]\n yvals = [y0]\n tvals = [t]\n Xvals = [dict(t=t, x=x0, y=y0, px=px0, py=py0)]\n while t < tmax:\n k1 = dt * f(t, X)\n k2 = dt * f(t + 0.5*dt, X + 0.5*k1)\n k3 = dt * f(t + 0.5*dt, X + 0.5*k2)\n k4 = dt * f(t + dt, X + k3)\n X = X + (k1 + 2.0*k2 + 2.0*k3 + k4) / 6.0\n xvals.append(X[0])\n yvals.append(X[1])\n tvals.append(t)\n x, y, px, py = X\n Xvals.append(dict(t=t, x=x, y=y, px=px, py=py))\n t += dt\n\n global results\n results = Xvals\n \n # Plot the geodesic\n #plt.plot(xvals, yvals)\n plt.plot(tvals, xvals, label='x')\n plt.plot(tvals, yvals, label='y')\n plt.plot(tvals, list(x['py'] for x in Xvals), label='py')\n plt.plot(tvals, list(x['px'] for x in Xvals), label='px')\n plt.plot(tvals, list(math.sqrt(x['x']**2 + x['y']**2) for x in Xvals), label='r')\n #plt.xlim(-2*L, 2*L)\n #plt.ylim(-2*L, 2*L)\n #plt.gca().set_aspect('equal', adjustable='box')\n plt.legend()\n plt.xlabel('t')\n plt.show()\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"swfiua/gotu","sub_path":"gotu/aidss2.py","file_name":"aidss2.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39263293153","text":"import pygame\nfrom PIL import Image as PilImage, ImageSequence\nfrom typing import List\nfrom pygame import Surface\nimport os\n\nfrom game.Entity.Image import Image\n\nclass ImageService:\n IMAGE_FORMAT_GIF = 'GIF'\n FRAME_TYPE_RGBA = 'RGBA'\n\n def __init__(self) -> None:\n self.imageSurfaceMap = {}\n\n def getImageFrameSurfaceList(self, imagePath: str) -> List[Surface]:\n result = []\n\n pilImage = PilImage.open(imagePath)\n if pilImage.format == self.IMAGE_FORMAT_GIF and pilImage.is_animated:\n for frame in ImageSequence.Iterator(pilImage):\n result.append(self.convertPilImageToSurface(frame.convert(self.FRAME_TYPE_RGBA)))\n else:\n result.append(self.convertPilImageToSurface(pilImage))\n\n return result\n\n def convertPilImageToSurface(self, pilImage) -> Surface:\n return pygame.image.fromstring(pilImage.tobytes(), pilImage.size, pilImage.mode).convert_alpha()\n\n def scaleImageSurface(self, imageSurface: Surface, width: int, height: int):\n return pygame.transform.scale(imageSurface, (width, height))\n\n def scaleImageSurfaceList(self, imageSurfaceList: List[Surface], width: int, height: int) -> list:\n result = []\n\n for imageSurface in imageSurfaceList:\n result.append(self.scaleImageSurface(imageSurface, width, height))\n\n return result\n\n def buildImage(self, path: str, width: int, height: int) -> Image:\n imageFrameSurfaceList = self.getImageFrameSurfaceList(path)\n imageFrameSurfaceList = self.scaleImageSurfaceList(\n imageFrameSurfaceList,\n width,\n height\n )\n\n self.imageSurfaceMap[path] = imageFrameSurfaceList\n\n image = Image(path)\n\n return image\n\n def buildImageList(self, path: str, width: int, height: int) -> List[Image]:\n result = []\n\n for fileName in os.listdir(path):\n result.append(self.buildImage(\"%s%s\" % (path, fileName), width, height))\n\n return result\n\n","repo_name":"white-rabbit-1-sketch/helicopter","sub_path":"Service/System/ImageService.py","file_name":"ImageService.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"39354750456","text":"#!/usr/bin/env python\n\n\"\"\"Tests for `xbitinfo` package.\"\"\"\nimport os\n\nimport numpy as np\nimport pytest\nimport xarray as xr\nfrom numpy.testing import assert_allclose, assert_equal\nfrom xarray.core import formatting\nfrom xarray.core.dataarray import DataArray\nfrom xarray.core.dataset import Dataset\nfrom xarray.core.variable import Variable\nfrom xarray.testing import assert_identical\n\nimport xbitinfo as xb\n\n\ndef assert_different(a, b):\n \"\"\"Raises an AssertionError if two objects are equal. This will match\n data values, dimensions and coordinates, but not names or attributes\n (except for Dataset objects for which the variable names must match).\n Arrays with NaN in the same location are considered equal.\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n See Also\n --------\n assert_identical, assert_allclose, Dataset.equals, DataArray.equals\n numpy.testing.assert_array_equal\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n if isinstance(a, (Variable, DataArray)):\n assert not a.equals(b), formatting.diff_array_repr(a, b, \"equals\")\n elif isinstance(a, Dataset):\n assert not a.equals(b), formatting.diff_dataset_repr(a, b, \"equals\")\n else:\n raise TypeError(f\"{type(a)} not supported by assertion comparison\")\n\n\ndef bitinfo_assert_equal(bitinfo1, bitinfo2):\n assert list(bitinfo1.keys()) == list(bitinfo2.keys()), print(\n f\"lhs = {bitinfo1.keys()} vs rhs = {bitinfo2.keys()}\"\n )\n for v in bitinfo1.keys():\n assert_equal(bitinfo1[v], bitinfo2[v])\n\n\ndef bitinfo_assert_allclose(bitinfo1, bitinfo2, **kwargs):\n assert list(bitinfo1.keys()) == list(bitinfo2.keys()), print(\n f\"lhs = {bitinfo1.keys()} vs rhs = {bitinfo2.keys()}\"\n )\n for v in bitinfo1.keys():\n assert_allclose(bitinfo1[v], bitinfo2[v], **kwargs)\n\n\ndef bitinfo_assert_different(bitinfo1, bitinfo2):\n \"\"\"Fail bitinfo different values.\"\"\"\n assert (bitinfo1 != bitinfo2).any()\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_returns_dataset(implementation):\n \"\"\"Test xb.get_bitinformation returns xr.Dataset.\"\"\"\n ds = xr.tutorial.load_dataset(\"rasm\")\n assert isinstance(\n xb.get_bitinformation(ds, implementation=implementation, axis=0), xr.Dataset\n )\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_dim(implementation):\n \"\"\"Test xb.get_bitinformation is sensitive to dim.\"\"\"\n ds = xr.tutorial.load_dataset(\"rasm\")\n bitinfo0 = xb.get_bitinformation(ds, axis=0, implementation=implementation)\n bitinfo2 = xb.get_bitinformation(ds, axis=2, implementation=implementation)\n assert_different(bitinfo0, bitinfo2)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_dim_string_equals_axis_int(implementation):\n \"\"\"Test xb.get_bitinformation undestands xarray dimension names the same way as axis as integers.\"\"\"\n ds = xr.tutorial.load_dataset(\"rasm\")\n bitinfox = xb.get_bitinformation(ds, dim=\"x\", implementation=implementation)\n bitinfo2 = xb.get_bitinformation(ds, axis=2, implementation=implementation)\n assert_identical(bitinfox, bitinfo2)\n\n\ndef test_get_bitinformation_masked_value(implementation=\"julia\"):\n \"\"\"Test xb.get_bitinformation is sensitive to masked_value.\"\"\"\n ds = 
xr.tutorial.load_dataset(\"rasm\")\n bitinfo = xb.get_bitinformation(ds, dim=\"x\", implementation=implementation)\n bitinfo_no_mask = xb.get_bitinformation(\n ds, dim=\"x\", masked_value=\"nothing\", implementation=implementation\n )\n bitinfo_no_mask_None = xb.get_bitinformation(\n ds, dim=\"x\", masked_value=None, implementation=implementation\n )\n assert_identical(bitinfo_no_mask, bitinfo_no_mask_None)\n assert_different(bitinfo, bitinfo_no_mask)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_set_zero_insignificant(implementation):\n \"\"\"Test xb.get_bitinformation is sensitive to set_zero_insignificant.\"\"\"\n ds = xr.tutorial.load_dataset(\"air_temperature\")\n dim = \"lon\"\n bitinfo = xb.get_bitinformation(ds, dim=dim, implementation=implementation)\n bitinfo_szi_False = xb.get_bitinformation(\n ds, dim=dim, set_zero_insignificant=False, implementation=implementation\n )\n try:\n bitinfo_szi_True = xb.get_bitinformation(\n ds, dim=dim, set_zero_insignificant=True, implementation=implementation\n )\n assert_identical(bitinfo, bitinfo_szi_True)\n except NotImplementedError:\n assert implementation == \"python\"\n if implementation == \"python\":\n assert_identical(bitinfo, bitinfo_szi_False)\n elif implementation == \"julia\":\n assert_different(bitinfo, bitinfo_szi_False)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_confidence(implementation):\n \"\"\"Test xb.get_bitinformation is sensitive to confidence.\"\"\"\n ds = xr.tutorial.load_dataset(\"air_temperature\")\n dim = \"lon\"\n bitinfo = xb.get_bitinformation(ds, dim=dim, implementation=implementation)\n try:\n bitinfo_conf99 = xb.get_bitinformation(\n ds, dim=dim, confidence=0.99, implementation=implementation\n )\n bitinfo_conf50 = xb.get_bitinformation(\n ds, dim=dim, confidence=0.5, implementation=implementation\n )\n assert_different(bitinfo_conf99, bitinfo_conf50)\n assert_identical(bitinfo, bitinfo_conf99)\n except AssertionError:\n assert implementation == \"python\"\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_label(rasm, implementation):\n \"\"\"Test xb.get_bitinformation serializes when label given.\"\"\"\n ds = rasm\n xb.get_bitinformation(\n ds, dim=\"x\", label=\"./tmp_testdir/rasm\", implementation=implementation\n )\n assert os.path.exists(\"./tmp_testdir/rasm.json\")\n # second call should be faster\n xb.get_bitinformation(\n ds, dim=\"x\", label=\"./tmp_testdir/rasm\", implementation=implementation\n )\n os.remove(\"./tmp_testdir/rasm.json\")\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\n@pytest.mark.parametrize(\"dtype\", [\"float64\", \"float32\", \"float16\"])\ndef test_get_bitinformation_dtype(rasm, dtype, implementation):\n \"\"\"Test xb.get_bitinformation returns correct number of bits depending on dtype.\"\"\"\n ds = rasm.astype(dtype)\n v = list(ds.data_vars)[0]\n dtype_bits = dtype.replace(\"float\", \"\")\n assert len(xb.get_bitinformation(ds, dim=\"x\")[v].coords[\"bit\" + dtype_bits]) == int(\n dtype_bits\n )\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_multidim(rasm, implementation):\n \"\"\"Test xb.get_bitinformation runs on all dimensions by default\"\"\"\n ds = rasm\n bi = xb.get_bitinformation(ds, implementation=implementation)\n # check length of dimension\n assert bi.dims[\"dim\"] == len(ds.dims)\n bi_time = 
bi.sel(dim=\"time\").Tair.values\n bi_x = bi.sel(dim=\"x\").Tair.values\n bi_y = bi.sel(dim=\"y\").Tair.values\n assert any(bi_time != bi_x)\n assert any(bi_time != bi_y)\n assert any(bi_y != bi_x)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_different_variables_dims(rasm, implementation):\n \"\"\"Test xb.get_bitinformation runs with variables of different dimensionality\"\"\"\n ds = rasm\n # add variable with different dimensionality\n ds[\"Tair_mean\"] = ds.Tair.mean(dim=\"time\")\n bi = xb.get_bitinformation(ds, implementation=implementation)\n assert all(np.isnan(bi.Tair_mean.sel(dim=\"time\")))\n bi_Tair_mean_x = bi.Tair_mean.sel(dim=\"x\")\n bi_Tair_x = bi.Tair.sel(dim=\"x\")\n assert_different(bi_Tair_mean_x, bi_Tair_x)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_different_dtypes(rasm, implementation):\n ds = rasm\n ds[\"Tair32\"] = ds.Tair.astype(\"float32\")\n ds[\"Tair16\"] = ds.Tair.astype(\"float16\")\n bi = xb.get_bitinformation(ds, implementation=implementation)\n for bitdim in [\"bit16\", \"bit32\", \"bit64\"]:\n assert bitdim in bi.dims\n assert bitdim in bi.coords\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_dim_list(rasm, implementation):\n bi = xb.get_bitinformation(rasm, dim=[\"x\", \"y\"], implementation=implementation)\n assert (bi.dim == [\"x\", \"y\"]).all()\n\n\ndef test_get_bitinformation_keep_attrs(rasm):\n bi = xb.get_bitinformation(rasm, dim=[\"x\", \"y\"]).Tair\n assert \"units\" in bi.attrs\n assert bi.attrs[\"units\"] == 1\n for a in rasm.Tair.attrs.keys():\n assert bi.attrs[\"source_\" + a] == rasm.Tair.attrs[a], print(bi.attrs)\n\n\n@pytest.mark.parametrize(\n \"ds,dim,axis\",\n [\n (pytest.lazy_fixture(\"ugrid_demo\"), None, -1),\n (pytest.lazy_fixture(\"icon_grid_demo\"), \"ncells\", None),\n (pytest.lazy_fixture(\"air_temperature\"), \"lon\", None),\n (pytest.lazy_fixture(\"rasm\"), \"x\", None),\n (pytest.lazy_fixture(\"ROMS_example\"), \"eta_rho\", None),\n (pytest.lazy_fixture(\"era52mt\"), \"time\", None),\n (pytest.lazy_fixture(\"eraint_uvz\"), \"longitude\", None),\n ],\n)\ndef test_implementations_agree(ds, dim, axis):\n \"\"\"Test whether the python and julia implementation retrieve the same results\"\"\"\n bi_python = xb.get_bitinformation(\n ds,\n dim=dim,\n axis=axis,\n implementation=\"python\",\n set_zero_insignificant=False,\n overwrite=True,\n masked_value=None,\n )\n bi_julia = xb.get_bitinformation(\n ds,\n dim=dim,\n axis=axis,\n implementation=\"julia\",\n set_zero_insignificant=False,\n overwrite=True,\n masked_value=None,\n )\n bitinfo_assert_allclose(bi_python, bi_julia, rtol=1e-4)\n","repo_name":"observingClouds/xbitinfo","sub_path":"tests/test_get_bitinformation.py","file_name":"test_get_bitinformation.py","file_ext":"py","file_size_in_byte":10000,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"77"}
+{"seq_id":"70505270329","text":"from threading import Lock\nimport random\n\nfrom abs_estimator import AbsEstimator\n\n\nclass SumEst(AbsEstimator):\n _ITERATION_NUMBER = 100\n _POOL_SAMPLE_SIZE = 1000\n _ITERATION_NUMBER_INFORMATION = \"Number of iterations\"\n _POOL_SAMPLE_SIZE_INFORMATION = \"Size of the query pool sample\"\n _PAIR_QUERY_INDEX = 0\n _PAIR_DOCUMENT_INDEX = 1\n\n @property\n def experiment_details(self):\n additional_information = {SumEst._ITERATION_NUMBER_INFORMATION: SumEst._ITERATION_NUMBER,\n SumEst._POOL_SAMPLE_SIZE_INFORMATION: SumEst._POOL_SAMPLE_SIZE}\n return additional_information\n\n @property\n def common_api(self):\n return self.__common_api\n\n @common_api.setter\n def common_api(self, val):\n self.__common_api = val\n\n def __init__(self, common_api):\n self.__common_api = common_api\n\n def estimate(self):\n super().estimate()\n estimation_acc = 0\n query_pool = self.common_api.read_query_pool()\n pool_size = self._estimate_pool_size(query_pool)\n for i in range(0, SumEst._ITERATION_NUMBER):\n query_document_pair = self._select_query_document_pair(query_pool)\n document = query_document_pair[SumEst._PAIR_DOCUMENT_INDEX]\n query = query_document_pair[SumEst._PAIR_QUERY_INDEX]\n document_inverse_degree = self._calculate_document_inverse_degree(document, query_pool)\n degree_query = self._calculate_degree_query(query)\n partial_estimation = pool_size * degree_query * document_inverse_degree\n estimation_acc += partial_estimation\n self.common_api.report_progress(i, SumEst._ITERATION_NUMBER)\n estimation = estimation_acc / SumEst._ITERATION_NUMBER\n return estimation\n\n def _verify_match(self, query, document):\n content = document.content.lower()\n if content.find(query.lower()) != -1:\n return True\n return False\n\n def _select_query_document_pair(self, query_pool):\n list_size = len(query_pool)\n while True:\n random_index = random.randrange(list_size)\n random_query = query_pool[random_index]\n try:\n document_list = self.common_api.download(random_query).results\n except:\n continue\n valid_list = []\n for document in document_list:\n if self._verify_match(random_query, document):\n valid_list.append(document)\n if len(valid_list) > 0:\n random_index = random.randrange(len(valid_list))\n random_document = valid_list[random_index]\n return [random_query, random_document]\n\n def _get_matching_query_list(self, document, query_pool):\n lock = Lock()\n matching_query_list = []\n\n def iteration(query):\n nonlocal document, matching_query_list, lock\n if self._verify_match(query, document):\n with lock:\n matching_query_list.append(query)\n\n self.common_api.execute_in_parallel(query_pool, iteration)\n return matching_query_list\n\n def _calculate_degree_query(self, query):\n lock = Lock()\n count = 0\n\n def iteration(document):\n nonlocal query, count, lock\n if self._verify_match(query, document):\n with lock:\n count += 1\n\n document_list = self.common_api.download(query).results\n self.common_api.execute_in_parallel(document_list, iteration)\n return count\n\n def _estimate_pool_size(self, query_pool):\n count = 0\n query_pool_size = len(query_pool)\n lock = Lock()\n\n # noinspection PyUnusedLocal\n def iteration(iteration_number):\n nonlocal query_pool, query_pool_size, count, lock\n random_index = random.randrange(0, query_pool_size)\n query = query_pool[random_index]\n document_list = self.common_api.download(query).results\n for document in document_list:\n if self._verify_match(query, document):\n with lock:\n count += 1\n return\n\n 
self.common_api.execute_in_parallel(range(0, SumEst._POOL_SAMPLE_SIZE), iteration)\n return len(query_pool) * count / SumEst._POOL_SAMPLE_SIZE\n\n def _calculate_document_inverse_degree(self, document, query_pool):\n matching_query_list = self._get_matching_query_list(document, query_pool)\n i = 1\n while True:\n random_index = random.randrange(0, len(matching_query_list))\n query = matching_query_list[random_index]\n try:\n document_list = self.common_api.download(query).results\n except:\n continue\n for item in document_list:\n if item.identifier == document.identifier:\n return i / len(matching_query_list)\n i += 1\n","repo_name":"fpbfabio/estimation_methods","sub_path":"sum_est.py","file_name":"sum_est.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37562572928","text":"\"\"\"\nProyecto: Panel de control de velociadad de motores Tf \n@Autor: EDVS\n\"\"\"\n\n#%%\n# import libraries \nimport sys\nfrom time import time\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtSerialPort\n\nimport time\n\n# Author of the library: Stefan Holstein \n# inspired by: https://github.com/Werkov/PyQt4/blob/master/examples/widgets/analogclock.py\nfrom analoggaugewidget import AnalogGaugeWidget\n\nclass Main_App(QMainWindow):\n\n def __init__(self,parent=None,*args):\n super(Main_App,self).__init__(parent=parent)\n\n self.ancho = 450 \n self.altura = 800\n self.run = True\n\n # --- VARIABLES PARA LA LECTURA DE DE LOS SENSORES--------\n self.velocidad_M1 = 0\n self.velocidad_M2 = 0\n self.corriente_M1 = 0\n self.corriente_M2 = 0\n\n self.setFixedSize(self.ancho,self.altura)\n self.setWindowTitle(\"panel de control\")\n self.General = QLabel(self)\n self.General.setGeometry(0,0,self.ancho,self.altura)\n self.General.setStyleSheet(\"border-radius: 3px; border: none; background-color: #000000;\")\n\n\n self.box_Panel = QLabel(self.General)\n self.tv_tituloPANEL = QLabel(\"PANEL DE CONTROL\",self.box_Panel)\n \n self.compotenes = QWidget(self.box_Panel)\n self.name_dispsitivo = QLabel('Dispositivos:',self.compotenes)\n \n \n self.list_Puertos = QComboBox(self)\n \n #----------Box panel---------#\n self.box_Panel.setGeometry(QRect(10, 10,self.ancho-20, self.altura-20))\n self.box_Panel.setStyleSheet(\" border-radius: 15px; background-color: #101010;\")\n \n #----------Box panel de control---------#\n \n font = QFont()\n font.setPointSize(13)\n font.setBold(True)\n self.tv_tituloPANEL.setFont(font)\n self.tv_tituloPANEL.setStyleSheet(\"border: none; color: #C2185B;\")\n self.tv_tituloPANEL.setGeometry(100, 10, 250, 40)\n\n ## --- COMPONENTES ---------------\n self.compotenes.setGeometry(5,50,420,46)\n self.compotenes.setStyleSheet(\" border-radius: 5px; border:1px solid #607D8B;\")\n\n #-------dispositivos-----\n font = QFont()\n font.setPointSize(11)\n self.name_dispsitivo.setFont(font)\n self.name_dispsitivo.setStyleSheet(\" border-radius: 15px; border: none;color:#1565C0\")\n self.name_dispsitivo.setGeometry(10,3,120,40)\n\n #---------Lista de puertos--------------#\n font.setPointSize(10)\n self.list_Puertos.setFont(font)\n self.list_Puertos.setGeometry(135, 65, 150, 35)\n ports = [\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\"]\n \n self.list_Puertos.addItems(ports)\n \n self.list_Puertos.setStyleSheet(\"QListView {background-color: #B3E5FC;}\")\n self.list_Puertos.setStyleSheet(\"border-radius: 2px; border:1px solid #1565C0;color:#4CAF50; background-color: transparest;\")\n \n\n \n # ----- button list ports-------------\n font.setPointSize(11)\n\n self.button = QPushButton(self.compotenes)\n self.button.setFont(font)\n self.button.setMouseTracking(True)\n self.button.setText(\"Conectar\")\n self.button.setCursor(Qt.PointingHandCursor)\n self.button.setAutoDefault(False)\n self.button.setGeometry(300, 6, 100, 34)\n self.button.setCheckable(True)\n self.button.clicked.connect(self.Mensaje)\n self.button.setStyleSheet(\"background-color: rgb(251, 192, 45); border-radius: 5px; border: 1px solid rgb(100,100,100);\")\n\n \n\n # ----------- PROGRES BARR--------------#\n \n self.C_bar = QWidget(self.box_Panel)\n self.C_bar.setGeometry(20, 100,390,300)\n self.C_bar.setStyleSheet(\" border-radius: 10px; background-color: black; border:none;\")\n\n self.frame_1 = QFrame(self.C_bar)\n 
self.frame_1.setGeometry(10, 10,160,160)\n self.frame_1.setFrameShape(QFrame.StyledPanel)\n self.frame_1.setFrameShadow(QFrame.Raised)\n self.sensor_M1= AnalogGaugeWidget(self.frame_1)\n self.sensor_M1.setMinimumSize(QSize(150, 150))\n \n \n self.frame_2 = QFrame(self.C_bar)\n self.frame_2.setGeometry(220, 10,160,160)\n self.frame_2.setFrameShape(QFrame.StyledPanel)\n \n\n self.frame_2.setFrameShadow(QFrame.Raised)\n self.sensor_M2= AnalogGaugeWidget(self.frame_2)\n self.sensor_M2.setMinimumSize(150, 150)\n self.sensor_M2.value_min = -60\n self.sensor_M2.value_max = 60\n self.sensor_M2.units = \"deg\"\n\n # ---Label----------\n self.LedDirecion = QLabel(self.C_bar)\n self.LedDirecion.setGeometry(175, 20,30,30)\n self.LedDirecion.setStyleSheet(\" border-radius: 15px; background-color: black; border: 1px solid #CFD8DC;\")\n \n # +++++++++++++++++++++++Label para la lectura del sensor de corriente++++++++++++++++++++++++\n self.img_LogoCarrito = QLabel(self.C_bar)\n self.img_LogoUPC = QLabel(self.C_bar)\n #----------------LOGO UPC---------#\n self.img_LogoCarrito.setGeometry(10,180, 120, 100)\n self.img_LogoCarrito.setPixmap(QPixmap(\"imagenes/carrito.png\"))\n self.img_LogoCarrito.setStyleSheet(\"background-color: black ;border:none;\")\n \n self.img_LogoCarrito.setScaledContents(True)\n\n #----------LOGO AESS---------#\n self.img_LogoUPC.setGeometry(250, 180, 100, 100)\n self.img_LogoUPC.setPixmap(QPixmap(\"imagenes/LOGO_UPC.png\"))\n self.img_LogoUPC.setStyleSheet(\"border:none;\")\n \n self.img_LogoUPC.setScaledContents(True)\n\n\n # ----DEFINIR SET POINT DEL MOTOR 1 (motor derecho)-------\n \"\"\"self.corr_M1 = QWidget(self.C_bar)\n self.corr_M1.setGeometry(110,200,165,50)\n self.corr_M1.setStyleSheet(\" border-radius: 10px; border: 1px solid #FFEE58;\")\n\n self.L_corrD = QLabel(\"Corriente MI: (mA):\",self.corr_M1)\n self.L_corrD.setGeometry(5,2,150,20)\n self.L_corrD.setAlignment(Qt.AlignCenter)\n self.L_corrD.setStyleSheet(\"border: none; color: #F5F5F5\")\n\n self.mA_M1 = QLabel(str(self.corriente_M1),self.corr_M1)\n self.mA_M1.setGeometry(5,24,150,20)\n self.mA_M1.setAlignment(Qt.AlignCenter)\n self.mA_M1.setStyleSheet(\"border: none; color: #4CAF50\")\n font.setPointSize(10)\n self.mA_M1.setFont(font)\"\"\"\n\n\n # -----------BOTONES PARA CONTROLAR LA DIRECION Y VELOCIDAD-----\n \n self.botones = QWidget(self.box_Panel)\n self.botones.setGeometry(20, 410,390,350)\n self.botones.setStyleSheet(\" border-radius: 10px; border: none; background-color: black\")\n\n # ----DEFINIR SET POINT DEL MOTOR 1 (motor derecho)-------\n self.SP_M1 = 0\n self.SP_M2 = 0\n\n self.motor1 = QWidget(self.botones)\n self.motor1.setGeometry(10,10,120,50)\n self.motor1.setStyleSheet(\" border-radius: 10px; border: 1px solid #E91E63;\")\n\n self.L_motorD = QLabel(\"MOTOR VEL (rpm):\",self.motor1)\n self.L_motorD.setGeometry(5,2,110,20)\n self.L_motorD.setStyleSheet(\"border: none; color: #F5F5F5\")\n\n self.RMP_M1 = QLabel(str(self.SP_M1),self.motor1)\n self.RMP_M1.setGeometry(10,24,100,20)\n self.RMP_M1.setAlignment(Qt.AlignCenter)\n self.RMP_M1.setStyleSheet(\"border: none; color: #4CAF50\")\n font.setPointSize(10)\n self.RMP_M1.setFont(font)\n\n \n # ----DEFINIR SET POINT DEL MOTOR 2 (motor izquierdo)-------\n self.motor2 = QWidget(self.botones)\n self.motor2.setGeometry(260,10,120,50)\n self.motor2.setStyleSheet(\" border-radius: 10px; border: 1px solid #E91E63;\")\n\n self.L_motorI = QLabel(\"MOTOR POS (deg):\",self.motor2)\n self.L_motorI.setGeometry(5,2,110,20)\n self.L_motorI.setStyleSheet(\"border: 
none; color: #F5F5F5\")\n\n self.RMP_M2 = QLabel(str(self.SP_M2),self.motor2)\n self.RMP_M2.setGeometry(10,24,100,20)\n self.RMP_M2.setAlignment(Qt.AlignCenter)\n self.RMP_M2.setStyleSheet(\"border: none; color: #4CAF50\")\n font.setPointSize(10)\n self.RMP_M2.setFont(font)\n\n # *************** BOTONES ********************\n h_1 = 80\n w_1 = 80\n cx = 160\n cy = 160\n\n # --------------- BOTON PARA AVANZAR ADELANTE-------------\n self.b_upper = QPushButton(self.botones)\n self.b_upper.setGeometry(cx, cy-h_1, w_1, h_1)\n self.b_upper.setMouseTracking(True)\n self.b_upper.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowUp\")))\n self.b_upper.setIconSize(QSize(h_1,w_1))\n self.b_upper.setCursor(Qt.PointingHandCursor)\n self.b_upper.setAutoDefault(False)\n \n #self.b_upper.clicked.connect(self.Mup)\n self.b_upper.pressed.connect(self.Mup)\n self.b_upper.released.connect(self.stopCount)\n self.b_upper.setStyleSheet(\"border-radius: 30px;\")\n self.b_upper.setCheckable(True)\n\n # --------------- BOTON BOTON PARA RETROCEDER------------- \n self.b_Back = QPushButton(self.botones)\n self.b_Back.setGeometry(cx, cy+h_1, w_1, h_1)\n self.b_Back.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowDown\")))\n self.b_Back.setMouseTracking(True)\n self.b_Back.setIconSize(QSize(h_1,w_1))\n self.b_Back.setCursor(Qt.PointingHandCursor)\n self.b_Back.setAutoDefault(False)\n\n #self.b_Back.clicked.connect(self.MDown)\n self.b_Back.pressed.connect(self.MDown)\n self.b_Back.released.connect(self.stopCount)\n self.b_Back.setStyleSheet(\"border-radius: 30px;\")\n\n # --------------- BOTON PARA GIRAR A LA IZQUIERDA------------- \n self.b_left = QPushButton(self.botones)\n self.b_left.setGeometry(cx+w_1+10, cy, w_1, h_1)\n self.b_left.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowRight\")))\n self.b_left.setMouseTracking(True)\n self.b_left.setIconSize(QSize(h_1,w_1))\n self.b_left.setCursor(Qt.PointingHandCursor)\n self.b_left.setAutoDefault(False)\n\n #self.b_left.clicked.connect(self.MLeft)\n self.b_left.pressed.connect(self.MLeft)\n self.b_left.released.connect(self.stopCount)\n self.b_left.setStyleSheet(\"border-radius: 30px;\")\n\n # --------------- BOTON PARA GIRAR A LA DERECHA------------- \n\n self.b_right = QPushButton(self.botones)\n self.b_right.setGeometry(cx-w_1-10, cy, w_1, h_1)\n self.b_right.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowLeft\")))\n self.b_right.setMouseTracking(True)\n self.b_right.setIconSize(QSize(h_1,w_1))\n self.b_right.setCursor(Qt.PointingHandCursor)\n self.b_right.setAutoDefault(False)\n\n #self.b_right.clicked.connect(self.Mright)\n self.b_right.pressed.connect(self.Mright)\n self.b_right.released.connect(self.stopCount)\n self.b_right.setStyleSheet(\"border-radius: 30px;\")\n\n\n \n #-------Interrupcion cada 50ms para actualizar el set point\n self.direction =''\n self.timer1 = QTimer()\n self.timer1.setInterval(50)\n self.timer1.timeout.connect(self.contador)\n self.timer1.stop() #Inicai imagen statica\n\n # ======================= FUNCIONES ============================\n \n def contador(self):\n if self.direction== 'UP':\n\n self.SP_M1 = self.SP_M1+1\n \n if self.SP_M1>=821:\n self.SP_M1 =821\n \n \n \n elif self.direction== 'DW':\n \n self.SP_M1 = self.SP_M1-1\n \n if self.SP_M2<=-821:\n self.SP_M1=-821\n \n elif self.direction== 'LF':\n self.SP_M2 = self.SP_M2 +1\n if self.SP_M2>=45 :\n self.SP_M2=45\n \n \n elif self.direction== 'RT':\n self.SP_M2 = self.SP_M2-1\n\n if self.SP_M2<=-45 :\n self.SP_M2=-45\n \n 
self.RMP_M1.setText(str(self.SP_M1))\n self.RMP_M2.setText(str(self.SP_M2))\n #texto1 = 'SP:' + str(self.SP_M1) + ';'+ str(self.SP_M2)\n #self.serial.write(texto1.encode())\n\n def stopCount(self):\n self.timer1.stop()\n self.Write_SetPoint()\n \n def Mup(self):\n self.direction = 'UP' #adelante\n self.timer1.start()\n\n def MDown(self):\n self.direction = 'DW' #retroceso\n self.timer1.start()\n\n def MLeft(self):\n self.direction = 'LF' #Giro a la izquierda\n self.timer1.start()\n \n def Mright(self):\n self.direction = 'RT' ##Giro a la derecha\n self.timer1.start()\n \n\n def Mensaje(self,checked):\n mensaje = QMessageBox(self)\n mensaje.setWindowTitle(\"Mensaje\")\n mensaje.setStyleSheet(\"background-color: rgb(38, 198, 218);color: balck\")\n font = QFont()\n font.setPointSize(10)\n mensaje.setFont(font)\n\n #baud_rate = 9600\n Port = self.list_Puertos.currentText()\n self.serial = QtSerialPort.QSerialPort(Port,baudRate=9600,readyRead=self.ReadValuesSensor)\n self.button.setText(\"Desconectar\" if checked else \"Conectar\")\n if checked:\n if not self.serial.isOpen():\n if not self.serial.open(QIODevice.ReadWrite):\n self.btn_Conectar.setChecked(False)\n #self.timer.start()\n \n\n \n else:\n self.serial.close()\n #self.timer.stop()\n self.contador()\n \n mensaje.setText(\"La conexion fue realizada con éxito \")\n mensaje.move(self.pos().x()+50, self.pos().y()+150)\n mensaje.exec()\n\n \n def Write_SetPoint(self):\n \n texto1 = 'SP:' + str(int(self.SP_M1*(255/821))) + ';'+ str(int((255/2)*(int(self.SP_M2)/60+1)))\n self.serial.write(texto1.encode())\n # print(texto)\n # SP:-NN;-MN\n # (255/2)(int(self.SP_M2)/60+1)\n print(texto1)\n\n def ReadValuesSensor(self):\n\n while self.serial.canReadLine():\n cad = self.serial.readLine().data().decode().strip()\n print(cad)\n if \":\" in cad:\n #print(cad)\n pos=cad.index(\":\")\n label=cad[:pos]\n value=cad[pos+1:]\n if label == 'velo1':\n self.velocidad_M1 = int(value)\n if label == 'velo2':\n self.velocidad_M2 = int(value)\n \n if label == 'corr1':\n self.corriente_M1 = int(value)\n if label == 'corr2':\n self.corriente_M2 = int(value)\n\n self.update_data()\n\n def update_data (self):\n self.sensor_M1.update_value(abs(int(self.velocidad_M1*(821/1023))))\n self.sensor_M2.update_value(int(int(self.velocidad_M2)*(120/1024)-60))\n #(255/2)(int(self.SP_M2)/60+1)\n self.mA_M1.setText(str(round(self.corriente_M1*(5000000/(1023*752)),2)))\n\n if (self.SP_M1<0):\n self.LedDirecion.setStyleSheet(\"background-color: red\")\n else:\n self.LedDirecion.setStyleSheet(\"background-color: green\")\n \n\n\ndef main():\n app = QApplication(sys.argv)\n ex = Main_App()\n ex.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n\n\n#%%","repo_name":"dvsivle/proyecto-diseno-o-de-controlador-de-motores","sub_path":"DISEÑO DE CONTROLADOR DE MOTORES/MICROCONTROLADOR-INTERFACE_APP/APP_CONTROL_CARRITO/AppVelocityControl.py","file_name":"AppVelocityControl.py","file_ext":"py","file_size_in_byte":15504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"23925547989","text":"import os\n\ndef get_tail_byte(fname,last_bytes):\n\t# Open file with 'b' to specify binary mode\n\twith open(fname, 'rb') as file:\n\t\tfile.seek(last_bytes * -1, os.SEEK_END) # Note minus sign\n\t\tbyte_data = file.read()\n\t\treturn byte_data.decode('utf-8')\n\treturn \"\"\n\n\nif __name__ == \"__main__\":\n\tprint(get_tail_byte(\"11_tail.py\",100))\n","repo_name":"donarts/sourcecode","sub_path":"python/example/11_tail.py","file_name":"11_tail.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7152198298","text":"#!/usr/bin/env python\nimport unittest\nimport gevent\nimport requests\nfrom gevent import monkey\nmonkey.patch_socket()\n\n\nclass TestProxy(unittest.TestCase):\n def test_proxy(self):\n local_proxy = {\"http\": \"http://127.0.0.1:8399\"}\n\n def get():\n r = requests.get(\"http://www.baidu.com\", proxies=local_proxy)\n self.assertEqual(r.status_code, 200)\n\n gevent_list = []\n for i in xrange(5):\n gevent_list.append(gevent.spawn(get))\n gevent.joinall(gevent_list)\n","repo_name":"loadlj/rzproxy","sub_path":"tests/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"77"}
+{"seq_id":"17649401385","text":"import base64\n\nDATABASE_NAME = 'ocean'\nDATABASE_USER = 'oceanuser'\nDATABASE_PASSWORD = 'ocean@123'\nDATABASE_HOST = '127.0.0.1'\nDATABASE_PORT = '5432'\nFRONTEND_URL = 'https://www.testoceanplatform.com/'\nBACKEND_URL = 'https://www.testoceanplatform.com/api/v1/ocean/admin'\n\n# # Xero keys and URL's\n# SIGNUP_SCOPE = 'offline_access+openid+profile+email+accounting.transactions+' \\\n# 'accounting.contacts+accounting.settings+' \\\n# 'accounting.attachments+accounting.reports.read'\n# REDIRECT_URI = 'https://b522-2409-4073-2e93-77db-147a-4fab-45a9-e65.ngrok.io'\n# # REDIRECT_URI='http://localhost:8001/account/token/'\n# CLIENT_ID = '0F28E5B43A7445BCA5DE7B8D2D64A965'\n# CLIENT_SECRET = 'iRxAhGllAUITY-ktKLAY5v37s2IT29NeaBvMo00RSpY8DjRh'\nSTATE = '123'\n#\n\n#\nAUTH_URL_GENERATOR = 'https://login.xero.com/identity/connect/authorize?response_type=code'\nTOKEN_URL = 'https://identity.xero.com/connect/token'\nCONNECTION_URL = 'https://api.xero.com/connections'\nBALANCE_SHEET_URL = 'https://api.xero.com/api.xro/2.0/Reports/BalanceSheet'\nPROFIT_LOSS_URL = 'https://api.xero.com/api.xro/2.0/Reports/ProfitAndLoss'\nBANK_SUMMARY_URL = 'https://api.xero.com/api.xro/2.0/Reports/BankSummary'\nREFRESHING_URL = 'https://identity.xero.com/connect/token'\nUSER_DETAILS = 'https://api.xero.com/api.xro/2.0/Users'\nCONTACT_DETAILS = 'https://api.xero.com/api.xro/2.0/Contacts'\n\n# CLIENT_ID = \"D9B541ECA6E34916AB838BF8E641F8F1\"\n# CLIENT_SECRET = \"phFzovy45PMf0zsEx_Tt7OxoT8Z77Bl45JJbzydz5cGtsn2_\"\n\n\nCLIENT_ID = \"12F7583836C942418227E7EAC79D11D6\"\nCLIENT_SECRET = \"l9llhAyLiv0gViFV4R1A-qMs9BD8ANXsYPbNRUmzASkWqtnO\"\n\nSIGNUP_SCOPE = \"offline_access+openid+profile+email\"\nSIGN_UP_REDIRECT_URI = \"http://localhost:8001/account/xero/callback/\"\n\ntoken_value = CLIENT_ID + ':' + CLIENT_SECRET\nBASIC_TOKEN = base64.urlsafe_b64encode(token_value.encode()).decode()\n\n# AWS SNS keys\n\n# AWS_ACCESS_KEY = \"AKIAVXLDNFMCUBMJOS24\"\n# AWS_SECRET_ACCESS_KEY = \"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\"\n# AWS_TOPIC_ARN = \"arn:aws:sns:ap-south-1:393734859525:OCEAN-TOPIC\"\nREGION_NAME = \"ap-south-1\"\n# AWS_TOPIC_ARN = \"arn:aws:sns:ap-south-1:393734859525:TEST\"\n# AWS_TOPIC_ARN = \"arn:aws:sns:ap-south-1:393734859525:TEST-OTP\"\n\nAWS_ACCESS_KEY = 'AKIAVXLDNFMCR4GVDT3E'\nAWS_SECRET_ACCESS_KEY = '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'\nAWS_TOPIC_ARN = 'arn:aws:sns:ap-south-1:393734859525:OTPCHECK'\n\n# Codat constants\nCODAT_API_KEY = 'NVfJAZiDLd6oZ65LOrKCxp459SBa1s1jb3azmkfd'\nCODAT_AUTHORIZATION_KEY = 'Basic TlZmSkFaaURMZDZvWjY1TE9yS0N4cDQ1OVNCYTFzMWpiM2F6bWtmZA=='\n\nAUTH_PROVIDERS = {\n \"email\": \"email\", \"xero\": \"xero\", \"google\": \"google\"\n}\n# Social Authentication Status\nINITIATED = \"INITIATED\"\nUPDATED_DETAILS = \"UPDATED_DETAILS\"\nCOMPLETED = \"COMPLETED\"\nCOMPLETE_PROFILE = \"COMPLETE_PROFILE\"","repo_name":"AKSHAY-KR99/ocean-imp","sub_path":"ocean_dev/ocean_dev/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"40854360830","text":"# 摄像头实时人脸识别\n\n# Author: coneypo\n# Blog: http://www.cnblogs.com/AdaminXie\n# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera\n\n# Created at 2018-05-11\n# Updated at 2018-10-29\n\nimport dlib # 人脸处理的库 Dlib\nimport numpy as np # 数据处理的库 numpy\nimport cv2 # 图像处理的库 OpenCv\nimport pandas as pd # 数据处理的库 Pandas\nimport time\nimport os\n\nimport redis\nimport pickle\n\nclass Redis:\n @staticmethod\n def connect():\n r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)\n return r\n\n #将内存数据二进制通过序列号转为文本流,再存入redis\n @staticmethod\n def set_data(r,key,data,ex=None):\n r.set(key,pickle.dumps(data),ex)\n\n # 将文本流从redis中读取并反序列化,返回返回\n @staticmethod\n def get_data(r,key):\n data = r.get(key)\n if data is None:\n return None\n\n return pickle.loads(data)\n\n\n# 人脸识别模型,提取 128D 的特征矢量\n# face recognition model, the object maps human faces into 128D vectors\nfacerec = dlib.face_recognition_model_v1(\"static/data_dlib/dlib_face_recognition_resnet_model_v1.dat\")\n\n\n# 计算两个向量间的欧式距离\ndef return_euclidean_distance(feature_1, feature_2):\n feature_1 = np.array(feature_1)\n feature_2 = np.array(feature_2)\n dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))\n print(\"e_distance: \", dist)\n\n if dist > 0.4:\n return \"diff\"\n else:\n return \"same\"\n\n\n# 处理存放所有人脸特征的 CSV\npath_features_known_csv = \"static/features_all.csv\"\ncsv_rd = pd.read_csv(path_features_known_csv, header=None)\n\n# 存储的特征人脸个数\n# print(csv_rd.shape[0])\n\n# 用来存放所有录入人脸特征的数组\nfeatures_known_arr = []\nfeatures_known_name = []\n\n# 读取已知人脸数据\n# known faces\nfor i in range(csv_rd.shape[0]):\n features_someone_arr = []\n for j in range(0, len(csv_rd.loc[i, :])):\n # for j in range(0, len(csv_rd.ix[i, :])):\n # print(csv_rd.loc[i, :][j])\n features_someone_arr.append(csv_rd.loc[i, :][j])\n # features_someone_arr.append(csv_rd.ix[i, :][j])\n # print(features_someone_arr)\n name = features_someone_arr.pop()\n features_known_name.append(name)\n features_known_arr.append(features_someone_arr)\nprint(\"Faces in Database:\", len(features_known_arr))\n\n# Dlib 检测器和预测器\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor('static/data_dlib/shape_predictor_68_face_landmarks.dat')\n\n# 创建 cv2 摄像头对象\ncap = cv2.VideoCapture(1)\n# cap.open(\"rtsp://admin:Aa123456@192.180.0.180/Streaming/Channels/103\")\n\n# cap.set(propId, value)\n# 设置视频参数,propId 设置的视频参数,value 设置的参数值\ncap.set(3, 100)\n\n# 返回一张图像多张人脸的 128D 特征\ndef get_128d_features(img_gray):\n faces = detector(img_gray, 1)\n if len(faces) != 0:\n face_des = []\n for i in range(len(faces)):\n shape = predictor(img_gray, faces[i])\n face_des.append(facerec.compute_face_descriptor(img_gray, shape))\n else:\n face_des = []\n return face_des\n\n\n# cap.isOpened() 返回 true/false 检查初始化是否成功\nwhile cap.isOpened():\n\n flag, img_rd = cap.read()\n kk = cv2.waitKey(1)\n\n # 取灰度\n img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)\n # print(img_gray)\n # 人脸数 faces\n faces = detector(img_gray, 0)\n\n # 待会要写的字体\n font = cv2.FONT_HERSHEY_COMPLEX\n\n cv2.putText(img_rd, \"Press 'q': Quit\", (20, 450), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)\n\n # 存储人脸名字和位置的两个 list\n # list 1 (faces): store the name of faces Jack unknown unknown Mary\n # list 2 (pos_namelist): store the positions of faces 12,1 1,21 1,13 31,1\n\n # 存储所有人脸的名字\n pos_namelist = []\n name_namelist = []\n features_known_arr2 = []\n\n other = os.listdir('static/data_faces_from_camera/other')\n others=[]\n for i in range(len(other)):\n if(other[i] != '.DS_Store'):\n 
others.append(other[i])\n now = int(round(time.time(), 2) * 1000)\n if len(others)>0:\n last = max(others)[:-4]\n else:\n last = 0\n code = 800\n # print(last)\n\n # print(int(last)+code)\n # 按下 q 键退出\n if kk == ord('q'):\n break\n else:\n # 检测到人脸\n if len(faces) != 0:\n # 获取当前捕获到的图像的所有人脸的特征,存储到 features_cap_arr\n features_cap_arr = []\n for i in range(len(faces)):\n shape = predictor(img_rd, faces[i])\n features_cap_arr.append(facerec.compute_face_descriptor(img_rd, shape))\n\n # 遍历捕获到的图像中所有的人脸\n for k in range(len(faces)):\n # 让人名跟随在矩形框的下方\n # 确定人名的位置坐标\n # 先默认所有人不认识,是 unknown\n name_namelist.append(\"unknown\")\n\n # 每个捕获人脸的名字坐标\n pos_namelist.append(tuple([faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))\n print(features_known_arr)\n # 对于某张人脸,遍历所有存储的人脸特征\n for i in range(len(features_known_arr)):\n # features_known_arr2 = features_known_arr\n print(\"with person_\", str(i+1), \"the \", end='')\n # name = features_known_arr2[i].pop()\n\n # print(features_known_arr2[i])\n\n # 将某张人脸与存储的所有人脸数据进行比对\n compare = return_euclidean_distance(features_cap_arr[k], features_known_arr[i])\n\n if compare == \"same\": # 找到了相似脸\n name_namelist[k] = features_known_name[i]\n # name_namelist[k] = \"person_\" + str(i+1)\n #else 不相似的脸 截图保存 等待后续操作\n else:\n print(now)\n print(last)\n #\n if((now) > int(last)+code or (int(last) == 0)):\n # 将人脸计数器清零\n cnt_ss = 0\n path_make_dir = \"static/data_faces_from_camera/\"\n for kd, d in enumerate(faces):\n # 计算矩形框大小\n height = (d.bottom() - d.top())\n width = (d.right() - d.left())\n hh = int(height / 2)\n ww = int(width / 2)\n color_rectangle = (255, 255, 255)\n if (d.right() + ww) > 640 or (d.bottom() + hh > 480) or (d.left() - ww < 0) or (\n d.top() - hh < 0):\n cv2.putText(img_rd, \"OUT OF RANGE\", (20, 300), font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)\n save_flag = 1\n color_rectangle = (0, 0, 255)\n else:\n save_flag = 1\n color_rectangle = (0, 255, 255)\n # 根据人脸大小生成空的图像\n im_blank = np.zeros((int(height * 2), width * 2, 3), np.uint8)\n if save_flag:\n cnt_ss += 1\n # print(cnt_ss)\n if(height * 2<720):\n for ii in range(height * 2):\n if(width * 2<720):\n for jj in range(width * 2):\n if(d.top() - hh + ii<720):\n im_blank[ii][jj] = img_rd[d.top() - hh + ii][d.left() - ww + jj]\n cv2.imwrite(path_make_dir + \"/other/\" + str(now) + \".jpg\", im_blank)\n print(\"写入本地:\", path_make_dir + \"/other/\" + str(now) + \".jpg\")\n\n # 矩形框\n for kk, d in enumerate(faces):\n # print(d.left(), d.top())\n # print(d.right(), d.bottom())\n # 绘制矩形框\n # if(name_namelist[kk]!='unknown'):\n # cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 255, 255), 2)\n # else:\n cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 0, 255),\n 2)\n # cv2.rectangle(img_rd,\n # tuple([d.left() - ww, d.top() - hh]),\n # tuple([d.right() + ww, d.bottom() + hh]),\n # color_rectangle, 2)\n\n # 在人脸框下面写人脸名字\n for i in range(len(faces)):\n cv2.putText(img_rd, name_namelist[i], pos_namelist[i], font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)\n\n # 將識別出的人臉存入Redis\n # r = Redis.connect()\n # if(len(name_namelist)>0):\n # Redis.set_data(r, 'name', name_namelist)\n print(\"Name list now:\", name_namelist, \"\\n\")\n\n cv2.putText(img_rd, \"Face Recognition\", (20, 40), font, 1, (0, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(img_rd, \"Faces: \" + str(len(faces)), (20, 100), font, 1, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"camera\", img_rd)\n# 释放摄像头\ncap.release()\n\n# 
删除建立的窗口\ncv2.destroyAllWindows()\n","repo_name":"liu279/face_recognize","sub_path":"face_reco_from_camera.py","file_name":"face_reco_from_camera.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18141561032","text":"\ndef sqrt(a, threshold = 0.00000001, maxIter = 50):\n\t'''Calculate the square root of 'a' using newtons method'''\n\tXi = 1.0 #a starting guess\n\tDelta = 1.0\n\tcnt = 1\n\twhile Delta > threshold and cnt <= maxIter:\n\t\tnewXi = (Xi + a / Xi) / 2\n\t\tDelta = abs(newXi - Xi)\n\t\tXi = newXi\n\t\tcnt += 1\n\treturn Xi\n\n","repo_name":"Gholtes/Algorithms","sub_path":"sqrt.py","file_name":"sqrt.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"35520359899","text":"class Nstacks:\n\n def __init__(self,k,n):\n self.k=k # #of stacks\n self.n=n # size of all stacks\n\n self.arr=[0]*self.n #initialise and arr with k stacks\n\n self.top=[-1]*self.k # all k stacks are empty\n\n self.free=0 # top of free stack\\\n\n self.next= [i+1 for i in range(self.n)] # point to next ele\n self.next[self.n -1]=-1 # point till last ele\n \n def isEmpty(self,sn):\n return self.top[sn]==-1\n\n def isFull(self):\n return self.free ==-1\n\n def push(self,item,sn):\n if self.isFull():\n print(\"STACK OVERFLOWN\")\n return\n \n insert_at=self.free #insert at the first free pos\n\n self.free=self.next[self.free] # move free pos\n self.arr[insert_at]=item #insert the item in free pos\n self.next[insert_at]=self.top[sn] # move top pos\n self.top[sn]=insert_at #new top\n\n def pop(self,sn):\n if self.isEmpty(sn):\n print(\"STACK UNDERFLOWN\")\n return None\n \n topOfStack=self.top[sn] # item at top of stack\n self.top[sn]=self.next[self.top[sn]] # new top\n self.next[topOfStack] #old top is moved to free pos\n self.free=topOfStack\n\n return self.arr[topOfStack]\n\n def printStack(self,sn):\n topIndex=self.top[sn]\n while topIndex!=-1:\n print(self.arr[topIndex])\n topIndex=self.next[topIndex]\n\n def printAll(self):\n \n for i in range(self.n):\n print(self.arr[i])\n\n\nif __name__=='__main__':\n\n NS=Nstacks(4,16)\n\n NS.push(1000,0)\n NS.push(800,0)\n NS.push(900,0)\n NS.push(700,0)\n\n NS.push(121,1)\n NS.push(189,1)\n NS.push(165,1)\n NS.push(132,1)\n\n NS.push(265,2)\n NS.push(244,2)\n NS.push(211,2)\n NS.push(278,2)\n\n NS.push(369,3)\n NS.push(344,3)\n NS.push(311,3)\n NS.push(355,3)\n\n\n print(\"*\"*10) \n print(\"*\"*10)\n NS.printAll()\n print(\"*\"*10)\n print(\"*\"*10)\n\n print(\"\")\n print(\"\")\n\n print(\"*\"*10)\n NS.printStack(0)\n print(\"*\"*10)\n NS.printStack(1)\n print(\"*\"*10)\n NS.printStack(2)\n print(\"*\"*10)\n NS.printStack(3)\n print(\"*\"*10)\n\n\n print(\"popped from 0 \",NS.pop(0))\n print(\"popped from 1 \",NS.pop(1))\n print(\"popped from 2 \",NS.pop(2))\n print(\"popped from 3 \",NS.pop(3))\n\n\n print(\"*\"*10)\n NS.printAll()\n","repo_name":"Abrar-04/DSA-Practice","sub_path":"06.Stacks/450.Stacks/Nstacks.py","file_name":"Nstacks.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"32584052738","text":"import logging\nimport ckan.lib.helpers as h\nimport ckan.plugins as p\nfrom ckan.plugins import implements, toolkit\nfrom ckanext.linkfinder.model import make_uuid\nfrom ckan.logic import get_action\n\nlog = logging.getLogger('ckanext.linkfinder')\n\nclass LinkFinderPlugin(p.SingletonPlugin):\n implements(p.IConfigurer, inherit=True)\n implements(p.ITemplateHelpers, inherit=True)\n implements(p.IDomainObjectModification, inherit=True)\n\n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n\n def get_helpers(self):\n \"\"\"\n A dictionary of extra helpers that will be available to provide\n ga report info to templates.\n \"\"\"\n return {\n 'linkfinder_installed': lambda: True,\n }\n\n def notify(self, entity, operation=None):\n \"\"\"\n if not isinstance(entity, model.Resource):\n return\n\n if operation:\n if operation == model.DomainObjectOperation.new:\n self._create_task(entity)\n else:\n # if operation is None, resource URL has been changed, as the\n # notify function in IResourceUrlChange only takes 1 parameter\n self._create_task(entity)\n \"\"\"\n\n def _create_task(self, resource):\n user = get_action('get_site_user')({'model': model,\n 'ignore_auth': True,\n 'defer_commit': True}, {})\n context = json.dumps({\n 'site_url': self.site_url,\n 'apikey': user.get('apikey')\n })\n data = json.dumps(resource_dictize(resource, {'model': model}))\n\n task_id = make_uuid()\n task_status = {\n 'entity_id': resource.id,\n 'entity_type': u'resource',\n 'task_type': u'qa',\n 'key': u'celery_task_id',\n 'value': task_id,\n 'error': u'',\n 'last_updated': datetime.now().isoformat()\n }\n task_context = {\n 'model': model,\n 'user': user.get('name'),\n }\n\n get_action('task_status_update')(task_context, task_status)\n celery.send_task(\"qa.update\", args=[context, data], task_id=task_id)\n","repo_name":"datagovuk/ckanext-linkfinder","sub_path":"ckanext/linkfinder/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71874049529","text":"import errtee\nimport re, urllib.request\nimport json\nimport os\n\n\"\"\"\nReads the list of files in http://www.apache.org/dist/\n\nCreates:\n../../site/json/foundation/releases.json\nFormat:\n{ top-level dir: { release-id: date}, ... }\n\nThe release id is derived from the filename by removing common suffixes etc, see cleanFilename()\nThe date comes from the first entry\n\n../../site/json/foundation/releases-files.json\nFormat:\n{ top-level dir: { release-id: [list of files for that release-id]}, ... }\n\nTODO: it would probably be more efficient to parse the output of\nsvn ls -R https://dist.apache.org/repos/dist/release/\nCould cache the output based on the last changed date\n\nOr use an rsync listing:\nrsync --list-only -r rsync.apache.org::apache-dist\nNote that rsync excludes hashes, sigs and KEYS files; however they are not needed here.\n\"\"\"\n\nreleases = {}\nfiles = {}\nmainurl = \"http://www.apache.org/dist/\"\n\nx = 0\n\n# don't try to maintain history for the moment...\n#try:\n# with open(\"../../site/json/foundation/releases.json\") as f:\n# releases = json.loads(f.read())\n# f.close()\n#except Exception as err:\n# print(\"Could not read releases.json, assuming blank slate\")\n\ndef getDirList(url):\n try:\n data = urllib.request.urlopen(url).read().decode('utf-8')\n for entry, xd, xdate in re.findall(r\".+\\s+(\\d\\d\\d\\d-\\d\\d-\\d\\d)\", data, re.MULTILINE | re.UNICODE):\n yield(entry, xdate, xd)\n except:\n pass\n\ndef cleanFilename(filename):\n \"\"\"\n Attempts to determine the release id to which a file belongs\n Strips extensions such as .tgz etc, then suffixes such as -sources\n Replaces qualifiers such as -assembly-, -parent- by '-'\n Returns the simplified filename .\n \"\"\"\n for suffix in ['.tgz', '.gz', '.bz2', '.xz', '.zip', '.rar', '.tar', 'tar', '.deb', '.rpm', '.dmg', '.egg', '.gem', '.pom', '.war', '.exe',\n '-scala2.11', '-cdh4', '-hadoop1', '-hadoop2', '-hadoop2.3', '-hadoop2.4', '-all',\n '-src', '_src', '.src', '-sources', '_sources', '-source', '-bin', '-dist',\n '-source-release', '-source-relase', '-apidocs', '-javadocs', '-javadoc', '_javadoc', '-tests', '-test', '-debug', '-uber',\n '-macosx', '-distribution', '-example', '-manual', '-native', '-win', '-win32', '-linux', '-pack', '-packaged', '-lib', '-current', '-embedded',\n '-py', '-py2', '-py2.6', '-py2.7', '-no', 'unix-distro', 'windows-distro', 'with', '-dep', '-standalone', '-war', '-webapp', '-dom', '-om', '-manual', '-site',\n '-32bit', '-64bit', '-amd64', '-i386', '_i386', '.i386', '-x86_64', '-minimal', '-jettyconfig', '-py2.py3-none-any', 'newkey', 'oldkey', 'jars', '-jre13', '-hadoop1', '-hadoop2', '-project',\n '-with-dependencies', '-client', '-server', '-doc', '-docs', 'server-webapps', '-full', '-all', '-standard', '-for-javaee', '-for-tomcat',\n 'hadoop1-scala2', '-deployer', '-fulldocs', '-windows-i64', '-windows-x64', '-embed', '-apps', '-app', '-ref', '-installer', '-bundle', '-java']:\n if filename[len(filename)-len(suffix):] == suffix:\n filename = filename[0:len(filename)-len(suffix)]\n for repl in ['-assembly-', '-minimal-', '-doc-', '-src-', '-webapp-', '-standalone-', '-parent-', '-project-', '-win32-']:\n filename = filename.replace(repl, '-')\n return filename\n\ndef cleanReleases(committeeId):\n if len(releases[committeeId]) == 0:\n del releases[committeeId]\n del files[committeeId]\n\ndef parseDir(committeeId, path):\n print(\" %s...\" % path)\n if len(path) > 100:\n print(\"WARN too long path: recursion?\")\n return\n for f, d, 
xd in getDirList(\"%s/%s\" % (mainurl, path)):\n if xd:\n if (\"/%s\" % f) not in path and f.lower() not in ['binaries', 'repos', 'updatesite', 'current', 'stable', 'stable1', 'stable2', 'binary', 'notes', 'doc', 'eclipse', 'patches', 'docs', 'changes', 'features', 'tmp', 'cpp', 'php', 'ruby', 'py', 'py3', 'issuesfixed', 'images', 'styles', 'wikipages']:\n parseDir(committeeId, \"%s/%s\" % (path, f))\n # Note: this eliminates binary archives; not sure whether that is intentional or not.\n elif not re.search(r\"(MD5SUM|SHA1SUM|\\.md5|\\.mds|\\.sh1|\\.sh2|\\.sha|\\.asc|\\.sig|\\.bin|\\.pom|\\.jar|\\.whl|\\.pdf|\\.xml|\\.xsd|\\.html|\\.txt|\\.cfg|\\.ish|\\.pl|RELEASE.NOTES|LICENSE|KEYS|CHANGELOG|NOTICE|MANIFEST|Changes|readme|x86|amd64|-manual\\.|-docs\\.|-docs-|-doc-|Announcement|current|-deps|-dependencies|binary|-bin-|-bin\\.|-javadoc-|-distro|rat_report)\", f, flags=re.IGNORECASE):\n filename = cleanFilename(f)\n if len(filename) > 1:\n if filename not in releases[committeeId]:\n releases[committeeId][filename] = d\n files[committeeId][filename] = []\n print(\" - %s\\t\\t\\t%s\" % (filename, f))\n files[committeeId][filename].append(\"%s/%s\" % (path, f))\n\n\nfor committeeId, d, xdir in getDirList(mainurl):\n if committeeId != 'incubator':\n if committeeId not in ['xml', 'zzz', 'maven-repository']:\n print(\"Parsing /dist/%s content:\" % committeeId)\n releases[committeeId] = releases[committeeId] if committeeId in releases else {}\n files[committeeId] = {}\n parseDir(committeeId, committeeId)\n cleanReleases(committeeId)\n else:\n for podling, d, xd in getDirList(\"%s/incubator/\" % mainurl):\n print(\"Parsing /dist/incubator-%s content:\" % podling)\n committeeId = \"incubator-%s\" % podling\n releases[committeeId] = releases[committeeId] if committeeId in releases else {}\n files[committeeId] = {}\n parseDir(committeeId, \"incubator/%s\" % podling)\n cleanReleases(committeeId)\n\nprint(\"Writing releases.json\")\nwith open(\"../../site/json/foundation/releases.json\", \"w\") as f:\n json.dump(releases, f, sort_keys=True, indent=0)\n f.close()\nwith open(\"../../site/json/foundation/releases-files.json\", \"w\") as f:\n json.dump(files, f, sort_keys=True, indent=0)\n f.close()\n\nprint(\"All done!\")","repo_name":"ep-infosec/33_apache_comdev-projects","sub_path":"scripts/cronjobs/parsereleases.py","file_name":"parsereleases.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27238118836","text":"import itertools\nimport operator\nimport random\nimport numpy as np\nimport pickle\nimport os\n\nfrom sklearn.metrics import accuracy_score\nfrom deap import gp\nfrom deap import base\nfrom deap import creator\n\ndef get_args():\n str = \"\\n************************************************************\\n\"\n str += \"* Welcome to Copy Task champion arena *\\n\"\n str += \"* Please provide the following arguments comma delimited *\\n\"\n str += \"* Type of test to run options are (required): *\\n\"\n str += \"* - std -> to run the standard champion *\\n\"\n str += \"* - mul -> to run the multiplication champion *\\n\"\n str += \"* - mod -> to run the modified champion *\\n\"\n str += \"* - log -> to run the logical champion *\\n\"\n str += \"* Depth of sequence i.e. number of 1/-1's (required): *\\n\"\n str += \"* - options are: 4, 5, 6, 15, 21 *\\n\"\n str += \"* Range of noise to use (required): *\\n\"\n str += \"* - options are: 0, 0.5, 0.25, 0.125 *\\n\"\n str += \"* Which champion to load (optional): *\\n\"\n str += \"* - example 'champion_1' .... 'champion_50' *\\n\"\n str += \"* Number of tests to run (optional): *\\n\"\n str += \"* - integer represents the number of tests *\\n\"\n str += \"* Length of Noise in sequence (optional): *\\n\"\n str += \"* - integer represents the length of noise *\\n\"\n str += \"************************************************************\\n\"\n print(str)\n \n options = (\"std\", \"mul\",\"mod\",\"log\")\n while True:\n try:\n input_args = input(\"Choose your champion:\\n\").strip().lower().split(\",\")\n\n if len(input_args) < 2:\n raise ValueError\n\n if len(input_args)>0 and input_args[0].strip() not in options:\n raise ValueError\n\n if len(input_args)>1 and int(input_args[1].strip()) not in (4, 5, 6, 15, 21):\n raise ValueError\n\n # Everything is fine \n break\n\n except ValueError:\n print(\"Sorry your entry is wrong, try again!\")\n\n # Reading Type and Depth Values\n type = input_args[0]\n depth = int(input_args[1])\n\n # Default Range Value if not passed\n range_val = 0\n if type in ('mod'):\n if len(input_args) > 2:\n range_val = float(input_args[2])\n else:\n range_val = 0.5\n\n # Default Champion if not passed\n champion = \"champion_1\"\n if len(input_args) > 3:\n champion = input_args[3]\n \n # Default Number of tests if not passed\n num_test = 50\n if len(input_args) > 4:\n num_test = int(input_args[4])\n\n # Default Noise and generalize\n noise, generalize = 10, True\n if len(input_args) > 5:\n noise = int(input_args[5])\n generalize = False\n\n return type, depth, range_val, champion, num_test, generalize, noise\n\n'''\nProblem setup\n'''\n\n# Generate Random Data\ndef generate_data(noise, depth, range_val, num_tests, generalize):\n retval = []\n for _ in range(num_tests):\n sequence = []\n sequence.append(random.choice((-1.0, 1.0)))\n noise = 10 if not generalize else random.randint(10, 20)\n for _ in range(depth - 1):\n sequence.extend([random.uniform(-range_val,range_val) for _ in range(noise)])\n sequence.append(random.choice((-1.0, 1.0)))\n retval.append(sequence)\n return retval\n\n# Generate Classification based on dataset\ndef generate_output(dataset, type):\n retval = []\n for i in range(num_tests):\n data = dataset[i]\n sequence = []\n counter = 0\n for el in data:\n if type == 'mod':\n if el == 1 or el == -1:\n counter += el\n else:\n counter += el\n sequence.append(-1 if counter < 0 else 1)\n retval.append(sequence)\n return retval\n\n# Generate expected GP Action based on Dataset\ndef 
generate_action(dataset, type):\n retval = []\n for i in range(num_tests):\n data = dataset[i]\n sequence = []\n MEMORY = []\n if type == 'mod':\n for el in data:\n if el != 1 and el != -1:\n sequence.append(2)\n else:\n if len(MEMORY) == 0 or MEMORY[len(MEMORY)-1] == el:\n sequence.append(0)\n MEMORY.append(el)\n else:\n sequence.append(1)\n MEMORY.pop()\n else:\n for el in data:\n if el == 0:\n sequence.append(2)\n else:\n if len(MEMORY) == 0 or MEMORY[len(MEMORY)-1] == el:\n sequence.append(0)\n MEMORY.append(el)\n else:\n sequence.append(1)\n MEMORY.pop()\n retval.append(sequence)\n return retval\n\n'''\n Begining of DEAP Structure\n'''\n\n# Define a protected division function\ndef protected_div(left, right):\n try:\n return left / right\n except ZeroDivisionError:\n return 1\n\n# Define a new if-then-else function\ndef if_then_else(input, output1, output2):\n if input:\n return output1\n else:\n return output2\n\ndef create_gp(type):\n # defined a new primitive set for strongly typed GP\n pset = gp.PrimitiveSetTyped(\"MAIN\", itertools.repeat(float, 2), float)\n\n if type in (\"std\", \"vec\", \"mod\"):\n pset.addPrimitive(operator.add, [float, float], float)\n pset.addPrimitive(operator.sub, [float, float], float)\n pset.addPrimitive(protected_div, [float, float], float)\n\n if type == \"mul\":\n pset.addPrimitive(operator.add, [float, float], float)\n pset.addPrimitive(operator.sub, [float, float], float)\n pset.addPrimitive(operator.mul, [float, float], float)\n\n if type == \"log\":\n # boolean operators\n pset.addPrimitive(operator.and_, [bool, bool], bool)\n pset.addPrimitive(operator.or_, [bool, bool], bool)\n pset.addPrimitive(operator.not_, [bool], bool)\n pset.addPrimitive(operator.mul, [float, float], float)\n pset.addPrimitive(operator.lt, [float, float], bool)\n pset.addPrimitive(operator.eq, [float, float], bool)\n pset.addPrimitive(protected_div, [float, float], float)\n pset.addPrimitive(if_then_else, [bool, float, float], float)\n\n # terminals\n pset.addEphemeralConstant(\"rand100\", lambda: random.random() * 100, float)\n pset.addTerminal(False, bool)\n pset.addTerminal(True, bool)\n\n creator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n creator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMax)\n\n toolbox = base.Toolbox()\n toolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)\n toolbox.register(\"compile\", gp.compile, pset=pset)\n return toolbox\n\nif __name__ == \"__main__\":\n\n # Const variables\n local_dir = os.path.dirname(__file__)\n champ_path = os.path.join(local_dir, 'champions/')\n\n # Get input from terminal\n type, depth, range_val, champion, num_tests, generalize, noise = get_args()\n\n # Generate Data\n data_validation = generate_data(noise, depth, range_val, num_tests, generalize)\n labels_validation = generate_output(data_validation, type)\n actions_validation = generate_action(data_validation, type)\n \n # Create GP\n toolbox = create_gp(type)\n \n # Load Champion\n champ_name = champ_path + str(depth) + '_champions_' + type\n with open(champ_name, 'rb') as f:\n champions = pickle.load(f)\n print(\"loaded champions\")\n\n hof1, hof2, hof3, hof4 = champions[champion]\n\n \n # Running Test on unseen data and checking results\n print(\"\\n==================\")\n print(\"Begin Testing ....\")\n print(\"==================\\n\")\n\n # Transform the tree expression in a callable function\n tree1 = toolbox.compile(expr=hof1)\n tree2 = toolbox.compile(expr=hof2)\n tree3 = toolbox.compile(expr=hof3)\n tree4 
= toolbox.compile(expr=hof4)\n\n # Evaluate the sum of correctly identified\n predictions, predict_actions = [],[]\n # Evaluate the sum of correctly identified\n for i in range(num_tests):\n data = data_validation[i]\n MEMORY, classification, actions = [], [], []\n counter = 0\n length = len(data)\n for j in range(length):\n # If stack is empty then 0, else the value on top of stack\n stack_output = MEMORY[counter - 1] if counter > 0 else 0\n\n arg1 = tree1(data[j],stack_output)\n arg2 = tree2(data[j],stack_output)\n arg3 = tree3(data[j],stack_output)\n arg4 = tree4(data[j],stack_output)\n pos = np.argmax([arg1, arg2, arg3, arg4])\n\n # Action has been decided\n temp = 1 if stack_output >= 0 else -1\n actions.append(pos)\n if pos == 0:\n MEMORY.append(data[j])\n temp = data[j]\n counter += 1\n elif pos == 1:\n MEMORY.pop()\n counter -= 1\n stack_output = MEMORY[counter - 1] if counter > 0 else 0\n temp = 1 if stack_output >= 0 else -1\n else:\n temp = 1 if stack_output >= 0 else -1\n \n # Add to classification\n classification.append(temp)\n\n predictions.append(classification)\n predict_actions.append(actions)\n\n # Evaluate predictions\n total_accuracy = 0\n for i in range(num_tests):\n accuracy = accuracy_score(labels_validation[i], predictions[i])\n print(\"Test #{} Accuracy: {}\".format(i, accuracy))\n total_accuracy += accuracy\n \n print(\"------------------------\")\n print(\"Total Accuracy: {}\".format(total_accuracy/num_tests))","repo_name":"Mihyar-30614/Genetic-Programming-Benchmarking-Deep-Memory-Tasks","sub_path":"DEAP/Sequence Classification/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":10079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11584406338","text":"'''\nIntegrantes\nEnrique Emanuel Rezende Tavares da Silva - 11796090\nGuilherme Dias Jimenes - 11911021\nRonald Cosmo de Sousa - 11909783\n'''\n\nimport csv\nimport re\nimport random\nfrom copy import deepcopy\nfrom math import sqrt\n\n\ndef knn(training_data:list[dict], query_point:dict, num_of_neighbors:int):\n\t'''\n\tReturns a tuple where the first element is the euclidian distance between the query point and a training point\n\tand the second element is the index of the training point in the dataset\n\t'''\n\tdef make_distance_tuple(y, ind_y) : return (euclidian_dist(query_point, y), ind_y)\n\n\tdistances_from_query_point = list(\n\t\tmap(\n\t\t\tmake_distance_tuple,\n\t\t\ttraining_data,\n\t\t\trange(0, len(training_data))\n\t\t)\n\t)\n\n\tdef get_distance(dist_tuple) : return dist_tuple[0]\n\tdef get_point_index(dist_tuple) : return dist_tuple[1]\n\tdef get_point_from_dataset(point_index) : return training_data[point_index]\n\tdef get_class_of_point(point) : return point[\"a16\"]\n\n\tsorted_distances = sorted(distances_from_query_point, key=get_distance)\n\tk_nearest_neighbors_index = map(get_point_index, sorted_distances[:num_of_neighbors])\n\tk_nearest_neighbors = map(get_point_from_dataset, k_nearest_neighbors_index)\n\n\tknn_classes = list(map(get_class_of_point, k_nearest_neighbors))\n\tplus_class_occurrences = knn_classes.count(\"+\")\n\tminus_class_occurrences = knn_classes.count(\"-\")\n\treturn \"+\" if plus_class_occurrences > minus_class_occurrences else \"-\"\n\n\n'''\nCalculates the euclidian distance between two vectors (`a` and `b`)\n'''\ndef euclidian_dist(a:dict, b:dict) -> float:\n\tcols = list(a)\n\t# Removing class column because its value is a string\n\tcols.remove(\"a16\")\n\tsum_of_squared_diffs = 0\n\tfor col in cols:\n\t\tcomp_a = a[col]\n\t\tcomp_b = b[col]\n\t\tsum_of_squared_diffs = sum_of_squared_diffs + ( (comp_a - comp_b) ** 2 )\n\treturn sqrt(sum_of_squared_diffs)\n\n\n'''\nReturns a list of dicts corresponding to the dataset.\n\nFor example, from the following .csv:,\n\n\t\tfirst_name,last_name\n\t\tJohn, Cleese\n\t\tTerry, Gilliam\n\nthe first row of the dataset would look like this:\n\n\t{'first_name': 'John', 'last_name': 'Cleese'}\n\nAnd the whole dataset would look like this:\n\n\t[\n\t\t{'first_name': 'John', 'last_name': 'Cleese'} ,\n\t\t{'first_name': 'Terry', 'last_name': 'Gilliam'}\n\t]\n\n'''\ndef read_dataset():\n\tprint(\"Reading dataset `Credit Approval`\")\n\twith open('data/crx.data', 'r') as file:\n\t\treader = csv.DictReader(file)\n\t\tdata = []\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data\n\n'''\nRemove the dataset's NA (missing) values by looking which values are equal to `?`.\n'''\ndef remove_null(dataset):\n dado_limpo = []\n for row in dataset:\n # Check if the line contains any value with \"?\"\n if re.search(r'\\?', str(row.values())):\n continue\n dado_limpo.append(row)\n return dado_limpo\n\n\"\"\"\nConvert categorical attributes into dummy variables (one-hot encoding)\n\"\"\"\ndef one_hot_encoding(dataset: dict, column: str):\n\n\tcategories = set()\n\n\t# Discover categories\n\tfor row in dataset:\n\t\tif row[column] not in categories:\n\t\t\tcategories = categories | { row[column] }\n\n\t# create new column for each category discovered\n\tfor row in dataset:\n\t\tfor category in categories:\n\t\t\tnew_col_name = f\"{column}_{category}\"\n\t\t\tvalue = row[column]\n\t\t\trow[new_col_name] = int(value == category)\n\t\trow.pop(column)\n\n\treturn dataset\n\n\ndef 
one_hot_encode_all_columns(dataset) :\n\tprint(\"One-hot encoding columns a1, a4, a5, a6, a7, a10, a12, a13\")\n\tto_encode = deepcopy(dataset)\n\tone_hot_encoding(to_encode, \"a1\")\n\tone_hot_encoding(to_encode, \"a4\")\n\tone_hot_encoding(to_encode, \"a5\")\n\tone_hot_encoding(to_encode, \"a6\")\n\tone_hot_encoding(to_encode, \"a7\")\n\tone_hot_encoding(to_encode, \"a9\")\n\tone_hot_encoding(to_encode, \"a10\")\n\tone_hot_encoding(to_encode, \"a12\")\n\tone_hot_encoding(to_encode, \"a13\")\n\treturn to_encode\n\n\n\n\"\"\"\nDivides the data into a set of training data (70%) and a set of query data (30%).\nReturns a tuple (training_data, query_data)\n\"\"\"\ndef divide_data(dataset):\n\tprint(\"Dividing data intro training and query sets\")\n\tdataset_size = len(dataset)\n\ttarget_training_quantity = int( 0.7 * dataset_size )\n\ttraining_data = random.choices(dataset, k=target_training_quantity)\n\tquery_data = [row for row in dataset if row not in training_data]\n\treturn training_data, query_data\n\n\n\"\"\"\nNormalizes the dataset by diving each value on a column by the maximum value of\nthat column found in the dataset\n\"\"\"\ndef normalize_dataset(dataset):\n\tprint(\"Normalizing scale of columns with continuous numbers\")\n\tdataset = remove_null(dataset)\n\tmax_values = {}\n\tfor row in dataset:\n\t\tfor key, value in row.items():\n\t\t\ttry:\n\t\t\t\tvalue = float(value) #converts\n\t\t\texcept ValueError: #in case of conversion failure\n\t\t\t\tcontinue\n\t\t\tif key not in max_values or value > max_values[key]:\n\t\t\t\tmax_values[key] = value\n\n\tfor row in dataset:\n\t\tfor key, value in row.items():\n\t\t\ttry:\n\t\t\t\tvalue = float(value)\n\t\t\texcept ValueError:\n\t\t\t\tcontinue\n\t\t\trow[key] = value / max_values[key] #division\n\n\treturn dataset\n\n\"\"\"\nCalculates the accuracy of running k-NN\n\"\"\"\ndef accuracy(points, predicted_values) :\n\tprint(\"Calculating accuracy of k-NN implemented\")\n\tnum_of_points = len(points)\n\ttrue_predictions = 0\n\n\tfor point, prediction in zip(points, predicted_values):\n\t\t# a16 is the column name which contain the classes categories.\n\t\tif point[\"a16\"] == prediction:\n\t\t\ttrue_predictions = true_predictions + 1\n\n\treturn true_predictions / num_of_points\n\n\"\"\"\nRuns k-NN on the `Credit Approval` dataset, making sure that before running:\n\t1 - All null data is removed;\n\t2 - Categorical data is one-hot encoded\n\t3 - All numeric values are normalized\n\t4 - k-NN is trained on 70% of the full dataset\n\nAfter running the algorithm, it outputs to STDOUT the accuracy obtained from querying 30% of the data against the\ntraining data and comparing expected classes X predicted classes.\n\"\"\"\ndef main() :\n\tneighbors = 100\n\tdata = read_dataset()\n\tnormalized_data = normalize_dataset(data)\n\tencoded_data = one_hot_encode_all_columns(normalized_data)\n\ttraining_data, query_data = divide_data(encoded_data)\n\n\tpredictions = []\n\tfor query_point in query_data:\n\t\tpredicted_class = knn(training_data, query_point, neighbors)\n\t\tpredictions.append(predicted_class)\n\taccuracy_knn = accuracy(query_data, predictions)\n\n\tprint(f\"Accuracy of KNN with k={neighbors} is {accuracy_knn}\")\n\n\nif (__name__ == \"__main__\") : main()\n","repo_name":"Oiapokxui/tarefas-ia","sub_path":"tarefa1/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11948637723","text":"import PyPDF2\nimport sys\n# combined the 3 pdf\n# pdf.py dummy.pdf twopage.pdf tilt.pdf\n\n# inputs = sys.argv[1:]\n\n# def pdf_combiner(pdf_list):\n# for pdf in pdf_list:\n# print(pdf)\n\n# pdf_combiner(inputs)\n\n# PS C:\\Users\\Mohamed Bee\\Desktop\\Python_w_Udemy\\Section17_Scripting with Python\\PDF> python Exo.py dummy.pdf twopage.pdf tilt.pdf\n# output\n# dummy.pdf\n# twopage.pdf\n# tilt.pdf\n\n\n# that is bcz there is the merger obj.\n\n# inputs = sys.argv[1:]\n\n# def pdf_combiner(pdf_list):\n# merger=PyPDF2.PdfFileMerger()\n# for pdf in pdf_list:\n# print(pdf)\n# merger.append(pdf)\n# merger.write('super.pdf')\n \n# pdf_combiner(inputs)\n\n# type all that then enter\n# PS C:\\Users\\Mohamed Bee\\Desktop\\Python_w_Udemy\\Section17_Scripting with Python\\PDF> python Exo.py dummy.pdf twopage.pdf tilt.pdf\n\n# output\n# dummy.pdf\n# twopage.pdf\n# tilt.pdf\n# and then run the program\n\n\ntemplate = PyPDF2.PdfFileReader(open('super.pdf', 'rb'))\nwatermark = PyPDF2.PdfFileReader(open('wtr.pdf', 'rb'))\noutput= PyPDF2.PdfFileWriter()\n\nfor i in range(template.getNumPages()):\n page= template.getPage(i)\n page.mergePage(watermark.getPage(0))\n output.addPage(page)\n \n with open('watermarked_output.pdf', 'wb') as file:\n output.write(file)\n \n \n \n# output pages are watermarked\n ","repo_name":"MBee05/Section17_Scripting-with-Python","sub_path":"PDF/Exo.py","file_name":"Exo.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"32373363900","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance as scidist\nimport tqdm\nimport cwrap\nimport PDBloader\nimport eigen\n\n\ndef get_dmat(coords):\n dmat = scidist.pdist(coords)\n dmat = scidist.squareform(dmat)\n return dmat\n\n\ndef get_cmap(dmat, thr=8., sep_cut=2):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> coords = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> cmap = get_cmap(get_dmat(coords[:8]), sep_cut=0)\n >>> cmap\n array([[False, True, True, True, False, False, False, False],\n [ True, False, True, True, True, False, False, False],\n [ True, True, False, True, True, True, False, False],\n [ True, True, True, False, True, True, False, False],\n [False, True, True, True, False, True, True, False],\n [False, False, True, True, True, False, True, True],\n [False, False, False, False, True, True, False, True],\n [False, False, False, False, False, True, True, False]])\n >>> cmap = get_cmap(get_dmat(coords[:8]), sep_cut=2)\n >>> cmap\n array([[False, False, False, True, False, False, False, False],\n [False, False, False, False, True, False, False, False],\n [False, False, False, False, False, True, False, False],\n [ True, False, False, False, False, False, False, False],\n [False, True, False, False, False, False, False, False],\n [False, False, True, False, False, False, False, False],\n [False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False]])\n \"\"\"\n n, n = dmat.shape\n cmap = dmat <= thr\n for i in range(sep_cut + 1):\n mask = ~(np.logical_or(np.diag(np.ones(n - i, dtype=bool), k=i), np.diag(np.ones(n - i, dtype=bool), k=-i)))\n cmap = np.logical_and(cmap, mask)\n return cmap\n\n\ndef mapalign(cmap_a,\n cmap_b,\n sep_x_list=[0, 1, 2],\n sep_y_list=[1, 2, 3, 8, 16, 32],\n gap_e_list=[-0.2, -0.1, -0.01, -0.001],\n niter=20,\n progress=True,\n eigen_init=False,\n eigen_aln=False):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n\n # Few minutes to run. 
Uncomment the following to test it!\n >>> aln, score, sep_x_best, sep_y_best, gap_e_best = mapalign(cmap_a, cmap_b)\n >>> aln\n array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,\n 26, 27, 28, 29, 30, 31, 32, 33, 34, 44, 45, 46, 47,\n 48, 49, 51, 52, 53, 54, 55, 56, 57, 59, 60, 61, 103,\n 104, 105, 106, 107, 108, 109, 110, 111, 112, 119, 120, 121, 122,\n 123, 124, 125, 126, 127, 152, 153, 154, 155, 156, 157, 158, 159,\n 160, 161, 162, 163, 164, 165, 166, 167, 168, 169], dtype=int32)\n >>> aln.shape\n (88,)\n >>> score\n 407.2732985813753\n >>> sep_x_best, sep_y_best, gap_e_best\n (1, 16, -0.001)\n \"\"\"\n if eigen_aln:\n aln, score, gap_e_best = eigen.get_alignment(cmap_a,\n cmap_b,\n gap_extension_list=gap_e_list,\n niter=niter,\n progress=progress)\n sep_x_best, sep_y_best = None, None\n else:\n aln, score, sep_x_best, sep_y_best, gap_e_best = cwrap.get_alignment(cmap_a,\n cmap_b,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_extension_list=gap_e_list,\n niter=niter,\n progress=progress,\n eigen_init=eigen_init)\n return aln, score, sep_x_best, sep_y_best, gap_e_best\n\n\ndef get_aln_b(aln_a, nb):\n \"\"\"\n >>> aln_a = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln_a.shape\n (88,)\n >>> aln_b = get_aln_b(aln_a, 215)\n >>> aln_b\n array([ 2., 3., 4., 5., 8., 9., 12., 13., 14., 15., 16., 17., 18.,\n 19., 20., -1., -1., -1., -1., 21., 22., 23., 24., 25., -1., -1.,\n -1., -1., -1., -1., -1., 26., -1., -1., -1., -1., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., -1., -1.,\n -1., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52.,\n 53., 54., 55., 56., 57., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., 58.,\n 59., 60., 61., 62., 63., 64., 65., 66., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., 67., 68., 69., 70., 71., 72., -1., -1., -1., 73., 74.,\n 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1.])\n \"\"\"\n aln_b = -np.ones(nb)\n ai_aln = np.where(aln_a != -1)[0]\n bi_aln = aln_a[ai_aln]\n aln_b[bi_aln] = ai_aln\n return aln_b\n\n\ndef get_aligned_maps(cmap_a, cmap_b, aln, full=False):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n >>> aln = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 
31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln.shape\n (88,)\n\n Returns the maps aligned in the frame of cmap_a\n >>> cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln)\n >>> cmap_a_aln.shape\n (79, 79)\n >>> cmap_a_aln.shape\n (79, 79)\n\n Returns the maps aligned in the frame of cmap_b\n >>> cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln, full=True)\n >>> cmap_a_aln.shape\n (215, 215)\n >>> cmap_b_aln.shape\n (215, 215)\n \"\"\"\n na, na = cmap_a.shape\n nb, nb = cmap_b.shape\n ai_aln = np.where(aln != -1)[0]\n bi_aln = aln[ai_aln]\n if not full: # Only get the aligned parts\n cmap_a_aln = cmap_a[ai_aln, :][:, ai_aln]\n cmap_b_aln = cmap_b[bi_aln, :][:, bi_aln]\n else: # get the FULL matrices with zeros in insertion regions\n if na <= nb:\n cmap_a_aln = np.zeros_like(cmap_b)\n cmap_a_aln[:na, :na] = cmap_a\n cmap_a_aln[bi_aln, :] = cmap_a_aln[ai_aln, :]\n cmap_a_aln[:, bi_aln] = cmap_a_aln[:, ai_aln]\n cmap_b_aln = cmap_b\n else:\n cmap_a_aln = cmap_a\n cmap_b_aln = np.zeros_like(cmap_a)\n cmap_b_aln[:nb, :nb] = cmap_b\n cmap_b_aln[ai_aln, :] = cmap_b_aln[bi_aln, :]\n cmap_b_aln[:, ai_aln] = cmap_b_aln[:, bi_aln]\n return cmap_a_aln, cmap_b_aln\n\n\ndef get_score(cmap_a, cmap_b, aln):\n \"\"\"\n The score is the number of contacts common in the two maps aligned over the total number of contacts for cmap_a\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n >>> aln = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln.shape\n (88,)\n >>> score = get_score(cmap_a, cmap_b, aln)\n >>> score\n 0.5838926174496645\n \"\"\"\n cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln, full=False)\n comm = np.logical_and(cmap_a_aln, cmap_b_aln)\n score = comm.sum() / cmap_a.sum() # min(cmap_a.sum(), cmap_b.sum())\n return score\n\n\ndef plot_aln(cmap_a, cmap_b, aln, full=False, outfilename=None):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n >>> aln = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 
69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln.shape\n (88,)\n\n # >>> plot_aln(cmap_a, cmap_b, aln)\n # >>> plot_aln(cmap_a, cmap_b, aln, full=True)\n \"\"\"\n cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln, full=full)\n ai, aj = np.where(cmap_a_aln > 0)\n bi, bj = np.where(cmap_b_aln > 0)\n plt.scatter(bi, bj, s=16., c='gray', alpha=.5, label='cmap_b')\n plt.scatter(ai, aj, s=1., c='blue', label='cmap_a')\n plt.xticks([])\n plt.yticks([])\n plt.gca().set_aspect('equal', adjustable='box')\n plt.legend()\n if outfilename is not None:\n plt.savefig(outfilename)\n else:\n plt.show()\n\n\ndef batch_mapalign(cmap_a,\n logfilename,\n pdblist=[],\n pdbpath=None,\n num_workers=None,\n sep_x_list=[1],\n sep_y_list=[16],\n gap_e_list=[-0.001],\n eigen_init=False):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> cmap_a = get_cmap(dmat_a)\n >>> batch_mapalign(cmap_a, 'mapalign_batch.log', pdblist=['data/2pd0_A.pdb', 'data/3u97_A.pdb'])\n \"\"\"\n import torch\n import logging\n logging.basicConfig(filename=logfilename, level=logging.INFO, format='%(asctime)s: %(message)s')\n logging.info(f\"################ Starting {__file__} ################\")\n if num_workers is None:\n num_workers = os.cpu_count()\n logging.info(f\"num_workers: {num_workers}\")\n dataset = PDBloader.PDBdataset(pdbpath=pdbpath,\n pdblist=pdblist,\n cmap_a=cmap_a,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_e_list=gap_e_list,\n logfilename=logfilename,\n eigen_init=eigen_init)\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=1,\n shuffle=False,\n num_workers=num_workers,\n collate_fn=PDBloader.collate_fn,\n prefetch_factor=8)\n iterator = iter(dataloader)\n pbar = tqdm.tqdm(total=dataset.__len__())\n # for i, batch in enumerate(dataloader):\n for i in range(dataset.__len__()):\n try:\n batch = next(iterator)\n except RuntimeError:\n batch = [[(None, None, None, None, None)]]\n for b in batch:\n for chain_data in b:\n index, pdb, chain, score, native_contact = chain_data\n if index is not None:\n logging.info(f'{index} {pdb} {chain} {score:.4f} {native_contact:.4f}')\n pbar.update(1)\n pbar.close()\n\n\ndef log(msg):\n try:\n logging.info(msg)\n except NameError:\n pass\n\n\nif __name__ == '__main__':\n import sys\n import doctest\n import argparse\n from pymol import cmd\n # ### UNCOMMENT FOR LOGGING ####\n import os\n import PDBloader\n # ### ##################### ####\n # argparse.ArgumentParser(prog=None, usage=None, description=None, epilog=None, parents=[], formatter_class=argparse.HelpFormatter, prefix_chars='-', fromfile_prefix_chars=None, argument_default=None, conflict_handler='error', add_help=True, allow_abbrev=True, exit_on_error=True)\n parser = argparse.ArgumentParser(description='')\n # parser.add_argument(name or flags...[, action][, nargs][, const][, default][, type][, choices][, required][, help][, metavar][, dest])\n parser.add_argument('-p1', '--pdb1', help='First structure file to align on pdb2')\n parser.add_argument('-p2', '--pdb2', help='Second pdb file. Can give multiple pdbs', nargs='+')\n parser.add_argument(\n '-db',\n '--pdbpath',\n help=\n 'Path to the pdb database. 
See: https://github.com/bougui505/misc/blob/master/shell/updatePDB.sh to download the PDB'\n )\n parser.add_argument('-s1', '--sel1', required=False, default='all')\n parser.add_argument('-s2', '--sel2', required=False, default='all')\n parser.add_argument(\n '--sep_x',\n type=int,\n default=1,\n help=\n 'Parameter to compute the STD of the gaussian: s_std=sep_y*(1+(s_min-2)**sep_x), with s_min the min sequence separation for cmap_a and cmap_b of the considered contacts. (default=1)'\n )\n parser.add_argument(\n '--sep_y',\n type=int,\n default=16,\n help=\n 'Parameter to compute the STD of the gaussian: s_std=sep_y*(1+(s_min-2)**sep_x), with s_min the min sequence separation for cmap_a and cmap_b of the considered contacts. (default=16)'\n )\n parser.add_argument('--gap_e',\n type=float,\n default=-0.001,\n help='Gap extension penalty. MUST BE negative (default=-0.001).')\n parser.add_argument('--niter', help='Number of iterations (default 20)', default=20, type=int)\n parser.add_argument('--show', action='store_true', help='Show the contact map alignment')\n parser.add_argument('--save', help='Save the contact map alignment in the given filename')\n parser.add_argument('--full',\n action='store_true',\n help='Display the full contact map alignemnt. Not only the aligned contacts')\n parser.add_argument('--hpo', help='Hyperparameter optimization for sep_x, sep_y and gap_e', action='store_true')\n parser.add_argument(\n '--eigen_init',\n help=\n 'Initialize the scoring alignment matrix using eigenvector decomposition. Faster but less accurate (see: https://doi.org/10.1093/bioinformatics/btq402)',\n action='store_true')\n parser.add_argument(\n '--eigen_aln',\n help=\n 'Contact map alignment using alignment of eigen vectors. Even faster but less accurate (see: https://doi.org/10.1093/bioinformatics/btq402)',\n action='store_true')\n parser.add_argument('--test', help='Test the code', action='store_true')\n args = parser.parse_args()\n\n if args.test:\n doctest.testmod(optionflags=doctest.ELLIPSIS) # | doctest.REPORT_ONLY_FIRST_FAILURE)\n sys.exit()\n\n cmd.load(args.pdb1, 'A_')\n coords_a = cmd.get_coords(f'A_ and polymer.protein and name CA and {args.sel1}')\n dmat_a = get_dmat(coords_a)\n cmap_a = get_cmap(dmat_a)\n if args.hpo:\n sep_x_list = [0, 1, 2]\n sep_y_list = [1, 2, 3, 8, 16, 32]\n gap_e_list = [-0.2, -0.1, -0.01, -0.001]\n else:\n sep_x_list = [args.sep_x]\n sep_y_list = [args.sep_y]\n gap_e_list = [args.gap_e]\n if args.pdb2 is not None:\n if len(args.pdb2) == 1:\n import logging\n logfilename = os.path.splitext(os.path.basename(__file__))[0] + '.log'\n logging.basicConfig(filename=logfilename, level=logging.INFO, format='%(asctime)s: %(message)s')\n logging.info(f\"################ Starting {__file__} ################\")\n log(args.pdb1)\n log(args.pdb2)\n cmd.load(args.pdb2[0], 'B_')\n coords_b = cmd.get_coords(f'B_ and polymer.protein and name CA and {args.sel2}')\n dmat_b = get_dmat(coords_b)\n cmap_b = get_cmap(dmat_b)\n log(f'cmap_a.shape: {cmap_a.shape}')\n log(f'cmap_b.shape: {cmap_b.shape}')\n aln, score, sep_x_best, sep_y_best, gap_e_best = mapalign(cmap_a,\n cmap_b,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_e_list=gap_e_list,\n progress=args.hpo,\n eigen_init=args.eigen_init,\n eigen_aln=args.eigen_aln,\n niter=args.niter)\n if args.hpo:\n log(f'sep_x: {sep_x_best}')\n log(f'sep_y: {sep_y_best}')\n log(f'gap_e: {gap_e_best}')\n print(f'sep_x: {sep_x_best}')\n print(f'sep_y: {sep_y_best}')\n print(f'gap_e: {gap_e_best}')\n log(f'score: {score:.4f}')\n 
print(f'score: {score:.4f}')\n native_contacts_score = get_score(cmap_a, cmap_b, aln)\n log(f'native_contacts_score: {native_contacts_score:.4f}')\n print(f'native_contacts_score: {native_contacts_score:.4f}')\n if args.show or args.save is not None:\n plot_aln(cmap_a, cmap_b, aln, full=args.full, outfilename=args.save)\n # >>> sep_x_best, sep_y_best, gap_e_best\n # (2, 16, -0.001)\n elif args.pdb2 is not None:\n batch_mapalign(cmap_a,\n f'mapalign_{os.path.basename(os.path.splitext(args.pdb1)[0])}.log',\n pdblist=args.pdb2,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_e_list=gap_e_list,\n eigen_init=args.eigen_init)\n elif args.pdbpath is not None:\n batch_mapalign(cmap_a,\n f'mapalign_{os.path.basename(os.path.splitext(args.pdb1)[0])}.log',\n pdbpath=args.pdbpath,\n eigen_init=args.eigen_init)\n","repo_name":"bougui505/misc","sub_path":"python/mapalign/mapalign.py","file_name":"mapalign.py","file_ext":"py","file_size_in_byte":21453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"74942696894","text":"# Standard library imports\nimport os\nfrom tempfile import NamedTemporaryFile\nfrom uuid import uuid4\nfrom itertools import islice\n\n# Third party imports\nimport pandas as pd\n\n\ndef df_to_table(df,\n table,\n write_disposition='WRITE_EMPTY',\n blocking=True):\n \"\"\"Upload a Pandas DataFrame to Google BigQuery\n\n Args:\n df (DataFrame): The Pandas DataFrame to be uploaded.\n table (google.cloud.bigquery.Table): BigQuery table object.\n write_disposition (str): Either 'WRITE_EMPTY', 'WRITE_TRUNCATE', or\n 'WRITE_APPEND'; the default is 'WRITE_EMPTY'.\n blocking (bool): Set to False if you don't want to block until the job\n is complete.\n\n Returns:\n google.cloud.bigquery.Job: The file upload job object. If you have set\n blocking=False, this can be used to check for job completion.\n \"\"\"\n # Two annoyances here:\n # 1) df.to_csv() requires a non binary mode file handle, whereas\n # table.upload_from_file() requires a binary mode file handle, so\n # we can't reuse the same file handle in read/write mode.\n # 2) Windows won't allow reading from a temporary file whilst it's\n # still open (see robfraz/gbq-pandas issue #2), so we can't use\n # context handlers to auto-close (and therefore delete) the temporary\n # file that we write to.\n\n writebuf = NamedTemporaryFile(mode='w',\n encoding='UTF-8',\n prefix=\"df_to_table_\",\n suffix=\".csv\",\n delete=False) # robfraz/gbq-pandas issue #2\n\n try:\n df.to_csv(writebuf, index=False, encoding='UTF-8')\n writebuf.flush()\n writebuf.close()\n\n with open(writebuf.name, mode='rb') as readbuf:\n job = table.upload_from_file(readbuf,\n encoding='UTF-8',\n source_format='CSV',\n skip_leading_rows=1,\n create_disposition='CREATE_IF_NEEDED',\n write_disposition=write_disposition)\n finally:\n os.remove(writebuf.name)\n\n if blocking:\n job.result()\n\n return job\n\n\ndef query_to_df(sql, client):\n \"\"\"Run a Google BigQuery query, and return the result in a Pandas Dataframe\n\n The query must be a single SQL statement\n\n Args:\n sql (str): A string containing a single SQL statement.\n client (google.cloud.bigquery.Client): BigQuery client object.\n\n Returns\n DataFrame: A Pandas DataFrame containing the result of the query.\n \"\"\"\n job = client.run_async_query(str(uuid4()), sql)\n job.use_legacy_sql = False\n result = job.result()\n return table_to_df(result.destination)\n\n\ndef table_to_df(table, limit=None):\n \"\"\"Download a table from Google BigQuery into a dataframe, with optional row limit\n\n Args:\n table (google.cloud.bigquery.Table): BigQuery table object.\n limit (None|int): The default is limit=None (i.e. all rows in table); set to\n zero to get an empty DataFrame with the column names set, or a positive\n number to limit the maximum number of rows fetched into the DataFrame.\n\n Returns:\n DataFrame: A Pandas DataFrame containing the table data.\n \"\"\"\n if limit and limit < 0:\n limit = None\n\n table.reload()\n return pd.DataFrame(data=list(islice(table.fetch_data(), 0, limit)),\n columns=[column.name for column in table.schema])\n","repo_name":"robfraz/gbq-pandas","sub_path":"gbq_pandas.py","file_name":"gbq_pandas.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"3306275687","text":"from maths import norm, length\nimport numpy as np\n\n\nclass CollisionInfo:\n def __init__(self, did_hit, location, normal):\n self.did_hit = did_hit\n self.location = location\n self.normal = normal\n\n\nclass Ray:\n def __init__(self, origin, direction, emitted_brightness=0.0, gen=0):\n self.origin = origin\n self.direction = norm(direction)\n self.colour = np.ones(3)\n self.emitted_brightness = emitted_brightness\n self.gen = gen\n self.MAX_BOUNCE = 100\n\n def trace(self, scene):\n if self.gen > self.MAX_BOUNCE:\n return self.colour * self.emitted_brightness\n\n min_collision_dist = np.inf\n closest_collision = None\n for object in scene:\n collision_info = object.collision(self)\n if collision_info.did_hit:\n dist_of_collision = length(collision_info.location - self.origin)\n if dist_of_collision < min_collision_dist:\n closest_collision = collision_info\n min_collision_dist = dist_of_collision\n closest_collision_material = object.material\n\n if closest_collision is not None:\n self.colour *= closest_collision_material.colour\n\n new_ray_dir = closest_collision_material.reflect(\n self.direction, closest_collision.normal\n )\n\n reflected_ray = Ray(\n closest_collision.location,\n new_ray_dir,\n emitted_brightness=closest_collision_material.emissivity,\n gen=self.gen + 1,\n )\n self.colour *= reflected_ray.trace(scene)\n self.emitted_brightness = reflected_ray.emitted_brightness\n\n return self.colour * self.emitted_brightness\n","repo_name":"franklinscudder/RayTracer","sub_path":"rays.py","file_name":"rays.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14144170021","text":"import os\nfrom setuptools import setup, find_packages\n\n# get long_description from README.md\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# get install requirements\nwith open('requirements.txt') as fh:\n install_requires = fh.read().splitlines()\n\n# get version\nwith open('version.txt') as fh:\n version = fh.read().strip()[1:]\n\n# list of all utility scripts to be included with package\nscripts=[os.path.join('utils',f) for f in os.listdir('utils') if f.endswith('.py')]\n\nsetup(\n name='sliderule',\n author='SlideRule Developers',\n description='Python client for interacting with sliderule server',\n long_description_content_type=\"text/markdown\",\n url='https://github.com/ICESat2-SlideRule/sliderule/',\n license='BSD 3-Clause',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Physics',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n ],\n packages=find_packages(),\n version=version,\n install_requires=install_requires,\n scripts=scripts,\n)\n","repo_name":"ICESat2-SlideRule/sliderule","sub_path":"clients/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"79"}
+{"seq_id":"35292065010","text":"class VendingMachine:\n def __init__(self):\n self.state = 'Idle'\n self.juices = {'PEPS': 30, 'MOUN': 30, 'DPEP': 50, 'COKE': 20, 'GATO': 20, 'DCOK': 30, 'MINM': 25, 'TROP': 30}\n self.stock = {juice: 1 for juice in self.juices.keys()}\n\n def run(self):\n while True:\n if self.state == 'Idle':\n self.idle_state()\n elif self.state == 'Dispensing':\n self.dispensing_state()\n elif self.state == 'InsufficientFunds':\n self.insufficient_funds_state()\n elif self.state == 'OutOfStock':\n self.out_of_stock_state()\n elif self.state == 'RefillPrompt':\n self.refill_prompt_state()\n elif self.state == 'Refill':\n self.refill_state()\n\n def idle_state(self):\n print(\"Welcome to the vending machine!\")\n print(\"List of drinks:\")\n for juice, price in self.juices.items():\n print(f\"{juice} - ${price}\")\n\n user_input = input(\"Enter the four-letter code for your drink: \")\n if user_input.lower()=='refill':\n self.state = 'Refill'\n\n elif user_input in self.juices:\n if self.stock[user_input] > 0:\n cost = self.juices[user_input]\n amount = float(input(\"Enter the amount of money you will feed: \"))\n if amount == cost:\n print(\"Dispensing drink...\")\n self.stock[user_input] -= 1\n self.state = 'Dispensing'\n elif amount < cost:\n self.state = 'InsufficientFunds'\n else:\n change = amount - cost\n print(f\"Dispensing drink and returning ${change} in change.\")\n self.stock[user_input] -= 1\n self.state = 'Dispensing'\n elif sum(self.stock.values())==0:\n self.state = 'RefillPrompt'\n else:\n self.state = 'OutOfStock' \n else: \n print(\"Invalid input. Please try again.\")\n\n def dispensing_state(self):\n print(\"Enjoy your drink!\")\n self.state = 'Idle'\n\n def insufficient_funds_state(self):\n print(\"The entered amount is less than the cost. Please enter a sufficient amount.\")\n self.state = 'Idle'\n\n def out_of_stock_state(self):\n print(\"Selected juice is out of stock. Please choose another drink.\")\n self.state = 'Idle'\n\n def refill_prompt_state(self):\n print(\"Please refill all the juices.\")\n self.state = 'Idle'\n\n def refill_state(self):\n print(\"Vending Machine has been refilled...\")\n self.stock = {juice: 1 for juice in self.juices.keys()}\n self.state = 'Idle'\n\n# Run the vending machine\nmachine = VendingMachine()\nmachine.run()\n","repo_name":"sanchitgarg2204/sanchitgarg2204.github.io","sub_path":"fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29412090844","text":"class Solution:\n def intToRoman(self, num: int) -> str:\n values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\n strs = [\"M\", \"CM\", \"D\", \"CD\", \"C\", \"XC\", \"L\", \"XL\", \"X\", \"IX\", \"V\", \"IV\", \"I\"]\n sb = \"\"\n for i in range(len(values)):\n while num >= values[i]:\n num -= values[i]\n sb += strs[i]\n return sb","repo_name":"chandlerche/dailyLeetCode","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"18895842674","text":"import numpy as np\nfrom utils import *\nfrom file_option_name_memo import *\n \n\ndef create_word_sequence(file_name_option,valid_list,grammar):\n _,_,_,w,_,_,_,_,_= create_dataloader(500, file_name_option, valid_list)\n truth_T0, truth_T = grammar_list[grammar].values()\n D,N_max = w.to('cpu').detach().numpy().copy().shape\n truth_F = np.zeros_like(w,dtype=np.int8)\n N = np.zeros(D,dtype=np.int8)\n total_w_num = 0\n for d in range(D):\n truth_F[d][0] = np.random.choice(N_max,p=truth_T0)\n w[d][0] = w[d][truth_F[d][0]]\n N[d] += 1\n total_w_num += 1\n for n in range(1,N_max):\n truth_F[d][n] = np.random.choice(N_max+1,p=truth_T[truth_F[d][n-1]])\n if truth_F[d][n] == N_max:\n w[d][n] = -1\n else:\n w[d][n] = w[d][truth_F[d][n]]\n N[d] += 1\n total_w_num += 1\n return D, N, w, truth_F","repo_name":"YutaMatsui-1122/CSL-VAE","sub_path":"create_word_sequence.py","file_name":"create_word_sequence.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11400197862","text":"import os\r\n\r\n\r\n# function to make a new database\r\ndef make_db(name):\r\n # lists local folder content\r\n local_folder_content = os.listdir()\r\n # looks if the database you are looking for exists\r\n if str(name) in local_folder_content:\r\n # if your db wasn't found this will be printed out\r\n return print(\"Database with this name already exists!\")\r\n else:\r\n # makes a new db file\r\n db_name = (str(name) + \".py\")\r\n with open(db_name, \"w+\") as db:\r\n db.close()\r\n return print(\"New database made!\")\r\n\r\n\r\n# read info from database\r\ndef read_db(name, print_content=False):\r\n try:\r\n db_name = (str(name) + \".py\")\r\n # opens your desired database\r\n with open(str(db_name), \"r\") as db:\r\n\r\n # reads db content\r\n db_content = db.read()\r\n\r\n if print_content:\r\n print(str(db_content))\r\n # closes the db\r\n db.close()\r\n return db_content\r\n except:\r\n print(\"Failed to get db content\")\r\n\r\n\r\ndef write_entry(name, user_name, user_id, user_age, user_bio, user_adinfo):\r\n\r\n\r\n # looks if the database you are looking for exists\r\n\r\n\r\n try:\r\n db_name = (str(name) + \".py\")\r\n\r\n # opens your desired database\r\n\r\n with open(str(db_name), \"r\") as db:\r\n # reads db content\r\n\r\n db_content = db.read()\r\n # makes a new dictionary for the user\r\n\r\n db.close()\r\n\r\n db = open(str(db_name), \"w\")\r\n user_dict = {\r\n \"Username\": str(user_name),\r\n \"UID\": int(user_id),\r\n \"Age\": int(user_age),\r\n \"Biography\": str(user_bio),\r\n \"Other\": str(user_adinfo)\r\n }\r\n\r\n db_to_write = str(db_content) + \"\\n\" + str(user_name) + \" = \" + str(user_dict)\r\n # writes user's data to your db\r\n db.write(str(db_to_write))\r\n print(\"new entry written to db\")\r\n db.close()\r\n except:\r\n print(\"Failed to write to db\")\r\n\r\n#read desired user's info\r\n\r\ndef read_user_info(name, username):\r\n try:\r\n db_name = (str(name) + \".py\")\r\n db = open(str(db_name), \"r\")\r\n db_content = db.read()\r\n db.close()\r\n info1 = str(db_content.split(f\"{username} = \"))\r\n info2 = info1.split(\"}\")\r\n info3 = info2[0].split(\"{\")\r\n data = info3[1]\r\n\r\n\r\n\r\n return data\r\n except:\r\n return print(\"Failed to get desired user's info\")","repo_name":"yourdarl1ng/mw-database","sub_path":"mw_database.py","file_name":"mw_database.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"15241905309","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport models.blocks as blocks\r\nfrom tf_p_inv import p_inv\r\nfrom constants import *\r\n\r\n\r\ndef hex_proj(a, g, params):\r\n with tf.variable_scope(\"hex_proj\", reuse=tf.AUTO_REUSE):\r\n if params['hex_final_dim'] < params['batch_size']:\r\n l = a - tf.matmul(tf.matmul(tf.matmul(g, p_inv(tf.matmul(g, g, transpose_a=True))),g, transpose_b=True), a) \r\n else:\r\n small_identity = params['small_id'] * tf.eye(params['hex_final_dim'])\r\n l = a - tf.matmul(tf.matmul(tf.matmul(g, p_inv(tf.matmul(g, g, transpose_a=True) + small_identity)),g, transpose_b=True), a) \r\n\r\n return l\r\n\r\n\r\ndef hex_classifier(h, g, phs, params):\r\n \"\"\"Input: [h,g] or [h,0] or [0,g], Output: the layer before the linear layer of softmax\"\"\"\r\n with tf.variable_scope(\"hex_classifier\", reuse=tf.AUTO_REUSE):\r\n keep_rate, stop_grad, _ = phs\r\n inp = tf.concat([h,g], -1)\r\n h_mlp = tf.layers.dense(inp, params['nli_mlp_dim'], tf.nn.relu)\r\n if params['hex_dropout']:\r\n h_drop = tf.nn.dropout(h_mlp, keep_rate)\r\n else:\r\n h_drop = h_mlp\r\n h_drop = tf.layers.dense(h_drop, params['hex_final_dim'])\r\n return h_drop\r\n\r\n\r\ndef hex_softmax(f, params):\r\n if params['final_linear']:\r\n with tf.variable_scope(\"hex_softmax\", reuse=tf.AUTO_REUSE):\r\n logits = tf.layers.dense(f, 3)\r\n return logits\r\n else:\r\n return f\r\n\r\nclass HEX(object):\r\n def __init__(self, params):\r\n if params['hex_share_emb'] == False:\r\n with tf.variable_scope(\"hex_embed\", reuse=tf.AUTO_REUSE):\r\n self.embeddings = tf.Variable(params['embeddings'], trainable=params['emb_train'], name='E')\r\n if params['self_att']:\r\n self.construct_hex_vec = self.construct_hex_vec_selfatt\r\n else:\r\n self.construct_hex_vec = self.construct_hex_vec_simple\r\n \r\n\r\n\r\n def share_emb(self, embeddings):\r\n self.embeddings = embeddings\r\n\r\n\r\n def construct_hex_vec_simple(self, inputs, params, phs):\r\n keep_rate, stop_grad, _ = phs\r\n\r\n premise_x, hypothesis_x = inputs\r\n\r\n with tf.variable_scope(\"hex_superficial\", reuse=tf.AUTO_REUSE):\r\n\r\n ## Calculate representaitons by CBOW method\r\n emb_premise = tf.nn.embedding_lookup(self.embeddings, premise_x) \r\n emb_premise_drop = tf.nn.dropout(emb_premise, keep_rate)\r\n\r\n emb_hypothesis = tf.nn.embedding_lookup(self.embeddings, hypothesis_x)\r\n emb_hypothesis_drop = tf.nn.dropout(emb_hypothesis, keep_rate)\r\n\r\n premise_rep = tf.reduce_sum(emb_premise_drop, 1)\r\n hypothesis_rep = tf.reduce_sum(emb_hypothesis_drop, 1)\r\n\r\n ## Combinations\r\n h_diff = premise_rep - hypothesis_rep\r\n h_mul = premise_rep * hypothesis_rep\r\n\r\n ### MLP\r\n mlp_input = tf.concat([premise_rep, hypothesis_rep, h_diff, h_mul], 1)\r\n\r\n superficial_output = tf.layers.dense(mlp_input, 100)\r\n return premise_rep, hypothesis_rep, mlp_input\r\n\r\n def construct_hex_vec_selfatt(self, inputs, params, phs):\r\n keep_rate, stop_grad, _ = phs\r\n\r\n premise_x, hypothesis_x = inputs\r\n\r\n with tf.variable_scope(\"hex_superficial_selfatt\", reuse=tf.AUTO_REUSE):\r\n\r\n emb_premise = tf.nn.embedding_lookup(self.embeddings, premise_x) \r\n emb_premise_drop = tf.nn.dropout(emb_premise, keep_rate)\r\n\r\n emb_hypothesis = tf.nn.embedding_lookup(self.embeddings, hypothesis_x)\r\n emb_hypothesis_drop = tf.nn.dropout(emb_hypothesis, keep_rate)\r\n\r\n prem_seq_lengths, prem_mask = blocks.length(premise_x)\r\n hyp_seq_lengths, hyp_mask = blocks.length(hypothesis_x)\r\n\r\n prem_self_att= 
blocks.simple_self_attention_block(emb_premise_drop, params['dim_emb'], prem_seq_lengths, prem_mask, scope = 'superficial_prem_self_att')\r\n hypo_self_att= blocks.simple_self_attention_block(emb_hypothesis_drop, params['dim_emb'], hyp_seq_lengths, hyp_mask, scope = 'superficial_hypo_self_att')\r\n\r\n\r\n premise_rep = tf.reduce_sum(prem_self_att, 1)\r\n hypothesis_rep = tf.reduce_sum(hypo_self_att, 1)\r\n\r\n ## Combinations\r\n h_diff = premise_rep - hypothesis_rep\r\n h_mul = premise_rep * hypothesis_rep\r\n\r\n ### MLP\r\n mlp_input = tf.concat([premise_rep, hypothesis_rep, h_diff, h_mul], 1)\r\n return premise_rep, hypothesis_rep, mlp_input\r\n","repo_name":"owenzx/LexicalDebias-ACL2020","sub_path":"models/hex.py","file_name":"hex.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"}
+{"seq_id":"30245716562","text":"from discord.ext import commands\nimport traceback\nimport aiotrello\nimport datetime\nimport discord\n\n\nclass Suggest(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.trello = aiotrello.Trello(\n key=self.bot.config['trellokey'], token=self.bot.config['trellotoken'])\n\n @commands.command(name='suggest', description='Suggest a feature')\n @commands.cooldown(1, 600, commands.BucketType.user)\n async def suggestcmd(self, ctx, *, suggestion: str):\n if suggestion is None:\n await ctx.error('You can\\'t suggest nothing!')\n else:\n board = await self.trello.get_board(lambda b: b.name == 'Fire')\n suggestions = await board.get_list(lambda l: l.name == 'Suggestions')\n card = await suggestions.create_card(suggestion, f'Suggested by {ctx.author.name} ({ctx.author.id})')\n now = datetime.datetime.now(datetime.timezone.utc).strftime(\n '%d/%m/%Y @ %I:%M:%S %p')\n await card.add_comment(f'Suggested in channel {ctx.channel.name} ({ctx.channel.id}) in guild {ctx.guild.name} ({ctx.guild.id}) at {now} UTC')\n await ctx.success(f'Thanks! Your suggestion was added to the Trello @ <{card.url}>. Make sure to check it every now and then for a response.')\n\n\ndef setup(bot):\n try:\n bot.add_cog(Suggest(bot))\n bot.logger.info(f'$GREENLoaded $CYAN\"suggest\" $GREENcommand!')\n except Exception as e:\n bot.logger.error(\n f'$REDError while adding command $CYAN\"suggest\"', exc_info=e)\n","repo_name":"0xacn/bot","sub_path":"commands/suggest.py","file_name":"suggest.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"}
+{"seq_id":"2831431504","text":"if __name__ == '__main__':\n n = int(input())\n \n listOfNumber = []\n for i in range(1, n+1):\n listOfNumber.append(i)\n \n result = ''.join(map(str, listOfNumber))\n print(result)\n\n\n# Hacker Rank Task\n# The included code stub will read an integer, n , from STDIN.\n# Without using any string methods, try to print the following:\n# 1234...n\n# Note that \"...\" represents the consecutive values in between.\n# Example:\n# n = 5\n\n# Print the string 12345","repo_name":"raihan-tajdid007/hackerRank-prob-solving","sub_path":"printFunction.py","file_name":"printFunction.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"27424267018","text":"import cv2\nimport numpy as np\nfrom line import Line\nfrom abc import ABCMeta, abstractmethod\n\ndef getArea(line):\n\treturn line.area\n\nclass ILineDetector(metaclass=ABCMeta):\n\t\"\"\"\n\t\tClass for line detection and filtering\n\t\"\"\"\n\t@abstractmethod\n\tdef __init__(self, algorithm=None, filtering_criteria=None, quantity=None):\n\t\t\"\"\"\n\t\t\tConstructor identifies the detection specifications\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\talgorithm ———> algorithm to find all lines in the canny frame (default=HOUGH_LINES)\n\t\t\tfiltering_criteria ———> Array of filtering Constants\n\t\t\tquantity ———> filtering by area as an optional excess filtering step\n\t\t\"\"\"\n\t\tpass\n\n\t@abstractmethod\n\tdef xExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the leftmost and rightmost vertical lines\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\t\t\t\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\tpass\n\n\t@abstractmethod\n\tdef yExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the top and bottom horizontal lines\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\t\t\t\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\tpass\n\n\t@abstractmethod\n\tdef run(self, frame):\n\t\t\"\"\"\n\t\t\tThis function does the following:-\n\t\t\t1- Creates the canny version of the frame\n\t\t\t2- Extracts all lines according to the specified algorithm\n\t\t\t3- Applies the desired filtering criterion\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tframe ———> Workpiece frame\n\t\t\"\"\"\n\t\tpass\n\nclass LineDetector(ILineDetector):\n\t\"\"\"\n\t\tClass builder for extracting lines from a frame\n\t\tDependencies\n ————————————\n\t\t- ImageManipulator\n\n\t\tAll Dynamic Variables\n\t\t————————————————————\n\t\tself.__algorithm ———> Hough detection or contours\n\t\tself.__minLength ———> The minimum length of a line\n\t\tself.__quantity ———> The minimum length of a line\n\t\tself.__filtering ———> Filtering Criteria\n\t\tself._horizontals ———> Horizontal Lines after eliminating redundancies\n\t\tself._verticals ———> Vertical Lines after eliminating redundancies\n\n\t\tAll Static Variables\n\t\t————————————————————\n\t\t—) For Algorithms\n\t\t\t1. CONTOURS\n\t\t\t2. HOUGH\n\n\t\t—) For Filtering\n\t\t\t1. XEXTREMES\n\t\t\t2. YEXTREMES\n\t\t\t3. ANGLE\n\t\t\t4. HORIZONTALS\n\t\t\t5. 
VERTICALS\n\t\"\"\"\n\tCONTOURS = 1\n\tHOUGH = 2\n\t\n\tXEXTREMES=1\n\tYEXTREMES=2\n\tANGLE=3 #TODO\n\tHORIZONTALS=4\n\tVERTICALS=5\n\n\n\tdef __init__(self, algorithm=None, filtering_criteria=None, quantity=None):\n\t\t\"\"\"\n\t\t\tConstructor identifies the detection specifications\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\talgorithm ———> algorithm to find all lines in the canny frame (default=HOUGH_LINES)\n\t\t\tfiltering_criteria ———> list of filtering sequences\n\t\t\tquantity ———> filtering by area as an optional excess filtering step\n\t\t\"\"\"\n\t\tself.__algorithm = algorithm\n\t\tself.minLength = 1\n\t\tself.minLineDistance = 20\n\n\t\tself.__quantity = quantity\n\t\tself.__filtering = filtering_criteria\n\n\tdef _toCanny(frame):\n\t\t\"\"\"\n\t\t\t#TODO : use salama's class\n\t\t\tfunction constructs the canny version of a frame\n\t\t\t:param frame: workpiece frame \n\t\t\t:return: canny version\n\t\t\"\"\"\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\terosion = cv2.erode(gray, (5, 5), iterations=1)\n\t\tcanny = cv2.Canny(erosion, 120, 80)\n\t\tcv2.imshow('canny', canny)\n\t\treturn canny\t\t\n\n\tdef _houghAlgorithm(self, canny):\n\t\tlines = cv2.HoughLinesP(canny, rho=1, theta=np.pi/180.0, threshold=5,minLineLength=self.minLength, maxLineGap=5)\n\t\tresult = []\n\t\ttry:\n\t\t\tfor line in lines:\n\t\t\t\tresult.append(Line(line))\n\t\texcept:\n\t\t\tpass\n\t\treturn result\n\n\tdef _contoursAlgorithm(self, contours, frame):\n\t\t\"\"\"\n\t\t\tMethod filters the found contours to return only those representing lines\n\t\t\"\"\"\n\t\tresult = []\n\t\tfor contour in contours:\n\t\t\t[vx,vy,x,y] = cv2.fitLine(contour, cv2.DIST_L2,0,0.01,0.01)\n\t\t\tthis_line = Line([(int(x), int(y)), (int(vx*2), int(vy*2))], cv2.contourArea(contour)).draw(frame)\n\t\t\tresult.append(this_line)\n\t\t\t# print(contour)\n\t\t\t# temp = contour.tolist()\n\t\t\t# list_version = []\n\t\t\t# for cnt in temp:\n\t\t\t# \tlist_version.append(cnt[0])\n\t\t\t# pts = [(list_version[0][0], list_version[0][1]), (list_version[-1][0], list_version[-1][1])]\n\t\t\t# this_line = Line(pts, cv2.contourArea(contour))\n\t\t\t\n\t\t\t# if this_line.length() > self.minLength:\n\t\t\t# \tresult.append(this_line)\n\t\treturn result\n\n\tdef _eliminateRedundancies(self, lines):\n\t\t\"\"\"\n\t\t\tAyman Optimized gedan here\n\t\t\"\"\"\n\t\toriginalTolerances = (Line.horizontalTolerance, Line.verticalTolerance)\n\t\tLine.horizontalTolerance = 1\n\t\tLine.verticalTolerance = 1\n\n\t\tself._verticals = LineDetector.__filterVerticals(lines)\n\t\tself._horizontals = LineDetector.__filterHorizontals(lines)\n\t\t\n\t\tlength = len(self._verticals)\n\t\ti = 0\n\t\twhile i < length:\n\t\t\tj = i+1\n\t\t\twhile j < length:\n\t\t\t\tif abs(self._verticals[i].perpDistance(self._verticals[j])) < self.minLineDistance:\n\t\t\t\t\tself._verticals.remove(self._verticals[j])\n\t\t\t\t\tlength -= 1\t\n\t\t\t\tj += 1\n\t\t\ti += 1\t\t\n\n\t\tlength = len(self._horizontals)\n\t\ti = 0\n\t\twhile i < length:\n\t\t\tj = i+1\n\t\t\twhile j < length:\n\t\t\t\tif abs(self._horizontals[i].perpDistance(self._horizontals[j])) < self.minLineDistance:\n\t\t\t\t\tself._horizontals.remove(self._horizontals[j])\n\t\t\t\t\tlength -= 1\t\n\t\t\t\tj += 1\n\t\t\ti += 1\n\n\t\tLine.horizontalTolerance = originalTolerances[0]\n\t\tLine.verticalTolerance = originalTolerances[1]\n\n\tdef xExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the leftmost and rightmost vertical lines\n\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> 
list of all found lines (list of line objects)\n\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\tleftmost = None\n\t\trightmost = None\n\t\tfor line in lines:\n\t\t\tif line.isVertical():\n\t\t\t\tif not leftmost or line.pts[0][0] < leftmost.pts[0][0] - 10:\n\t\t\t\t\tleftmost = line\n\n\t\t\t\tif not rightmost or line.pts[0][0] > rightmost.pts[0][0] + 10:\n\t\t\t\t\trightmost = line\n\n\t\treturn [leftmost, rightmost]\t\n\n\tdef yExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the top and bottom horizontal lines\n\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\ttopmost = None\n\t\tbottommost = None\n\t\tfor line in lines:\n\t\t\tif line.isHorizontal():\n\t\t\t\tif not topmost or line.pts[0][1] < topmost.pts[0][1] - 10:\n\t\t\t\t\ttopmost = line\n\n\t\t\t\tif not bottommost or line.pts[0][0] > bottommost.pts[0][0] + 10:\n\t\t\t\t\tbottommost = line\n\n\t\treturn [topmost, bottommost]\t\t\t\n\n\tdef run(self, frame):\n\t\t\"\"\"\n\t\t\tThis function does the following:-\n\t\t\t\t1- Creates the canny version of the frame\n\t\t\t\t2- Extracts all lines according to the specified algorithm\n\t\t\t\t3- Applies the desired filtering criterion\n\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tframe ———> Workpiece frame\n\t\t\"\"\"\n\t\tcanny = LineDetector.__toCanny(frame)\n\t\t# Detection\n\t\tif self.__algorithm == LineDetector.HOUGH:\n\t\t\tresult = self.__houghAlgorithm(canny)\n\t\telif self.__algorithm == LineDetector.CONTOURS:\n\t\t\tcontours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t\t\tresult = self.__contoursAlgorithm(contours, frame)\n\t\tprint(result)\n\t\tself.__eliminateRedundancies(result)\n\t\tprint(result)\n\t\t# Now you have the lines stored in self._horizontals and self._verticals\n\t\t# Filtering\n\t\t\n\t\tif self.__filtering != None:\n\t\t\tresult = []\n\t\t\tfor sequence in self._filtering:\n\t\t\t\tsequence_result = None\n\t\t\t\tfor criterion in sequence:\n\t\t\t\t\tif sequence_result is None:\n\t\t\t\t\t\tsequence_result = []\n\t\t\t\t\t\tsequence_result.extend(self.__horizontals)\n\t\t\t\t\t\tsequence_result.extend(self.__verticals)\n\t\t\t\t\n\t\t\t\t\tif criterion == LineDetector.XEXTREMES:\n\t\t\t\t\t\tsequence_result = self.xExtremes(sequence_result)\n\t\t\t\t\n\t\t\t\t\telif criterion == LineDetector.YEXTREMES:\n\t\t\t\t\t\tsequence_result = self.yExtremes(sequence_result)\n\n\t\t\t\t\telif criterion == LineDetector.VERTICALS:\n\t\t\t\t\t\tsequence_result = LineDetector.__filterVerticals(sequence_result)\n\n\t\t\t\t\telif criterion == LineDetector.HORIZONTALS:\n\t\t\t\t\t\tsequence_result = LineDetector.__filterHorizontals(sequence_result)\n\t\t\t\tresult.extend(sequence_result)\n\t\t\n\t\tif self.__quantity:\n\t\t\t\tresult = self.__filterByArea(result)\n\t\treturn result\n\n\tdef __filterVerticals(lines):\n\t\t\"\"\"\n\t\t\tFilter vertical lines\n\t\t\"\"\"\n\t\tif not lines:\n\t\t\treturn []\n\n\t\tresult = []\n\t\tfor line in lines:\n\t\t\tif line and line.isVertical():\n\t\t\t\tresult.append(line)\n\t\treturn result\n\n\tdef __filterHorizontals(lines):\n\t\t\"\"\"\n\t\t\tFilter horizontal lines\n\t\t\"\"\"\n\t\tif not lines:\n\t\t\treturn []\n\n\t\tresult = []\n\t\tfor line in lines:\n\t\t\tif line and line.isHorizontal():\n\t\t\t\tresult.append(line)\n\t\treturn result\n\n\tdef __filterByArea(self, lines):\n\t\t\"\"\"\n\t\t\tFilter lines by area\n\t\t\"\"\"\n\t\tlines.sort(key=getArea, 
reverse=True)\n\t\treturn lines[:self.__quantity]\n\nif __name__ == \"__main__\":\n\tcap = cv2.VideoCapture(\"http://localhost:8070/stream?topic=/robotech/robotech/cameraright/camera_image\")\n\n\twhile cap.isOpened():\n\t\t_, img = cap.read()\n\n\t\tDetector = LineDetector(LineDetector.HOUGH, [[LineDetector.XEXTREMES], [LineDetector.YEXTREMES]])\n\t\tlines = Detector.run(img)\n\t\tfor line in lines:\n\t\t\tif line:\n\t\t\t\tif line.isVertical():\n\t\t\t\t\tline.draw(img)\n\t\t\t\telse:\n\t\t\t\t\tline.draw(img)\n\n\t\tcv2.imshow('lol', img)\t\n\t\tkey = cv2.waitKey(20)\n\t\tif key == 27:\n\t\t\tbreak;","repo_name":"lawaty/CV-Libraries","sub_path":"line_detection.py","file_name":"line_detection.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74941036414","text":"try:\n import cv2\n import numpy as np\nexcept ImportError as e:\n from pip._internal import main as install\n packages = [\"numpy\", \"opencv-python\"]\n for package in packages:\n install([\"install\", package])\nfinally:\n pass\n\ndef warpPerspectiveImage():\n image = cv2.imread(\"cards.jpg\")\n width, height = 250,350\n pts1 = np.float32([[111,219],[287,188],[154,482],[352,440]])\n pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n image_wrap = cv2.warpPerspective(image, matrix, (width, height))\n cv2.imshow(\"Phones\", image_wrap)\n cv2.waitKey(0)\n return cv2.destroyAllWindows()\nwarpPerspectiveImage()","repo_name":"CrispenGari/opencv-python","sub_path":"beginner/Open-Computer-Version-Chapter-5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"35939082637","text":"import numpy as np\r\nfrom skimage import color\r\nfrom skimage import measure\r\nimport os, jpype\r\n\r\nEPS = 1e-15\r\n\r\n# The refinement stage of iteratively refined structural entropy.\r\ndef refinement_SE(adj, y=None):\r\n adj -= np.diag(np.diag(adj))\r\n tol = 1e-10\r\n max_iter = 300\r\n if y is None:\r\n n, k = adj.shape[0], 3\r\n y = np.random.randint(k, size=n)\r\n else:\r\n n, k = adj.shape[0], np.amax(y) + 1\r\n\r\n W = np.array(adj.copy(), dtype=np.float64)\r\n D = np.diag(np.sum(W, axis=-1, keepdims=False))\r\n S = np.eye(k)[y.reshape(-1)].astype(np.float64)\r\n volW = np.sum(W, dtype=np.float64)\r\n links = np.diagonal(np.matmul(np.matmul(S.T, W), S)).copy()\r\n degree = np.diagonal(np.clip(np.matmul(np.matmul(S.T, D), S), a_min=EPS, a_max=None)).copy()\r\n ses = (-links / volW) * np.log2(np.clip(degree, a_min=1e-10, a_max=None) / volW)\r\n z = y.copy()\r\n se = np.sum(ses)\r\n for iter_num in range(max_iter):\r\n for i in range(n):\r\n zi = z[i]\r\n links[zi] -= np.matmul(W[i,:], S[:,zi]) + np.matmul(S[:,zi].T, W[:,i])\r\n degree[zi] -= D[i,i]\r\n ses[zi] = (-links[zi]/volW) * np.log2(np.clip(degree[zi], a_min=1e-10, a_max=None)/volW)\r\n S[i,zi] = 0\r\n z[i] = -1\r\n\r\n links_new = links.copy()\r\n degree_new = degree.copy()\r\n links_new += np.matmul(W[i,:], S) + np.matmul(W[:, i].T, S)\r\n degree_new += D[i,i]\r\n ses_new = (-links_new/volW) * np.log2(np.clip(degree_new, a_min=1e-10, a_max=None)/volW)\r\n delta_ses = ses_new - ses\r\n\r\n opt_i = np.argmax(delta_ses)\r\n\r\n zi = opt_i\r\n z[i] = zi\r\n S[i,zi] = 1\r\n links[zi] = float(links_new[zi])\r\n degree[zi] = float(degree_new[zi])\r\n ses[zi] = float(ses_new[zi])\r\n if np.sum(ses) - se < tol:\r\n break\r\n se = np.sum(ses)\r\n return z\r\n\r\n# The merging stage of iteratively refined structural entropy.\r\ndef merging(adj, img_name, sp_scale=None):\r\n img_name = img_name.split('.')[0]\r\n if sp_scale == None:\r\n adj_path = f\"./{img_name}_adj.txt\"\r\n partition_path = f\"./{img_name}_partition.txt\"\r\n else:\r\n adj_path = f\"./{img_name}_{sp_scale}_adj.txt\"\r\n partition_path = f\"./{img_name}_{sp_scale}_partition.txt\"\r\n adj_path = os.path.abspath(adj_path)\r\n partition_path = os.path.abspath(partition_path)\r\n with open(adj_path, 'w') as f:\r\n f.write('{}\\n'.format(int(adj.shape[0])))\r\n for i in range(adj.shape[0]):\r\n for j in range(i + 1, adj.shape[1]):\r\n if adj[i, j] > 0:\r\n f.write('{}\\t{}\\t{}\\n'.format(int(i + 1), int(j + 1), adj[i, j]))\r\n Merging = jpype.JClass(\"algo.Merging\")\r\n Merging.main([adj_path, partition_path])\r\n if os.path.exists(adj_path):\r\n os.remove(adj_path)\r\n # read partition file\r\n y = np.zeros(adj.shape[0], dtype=int)\r\n with open(partition_path, 'r') as f:\r\n for comid, line in enumerate(f.readlines()):\r\n line = line.strip().split('\\t')\r\n for node in line:\r\n y[int(node) - 1] = comid\r\n if os.path.exists(partition_path):\r\n os.remove(partition_path)\r\n return y","repo_name":"zengguangjie/SLED","sub_path":"algo/iterative_refinement_SE.py","file_name":"iterative_refinement_SE.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"7958162624","text":"import sys\nimport heapq\nsys.stdin = open('input.txt')\n\n# 가중치가 존재할때 최단경로를 찾는 알고리즘 - 다익스트라\n# 알고리즘에서는 heapq(최소힙)를 import해서 사용하여 간단하게 구현 가능\n# 코드구조는 BFS와 유사\n\nT = int(input())\n\nfor k in range(1, T + 1):\n N, E = map(int, input().split())\n temp = [list(map(int, input().split())) for i in range(E)] # [[s, e, w], ...]\n dist = [9999 for i in range(N + 1)] # 초기 모든 노드 가중치 무한대로 세팅\n v = [[] for i in range(N + 1)]\n for i in temp:\n v[i[0]].append([i[1], i[2]]) # 연결리스트는 단방향, 가중치를 함께저장\n\n # 시작노드 가중치 0으로 세팅하고 출발\n que = []\n heapq.heappush(que, [0, 0]) # 가중치, idx\n dist[0] = 0\n\n while que:\n d, cur = heapq.heappop(que) # 가중치중 가장 작은애를 뽑아, 시작~ 현재위치까지 쌓아온 가중치, cur이 현재위치\n\n if cur == N:\n print('#{} {}'.format(k, d))\n break\n\n if d > dist[cur]: # visited 대체\n continue\n\n # 현재 위치에서 갈 수 있는 위치들을 한번 보자\n # 만약에, 현재까지 쌓아온 가중치 + 현재에서 다음으로가는 가중치가 시작~다음위치까지 가는 가중치보다 작다면 업데이트\n for i in v[cur]:\n nd = dist[cur] + i[1]\n if dist[i[0]] > nd:\n dist[i[0]] = nd\n heapq.heappush(que, [nd, i[0]])\n","repo_name":"ggpp0909/problem_solving","sub_path":"Python/SWEA/1014/5251_최소이동거리/5251_최소이동거리.py","file_name":"5251_최소이동거리.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"35448593582","text":"import os\r\nimport urllib.request as req\r\nfrom urllib.parse import urlparse\r\n\r\n\r\ndef download(url, to=None):\r\n if to:\r\n localfile = to\r\n else:\r\n fname = os.path.basename(urlparse(url).path)\r\n localfile = os.path.join('.', fname)\r\n print(\"Downloading {}\".format(localfile))\r\n\r\n if not os.path.isfile(localfile):\r\n req.urlretrieve(url, localfile)\r\n\r\n return localfile\r\n","repo_name":"minimekill/BloodyTelevision","sub_path":"getter.py","file_name":"getter.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"21901734751","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport requests\nimport socket\nfrom PyQt5 import uic\nfrom delete import Delete\nfrom new import Add\nfrom search import Search\nfrom update import Update \n\nimport sys\nimport time\n\nclass Menu(QMainWindow):\n\t\"\"\"docstring for tipo\"\"\"\n\tdef __init__(self,delete,search,add,update,ip,name):\n\t\t\n\t\tQMainWindow.__init__(self)\n\t\tuic.loadUi(\"Menu.ui\",self)\n\t\tself.setObjectName(\"window\")\n\t\tself.delete=delete\n\t\tself.search=search\n\t\tself.add=add\n\t\tself.update=update\n\t\tself.labelip.setText(name+\" estas conectado en \"+ip)\n\t\tself.botonbuscar.clicked.connect(self.opensearch)\n\t\tself.botonnuevo.clicked.connect(self.openadd)\n\t\tself.botonactual.clicked.connect(self.openupdate)\n\t\tself.botoneliminar.clicked.connect(self.opendelete)\n\t\t\n\t\twith open(\"style.css\") as f:\n\t\t\tself.setStyleSheet(f.read())\n\t\n\tdef opensearch(self):\n\t\tself.search.show()\n\tdef openadd(self):\n\t\tself.add.show()\n\tdef openupdate(self):\n\t\tself.update.show()\n\tdef opendelete(self):\n\t\tself.delete.show()\n\t\t\n\n\nname = socket.gethostname()\nr = requests.get('http://127.0.0.1:3000/get_my_ip', params={'hostname':str(name) })\napp=QApplication(sys.argv)\n_delete=Delete()\n_search=Search()\n_new=Add()\n_update=Update()\n_menu=Menu(_delete,_search,_new,_update,str(r.json()['ip']),name)\n_menu.show()\napp.exec_()","repo_name":"toodaniels/System-PyMovies","sub_path":"Clientes/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"35606378152","text":"#!/usr/bin/env python #\r\n# -*- coding: utf-8 -*- #\r\n# @Time : 2018-03-29 8:53 #\r\n# @author : xuejf #\r\n# @email :171521952@qq.com #\r\n# -------------------------- #\r\nfrom configparser import *\r\n\r\n\r\nclass ConfigFile ():\r\n #_in_data_dir = r'E:\\work\\auto_test2\\in_data'\r\n #_out_data_dir = r'E:\\work\\auto_test2\\out_data'\r\n _in_data_dir=\"\"\r\n _out_data_dir=\"\"\r\n def __init__(self):\r\n #print(\"enter __init__()\")\r\n cf = ConfigParser()\r\n cf.read(\"init.conf\", encoding=\"utf-8\")\r\n #secs = cf.sections()\r\n #print(secs)\r\n #opts = cf.options(\"base\")\r\n #kvs = cf.items(\"db\")\r\n # read by type\r\n if(self._in_data_dir.strip()==\"\"):\r\n self._in_data_dir = cf.get(\"base\", \"in_data_dir\")\r\n if(self._out_data_dir.strip()==\"\"):\r\n self._out_data_dir = cf.get(\"base\", \"out_data_dir\")\r\n #print(self._in_data_dir)\r\n #print(self._out_data_dir)\r\n\r\ncf=ConfigFile()\r\n\r\n","repo_name":"xuejf/auto-test","sub_path":"config/config_g.py","file_name":"config_g.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"25050078304","text":"from utils import token_required\nimport db\nfrom flask import request\nfrom sqlalchemy import Table, MetaData\nfrom sqlalchemy.exc import OperationalError, DataError, IntegrityError\nfrom flask_cors import cross_origin\nfrom . import handler\n\n\n@handler.route(\"/create\", methods=[\"POST\"])\n@cross_origin()\n@token_required\ndef create_table_data():\n data = request.get_json()\n table = data.get(\"table\")\n if table not in db.get_tables_in_db():\n return {\"error\": \"Table does not exist\"}, 400\n row = data.get(\"row\")\n db.clean_data(row, table)\n current_table = Table(table, MetaData(), autoload_with=db.engine)\n try:\n db.engine.execute(current_table.insert(), row)\n db.session.commit()\n return {\"message\": \"Successfully Created\"}, 200\n except (OperationalError, DataError, IntegrityError) as e:\n return {\"error\": \"Failed to create row, {0}\".format(e.orig)}, 400\n","repo_name":"agzuniverse/Chathuram","sub_path":"src/server/handlers/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"36360541238","text":"# 최소직사각형\n# 각 w, h를 비교해서 둘 중 큰 값을 한 리스트에 넣고 나머지를 리스트로 만든다. 두 개의 리스트 중 가장 큰 값을 뽑아서 곱하면 된다.\n\n# w, h 리스트를 만든다.\n# for문을 돌면서 w, h 중 큰 값은 w리스트 작은 값은 h리스트에 담는다.\n# 두 개의 리스트에서 가장 큰 값이 곱한 값이 답이다.\n\n#1\ndef solution(sizes):\n return max(max(x) for x in sizes) * max(min(x) for x in sizes)\n#2\nsolution = lambda sizes: max(sum(sizes, [])) * max(min(size) for size in sizes)\n#3\ndef solution(sizes):\n answer = 0\n \n sizes = [sorted(size, reverse=True) for size in sizes]\n \n widths = [size[0] for size in sizes]\n heights = [size[1] for size in sizes]\n \n width, height = max(widths), max(heights)\n \n answer = width * height\n \n return answer\n","repo_name":"BBstudyFighting/algorithm","sub_path":"18주차/SUYEON/SQL/programmers_coding test9.PY","file_name":"programmers_coding test9.PY","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"732868355","text":"#!/usr/bin/env python\nimport pandas as pd\nimport numpy as np\n\nimport stats_feature as sf\nimport cross_feature as cf\n\ndef itera(dcols):\n for key, val in dcols.items():\n print(key, val)\n\n##### load the train file into a dataframes ##### \ndf = pd.read_csv('./LoanStats3b.csv', header=1, low_memory=False) \n# delete last two rows\nnlines = len(df)\ndf = df.drop(df.index[[nlines-2, nlines-1]])\n\n##### feature visualization #####\n\ncols = df.columns.tolist()\ndict_cols = {}\nfor icol in range(len(cols)):\n dict_cols[icol] = cols[icol] \n\nitera(dict_cols)\nscol = input('Feature to Visualize [1-51], [-1]->Exit: ')\nwhile (scol != -1):\n sf.vis_feature(df[cols[scol]])\n scol = input('Feature to Visualize [1-51], [-1]->Exit: ') \n\n\nindex_train = (df['loan_status'] == 'Fully Paid') | (df['loan_status'] == 'Charged Off')\ntrain_set = df[index_train]\n\nscol = input('Feature to Couple with Loan Status [1-51], [-1]->Exit: ')\nwhile (scol != -1):\n cf.cross_hist(train_set[cols[scol]], train_set[cols[16]])\n scol = input('Feature to Visualize [1-51], [-1]->Exit: ') \n\n\n","repo_name":"jaurora/MachineLearning","sub_path":"LendingClub/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13983536663","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# AUTHOR: Ti Bai\n# EMAIL: tibaiw@gmail.com\n# AFFILIATION: MAIA Lab | UT Southwestern Medical Center\n# DATETIME: 9/22/2022\n\n# sys\nimport os\nimport shutil\n\n# monai\nfrom monai.apps.auto3dseg import (\n DataAnalyzer,\n BundleGen,\n AlgoEnsembleBestN,\n AlgoEnsembleBuilder,\n export_bundle_algo_history,\n import_bundle_algo_history,\n)\nfrom monai.auto3dseg import algo_to_pickle\nfrom monai.bundle.config_parser import ConfigParser\n\n\nif __name__ == '__main__':\n ### setup the experiement parameters\n is_data_analysis = False \n need_customized_train_params = False\n\n data_root = r'./data'\n datalist_file = r'./data/task1_AMOS.json'\n result_dir = r'result'\n dataset_name = 'MONAI'\n\n num_fold = 5\n model_name = ['segresnet'] # choose from [\"segresnet_small\", \"segresnet\", \"segresnet2d\", \"dints\", \"swinunetr\"]\n template_path = r'assets/algorithm_templates'\n task = 'segmentation'\n modality = 'CT'\n is_ensemble = False ##### ALWAYS SET IT AS FALSE UNLESS YOU REVISE THIS SCRIPT!!!\n\n train_param = {}\n if need_customized_train_params:\n train_data_size = 100\n num_iterations = 100000\n num_images_per_batch = 1\n num_iterations_per_validation = 1000\n train_param = {\n \"num_iterations\": num_iterations,\n \"num_iterations_per_validation\": num_iterations_per_validation,\n \"num_images_per_batch\": num_images_per_batch,\n \"num_epochs\": num_iterations // (train_data_size // num_images_per_batch),\n \"num_warmup_iterations\": int(0.01 * num_iterations),\n }\n\n # step 0: prepare the environment\n if not os.path.isdir(result_dir):\n os.makedirs(result_dir)\n\n data_src_cfg = {\n \"name\": dataset_name,\n \"task\": task,\n \"modality\": modality,\n \"datalist\": datalist_file,\n \"dataroot\": data_root,\n }\n input = os.path.join(result_dir, 'input.yaml')\n ConfigParser.export_config_file(data_src_cfg, input)\n\n datastats_file = os.path.join(result_dir, 'data_stats.yaml')\n\n # step 1: Data Analysis\n print('Step 1: Analyzing the dataset and saving the results to {} ...'.format(datastats_file))\n if is_data_analysis:\n analyser = DataAnalyzer(datalist_file, data_root, output_path=datastats_file)\n datastat = analyser.get_all_case_stats()\n\n # step 2: Algorithm Generation (algo_gen)\n print('Step 2: Generating the algorithm based on template from {} and saving the results to {} ...'.format(template_path, result_dir))\n if not os.path.exists(os.path.join(result_dir, 'algorithm_templates')):\n shutil.copytree(template_path, os.path.join(result_dir, 'algorithm_templates'))\n default_algos = {\n \"segresnet_small\": dict(_target_=\"segresnet_small.scripts.algo.SegresnetAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", \"segresnet_small\")),\n \"segresnet\": dict(_target_=\"segresnet.scripts.algo.SegresnetAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", \"segresnet\")),\n \"segresnet2d\": dict(_target_=\"segresnet2d.scripts.algo.Segresnet2dAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", \"segresnet2d\")),\n \"dints\": dict(_target_=\"dints.scripts.algo.DintsAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", 'dints')),\n \"swinunetr\": dict(_target_=\"swinunetr.scripts.algo.SwinunetrAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", 'swinunetr'))\n }\n\n used_algorithms = {x: default_algos[x] for x in model_name if x in default_algos}\n\n bundle_generator = BundleGen(\n 
algo_path=result_dir,\n algos=used_algorithms,\n data_stats_filename=datastats_file,\n data_src_cfg_name=input,\n )\n\n bundle_generator.generate(result_dir, num_fold=num_fold)\n\n # Getting and Saving the history to hard drive\n history = bundle_generator.get_history()\n export_bundle_algo_history(history)\n\n # step 3: generate the train command\n print('Step 3: Generating the training command ...')\n #history = import_bundle_algo_history(result_dir, only_trained=False)\n for task in history:\n current_command = 'python '\n for current_algorithm_name, _ in task.items():\n current_algorithm_folder = os.path.join(result_dir, current_algorithm_name)\n current_train_script = os.path.join(current_algorithm_folder, 'scripts', 'train.py')\n current_command += current_train_script + ' run --config_file='\n\n all_config_files = []\n for current_config_file in os.listdir(os.path.join(current_algorithm_folder, 'configs')):\n current_config_file = os.path.join(current_algorithm_folder, 'configs', current_config_file)\n all_config_files.append(f\"'{current_config_file}'\")\n\n current_command += '\"[' + ','.join(all_config_files) + ']\"'\n\n for k, v in train_param.items():\n current_command += f\" --{k}={v}\"\n\n with open(f'{current_algorithm_name}.sh', 'w') as f:\n f.write('export CUDA_VISIBLE_DEVICES=your_device_id' + '\\n')\n f.write(current_command)\n\n # step 4: run the command\n print('Step 4: Please set the GPU device id (if necessary) and run the training script ...')\n\n # step 5: ensemble\n if is_ensemble:\n print('Step 5: Ensembling the result ...')\n history = import_bundle_algo_history(result_dir, only_trained=True)\n builder = AlgoEnsembleBuilder(history, input)\n builder.set_ensemble_method(AlgoEnsembleBestN(n_best=5))\n ensembler = builder.get_ensemble()\n preds = ensembler()\n\n print('Congrats! May the force be with you ...')\n","repo_name":"baiti01/Auto3DSeg-monai","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"41980222932","text":"from flask import Flask, render_template\nimport sqlalchemy\n\napp = Flask(__name__)\n\nengine = sqlalchemy.create_engine('mysql+pymysql://@127.0.0.1/game_recommendation?charset=utf8mb4')\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return \"Hello, World !\\n\\nAppend /recommendation/ to the current \" \\\n \"url\\n\\nSome available userids 76561197960355015, 76561197960385706\"\n\n\n@app.route('/recommendation/')\ndef recommendation(user_id):\n # retrieve recommendation for 'user_id'\n results = engine.execute('''\n SELECT g0, g1, g2, g3, g4, g5, g6, g7, g8, g9 FROM tbl_recommendation_games WHERE user_id=%s;\n ''' % user_id).first()\n\n lst_recommend_games = []\n for app_id in list(results):\n app_data = engine.execute('''\n SELECT name, initial_price, header_image FROM tbl_steam_app WHERE steam_appid=%s;\n ''' % app_id).first()\n if app_data != None:\n lst_recommend_games.append(app_data)\n\n return render_template('recomendation.html', user_id=user_id, lst_recommend_games=lst_recommend_games)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"jianleisun/recommendation_system_project","sub_path":"rs_flask_web_application.py","file_name":"rs_flask_web_application.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"6614698614","text":"import tensorflow as tf\nfrom keras import layers\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nX = np.array([[0, 0],\n [0, 1],\n [1, 0],\n [1, 1]], dtype=np.float32)\ny_and = np.array([[0], [0], [0], [1]], dtype=np.float32)\ny_or = np.array([[0], [1], [1], [1]], dtype=np.float32)\n\nx_and = layers.Input(shape = (2,))\nout_and = layers.Dense(units = 1, activation = 'sigmoid', name = 'and')(x_and)\n\nx_or = layers.Input(shape = (2,))\nout_or = layers.Dense(units = 1, activation = 'sigmoid', name = 'or')(x_or) # output unit이 1\n\nmodel = tf.keras.Model(inputs = [x_and, x_or], outputs = [out_and, out_or])\nmodel.summary()\n\nopt = tf.keras.optimizers.RMSprop(learning_rate=0.1)\nmodel.compile(optimizer=opt, loss='mse', metrics=['accuracy'])\n\n\nret = model.fit(x = [X, X], y = [y_and, y_or], epochs=100, batch_size=4, verbose=0)\ntest = model.evaluate(x = [X, X], y = [y_and, y_or], verbose=0)\n\nprint('total loss = ', test[0])\nprint('AND : loss = {}, acc = {}'.format(test[1], test[3]))\nprint('OR : loss = {}, acc = {}'.format(test[2], test[4]))\n\nplt.plot(ret.history['loss'], 'r--', label = 'loss')\nplt.plot(ret.history['and_loss'], 'g--', label = 'and_loss')\nplt.plot(ret.history['or_loss'], 'b--', label = 'or_loss')\nplt.plot(ret.history['and_accuracy'], 'g-', label = 'and_accuracy')\nplt.plot(ret.history['or_accuracy'], 'b-', label = 'or_accruacy')\nplt.xlabel('epochs')\nplt.ylabel('loss and accuracy')\nplt.legend(loc='best')\nplt.show()","repo_name":"YeDongVibe/Tensorflow_Class","sub_path":"P.Song/FunctionalAPI/FunctionalAPI(AND,OR).py","file_name":"FunctionalAPI(AND,OR).py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29778955905","text":"import os\nimport argparse\nimport cv2\nimport numpy as np\nimport sys\nimport time\nfrom threading import Thread\nimport importlib.util\nimport pytesseract\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Users\\emielyn\\AppData\\Local\\Programs\\Tesseract-OCR\\tesseract.exe\"\nimport pyrebase\nfrom datetime import date\nfrom datetime import datetime\nimport imutils\nimport Levenshtein\n\nfrom mmocr.apis import TextRecInferencer\ninferencer = TextRecInferencer(model='SATRN', weights=r'C:\\Users\\emielyn\\mmocr\\best_IC15_recog_word_acc_epoch_77.pth')\n\n# Initialize the Firebase app with your service account credentials\n\nfirebaseConfig = {\n \"apiKey\": \"AIzaSyB_4cNoh3klH4mKPSd7dhJzr5QUGoLihy8\",\n \"authDomain\": \"scanmemaster-9da58.firebaseapp.com\",\n \"projectId\": \"scanmemaster-9da58\",\n \"databaseURL\" : \"https://scanmemaster-9da58-default-rtdb.firebaseio.com/\",\n \"storageBucket\": \"scanmemaster-9da58.appspot.com\",\n \"messagingSenderId\": \"270970295536\",\n \"appId\": \"1:270970295536:web:02ecd24ee665578e6d9e35\",\n \"measurementId\": \"G-27WEKS22GB\"\n}\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\ndb = firebase.database()\n\nclass VideoStream:\n \"\"\"Camera object that controls video streaming from the Picamera\"\"\"\n def __init__(self, resolution=(420, 480), framerate=30):\n self.stream = cv2.VideoCapture(\"newCamVid1.mp4\")\n ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))\n ret = self.stream.set(3, resolution[0])\n ret = self.stream.set(4, resolution[1])\n\n # Get the first frame to determine its shape\n _, self.frame = self.stream.read()\n self.original_frame = self.frame.copy() # create a copy of the original frame\n self.output_width = 650\n self.output_height = int(self.frame.shape[0] / (self.frame.shape[1] / self.output_width))\n\n self.stopped = False\n\n def start(self):\n Thread(target=self.update, args=()).start()\n return self\n\n def update(self):\n while True:\n if self.stopped:\n self.stream.release()\n return\n\n # Read the next frame from the video stream\n (self.grabbed, frame) = self.stream.read()\n\n # Store the original frame\n self.original_frame = frame\n\n # Resize the original frame to the desired output resolution\n resized_frame = cv2.resize(self.original_frame, (self.output_width, self.output_height))\n\n # Store the resized frame\n self.frame = resized_frame\n\n def read(self):\n return self.frame\n\n def read_original(self):\n return self.original_frame\n\n def stop(self):\n self.stopped = True\n\n\n\n# class VideoStream:\n# \"\"\"Camera object that controls video streaming from the Picamera\"\"\"\n# # def __init__(self,resolution=(640,480),framerate=30): :820\n# def __init__(self,resolution=(420,480),framerate=30):\n# # self.stream = cv2.VideoCapture(0)\n\n# self.stream = cv2.VideoCapture(\"newCamVid1.mp4\")\n# # Read the first frame to get its shape\n# _, self.frame = self.stream.read()\n# self.frame = imutils.resize(self.frame, width=50)\n\n# #self.stream = cv2.VideoCapture(\"rtsp://thesis:thesisisit@10.0.254.12/stream2\")\n\n# ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))\n# ret = self.stream.set(3,resolution[0])\n# ret = self.stream.set(4,resolution[1])\n \n# (self.grabbed, self.frame) = self.stream.read()\n\n# self.stopped = False\n\n# def start(self):\n# Thread(target=self.update,args=()).start()\n# return self\n\n# def update(self):\n# while True:\n# if self.stopped:\n# self.stream.release()\n# return\n\n# (self.grabbed, 
self.frame) = self.stream.read()\n\n# def read(self):\n# return self.frame\n\n# def stop(self):\n# self.stopped = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--modeldir', help='Folder the .tflite file is located in',\n required=True)\nparser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',\n default='detect.tflite')\nparser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',\n default='labelmap.txt')\nparser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',\n default=0.5)\nparser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',\n default='640x480')\nparser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',\n action='store_true')\n\nargs = parser.parse_args()\n\nMODEL_NAME = args.modeldir\nGRAPH_NAME = args.graph\nLABELMAP_NAME = args.labels\nmin_conf_threshold = float(args.threshold)\nresW, resH = args.resolution.split('x')\nimW, imH = int(resW), int(resH)\nuse_TPU = args.edgetpu\n\n\npkg = importlib.util.find_spec('tflite_runtime')\nif pkg:\n from tflite_runtime.interpreter import Interpreter\n if use_TPU:\n from tflite_runtime.interpreter import load_delegate\nelse:\n from tensorflow.lite.python.interpreter import Interpreter\n if use_TPU:\n from tensorflow.lite.python.interpreter import load_delegate\n\nif use_TPU:\n if (GRAPH_NAME == 'detect.tflite'):\n GRAPH_NAME = 'edgetpu.tflite' \n\nCWD_PATH = os.getcwd()\n\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)\n\nPATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)\n\nwith open(PATH_TO_LABELS, 'r') as f:\n labels = [line.strip() for line in f.readlines()]\n\nif labels[0] == '???':\n del(labels[0])\n\nif use_TPU:\n interpreter = Interpreter(model_path=PATH_TO_CKPT,\n experimental_delegates=[load_delegate('libedgetpu.so.1.0')])\n print(PATH_TO_CKPT)\nelse:\n interpreter = Interpreter(model_path=PATH_TO_CKPT)\n\ninterpreter.allocate_tensors()\n\n# Get model details\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\n\nfloating_model = (input_details[0]['dtype'] == np.float32)\n\ninput_mean = 127.5\ninput_std = 127.5\n\n\noutname = output_details[0]['name']\n\nif ('StatefulPartitionedCall' in outname): \n boxes_idx, classes_idx, scores_idx = 1, 3, 0\nelse: \n boxes_idx, classes_idx, scores_idx = 0, 1, 2\n\nframe_rate_calc = 1\nfreq = cv2.getTickFrequency()\n\nvideostream = VideoStream(resolution=(imW,imH),framerate=30).start()\ncount = 0\nexit = 0\ndetected = False\nimage_output = \"iMAGE.jpg\"\n\n\ndef checkExist():\n global exit\n global prev_txt\n while True:\n if exit == 0:\n filename = \"scanned_platenumbers.txt\"\n first_line = \"\"\n # Open the file for reading and writing\n with open(filename, \"r+\") as file:\n # Read the first line of the file\n first_line = file.readline().strip()\n # Read the remaining lines of the file\n remaining_lines = file.readlines()\n # Overwrite the file with the remaining lines\n file.seek(0)\n file.writelines(remaining_lines)\n file.truncate()\n # Close the file\n file.close()\n plateNum = first_line\n\n # print('check '+plateNum)\n\n try:\n if len(plateNum)>0:\n # Get all plate numbers in \"Vehicle_with_criminal_offense\" node\n plate_nums = 
db.child(\"Vehicle_with_criminal_offense\").shallow().get().val()\n \n # Find closest match to input\n global closest_match\n closest_match = None\n min_distance = float('inf')\n for num in plate_nums:\n distance = Levenshtein.distance(plateNum, num)\n if distance < min_distance:\n closest_match = num\n min_distance = distance\n \n confidence = round((1 - (min_distance / len(plateNum))) * 100, 2)\n if confidence >= 60 and closest_match not in prev_txt:\n exist = db.child(\"Vehicle_with_criminal_offense\").child(closest_match).child(\"plateNumber\").get()\n #print(exist.val())\n if exist.val() != None:\n isApprehended = db.child(\"Vehicle_with_criminal_offense\").child(closest_match).child(\"apprehended\").get()\n #print(\"isApprehended \"+isApprehended.val())\n if isApprehended.val() != 'yes':\n print('Notify '+plateNum)\n # Create Data\n nowD = datetime.now()\n dateToday = str(date.today())\n timeToday = nowD.strftime(\"%H:%M:%S\")\n crimeScanned = db.child(\"Vehicle_with_criminal_offense\").child(closest_match).child(\"criminalOffense\").get()\n\n color = ''\n if confidence >= 60 and confidence <= 75:\n color='yellow'\n elif confidence > 75 and confidence <= 100:\n color='red'\n\n data = {\"PlateNumber\":closest_match, \"Location\": \"Lapasan Zone 4\", \"Date\": dateToday, \"Time\": timeToday, \"Notification\": \"on\", \"Apprehended\": \"no\", \"CriminalOffense\": crimeScanned.val(), 'Color': color, 'DetectedPN': plateNum}\n db.child(\"Scanned\").child((dateToday+\" \"+timeToday)).set(data)\n dataPlateNumber = {\"PlateNumber\":closest_match, \"Apprehended\": \"no\",\"CriminalOffense\": crimeScanned.val()}\n db.child(\"ScannedPlateNumber\").child(closest_match).set(dataPlateNumber)\n\n #For Notification\n db.child(\"ScannedNotification\").set(data)\n db.child(\"ScannedPlateNumberNotification\").set(dataPlateNumber)\n prev_txt.append(closest_match)\n else:\n print(\" \")\n #print(\"Plate Number dont't exist\")\n except Exception as e:\n print(\"err \"+str(e))\n #print(\"Plate Number dont't exist \"+ str(e))\n #print()\n #print('checkDatabase')\n #print('Latest data:', plateNum)\n #print()\n #time.sleep(1)\n else:\n break\n\ndef saveForQuery():\n global exit\n filename = \"scanned_platenumbers.txt\"\n prevPN = ''\n # Create the file if it doesn't exist\n if not os.path.isfile(filename):\n open(filename, \"w\").close()\n\n while True:\n if exit == 0:\n\n #Read the latest scanned on the database\n plateNum = db.child(\"ScannedQuery\").child(\"PlateNumber\").get()\n if plateNum.val() != prevPN:\n # Open the file in append mode\n with open(filename, \"a\") as file:\n # Get the text to append from the user\n plateNum = plateNum.val()\n # Append the text to the end of the file\n file.write(plateNum+ \"\\n\")\n # Close the file\n file.close()\n #print('checkdatabase')\n prevPN = plateNum\n #time.sleep(1)\n else:\n break\n\nprev_txt = []\n\ndef clear_list():\n global exit\n while True:\n if exit == 0:\n time.sleep(30)\n prev_txt.clear()\n print(\"--------------------------\")\n else:\n break\n\n\ndef ocr():\n global detected\n global exit\n global prev_txt\n while True: \n if exit == 0: \n if os.path.exists(image_output):\n try:\n img_ocr = cv2.imread(image_output)\n img_ocr = cv2.resize(img_ocr,None, fx=0.5 , fy =0.5)\n if detected == True:\n # txt =pytesseract.image_to_string(img_ocr, config='-c tessedit_char_whitelist=0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ --psm 8 --oem 3')\n # print(txt) \n # Pass preprocessed image to OCR model\n result = inferencer(img_ocr, print_result=True)\n text = 
result['predictions'][0]['text']\n\n # Print OCR results\n print('Prediction: ',text)\n data = {\"PlateNumber\":text}\n db.child(\"ScannedQuery\").set(data)\n try:\n os.remove(image_output)\n except OSError as e:\n print(f\"Error: {image_output} path could not be delete. {e}\")\n except Exception as e:\n print(\"\")\n #print(\"An error occured:\", str(e))\n else:\n \n \n continue\n \n else:\n break\n\ndef detection():\n global frame_rate_calc\n global detected\n global exit\n # Set the target frame rate in frames per second\n target_fps = 10\n\n # Calculate the delay between frames in seconds\n frame_delay = 1.0 / target_fps\n while True:\n start_time = time.monotonic()\n t1 = cv2.getTickCount()\n frame1 = videostream.read()\n\n frame = frame1.copy()\n # frame = imutils.resize(frame1, width=820)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame_resized = cv2.resize(frame_rgb, (width, height))\n input_data = np.expand_dims(frame_resized, axis=0)\n\n if floating_model:\n input_data = (np.float32(input_data) - input_mean) / input_std\n\n interpreter.set_tensor(input_details[0]['index'],input_data)\n interpreter.invoke()\n\n boxes = interpreter.get_tensor(output_details[boxes_idx]['index'])[0]\n classes = interpreter.get_tensor(output_details[classes_idx]['index'])[0] \n scores = interpreter.get_tensor(output_details[scores_idx]['index'])[0]\n\n area = [(1,160),(647,160),(647,360),(1,360)] #Bahog ug video\n\n # area = [(1,257),(639,257),(639,480),(1,480)] #sa laptop cam\n # area = [(2,243),(637,243),(637,360),(2,360)] #sa CCTV\n\n for i in range(len(scores)):\n if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):\n\n ymin = int(max(1,(boxes[i][0] * imH)))\n xmin = int(max(1,(boxes[i][1] * imW)))\n ymax = int(min(imH,(boxes[i][2] * imH)))\n xmax = int(min(imW,(boxes[i][3] * imW)))\n \n cx = int((xmin + xmax)/2)\n cy = int((ymin + ymax)/2)\n result = cv2.pointPolygonTest(np.array(area, np.int32), (int(cx), int(cy)), False)\n if result >= 0:\n detected = True\n # cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n\n object_name = labels[int(classes[i])] \n label = '%s: %d%%' % (object_name, int(scores[i]*100)) \n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) \n label_ymin = max(ymin, labelSize[1] + 10) \n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) \n # cv2.circle(frame,(cx,cy),5,(10, 255, 0),-1)\n imgRoi = frame[ymin:ymax, xmin:xmax]\n cv2.imwrite(\"iMAGE.jpg\", imgRoi)\n \n else:\n detected = False\n for i in area:\n cv2.polylines(frame,[np.array(area, np.int32)], True, (15,220,10),6)\n\n cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)\n \n # frame1 = imutils.resize(frame, width=650)\n cv2.imshow('Object detector', frame)\n\n \n t2 = cv2.getTickCount()\n time1 = (t2-t1)/freq\n frame_rate_calc= 1/time1\n \n\n if cv2.waitKey(1) == ord('q'):\n exit =1\n break\n elapsed_time = time.monotonic() - start_time\n time.sleep(max(0, frame_delay - elapsed_time))\n videostream.stop()\n cv2.destroyAllWindows()\n\ntask1 = Thread(target=detection)\ntask2 = Thread(target=ocr)\ntask3 = Thread(target=saveForQuery)\ntask4 = Thread(target=checkExist)\ntask5 = Thread(target=clear_list)\n\nwhile True:\n task1.start()\n task2.start()\n task3.start()\n task4.start()\n task5.start()\n\n\n task1.join()\n 
task2.join()\n task3.join()\n task4.join()\n task5.join()\n if exit ==1:\n print(\"Done executing\")\n break","repo_name":"Millborne-g/MMOCR-codes","sub_path":"camLatest_polylines.py","file_name":"camLatest_polylines.py","file_ext":"py","file_size_in_byte":17846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
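A note on the matching step in the checkExist routine of the record above: it picks the database plate with the smallest Levenshtein distance to the OCR output, turns that distance into a percentage confidence, and only notifies at 60% or more (yellow for 60-75, red above 75). The following is a minimal, self-contained sketch of just that fuzzy-matching logic, assuming the python-Levenshtein package; the plate strings are made-up values, not data from the original system.

import Levenshtein

def closest_plate(detected, known_plates, threshold=60):
    # Return (best_match, confidence, color), or (None, confidence, None) below the threshold.
    best, best_dist = None, float("inf")
    for plate in known_plates:
        d = Levenshtein.distance(detected, plate)
        if d < best_dist:
            best, best_dist = plate, d
    # Same confidence formula as checkExist(): 1 - distance/len(detected), as a percentage.
    confidence = round((1 - best_dist / len(detected)) * 100, 2)
    if confidence < threshold:
        return None, confidence, None
    color = "yellow" if confidence <= 75 else "red"
    return best, confidence, color

# Hypothetical values for illustration only.
print(closest_plate("A8C1234", ["ABC1234", "ABD1235", "XYZ9999"]))  # ('ABC1234', 85.71, 'red')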
+{"seq_id":"38019262331","text":"from typing import Any, List\nfrom pytorch_lightning import LightningModule\nfrom src.models.fcvae_model_v1 import FCVAEModelV1\nfrom src.models.fcvae_model_v2 import FCVAEModelV2\nfrom src.models.fcae_model import FCAEModel\nfrom torch import nn\nimport torch\nfrom torchmetrics.classification.accuracy import Accuracy\n\n\nclass ExtractorFCMLPModel(LightningModule):\n \"\"\"\n A LightningModule organizes your PyTorch code into 5 sections:\n - Computations (init).\n - Train loop (training_step)\n - Validation loop (validation_step)\n - Test loop (test_step)\n - Optimizers (configure_optimizers)\n\n Read the docs:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html\n \"\"\"\n\n def __init__(\n self,\n extractor_path: str = \"\",\n task: str = \"regression\",\n n_output: int = 1,\n topology: List[int] = None,\n dropout: float = 0.1,\n num_unfreeze_epochs = 10,\n loss_type: str = \"MSE\",\n extractor_type: str = \"FCVAEModelV2\",\n lr: float = 0.001,\n weight_decay: float = 0.0005,\n **kwargs\n ):\n super().__init__()\n self.save_hyperparameters()\n\n self.extractor_type = extractor_type\n if self.extractor_type == \"FCVAEModelV1\":\n self.feature_extractor = FCVAEModelV1.load_from_checkpoint(extractor_path)\n elif self.extractor_type == \"FCVAEModelV2\":\n self.feature_extractor = FCVAEModelV2.load_from_checkpoint(extractor_path)\n elif self.extractor_type == \"FCAEModel\":\n self.feature_extractor = FCAEModel.load_from_checkpoint(extractor_path)\n else:\n raise ValueError(\"Unsupported extractor_type\")\n\n self.feature_extractor.freeze()\n\n self.task = task\n self.n_output = n_output\n self.topology = [self.feature_extractor.model.n_latent] + list(topology)\n\n self.num_unfreeze_epochs = num_unfreeze_epochs\n\n self.mlp_layers = []\n for i in range(len(self.topology) - 1):\n layer = nn.Linear(self.topology[i], self.topology[i + 1])\n self.mlp_layers.append(nn.Sequential(layer, nn.ReLU(), nn.BatchNorm1d(self.topology[i + 1]), nn.Dropout(dropout)))\n self.mlp_layers.append(nn.Linear(self.topology[-1], self.n_output))\n\n if task == \"classification\":\n self.loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')\n if n_output < 2:\n raise ValueError(f\"Classification with {n_output} classes\")\n elif task == \"regression\":\n if self.hparams.loss_type == \"MSE\":\n self.loss_fn = torch.nn.MSELoss(reduction='mean')\n elif self.hparams.loss_type == \"L1Loss\":\n self.loss_fn = torch.nn.L1Loss(reduction='mean')\n else:\n raise ValueError(\"Unsupported loss_type\")\n\n self.mlp = nn.Sequential(*self.mlp_layers)\n\n self.accuracy = Accuracy()\n\n def on_epoch_end(self):\n # a hook is cleaner (but a callback is much better)\n if self.trainer.current_epoch == self.num_unfreeze_epochs:\n self.feature_extractor.unfreeze()\n\n def forward(self, x: torch.Tensor):\n z = self.feature_extractor.get_latent(x)\n return self.mlp(z)\n\n def get_probabilities(self, x: torch.Tensor):\n x = self.feature_extractor.get_latent(x)\n x = self.mlp(x)\n return torch.softmax(x, dim=1)\n\n def step(self, batch: Any):\n x, y, ind = batch\n out = self.forward(x)\n batch_size = x.size(0)\n y = y.view(batch_size, -1)\n loss = self.loss_fn(out, y)\n\n logs = {\"loss\": loss}\n if self.task == \"classification\":\n out_tag = torch.argmax(out, dim=1)\n acc = self.accuracy(out_tag, y)\n logs[\"acc\"] = acc\n\n return loss, logs\n\n def training_step(self, batch: Any, batch_idx: int):\n loss, logs = self.step(batch)\n d = {f\"train/{k}\": v for k, v in logs.items()}\n 
self.log_dict(d, on_step=False, on_epoch=True, logger=True)\n return logs\n\n def training_epoch_end(self, outputs: List[Any]):\n pass\n\n def validation_step(self, batch: Any, batch_idx: int):\n loss, logs = self.step(batch)\n d = {f\"val/{k}\": v for k, v in logs.items()}\n self.log_dict(d, on_step=False, on_epoch=True, logger=True)\n return logs\n\n def validation_epoch_end(self, outputs: List[Any]):\n pass\n\n def test_step(self, batch: Any, batch_idx: int):\n loss, logs = self.step(batch)\n d = {f\"test_{k}\": v for k, v in logs.items()}\n self.log_dict(d, on_step=False, on_epoch=True, logger=True)\n return logs\n\n def test_epoch_end(self, outputs: List[Any]):\n pass\n\n def configure_optimizers(self):\n \"\"\"Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n See examples here:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n return torch.optim.Adam(\n params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay\n )\n","repo_name":"GillianGrayson/dnamvae","sub_path":"src/models/extractor_mlp_model.py","file_name":"extractor_mlp_model.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
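The extractor in the record above is frozen at construction and only unfrozen once current_epoch reaches num_unfreeze_epochs, so the single Adam optimizer initially updates just the MLP head. A minimal plain-PyTorch sketch (not the Lightning module itself, and with invented toy layer sizes) showing what freezing means for the gradients:

import torch
from torch import nn

# Toy stand-ins for the pretrained extractor and the MLP head.
extractor = nn.Linear(16, 8)  # plays the role of feature_extractor.get_latent
head = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 1))

# "freeze()": stop tracking gradients for the extractor's parameters.
for p in extractor.parameters():
    p.requires_grad_(False)

x = torch.randn(5, 16)
y = torch.randn(5, 1)
loss = nn.functional.mse_loss(head(extractor(x)), y)
loss.backward()

print(all(p.grad is None for p in extractor.parameters()))  # True while frozen
print(all(p.grad is not None for p in head.parameters()))   # True: only the head trains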
+{"seq_id":"14567317943","text":"from sys import argv\r\nimport copy\r\nfrom operator import itemgetter\r\nimport time\r\ndef shortPath(towns,city,target,path,i):\r\n\tif(city==[]):\r\n\t\treturn\r\n\telse:\t\r\n\t\tglobal maxWeight,flag,paths\r\n\t\tcity=sorted(city,key=itemgetter(1))[::-1]\r\n\t\tfor p in city:\t\t\t\r\n\t\t\tif(p[0]==target):\r\n\t\t\t\tprevMaxWeight=maxWeight\t\t\r\n\t\t\t\tpath[i]=p\r\n\t\t\t\tmaxWeight=p[1]\t\t\t\t\t\t\r\n\t\t\t\tfor n in range(i,-1,-1):\r\n\t\t\t\t\tif(path[n][1]prevMaxWeight):\r\n\t\t\t\t\tpaths=copy.copy(path)\t\t\t\t\r\n\t\t\t\t\tcontinue\t\t\t\r\n\t\t\t\telse:\t\t\r\n\t\t\t\t\tmaxWeight=prevMaxWeight\t\r\n\t\t\t\t\tcontinue\r\n\t\t\tif p[1] \"+str(n[0])\r\n\t\tif(n[1] 0 :\n for file in each_file:\n date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')\n unix_time = time.mktime(date_stamp.timetuple())\n #print(date_stamp, unix_time)\n full_file_path = each_dir+'/'+file\n #print(full_file_path)\n source = open(full_file_path, 'r').read()\n #print(source)\n try:\n value = float(source.split(gather+':')[1].split(' | ')[0])\n #print(ticker+\":\",value)\n df = df.append({'Date':date_stamp, 'Unix':unix_time, 'Ticker':ticker, 'De Ratio':value,}, ignore_index = True)\n except Exception as e:\n pass\n\n #time.sleep(15)\n save = gather.replace(' ', '').replace(')', '').replace('(','').replace('/', '')+ ('.csv')\n print(save)\n df.to_csv(save)\n\n\nKey_Stats()\n\n\n#note \n#1\n#The df variable is used to store the creation of a new \"DataFrame\" object from Pandas, where we specify the columns to be date, unix, ticker, and DE ratio\n\n#2\n#The Try here identifies the value as usual, then we're re-defining our DataFrame object as the previous DataFrame object with the new data appended to it\n\n#3\n#specifying a custom name for the csv file, then using pandas to_csv capability to output the Data Frame to an actual CSV file\n#Running this then saves the dataframe as a CSV spreadsheet for us. We want to save the data since we really just need to access and store the data once","repo_name":"TakahiroSuzukiqq/python-machineleaning-wk1","sub_path":"structuring_data.py","file_name":"structuring_data.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14395725370","text":"import math\nfrom collections import defaultdict, Counter\n\n\nclass DSU:\n def __init__(self, N):\n self.p = list(range(N))\n\n def find(self, x):\n if self.p[x] != x:\n self.p[x] = self.find(self.p[x])\n return self.p[x]\n\n def union(self, x, y):\n xr, yr = self.find(x), self.find(y)\n self.p[xr] = yr\n\n\nclass Solution:\n def primes_set(self, n):\n for i in range(2, int(math.sqrt(n))+1):\n if n % i == 0:\n return self.primes_set(n//i) | set([i])\n return set([n])\n\n def largest_component_size(self, A):\n \"\"\"\n Time O(n * log(2m) * log(m)) where n is the number of elements\n and m is the max value in list\n Space: O(n + m)\n \"\"\"\n n = len(A)\n UF = DSU(n)\n primes = defaultdict(list)\n for i, num in enumerate(A):\n pr_set = self.primes_set(num)\n for q in pr_set:\n primes[q].append(i)\n for _, indexes in primes.items():\n for i in range(len(indexes)-1):\n UF.union(indexes[i], indexes[i+1])\n return max(Counter([UF.find(i) for i in range(n)]).values())\n","repo_name":"tuvo1106/1337code","sub_path":"0952_largest_component/largest.py","file_name":"largest.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"23830703999","text":"# 01_fruits.py\n# 利用CNN实现图像分类\n# 数据集:爬虫从百度图片搜索结果爬取\n# 内容:包含1036张水果图片\n# 共5个类别(苹果288张、香蕉275张、葡萄216张、\n# 橙子276张、梨251张)\n\n################## 数据预处理 ##################\nimport os\n\nname_dict = {\"apple\": 0, \"banana\": 1, \"grape\": 2,\n \"orange\": 3, \"pear\": 4}\ndata_root_path = \"data/fruits/\" # 数据集所在目录\n# 测试集、训练集文件路径\ntest_file_path = data_root_path + \"test.txt\"\ntrain_file_path = data_root_path + \"train.txt\"\nname_data_list = {} # 记录每个类别有那些图片\n\n\ndef save_name_data_list(path, # 图像路径\n name): # 类别名称\n if name not in name_data_list: # 字典中没有该类别\n img_list = [] # 创建空列表\n img_list.append(path) # 将图片存入列表\n name_data_list[name] = img_list # 存入字典\n else: # 字典中已经存在该类别\n name_data_list[name].append(path)\n\n\n# 遍历数据集中的每个子目录,取出图像样本路径\n# 并写入name_data_list字典\ndirs = os.listdir(data_root_path)\nfor d in dirs:\n full_path = data_root_path + d # 子目录完整路径\n # print(full_path)\n if os.path.isdir(full_path): # 是一个目录\n imgs = os.listdir(full_path) # 列出所有文件\n for img in imgs:\n img_full_path = full_path + \"/\" + img\n save_name_data_list(img_full_path,\n d) # 目录名称即类别名称\n else: # 文件\n pass\n\n# 遍历name_data_list字典,划分测试集、训练集\nwith open(test_file_path, \"w\") as f:\n pass\n\nwith open(train_file_path, \"w\") as f:\n pass\n\n# 遍历字典\nfor name, img_list in name_data_list.items():\n i = 0\n num = len(img_list) # 获取每个列别图片数量\n print(\"%s: %d张\" % (name, num))\n\n for img in img_list:\n line = \"%s\\t%d\\n\" % (img, name_dict[name])\n if i % 10 == 0: # 划分到测试集合\n with open(test_file_path, \"a\") as f:\n f.write(line)\n else: # 划分到训练集\n with open(train_file_path, \"a\") as f:\n f.write(line)\n i += 1\nprint(\"数据预处理完成.\")\n\n############### 模型搭建/训练 ##################\nimport paddle\nimport paddle.fluid as fluid\nimport numpy\nimport sys\nimport os\nfrom multiprocessing import cpu_count\nimport time\nimport matplotlib.pyplot as plt\n\n\ndef train_mapper(sample):\n \"\"\"\n 根据传入样本路径、类别,读取图像数据\n :param sample: 一行文本样本, 元组(文件路径,类别)\n :return: 返回图像数据、类别\n \"\"\"\n img, label = sample # img为路径, lable为类别\n if not os.path.exists(img):\n print(img, \"文件不存在\")\n\n # 读取文件内容\n img = paddle.dataset.image.load_image(img)\n # 将图像设置为固定大小\n img = paddle.dataset.image.simple_transform(\n im=img, # 原始图像\n resize_size=100, # 图像缩放大小\n crop_size=100, # 裁剪图像大小\n is_color=True, # 彩色图像\n is_train=True) # 训练模型(做随机裁剪)\n # 归一化处理,将每个像素值转换为0~1之间\n img = img.astype(\"float32\") / 255.0\n return img, label\n\n\n# 从训练集中读取数据\ndef train_r(train_list, buffred_size=1024):\n def reader():\n with open(train_list, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n # 去除空格和换行符\n line = line.strip().replace(\"\\n\", \"\")\n img_path, lab = line.split(\"\\t\")\n\n yield img_path, int(lab)\n\n return paddle.reader.xmap_readers(\n train_mapper, # 接收reader读取的数据二次处理\n reader, # 原始读取器\n cpu_count(), # 线程数量\n buffred_size) # 缓冲区大小\n\n# 定义reader\nBATCH_SIZE = 32 # 批次大小\n\ntrainer_reader = train_r(train_list=train_file_path)\nrandom_train_reader = paddle.reader.shuffle(\n reader=trainer_reader,\n buf_size=1300) # 随机读取器\nbatch_train_reader = paddle.batch(\n random_train_reader,\n batch_size=BATCH_SIZE)\n\n# 占位符\nimage = fluid.layers.data(name=\"image\",\n shape=[3, 100, 100],\n dtype=\"float32\")\nlabel = fluid.layers.data(name=\"label\",\n shape=[1],\n dtype=\"int64\")\n\ndef create_CNN(image, type_size):\n \"\"\"\n 搭建卷积神经网络\n :param image: 图像数据(经过归一化处理)\n :param type_size:类别数量\n :return: 一组分类概率\n \"\"\"\n # 第一组 conv/pool/dropout\n conv_pool_1 = fluid.nets.simple_img_conv_pool(\n input=image, # 输入图像数据\n filter_size=3, # 
卷积核大小\n num_filters=32, # 卷积核数量\n pool_size=2, # 2*2区域做池化\n pool_stride=2, # 池化步长\n act=\"relu\") # 激活函数\n drop = fluid.layers.dropout(x=conv_pool_1,\n dropout_prob=0.5)\n\n # 第二组 conv/pool/dropout\n conv_pool_2 = fluid.nets.simple_img_conv_pool(\n input=drop, # 前一个dropout输出作为输入\n filter_size=3, # 卷积核大小\n num_filters=64, # 卷积核数量\n pool_size=2, # 2*2区域做池化\n pool_stride=2, # 池化步长\n act=\"relu\") # 激活函数\n drop = fluid.layers.dropout(x=conv_pool_2,\n dropout_prob=0.5)\n\n # 第三组 conv/pool/dropout\n conv_pool_3 = fluid.nets.simple_img_conv_pool(\n input=drop, # 前一个dropout输出作为输入\n filter_size=3, # 卷积核大小\n num_filters=64, # 卷积核数量\n pool_size=2, # 2*2区域做池化\n pool_stride=2, # 池化步长\n act=\"relu\") # 激活函数\n drop = fluid.layers.dropout(x=conv_pool_3,\n dropout_prob=0.5)\n\n # fc\n fc = fluid.layers.fc(input=drop,\n size=512, # 神经元数量\n act=\"relu\")\n # dropout\n drop = fluid.layers.dropout(x=fc,\n dropout_prob=0.5)\n # 输出层(使用softmax作为激活函数的fc)\n predict = fluid.layers.fc(input=drop,\n size=type_size,\n act=\"softmax\")\n return predict\n\n# 创建VGG模型\ndef vgg_bn_drop(image, type_size):\n def conv_block(ipt, num_filter, groups, dropouts):\n return fluid.nets.img_conv_group(\n input=ipt, # 输入图像, 格式[N,C,H,W]\n pool_stride=2,#池化步长\n pool_size=2, #池化区域大小\n conv_num_filter=[num_filter] * groups,\n conv_filter_size=3, #卷积核大小\n conv_act=\"relu\",#激活函数\n conv_with_batchnorm=True,#是否采用BN\n pool_type=\"max\")#池化类型\n\n conv1 = conv_block(image, 64, 2, [0.0, 0.0])\n conv2 = conv_block(conv1, 128, 2, [0.0, 0.0])\n conv3 = conv_block(conv2, 256, 3, [0.0, 0.0, 0.0])\n conv4 = conv_block(conv3, 512, 3, [0.0, 0.0, 0.0])\n conv5 = conv_block(conv4, 512, 3, [0.0, 0.0, 0.0])\n\n drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)\n fc1 = fluid.layers.fc(input=drop,\n size=512,\n act=None)\n bn = fluid.layers.batch_norm(input=fc1,\n act=\"relu\")#批量归一化\n drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.0)\n fc2 = fluid.layers.fc(input=drop2,\n size=512,\n act=None)\n predict = fluid.layers.fc(input=fc2,\n size=type_size,\n act=\"softmax\")\n return predict\n\n\n# 调用函数,创建模型\n# predict = create_CNN(image=image, type_size=5)\npredict = vgg_bn_drop(image=image, type_size=5)\n# 损失函数\ncost = fluid.layers.cross_entropy(\n input=predict,\n label=label)\navg_cost = fluid.layers.mean(cost)\n# 准确率\naccuracy = fluid.layers.accuracy(input=predict,\n label=label)\n# 优化器\noptimizer = fluid.optimizer.Adam(\n learning_rate=0.001)\noptimizer.minimize(avg_cost) # 优化目标函数\n\n# 执行器\nplace = fluid.CUDAPlace(0) # GPU训练\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n# feeder\nfeeder = fluid.DataFeeder(\n feed_list=[image, label],\n place=place)\n\ncosts = [] # 记录损失函数值\naccs = [] # 记录准确度\ntimes = 0\nbatchs = [] # 迭代次数\n\n# 开始训练\nfor pass_id in range(5):\n train_cost = 0 # 临时变量,记录损失值\n train_acc = 0\n times += 1\n for batch_id, data in enumerate(batch_train_reader()):\n train_cost, train_acc = exe.run(\n program=fluid.default_main_program(),\n feed=feeder.feed(data), # 喂入参数\n fetch_list=[avg_cost, accuracy])\n # 打印损失值、准确率\n if batch_id % 20 == 0:\n print(\"pass_id:%d, batch_id:%d, cost:%f, acc:%f\"\n % (pass_id, batch_id,\n train_cost[0], train_acc[0]))\n accs.append(train_acc[0])\n costs.append(train_cost[0])\n batchs.append(times)\n# 保存模型\nmodel_save_dir = \"./model/fruits/\"\nif not os.path.exists(model_save_dir):\n os.makedirs(model_save_dir)\nfluid.io.save_inference_model(\n dirname=model_save_dir, #保存路径\n feeded_var_names=[\"image\"],#预测时传入参数\n target_vars=[predict],#预测结果\n executor=exe)#执行器\n\nprint(\"模型保存成功:\", 
model_save_dir)\n\n# 训练过程可视化\nplt.title(\"training\", fontsize=24)\nplt.xlabel(\"iter\", fontsize=20)\nplt.ylabel(\"cost/acc\", fontsize=20)\nplt.plot(batchs, costs, color='red', label=\"Training Cost\")\nplt.plot(batchs, accs, color='green', label=\"Training Acc\")\nplt.legend()\nplt.grid()\nplt.savefig(\"train.png\")\nplt.show()\n\n\n#################### 预测 #####################\nfrom PIL import Image\n\n# 加载图像数据\ndef load_img(path): # path为图像路径\n img = paddle.dataset.image.load_and_transform(\n path, 100, 100, False).astype(\"float32\")\n img = img / 255.0 # 归一化\n\n return img\n\n# 定义执行器\nplace = fluid.CPUPlace()\ninfer_exe = fluid.Executor(place) #用于预测的执行器\n\ninfer_imgs = [] # 存放待预测的图像数据\ntest_img = \"apple_1.png\" # 待测试的图像\ninfer_imgs.append(load_img(test_img))#将图像数据存入待预测列表\n\ninfer_imgs = numpy.array(infer_imgs)#将列表转换为数组\n\n# 加载模型\ninfer_program, feed_target_names, fetch_targets = \\\n fluid.io.load_inference_model(model_save_dir,\n infer_exe)\n# 执行预测\nresults = infer_exe.run(infer_program,\n feed={feed_target_names[0]:infer_imgs},\n fetch_list=fetch_targets)\n# print(results)\n\nresult = numpy.argmax(results[0][0])\nfor k, v in name_dict.items():\n if result == v:\n print(\"预测结果:\", k)\n\n# 显示待预测的图像\nimg = Image.open(test_img)\nplt.imshow(img)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"wangjiancheng-123/datascience","sub_path":"深度学习/01_fruits.py","file_name":"01_fruits.py","file_ext":"py","file_size_in_byte":11206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
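The preprocessing step in the record above sends every 10th image of a class (i % 10 == 0) to test.txt and the rest to train.txt, i.e. roughly a 1:9 test/train split per class. A tiny standalone illustration of that rule, with invented file names:

samples = ["data/fruits/apple/img_%d.jpg" % k for k in range(25)]  # made-up paths

test = [p for i, p in enumerate(samples) if i % 10 == 0]
train = [p for i, p in enumerate(samples) if i % 10 != 0]

print(len(test), len(train))   # 3 22, about a 1:9 split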
+{"seq_id":"74214809216","text":"class graph(object):\n def __init__(self, size):\n self.adjacency_list = {}\n self.maxSize = 0\n for x in range(1, size + 1):\n self.adjacency_list[x] = []\n self.size = size\n\n def add_node(self, start, end, weight):\n\n self.maxSize += weight\n\n self.adjacency_list[start].append([end, weight])\n self.adjacency_list[end].append([start, weight])\n\n def print_graph(self):\n for x in range(1, self.size + 1):\n print(x, \" : \", self.adjacency_list[x])\n\n\ndef minIndex(g, visited, distance, minDis):\n\n minIndex = -1\n\n for count in range(1, g.size + 1):\n if distance[count] <= minDis and (not visited[count]):\n minIndex = count\n minDis = distance[count]\n\n return minIndex\n\n\ndef dijksrta_short(g, start, end):\n visited = [False] * (g.size + 1)\n distance = [g.maxSize] * (g.size + 1)\n\n distance[start] = 0\n\n for _ in range(g.size):\n\n minIndex1 = minIndex(g, visited, distance, g.maxSize)\n\n visited[minIndex1] = True\n\n for x in g.adjacency_list[minIndex1]:\n\n if not visited[x[0]]:\n if distance[x[0]] > distance[minIndex1] + x[1]:\n distance[x[0]] = distance[minIndex1] + x[1]\n\n return distance[end]\n\n\ng = graph(5)\ng.add_node(1, 2, 10)\ng.add_node(2, 3, 15)\ng.add_node(1, 3, 70)\ng.add_node(2, 4, 15)\ng.add_node(4, 5, 20)\ng.add_node(1, 5, 100)\n\ng.print_graph()\n\nprint(dijksrta_short(g, 1, 5))\n","repo_name":"ArtistBanda/Algorithms-and-Basic-Programmes","sub_path":"Python/Algorithms/dijkstra_algorithm.py","file_name":"dijkstra_algorithm.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"5630863559","text":"from esys.escript import *\nimport numpy as np\nfrom math import floor\nfrom scipy.interpolate import RegularGridInterpolator\nfrom .datamapping import mapToDomain\nfrom esys.escript.linearPDEs import LinearSinglePDE, SolverOptions\nfrom esys.escript.pdetools import Locator\n\ndef setupERTPDE(domain, poisson=True):\n \"\"\"\n used t setup all ERT PDEs\n \"\"\"\n pde=LinearSinglePDE(domain, isComplex=False)\n pde.setSymmetryOn()\n optionsG=pde.getSolverOptions()\n #optionsG.setSolverMethod(SolverOptions.DIRECT)\n\n optionsG.setSolverMethod(SolverOptions.PCG)\n optionsG.setTolerance(1e-8)\n if True and hasFeature('trilinos'):\n #print(\"trilinos solver used.\")\n optionsG.setPackage(SolverOptions.TRILINOS)\n optionsG.setPreconditioner(SolverOptions.AMG)\n if poisson:\n optionsG.setTrilinosParameter(\"problem:type\", \"Poisson-3D\")\n optionsG.setTrilinosParameter(\"verbosity\", \"none\")\n optionsG.setTrilinosParameter(\"number of equations\", 1)\n #optionsG.setTrilinosParameter(\"max levels\", 3) # 10 is default 3 seems to be a good number\n #optionsG.setTrilinosParameter(\"cycle type\", \"V\")\n optionsG.setTrilinosParameter(\"problem: symmetric\", True)\n #optionsG.setTrilinosParameter(\"smoother: pre or post\", \"both\")\n #optionsG.setTrilinosParameter(\"Convergence Tolerance\", 1e-12)\n return pde\n\n\nclass IPModel(object):\n \"\"\"\n \"\"\"\n def __init__(self, domain, survey, locations=[], field_resolution=1., field_origin=(0.,0.,0), sigma_background=0.1, gamma_background=0.0001, padding_tags=[], stationsFMT=None):\n self.domain=domain\n self.survey=survey\n self.locations=locations\n self.stationsFMT=stationsFMT\n self.pde=setupERTPDE(domain)\n x=self.pde.getDomain().getX()[0]\n y=self.pde.getDomain().getX()[1]\n z=self.pde.getDomain().getX()[2]\n self.pde.setValue(q=whereZero(x-inf(x))+whereZero(x-sup(x))+ whereZero(y-inf(y))+whereZero(y-sup(y))+whereZero(z-inf(z)))\n\n self.locations=locations\n self.observation_locator=Locator(Solution(domain), [ self.survey.getStationLocation(s) for s in self.survey.getObservationElectrodes()])\n self.source_locator=Locator(ContinuousFunction(domain), [ self.survey.getStationLocation(ip) for ip in self.survey.getInjectionStations() ])\n\n self.field_resolution=field_resolution\n self.field_origin=field_origin\n self.sigma_background=sigma_background\n self.gamma_background=gamma_background\n self.padding_tags=padding_tags\n\n self.injections= [ i for i in self.survey.injectionIterator()]\n self.injectionMap=[ k for k in range(len(self.injections)) ]\n \n self.setUpDataMaps()\n self.setPrimaryPotential()\n \n\n \n def getAllInjections(self):\n return self.injectionMap\n \n def getInjection(self, k):\n return self.injections[self.injectionMap[k]]\n \n def getNumberOfInjections(self):\n return len(self.injections)\n \n def setUpDataMaps(self):\n \"\"\"\n This sets up the mapping of the DC self.dataDCMaps[self.numSrc] and IP self.dataIPMaps[self.numSrc] predictions to an array d[self.numDataMax, self.numSrc]\n \"\"\"\n self.numSrc=self.getNumberOfInjections()\n self.dataDCMaps={}\n self.dataIPMaps={}\n self.numData={}\n for k, i in enumerate(self.getAllInjections()):\n self.dataDCMaps[i] = { s: j for j,s in enumerate(self.survey.getObservations(self.getInjection(i)))}\n self.dataIPMaps[i] = { s: j+len(self.dataDCMaps[i]) for j,s in enumerate(self.survey.getObservations(self.getInjection(i)))}\n self.numData[i]=len(self.dataDCMaps[i])+len(self.dataIPMaps[i])\n self.numDataMax=max(self.numData.values())\n\n 
self.use=np.zeros((self.numDataMax, self.numSrc), dtype=bool)\n for k, i in enumerate(self.getAllInjections()):\n for j in self.dataDCMaps[i].values():\n self.use[j,k]=True \n for i in self.dataIPMaps[i].values():\n self.use[j,k]=True\n \n def makeDataSet(self, sources):\n \"\"\"\n \n \"\"\"\n responses=np.zeros((self.numDataMax, len(sources)), dtype=float)\n if self.survey.hasDipoleInjections():\n for k, ip in enumerate(sources):\n for s,i in self.dataDCMaps[ip].items():\n responses[i,k]=self.survey.getDataRecord(self.getInjection(ip)+ s, datatype='R')\n for s,i in self.dataIPMaps[ip].items():\n d=self.survey.getDataRecord( self.getInjection(ip) + s, datatype='R')\n e=self.survey.getDataRecord(self.getInjection(ip) + s, datatype='ETA')\n responses[i,k]=e/(1-e)*d\n else:\n for k, ip in enumerate(sources):\n for s,i in self.dataDCMaps[ip].items():\n responses[i,k]=self.survey.getDataRecord( (self.getInjection(ip),) + s, datatype='R')\n for s,i in self.dataIPMaps[ip].items():\n d=self.survey.getDataRecord( (self.getInjection(ip),) + s , datatype='R')\n e=self.survey.getDataRecord( (self.getInjection(ip),) + s, datatype='ETA')\n responses[i,k]=e/(1-e)*d \n return responses\n \n def setPrimaryPotential(self):\n \"\"\"\n this sets the primary potential assuming sigma=1 and I=1\n \"\"\"\n self.primary_potential={}\n self.primary_potential_at_stations = {}\n self.pde.setValue(A=kronecker(3), X=Data()) \n for i, ip in enumerate(self.survey.getListOfInjectionStations()):\n s=Scalar(0.,DiracDeltaFunctions(self.domain))\n if self.stationsFMT is None:\n s.setTaggedValue(ip,1.)\n else: \n s.setTaggedValue(self.stationsFMT%ip,1.)\n self.pde.setValue(y_dirac=s)\n self.primary_potential[ip]=self.pde.getSolution()\n self.primary_potential_at_stations[ip]=np.array(self.observation_locator(self.primary_potential[ip]))\n print(\"Primary potential for %s: %s\"%(ip,str(self.primary_potential[ip])))\n\n def runSurvey(self, sources, sigma_field, gamma_field):\n # sources point into \n # array to return data: \n responses=np.zeros((self.numDataMax, len(sources)), dtype=float)\n \n # extend the fields to the domain and grep values at source locations: \n sigma, sigma_p=mapToDomain(self.domain, sigma_field, self.field_resolution, origin=self.field_origin, data0=self.sigma_background, tags0=self.padding_tags, locators=self.source_locator )\n gamma, gamma_p=mapToDomain(self.domain, gamma_field, self.field_resolution, origin=self.field_origin, data0=self.gamma_background, tags0=self.padding_tags, locators=self.source_locator )\n \n self.pde.setValue(A=sigma*kronecker(3), y_dirac=Data())\n secondary_potential_at_stations={}\n u_at_stations={}\n # DC .... 
\n for k, j in enumerate(sources):\n if self.survey.hasDipoleInjections():\n ips=self.getInjection(j)\n for ip in ips:\n if not ip in secondary_potential_at_stations:\n idx=self.survey.getInjectionStationIndex(ip)\n sigma0=sigma_p[idx]\n print(\"DC injection %s at %s, sigma_p=%e\"%(ip, idx, sigma0))\n\n self.pde.setValue(X=(1-sigma/sigma0)*grad(self.primary_potential[ip])) \n u_s=self.pde.getSolution()\n secondary_potential_at_stations[ip]=np.array(self.observation_locator(u_s))\n\n u_at_stations[ip]=secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]/sigma0 \n for s,i in self.dataDCMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=u_at_stations[ips[0]][Midx]-u_at_stations[ips[0]][Nidx]- u_at_stations[ips[1]][Midx]+u_at_stations[ips[1]][Nidx] \n else:\n ip=self.getInjection(j)\n idx=self.survey.getInjectionStationIndex(ip)\n sigma0=sigma_p[idx]\n print(\"DC injection %s at %s, sigma_p=%e\"%(ip, idx, sigma0))\n\n self.pde.setValue(X=(1-sigma/sigma0)*grad(self.primary_potential[ip])) \n u_s=self.pde.getSolution()\n secondary_potential_at_stations[ip]=np.array(self.observation_locator(u_s))\n\n u_at_stations=secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]/sigma0 \n for s,i in self.dataDCMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=u_at_stations[Midx]-u_at_stations[Nidx]\n \n #.. IP\n sigma2=sigma/(1+gamma)\n du_at_stations={}\n u_s={}\n self.pde.setValue(A=sigma2*kronecker(3), y_dirac=Data())\n for k, j in enumerate(sources):\n \n if self.survey.hasDipoleInjections():\n ips=self.getInjection(j)\n for ip in ips:\n if not ip in u_s:\n idx=self.survey.getInjectionStationIndex(ip)\n sigma20=sigma_p[idx]/(1+gamma_p[idx])\n sigma0=sigma_p[idx]\n print(\"IP injection %s at %s, sigma2_p, gamma_p = %e, %e\"%(ip, idx, sigma20, gamma_p[idx]))\n self.pde.setValue(X=(1-sigma2/sigma20)*grad(self.primary_potential[ip])) \n \n u_s[ip]=self.pde.getSolution()\n du_at_stations[ip]=np.array(self.observation_locator(u_s[ip]))-secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]*(gamma_p[idx]/sigma0)\n for s,i in self.dataIPMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=du_at_stations[ips[0]][Midx]-du_at_stations[ips[0]][Nidx]-du_at_stations[ips[1]][Midx]+du_at_stations[ips[1]][Nidx]\n else:\n ip=self.getInjection(j)\n idx=self.survey.getInjectionStationIndex(ip)\n sigma20=sigma_p[idx]/(1+gamma_p[idx])\n sigma0=sigma_p[idx]\n print(\"IP injection %s at %s, sigma2_p, gamma_p = %e, %e\"%(ip, idx, sigma20, gamma_p[idx]))\n self.pde.setValue(X=(1-sigma2/sigma20)*grad(self.primary_potential[ip])) \n \n u_s=self.pde.getSolution()\n du_at_stations=np.array(self.observation_locator(u_s))-secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]*(gamma_p[idx]/sigma0)\n for s,i in self.dataIPMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=du_at_stations[Midx]-du_at_stations[Nidx]\n \n self.sigma=sigma\n self.gamma=gamma\n \n return responses # [self.numDataMax, len(sources)]\n 
\n","repo_name":"LutzGross/fingal","sub_path":"bin/fingal/ipmodel.py","file_name":"ipmodel.py","file_ext":"py","file_size_in_byte":11465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"39922243810","text":"import sys\r\ninput=sys.stdin.readline\r\nn=int(input())\r\na=list(map(int, input().split()))\r\n\r\nd=[1]*n\r\nfor i in range(1,n):\r\n s=[]\r\n for j in range(i):\r\n if a[i]\n\nimport numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom scipy.linalg import norm\nfrom itertools import cycle, izip\nfrom sklearn.utils import atleast2d_or_csr, check_random_state\nfrom sklearn.utils import gen_even_slices\nfrom sklearn.utils import shuffle\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\ndef _binary_KL_divergence(p, p_hat):\n \"\"\"\n Computes the a real, KL divergence of two binomial distributions with\n probabilities p and p_hat respectively.\n \"\"\"\n return (p * np.log(p / p_hat)) + ((1 - p) * np.log((1 - p) / (1 - p_hat)))\n\n\ndef _logistic(X):\n \"\"\"\n Implements the logistic function.\n\n Parameters\n ----------\n x: array-like, shape (M, N)\n\n Returns\n -------\n x_new: array-like, shape (M, N)\n \"\"\"\n return 1. / (1. + np.exp(np.clip(-X, -30, 30)))\n\n\ndef _d_logistic(X):\n \"\"\"\n Implements the derivative of the logistic function.\n\n Parameters\n ----------\n x: array-like, shape (M, N)\n\n Returns\n -------\n x_new: array-like, shape (M, N)\n \"\"\"\n return X * (1 - X)\n\n\ndef _tanh(X):\n \"\"\"\n Computes the hyperbolic tan function\n\n Parameters\n ----------\n x: array-like, shape (M, N)\n\n Returns\n -------\n x_new: array-like, shape (M, N)\n \"\"\"\n return np.tanh(X, X)\n\n\ndef _d_tanh(X):\n \"\"\"\n Computes the derivative of the hyperbolic tan function\n\n Parameters\n ----------\n x: array-like, shape (M, N)\n\n Returns\n -------\n x_new: array-like, shape (M, N)\n \"\"\"\n X *= -X\n X += 1\n return X\n\n\nclass Autoencoder(BaseEstimator, TransformerMixin):\n\n \"\"\"\n Sparse Autoencoder (SAE)\n\n A Sparse Autoencoder with one hidden layer.\n Parameters\n ----------\n n_hidden : int\n Number of hidden neurons\n activation: string, optional\n Activation function for the hidden layer; either \"logistic\" for\n 1 / (1 + exp(x)), or \"tanh\" for the hyperbolic tangent.\n algorithm : string, optional\n Optimization function for training the weights; could be \"l-bfgs-b\", \"cg\",\n \"newton-cg\", or \"bfgs\"\n learning_rate : float, optional\n Learning rate to use during learning. It is *highly* recommended\n to tune this hyper-parameter. Possible values are 10**[0., -3.].\n beta : float, optional\n Weight of sparsity penalty term\n sparsity_param : float, optional\n Desired average activation of the hidden units\n batch_size : int, optional\n Number of examples per minibatch.\n max_iter : int, optional\n Number of iterations/sweeps over the training dataset to perform\n during training.\n tol : float, optional\n Tolerance for the optimization. When the loss at iteration i+1 differs\n less than this amount from that at iteration i, convergence is\n considered to be reached.\n verbose: bool, optional\n When True (False by default) the method outputs the progress\n of learning after each iteration.\n random_state : integer or numpy.RandomState, optional\n A random number generator instance to define the state of the\n random permutations generator. If an integer is given, it fixes the\n seed. 
Defaults to the global numpy random number generator.\n\n Attributes\n ----------\n self.coef_hidden_ : array-like, shape (n_hidden, n_features)\n Weight matrix, where n_features in the number of visible\n units and n_hidden is the number of hidden units.\n self.coef_output_ : array-like, shape (n_features, n_hidden)\n Weight matrix, where n_features in the number of visible\n units and n_hidden is the number of hidden units.\n intercept_hidden_ : array-like, shape (n_hidden,), optional\n Biases of the hidden units\n intercept_visible_ : array-like, shape (n_features,), optional\n Biases of the visible units\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.neural_network import SAE\n >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\n >>> model = SAE(n_hidden=10)\n >>> model.fit(X)\n Autoencoder(activation_func='logistic', alpha=0.0001, batch_size=1000, beta=3,\n learning_rate=0.0001, max_iter=20, n_hidden=10,\n algorithm='l-bfgs', random_state=None, sparsity_param=0.01,\n tol=1e-05, verbose=False)\n\n References\n ----------\n\n [1] Ngiam, Jiquan, et al. \"On optimization methods for deep learning.\"\n Proceedings of the 28th International Conference on Machine Learning (ICML-11). 2011.\n http://ai.stanford.edu/~quocle/LeNgiCoaLahProNg11.pdf\n \"\"\"\n activation_functions = {\n 'tanh': _tanh,\n 'logistic': _logistic\n }\n derivative_functions = {\n 'tanh': _d_tanh,\n 'logistic': _d_logistic\n }\n def __init__(\n self, n_hidden=25, activation='logistic', algorithm='l-bfgs',\n decoder = 'non_linear', learning_rate=0.3, alpha=3e-3, beta=3, sparsity_param=0.1,\n batch_size=500, shuffle_data=False, max_iter=200, tol=1e-5, verbose=False, random_state=None):\n self.activation = activation\n self.algorithm = algorithm\n self.decoder = decoder\n self.n_hidden = n_hidden\n self.alpha = alpha\n self.learning_rate = learning_rate\n self.beta = beta\n self.sparsity_param = sparsity_param\n self.batch_size = batch_size\n self.shuffle_data = shuffle_data\n self.max_iter = max_iter\n self.tol = tol\n self.verbose = verbose\n self.random_state = random_state\n\n def _init_fit(self, n_features):\n \"\"\"\n Initialize weight and bias parameters\n\n Parameters\n ----------\n n_features: int\n Number of features (visible nodes).\n\n Returns\n -------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2), 1)\n \"\"\"\n rng = check_random_state(self.random_state)\n self.coef_hidden_ = rng.uniform(-1, 1, (n_features, self.n_hidden))\n self.coef_output_ = rng.uniform(-1, 1, (self.n_hidden, n_features))\n self.intercept_hidden_ = rng.uniform(-1, 1, self.n_hidden)\n self.intercept_output_ = rng.uniform(-1, 1, n_features)\n\n def _init_param(self):\n \"\"\"\n Sets the activation, derivative and the output functions\n \"\"\"\n self.activation_func = self.activation_functions[self.activation]\n self.derivative_func = self.derivative_functions[self.activation]\n \n def _unpack(self, theta, n_features):\n \"\"\"\n Extract the coefficients and intercepts (W1,W2,b1,b2) from theta\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2), 1)\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n n_features: int\n Number of features (visible nodes).\n \"\"\"\n N = self.n_hidden * n_features\n self.coef_hidden_ = np.reshape(theta[:N],\n (n_features, self.n_hidden))\n self.coef_output_ = np.reshape(theta[N:2 * N],\n (self.n_hidden, n_features))\n self.intercept_hidden_ = theta[2 * N:2 * N + self.n_hidden]\n 
self.intercept_output_ = theta[2 * N + self.n_hidden:]\n\n def _pack(self, W1, W2, b1, b2):\n \"\"\"\n Pack the coefficients and intercepts (W1,W2,b1,b2) from theta\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2), 1)\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n n_features: int\n Number of features\n n_classes: int\n Number of target classes\n \"\"\"\n return np.hstack((W1.ravel(), W2.ravel(),\n b1.ravel(), b2.ravel()))\n\n def transform(self, X):\n \"\"\"\n Computes the extracted features.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n\n Returns\n -------\n h: array-like, shape (n_samples, n_components)\n \"\"\"\n return self.activation_func(safe_sparse_dot(X, self.coef_hidden_) + self.intercept_hidden_)\n\n def fit_transform(self, X, y=None):\n \"\"\"\n Fit the model to the data X and transform it.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n \"\"\"\n self.fit(X)\n return self.transform(X)\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model to the data X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self\n \"\"\"\n X = atleast2d_or_csr(X, dtype=np.float64, order=\"C\")\n n_samples, n_features = X.shape\n self._init_fit(n_features)\n self._init_param()\n if self.shuffle_data:\n X, y = shuffle(X, y, random_state=self.random_state)\n # generate batch slices\n self.batch_size = np.clip(self.batch_size, 0, n_samples)\n n_batches = n_samples / self.batch_size\n batch_slices = list(\n gen_even_slices(\n n_batches *\n self.batch_size,\n n_batches))\n #l-bfgs does not work well with minibatches\n if self.algorithm == 'l-bfgs':\n self.batch_size = n_samples\n # preallocate memory\n a_hidden = np.empty((self.batch_size, self.n_hidden))\n a_output = np.empty((self.batch_size, n_features))\n delta_o = np.empty((self.batch_size, n_features))\n if self.algorithm == 'sgd':\n for i in xrange(self.max_iter):\n for batch_slice in batch_slices:\n cost = self.backprop_sgd(\n X[batch_slice],\n n_features, self.batch_size,\n delta_o, a_hidden, a_output)\n if self.verbose:\n print(\"Iteration %d, cost = %.2f\"\n % (i, cost))\n elif self.algorithm == 'l-bfgs':\n self._backprop_lbfgs(\n X, n_features,\n a_hidden, a_output, \n delta_o, n_samples)\n return self\n\n def backprop(self, X, n_features, n_samples,\n delta_o, a_hidden, a_output):\n \"\"\"\n Computes the sparse autoencoder cost function ``Jsparse(W,b)``\n and the corresponding derivatives of Jsparse with respect to the\n different parameters given in the initialization [1]\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n n_features: int\n Number of features (visible nodes).\n n_samples: int\n Number of samples\n\n Returns\n -------\n cost: float\n grad: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n\n References\n -------\n [1] http://ufldl.stanford.edu/wiki/index.php/Autoencoders_and_Sparsity\n \"\"\"\n # Forward propagate\n a_hidden[:] = 
self.activation_func(safe_sparse_dot(X, self.coef_hidden_)\n + self.intercept_hidden_)\n if self.decoder=='non_linear':\n a_output[:] = self.activation_func(safe_sparse_dot(a_hidden, self.coef_output_)\n + self.intercept_output_)\n elif self.decoder=='linear':\n a_output[:] = safe_sparse_dot(a_hidden, self.coef_output_) + self.intercept_output_\n # Get average activation of hidden neurons\n sparsity_param_hat = np.sum(a_hidden, 0) / n_samples\n sparsity_delta = self.beta * \\\n ((1 - self.sparsity_param) / (1 - sparsity_param_hat)\n - self.sparsity_param / sparsity_param_hat)\n # Backward propagate\n diff = X - a_output\n #Linear decoder\n if self.decoder=='non_linear':\n delta_o[:] = -diff * self.derivative_func(a_output)\n elif self.decoder=='linear':\n delta_o[:] = -diff\n delta_h = (\n (safe_sparse_dot(delta_o, self.coef_output_.T) +\n sparsity_delta)) *\\\n self.derivative_func(a_hidden)\n # Get cost \n cost = np.sum(diff ** 2) / (2 * n_samples)\n # Add regularization term to cost \n cost += (0.5 * self.alpha) * (\n np.sum(self.coef_hidden_ ** 2) + np.sum(\n self.coef_output_ ** 2))\n # Add sparsity term to the cost\n cost += self.beta * np.sum(\n _binary_KL_divergence(\n self.sparsity_param,\n sparsity_param_hat))\n #Get gradients\n W1grad = safe_sparse_dot(X.T, delta_h) / n_samples \n W2grad = safe_sparse_dot(a_hidden.T, delta_o) / n_samples\n b1grad = np.sum(delta_h, 0) / n_samples\n b2grad = np.sum(delta_o, 0) / n_samples\n # Add regularization term to gradients \n W1grad += self.alpha * self.coef_hidden_\n W2grad += self.alpha * self.coef_output_\n return cost, W1grad, W2grad, b1grad, b2grad\n\n def reconstruct(self, a_hidden):\n if self.decoder=='non_linear':\n a_output = self.activation_func(safe_sparse_dot(a_hidden, self.coef_output_)\n + self.intercept_output_)\n elif self.decoder=='linear':\n a_output = safe_sparse_dot(a_hidden, self.coef_output_) + self.intercept_output_\n return a_output[:]\n \n \n def backprop_sgd(\n self, X, n_features, n_samples, delta_o, a_hidden, a_output):\n \"\"\"\n Updates the weights using the computed gradients\n\n Parameters\n ----------\n X: {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Y : numpy array of shape [n_samples]\n Subset of the target values.\n\n n_features: int\n Number of features\n\n n_classes: int\n Number of target classes\n\n n_samples: int\n Number of samples\n\n \"\"\"\n cost, W1grad, W2grad, b1grad, b2grad = self.backprop(\n X, n_features, n_samples, delta_o, a_hidden, a_output)\n # Update weights\n self.coef_hidden_ -= (self.learning_rate * W1grad)\n self.coef_output_ -= (self.learning_rate * W2grad)\n self.intercept_hidden_ -= (self.learning_rate * b1grad)\n self.intercept_output_ -= (self.learning_rate * b2grad)\n # TODO: dynamically update learning rate\n return cost\n \n def _backprop_lbfgs(\n self, X, n_features, a_hidden, a_output, delta_o, n_samples):\n \"\"\"\n Applies the one of the optimization methods (l-bfgs-b, bfgs, newton-cg, cg)\n to train the weights\n\n Parameters\n ----------\n X: {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Y : numpy array of shape [n_samples]\n Subset of the target values.\n\n n_features: int\n Number of features\n\n n_classes: int\n Number of target classes\n\n n_samples: int\n Number of samples\n\n \"\"\"\n initial_theta = self._pack(\n 
self.coef_hidden_,\n self.coef_output_,\n self.intercept_hidden_,\n self.intercept_output_)\n optTheta, _, _ = fmin_l_bfgs_b(\n func=self._cost_grad,\n x0=initial_theta,\n maxfun=self.max_iter,\n disp=self.verbose,\n args=(\n X,\n n_features,\n n_samples,\n delta_o,\n a_hidden,\n a_output))\n self._unpack(optTheta, n_features)\n\n def _cost_grad(self, theta, X, n_features,\n n_samples, delta_o, a_hidden, a_output):\n \"\"\"\n Computes the MLP cost function ``J(W,b)``\n and the corresponding derivatives of J(W,b) with respect to the\n different parameters given in the initialization\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n X: {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n n_features: int\n Number of features\n n_classes: int\n Number of target classes\n n_samples: int\n Number of samples\n\n Returns\n -------\n cost: float\n grad: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n\n \"\"\"\n self._unpack(theta, n_features)\n cost, W1grad, W2grad, b1grad, b2grad = self.backprop(\n X, n_features, n_samples, delta_o, a_hidden, a_output)\n return cost, self._pack(W1grad, W2grad, b1grad, b2grad)\n","repo_name":"IssamLaradji/randomized_neural_networks","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":17854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
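The sparsity terms in the autoencoder record above are beta * sum KL(rho || rho_hat), with rho the target sparsity_param and rho_hat the mean hidden activation, plus the extra hidden-delta term beta * ((1-rho)/(1-rho_hat) - rho/rho_hat). A tiny numpy sketch evaluating both on an invented activation matrix, mirroring _binary_KL_divergence and the sparsity_delta used in backprop:

import numpy as np

def binary_kl(p, p_hat):
    # Element-wise KL divergence between Bernoulli(p) and Bernoulli(p_hat).
    return p * np.log(p / p_hat) + (1 - p) * np.log((1 - p) / (1 - p_hat))

rng = np.random.default_rng(0)
a_hidden = rng.uniform(0.05, 0.95, size=(4, 3))  # made-up activations: 4 samples, 3 hidden units

rho = 0.1                          # sparsity_param: desired average activation
rho_hat = a_hidden.mean(axis=0)    # average activation per hidden unit
beta = 3.0

penalty = beta * np.sum(binary_kl(rho, rho_hat))
sparsity_delta = beta * ((1 - rho) / (1 - rho_hat) - rho / rho_hat)  # added to the hidden deltas

print(penalty, sparsity_delta)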
+{"seq_id":"73475333694","text":"import sys\n\nclass FileIO:\n def __init__(self, input_file, output_file = None):\n self.lines = [line for line in open(input_file, 'r')][::-1]\n self.output_file = output_file\n self.clear_file()\n\n def clear_file(self):\n if self.output_file:\n with open(self.output_file, 'w') as f:\n f.close()\n\n def get_input(self, from_file=1):\n \"\"\"Get input from file or from stdin.\"\"\"\n return self.lines.pop() if from_file else sys.stdin.readline()\n\n def write_output(self, *content, to_file=1, sep=\" \"):\n \"\"\"Write output to file or to stdout.\"\"\"\n content = sep.join(str(k) for k in content) + \"\\n\"\n if self.output_file and to_file:\n with open(self.output_file, 'a') as f:\n f.write(content)\n f.close()\n else:\n sys.stdout.write(content)","repo_name":"iammanish17/FileIO","sub_path":"FileIO.py","file_name":"FileIO.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"36728831831","text":"import argparse\nimport sys\nimport socket\nimport threading\nimport types\nimport csv\nimport os\nimport broadcast_reciever\nimport broadcast_sender\nimport sensor1\nimport time\n\n\ncache = {\n '/NewYork/Temperature':'80'\n}\ninformationBase= {\n '/NewYork/Sensor':'0'\n}\npendingInterestTable = {}\n\n\n#Tried implementing this to create a global object that can be accessed by the listener.\nclass Unit:\n\n def __init__(self,city, port):\n self.city=city\n self.port=port\n \n def __str__(self):\n return self.city + self.port\n\nthisUnit=Unit(city=\"\",port=0)\nsensorPort=33333\n\nclass Package:\n\n def __init__(self, type, name,sender):\n self.type=type\n self.name=name\n self.sender=sender\n \n def __str__(self):\n return self.type\n \nclass Interest(Package):\n pass\n\n \n\nclass Data(Package):\n\n def __init__(self, content):\n self.content = content\n\n\ndef inputHandler(package,city):\n if str(package.type) == \"interest\":\n forwardingInformationBase(package=package)\n checkSensors(interest=package,city=city)\n checkContentStore(package=package)\n elif str(package.type) ==\"data\":\n contentStore(package)\n\n \ndef checkSensors(interest,city):\n print(\"Sending to sensors\",interest.name)\n print(interest.name)\n splitWords = interest.name.split(\"/\")\n print(splitWords[1])\n if city==splitWords[1]:\n sensor=splitWords[2]\n sensorvalue = sensor1.Sensor.get_sensor(sensor)\n print(sensorvalue)\n dataPackage = Data(content=sensorvalue)\n dataPackage.name = interest.name\n dataPackage.sender = interest.sender\n print(dataPackage.content)\n contentStore(dataPackage=dataPackage)\n\n\ndef forwardData(dataPackage, destination):\n print(destination)\n print(destination, \"for data packet\")\n forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n forward.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n networks = csv.reader(open(\"networks.csv\",\"r\"),delimiter=\",\")\n for row in networks:\n if row[2]==destination:\n target=row[0]\n port=int(row[1])\n print(target,port)\n print(\"Forwarding data to requested destination\")\n forward.connect((target,port))\n message = f'{dataPackage.name},{dataPackage.type},{dataPackage.sender},{dataPackage.content}'.encode('utf-8')\n forward.send(message)\n forward.close()\n\n\ndef forwardInterest(package):\n words= package.name.split(\"/\")\n networkName= words[1]\n print(networkName)\n forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n forward.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n #forward.setblocking(False)\n networks = csv.reader(open(\"networks.csv\",\"r\"),delimiter=\",\")\n for row in networks:\n if row[2]==networkName:\n target=row[0]\n port=int(row[1])\n print(\"Forwarding interest to requested destination\")\n forward.connect((target,port))\n message = f'{package.name},{package.type},{package.sender}'.encode('utf-8')\n print(message)\n forward.send(message)\n forward.close()\n\n\ndef contentStore(dataPackage):\n print(\"Storing in content store\")\n name = dataPackage.name\n data = dataPackage.content \n newContent = {name:data}\n print(newContent)\n cache.update(newContent)\n print(\"Content saved\")\n\ndef checkContentStore(package):\n print(\"Checking content store\")\n for name, data in list(cache.items()):\n if package.name == name:\n print(\"Found in contentstore\")\n dataPackage= Data(content=data)\n dataPackage.name=name\n dataPackage.type=\"data\"\n dataPackage.sender=package.sender\n print(package.sender)\n forwardData(dataPackage, package.sender)\n \n\n\ndef 
checkInterestTable(prefix, sender, content):\n for query, author in pendingInterestTable:\n if prefix == query and author==sender:\n forwardData(content, author)\n\ndef forwardingInformationBase(package):\n print(\"Checking informationbase\")\n exists=False\n for interest, value in list(informationBase.items()):\n if package.name == interest:\n exists=True\n if value=='0':\n forwardInterest(package)\n informationBase[interest]='1'\n elif value=='1':\n print(interest, \"Already forwarded\")\n \n if exists== False:\n name = package.name\n newInterest={name:'1'}\n print(newInterest)\n informationBase.update(newInterest)\n forwardInterest(package)\n\ndef createInterest(input,city):\n #host = socket.gethostbyname(socket.gethostname())\n interest = Interest(type=\"interest\",name=input,sender=city)\n print(\"Created interest\")\n inputHandler(interest,city)\n\n\ndef ClientConsole(city):\n\n #listener()\n print('==================================================')\n print('Your device is now running')\n print('==================================================')\n print('Welcome to the NDN network(input help for help)')\n while True:\n operation = input(\">>>\")\n if operation=='/Local/Sensors':\n print(\"Sensors\")\n elif operation=='/Local/Sensors/SensorWeather':\n createInterest(operation)\n elif operation=='/Local/Sensors/WindSpeed':\n print(\"Windspeed\")\n elif operation == 'Broadcast/Recieve':\n broadcast_reciever.broadcastReceiver()\n elif operation == 'Broadcast/Send':\n broadcast_sender.broadcast(thisUnit.port, city)\n elif operation=='quit':\n break\n elif operation=='listen':\n print(\"Listening\")\n elif operation=='help':\n print('/Sensors: Get list of sensors.')\n print('getf: get file from the server.')\n print('quit: close the connection and quit.')\n elif operation =='data':\n package = Package(type=\"interest\",name=\"/NewYork/Temp\", sender=\"Bob\")\n checkSensors(package=package)\n else:\n createInterest(operation,city) \n print('The client has been logged out.')\n\n \ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--city\", required=True)\n parser.add_argument(\"-p\", \"--port\", required=True)\n args = parser.parse_args()\n city = args.city\n thisUnit.port = int(args.port)\n os.system('python3 listen.py %d &'%thisUnit.port)\n os.system('python3 sensor1.py &')\n console = threading.Thread(target=ClientConsole(city))\n console.start()\n\nif __name__ == '__main__':\n main()\n","repo_name":"PerAndresen/Project3","sub_path":"forward_engine.py","file_name":"forward_engine.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
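forwardInterest and forwardData in the record above resolve the next hop by scanning networks.csv, with column 0 as host, column 1 as port and column 2 as the network name taken from the interest prefix. A minimal sketch of that lookup with a hypothetical routing table and no sockets:

import csv, io

# Hypothetical routing table in the same three-column layout the script expects.
networks_csv = "10.0.0.12,9000,NewYork\n10.0.0.17,9001,Boston\n"

def next_hop(interest_name, table_text):
    network = interest_name.split("/")[1]            # e.g. "/NewYork/Temperature" -> "NewYork"
    for host, port, name in csv.reader(io.StringIO(table_text)):
        if name == network:
            return host, int(port)
    return None

print(next_hop("/NewYork/Temperature", networks_csv))   # ('10.0.0.12', 9000)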
+{"seq_id":"38034764800","text":"# from django.conf import settings\r\nfrom contextlib import nullcontext\r\nfrom django.contrib import messages\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.mixins import LoginRequiredMixin\r\nfrom django.shortcuts import render, get_object_or_404\r\nfrom django.views.generic import ListView, DetailView, View\r\nfrom django.shortcuts import redirect\r\nfrom django.utils import timezone\r\nfrom hamcrest import none\r\nfrom .forms import CheckoutForm, RefundForm\r\nfrom .models import Item, OrderItem, Order, BillingAddress, Refund, Category, sizeItems\r\n\r\n\r\n\r\n# Create your views here.\r\nimport random\r\nimport string\r\n# import stripe\r\n# stripe.api_key = settings.STRIPE_SECRET_KEY\r\n\r\n\r\ndef create_ref_code():\r\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\r\n\r\n\r\n# class PaymentView(View):\r\n# def get(self, *args, **kwargs):\r\n# # order\r\n# order = Order.objects.get(user=self.request.user, ordered=False)\r\n# if order.billing_address:\r\n# context = {\r\n# 'order': order,\r\n# 'DISPLAY_COUPON_FORM': False\r\n# }\r\n# return render(self.request, \"payment.html\", context)\r\n# else:\r\n# messages.warning(\r\n# self.request, \"لم تقم بإضافة عنوان إرسال الفواتير\")\r\n# return redirect(\"core:checkout\")\r\n\r\n# def post(self, *args, **kwargs):\r\n# order = Order.objects.get(user=self.request.user, ordered=False)\r\n# # token = self.request.POST.get('stripeToken')\r\n# amount = int(order.get_total() * 100)\r\n# try:\r\n# # charge = stripe.Charge.create(\r\n# # amount=amount, # cents\r\n# # currency=\"usd\",\r\n# # source=token\r\n# # )\r\n# # create the payment\r\n# payment = Payment()\r\n# # payment.stripe_charge_id = charge['id']\r\n# payment.user = self.request.user\r\n# payment.amount = order.get_total()\r\n# payment.save()\r\n\r\n# # assign the payment to the order\r\n# order.ordered = True\r\n# order.payment = payment\r\n# # TODO : assign ref code\r\n# order.ref_code = create_ref_code()\r\n# order.save()\r\n\r\n# messages.success(self.request, \"تمت إضافة الطلب بنجاح\")\r\n# return redirect(\"/\")\r\n\r\n# # except stripe.error.CardError as e:\r\n# # # Since it's a decline, stripe.error.CardError will be caught\r\n# # body = e.json_body\r\n# # err = body.get('error', {})\r\n# # messages.error(self.request, f\"{err.get('message')}\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.RateLimitError as e:\r\n# # # Too many requests made to the API too quickly\r\n# # messages.error(self.request, \"RateLimitError\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.InvalidRequestError as e:\r\n# # # معلومات غير صالحة were supplied to Stripe's API\r\n# # messages.error(self.request, \"معلومات غير صالحة\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.AuthenticationError as e:\r\n# # # Authentication with Stripe's API failed\r\n# # # (maybe you changed API keys recently)\r\n# # messages.error(self.request, \"ليس لديك أذن الدخول\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.APIConnectionError as e:\r\n# # # Network communication with Stripe failed\r\n# # messages.error(self.request, \"خطأ في الشبكة\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.StripeError as e:\r\n# # # Display a very generic error to the user, and maybe send\r\n# # # yourself an email\r\n# # messages.error(self.request, \"هناك خطأ ما\")\r\n# # return 
redirect(\"/\")\r\n\r\n# except Exception as e:\r\n# # send an email to ourselves\r\n# messages.error(self.request, \"حدث خطأ جسيم\")\r\n# return redirect(\"/\")\r\n\r\n\r\nclass HomeView(ListView):\r\n template_name = \"index.html\"\r\n queryset = Item.objects.filter(is_active=True)\r\n context_object_name = 'items'\r\n\r\n\r\nclass OrderSummaryView(LoginRequiredMixin, View):\r\n def get(self, *args, **kwargs):\r\n try:\r\n order = Order.objects.get(user=self.request.user, ordered=False)\r\n \r\n # sizeItemList = sizeItems.objects.filter( is_active=True)\r\n context = {\r\n 'object': order,\r\n # 'sizeItemList': sizeItemList\r\n }\r\n return render(self.request, 'order_summary.html', context)\r\n except ObjectDoesNotExist:\r\n messages.error(self.request, \"ليس لديك طلب نشط\")\r\n return redirect(\"/\")\r\n\r\n\r\nclass ShopView(ListView):\r\n model = Item\r\n paginate_by = 6\r\n template_name = \"shop.html\"\r\n\r\n\r\nclass ItemDetailView(DetailView):\r\n model = Item\r\n template_name = \"product-detail.html\"\r\n # context = {\r\n # 'sizeItems': order\r\n # }\r\n\r\n# class CategoryView(DetailView):\r\n# model = Category\r\n# template_name = \"category.html\"\r\n\r\nclass CategoryView(View):\r\n def get(self, *args, **kwargs):\r\n category = Category.objects.get(slug=self.kwargs['slug'])\r\n item = Item.objects.filter(category=category, is_active=True)\r\n context = {\r\n 'object_list': item,\r\n 'category_title': category,\r\n 'category_description': category.description,\r\n 'category_image': category.image\r\n }\r\n return render(self.request, \"category.html\", context)\r\n\r\n\r\nclass CheckoutView(View):\r\n def get(self, *args, **kwargs):\r\n try:\r\n order = Order.objects.get(user=self.request.user, ordered=False)\r\n form = CheckoutForm()\r\n context = {\r\n 'form': form,\r\n 'order': order\r\n }\r\n # 'couponform': CouponForm(),\r\n # 'DISPLAY_COUPON_FORM': False\r\n return render(self.request, \"checkout.html\", context)\r\n\r\n except ObjectDoesNotExist:\r\n messages.info(self.request, \"ليس لديك طلب نشط\")\r\n return redirect(\"core:checkout\")\r\n\r\n def post(self, *args, **kwargs):\r\n form = CheckoutForm(self.request.POST or None)\r\n try:\r\n order = Order.objects.get(user=self.request.user, ordered=False)\r\n print(self.request.POST)\r\n if form.is_valid():\r\n street_address = form.cleaned_data.get('street_address')\r\n apartment_address = form.cleaned_data.get('apartment_address')\r\n country = form.cleaned_data.get('country')\r\n city = form.cleaned_data.get('city')\r\n phone = form.cleaned_data.get('phone')\r\n gps = form.cleaned_data.get('gps')\r\n # add functionality for these fields\r\n # same_shipping_address = form.cleaned_data.get(\r\n # 'same_shipping_address')\r\n save_info = form.cleaned_data.get('save_info')\r\n # print('yasser : ')\r\n # address_type = form.cleaned_data.get('address_type')\r\n billing_address = BillingAddress(\r\n user=self.request.user,\r\n street_address=street_address,\r\n apartment_address=apartment_address,\r\n country=country,\r\n city=city,\r\n phone=phone,\r\n save_info=save_info,\r\n gps=gps\r\n )\r\n # address_type=address_type,\r\n billing_address.save()\r\n # assign to the order\r\n order.billing_address = billing_address\r\n if billing_address.save_info== True:\r\n order.shipping_address = billing_address\r\n order.ordered = True\r\n order.save()\r\n OrderItem.objects.filter(order__pk=order.pk).update(ordered=True,ordered_date = timezone.now())\r\n\r\n # orderItems = OrderItem.objects.filter(order__pk=order.pk)\r\n # for 
order_item in orderItems:\r\n # print(order_item.ordered)\r\n # order_item.ordered = True\r\n # order_item.save()\r\n\r\n messages.success(self.request, \"تمت إضافة الطلب بنجاح\")\r\n return redirect(\"/\")\r\n # add redirect to the selected payment option\r\n # if address_type == 'B':\r\n # return redirect('core:payment', address_type='الدفع فاتورة/نقداً')\r\n # elif address_type == 'S':\r\n # return redirect('core:payment', address_type='الدفع عند التوصيل')\r\n # else:\r\n # messages.warning(\r\n # self.request, \" خيار دفع غير صالح\")\r\n # return redirect('core:checkout')\r\n except ObjectDoesNotExist:\r\n messages.error(self.request, \"ليس لديك طلب نشط\")\r\n return redirect(\"core:order-summary\")\r\n\r\n\r\n# def home(request):\r\n# context = {\r\n# 'items': Item.objects.all()\r\n# }\r\n# return render(request, \"index.html\", context)\r\n#\r\n#\r\n# def products(request):\r\n# context = {\r\n# 'items': Item.objects.all()\r\n# }\r\n# return render(request, \"product-detail.html\", context)\r\n#\r\n#\r\n# def shop(request):\r\n# context = {\r\n# 'items': Item.objects.all()\r\n# }\r\n# return render(request, \"shop.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/login/\")\r\ndef add_to_cart(request, slug ):\r\n item = get_object_or_404(Item, slug=slug)\r\n if request.method =='GET':\r\n print('wwwwwwwww')\r\n if request.method =='POST':\r\n print('GGGGGGGGGGGG')\r\n if 'sizeItemss' in request.GET:\r\n id = request.GET.get(\"sizeItemss\")\r\n if int (id) > 0 :\r\n sizeItem= get_object_or_404(sizeItems,item = item ,pk = id)\r\n order_item, created = OrderItem.objects.get_or_create(\r\n item=item,\r\n user=request.user,\r\n ordered=False,\r\n sizeItem= sizeItem\r\n )\r\n else:\r\n print('44sssssssss4')\r\n order_item, created = OrderItem.objects.get_or_create(\r\n item=item,\r\n user=request.user,\r\n ordered=False,\r\n )\r\n else:\r\n print('4444444ggggg')\r\n order_item, created = OrderItem.objects.get_or_create(\r\n item=item,\r\n user=request.user,\r\n ordered=False,\r\n )\r\n # order_item, created = OrderItem.objects.get_or_create(\r\n # item=item,\r\n # user=request.user,\r\n # ordered=False,\r\n # )\r\n\r\n order_qs = Order.objects.filter(user=request.user, ordered=False)\r\n if order_qs.exists():\r\n order = order_qs[0]\r\n if order.items.filter(item__slug=item.slug).exists():\r\n order_item.quantity += 1\r\n order_item.save()\r\n messages.info(request, \"تم تحديث كمية العنصر.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n order.items.add(order_item)\r\n messages.info(request, \"تمت إضافة بند إلى عربة التسوق.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n ordered_date = timezone.now()\r\n order = Order.objects.create(\r\n user=request.user, ordered_date=ordered_date)\r\n order.items.add(order_item)\r\n messages.info(request, \"تمت إضافة بند إلى عربة التسوق.\")\r\n return redirect(\"core:order-summary\")\r\n\r\n\r\n@login_required(login_url=\"/login/\")\r\ndef remove_from_cart(request, slug):\r\n item = get_object_or_404(Item, slug=slug)\r\n order_qs = Order.objects.filter(\r\n user=request.user,\r\n ordered=False)\r\n if order_qs.exists():\r\n order = order_qs[0]\r\n # check if the order item is in the order\r\n if order.items.filter(item__slug=item.slug).exists():\r\n order_item = OrderItem.objects.filter(\r\n item=item,\r\n user=request.user,\r\n ordered=False\r\n )[0]\r\n order_item.delete()\r\n order.items.remove(order_item)\r\n messages.info(request, \"تمت إزالة العنصر من عربة التسوق الخاصة بك.\")\r\n return 
redirect(\"core:order-summary\")\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"العنصر لم يكن في عربة التسوق الخاصة بك.\")\r\n return redirect(\"core:product\", slug=slug)\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"ليس لديك طلب نشط.\")\r\n return redirect(\"core:product\", slug=slug)\r\n return redirect(\"core:product\", slug=slug)\r\n\r\n\r\n@login_required(login_url=\"/login/\")\r\ndef remove_single_item_from_cart(request, slug):\r\n item = get_object_or_404(Item, slug=slug)\r\n order_qs = Order.objects.filter(\r\n user=request.user,\r\n ordered=False)\r\n if order_qs.exists():\r\n order = order_qs[0]\r\n # check if the order item is in the order\r\n if order.items.filter(item__slug=item.slug).exists():\r\n order_item = OrderItem.objects.filter(\r\n item=item,\r\n user=request.user,\r\n ordered=False\r\n )[0]\r\n if order_item.quantity > 1:\r\n order_item.quantity -= 1\r\n order_item.save()\r\n else:\r\n order_item.delete()\r\n order.items.remove(order_item)\r\n messages.info(request, \" تم تحديث كمية العنصر هذا.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"العنصر لم يكن في عربة التسوق الخاصة بك.\")\r\n return redirect(\"core:product\", slug=slug)\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"ليس لديك طلب نشط.\")\r\n return redirect(\"core:product\", slug=slug)\r\n return redirect(\"core:product\", slug=slug)\r\n\r\n\r\n# def get_coupon(request, code):\r\n# try:\r\n# coupon = Coupon.objects.get(code=code)\r\n# return coupon\r\n# except ObjectDoesNotExist:\r\n# messages.info(request, \"هذه القسيمة غير موجودة\")\r\n# return redirect(\"core:checkout\")\r\n\r\n\r\n# class AddCouponView(View):\r\n# def post(self, *args, **kwargs):\r\n# form = CouponForm(self.request.POST or None)\r\n# if form.is_valid():\r\n# try:\r\n# code = form.cleaned_data.get('code')\r\n# order = Order.objects.get(\r\n# user=self.request.user, ordered=False)\r\n# order.coupon = get_coupon(self.request, code)\r\n# order.save()\r\n# messages.success(self.request, \"تمت إضافة القسيمة بنجاح\")\r\n# return redirect(\"core:checkout\")\r\n\r\n# except ObjectDoesNotExist:\r\n# messages.info(self.request, \"ليس لديك طلب نشط\")\r\n# return redirect(\"core:checkout\")\r\n\r\n\r\nclass RequestRefundView(View):\r\n def get(self, *args, **kwargs):\r\n form = RefundForm()\r\n context = {\r\n 'form': form\r\n }\r\n return render(self.request, \"request_refund.html\", context)\r\n\r\n def post(self, *args, **kwargs):\r\n form = RefundForm(self.request.POST)\r\n if form.is_valid():\r\n ref_code = form.cleaned_data.get('ref_code')\r\n message = form.cleaned_data.get('message')\r\n email = form.cleaned_data.get('email')\r\n # edit the order\r\n try:\r\n order = Order.objects.get(ref_code=ref_code)\r\n order.refund_requested = True\r\n order.save()\r\n\r\n # store the refund\r\n refund = Refund()\r\n refund.order = order\r\n refund.reason = message\r\n refund.email = email\r\n refund.save()\r\n\r\n messages.info(self.request, \"تم استلام طلبك\")\r\n return redirect(\"core:request-refund\")\r\n\r\n except ObjectDoesNotExist:\r\n messages.info(self.request, \"هذا الطلب غير موجود\")\r\n return 
redirect(\"core:request-refund\")\r\n","repo_name":"fxamar/OceanWind","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"72180091776","text":"from django.core.exceptions import EmptyResultSet\nfrom haystack.inputs import Raw\n\nfrom haystack.query import SearchQuerySet\nfrom django.db.models import Q\n\nfrom ads.models import Ad\nfrom comohay import settings\n\nimport logging\n\n\ndef double_clean(query_fragment, backend):\n    \"\"\"\n    Provides a mechanism for sanitizing user input before presenting the\n    value to the backend.\n\n    A basic (override-able) implementation is provided.\n    \"\"\"\n    if not isinstance(query_fragment, str):\n        return query_fragment\n\n    words = query_fragment.split()\n    cleaned_words = []\n\n    for word in words:\n        if word in backend.RESERVED_WORDS:\n            word = word.replace(word, word.lower())\n\n        for char in backend.RESERVED_CHARACTERS:\n            word = word.replace(char, \"\\\\\\\\%s\" % char)\n\n        cleaned_words.append(word)\n\n    return \" \".join(cleaned_words)\n\n\ndef has_duplicates(ad, verbose=False, title_mm=None, description_mm=None):\n    \"\"\"\n    Returns True if the passed ad has a duplicate in the database using the Solr index, otherwise returns False\n\n    Arguments\n    ad (`Ad`):\n        The ad to check for duplicates\n    verbose (`bool`):\n        Whether or not to print information about the process\n    title_mm (`string`):\n        minimum should match for the ad title, see https://solr.apache.org/guide/6_6/the-dismax-query-parser.html#TheDisMaxQueryParser-Themm_MinimumShouldMatch_Parameter\n    description_mm (`string`):\n        minimum should match for the ad description, see https://solr.apache.org/guide/6_6/the-dismax-query-parser.html#TheDisMaxQueryParser-Themm_MinimumShouldMatch_Parameter\n    \"\"\"\n\n    sqs = SearchQuerySet()\n\n    if title_mm is None:\n        title_mm = '{}<{}%'.format(settings.TITLE_MIN_WORDS, settings.TITLE_SIMILARITY)\n\n    if description_mm is None:\n        description_mm = '{}<{}%'.format(settings.DESCRIPTION_MIN_WORDS, settings.DESCRIPTION_SIMILARITY)\n\n    clean_desc = double_clean(ad.description, sqs.query.backend)\n    clean_desc = clean_desc.replace(\"'\", \"\\\\'\")\n    max_desc_len = len(ad.description) + int(len(ad.description) * settings.DESCRIPTION_LENGTH_DIFF)\n\n    clean_title = double_clean(ad.title, sqs.query.backend)\n    clean_title = clean_title.replace(\"'\", \"\\\\'\")\n    max_title_len = len(ad.title) + int(len(ad.title) * settings.TITLE_LENGTH_DIFF)\n\n    # description_mm constrains the description clause and title_mm the title clause\n    ids_values = sqs.filter(\n        content=Raw(\n            \"description_length:[0 TO {}] AND {{!dismax qf=description mm={} v='{}'}} AND title_length:[0 TO {}] AND {{!dismax qf=title mm={} v='{}'}}\".format(\n                max_desc_len, description_mm, clean_desc, max_title_len, title_mm, clean_title))\n    ).values_list('id')\n\n    ids = list(map(lambda x: x[0].split('.')[-1], ids_values))\n\n    # TODO: think about adding a date comparison.
It can be possible that the ad content is similar but corresponds\n # to other intent of selling another stock of the same product\n\n a = Q(id__in=ids)\n b = Q()\n\n has_contact_info = False\n\n if ad.contact_phone:\n b |= Q(contact_phone=ad.contact_phone)\n has_contact_info = True\n\n if ad.contact_email:\n b |= Q(contact_email=ad.contact_email)\n has_contact_info = True\n\n if ad.external_contact_id and ad.external_source:\n b |= (Q(external_contact_id=ad.external_contact_id) & Q(external_source=ad.external_source))\n has_contact_info = True\n\n if ad.contact_tg:\n b |= Q(contact_tg=ad.contact_tg)\n has_contact_info = True\n\n if has_contact_info:\n # Looking for duplicated ads from the same contact\n duplicates = Ad.objects.filter(a & (b))\n else:\n # Looking for duplicate ads from the same source that don't have contact information\n duplicates = Ad.objects.filter(\n Q(id__in=ids) &\n Q(external_source=ad.external_source) &\n (Q(contact_phone=None) | Q(contact_phone='')) &\n (Q(contact_email=None) | Q(contact_email='')) &\n (Q(external_contact_id=None) | Q(external_contact_id='')) &\n (Q(contact_tg=None) | Q(contact_tg=''))\n )\n\n if duplicates.count() > 0:\n if verbose:\n print('Found {} duplicates ({}) of ad:\"{}\"'.format(duplicates.count(), ','.join(ids), ad.title))\n for ad in duplicates.all():\n print('Title: {}'.format(ad.title))\n print('------------------------------------------------------------------')\n return True\n\n return False\n\n\ndef remove_duplicates(ad, verbose=False):\n \"\"\"\n Ad :param ad:\n \"\"\"\n\n sqs = SearchQuerySet()\n similarity = int(settings.DESCRIPTION_SIMILARITY * 100)\n\n # If the query has less than 4 clauses then it has to match at 100%, otherwise the number computed in similarity\n similarity = '3<{}'.format(similarity)\n\n clean_desc = double_clean(ad.description, sqs.query.backend)\n clean_desc = clean_desc.replace(\"'\", \"\\\\'\")\n max_desc_len = len(ad.description) + int(len(ad.description) * settings.DESCRIPTION_LENGTH_DIFF)\n\n clean_title = double_clean(ad.title, sqs.query.backend)\n clean_title = clean_title.replace(\"'\", \"\\\\'\")\n max_title_len = len(ad.title) + int(len(ad.title) * settings.TITLE_LENGTH_DIFF)\n\n ids_values = sqs.filter(\n content=Raw(\n \"description_length:[0 TO {}] AND {{!dismax qf=description mm={}% v='{}'}} AND title_length:[0 TO {}] AND {{!dismax qf=title mm={}% v='{}'}}\".format(\n max_desc_len, similarity, clean_desc, max_title_len, similarity, clean_title))\n ).values_list('id')\n\n ids = list(map(lambda x: x[0].split('.')[-1], ids_values))\n\n if (ad.contact_phone is not None and ad.contact_phone != '') or (\n ad.contact_email is not None and ad.contact_email != '') or (\n ad.external_contact_id is not None and ad.external_contact_id != ''):\n try:\n # Remove duplicated ads from same contact\n a = Q(id__in=ids)\n b = Q(contact_email=ad.contact_email)\n c = Q(contact_phone=ad.contact_phone)\n d = Q(external_contact_id=ad.external_contact_id) & Q(external_source=ad.external_source)\n\n to_delete = Ad.objects.filter(a & (b | c | d)).exclude(\n external_source=ad.external_source,\n external_id=ad.external_id\n )\n\n if verbose and to_delete.count() > 0:\n print('Removing {} duplicates ({}) of ad:\"{}\"'.format(to_delete.count(), ','.join(ids), ad.title))\n for ad in to_delete.all():\n print('Title: {}'.format(ad.title))\n # print('Description: {}'.format(ad.description))\n\n print('------------------------------------------------------------------')\n\n to_delete.delete()\n\n except Exception as e:\n 
logging.error(\"Error removing duplicated items: \" + str(e))\n\n else:\n try:\n # Remove duplicated ads from same source\n a = Q(id__in=ids)\n b = Q(external_source=ad.external_source)\n\n to_delete = Ad.objects.filter(a & b).exclude(\n external_source=ad.external_source,\n external_id=ad.external_id\n )\n\n if verbose and to_delete.count() > 0:\n print('Removing {} duplicates ({}) of ad:\"{}\"'.format(to_delete.count(), ','.join(ids), ad.title))\n for ad in to_delete.all():\n print('Title: {}'.format(ad.title))\n # print('Description: {}'.format(ad.description))\n\n print('------------------------------------------------------------------')\n\n to_delete.delete()\n\n except Exception as e:\n logging.error(\"Error removing duplicated items: \" + str(e))\n","repo_name":"daxslab/comohay","sub_path":"ads/services/ad_service.py","file_name":"ad_service.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"}
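A minimal usage sketch for the duplicate check above, assuming an existing `Ad` instance named `ad`; the mm strings are illustrative Solr minimum-should-match values (up to N clauses must all match, otherwise the given percentage applies), not values taken from the project settings:

# hypothetical call; ad, the thresholds and the verbose flag are example values only
from ads.services.ad_service import has_duplicates
if has_duplicates(ad, verbose=True, title_mm='3<80%', description_mm='3<75%'):
    print('skipping ad: a near-duplicate is already indexed')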
+{"seq_id":"70055763137","text":"from __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom django.core.checks import Error, Warning, register\n\n\n@register()\ndef css_install_check(app_configs, **kwargs):\n errors = []\n\n css_path = os.path.join(\n os.path.dirname(__file__), 'static', 'wagtailadmin', 'css', 'normalize.css'\n )\n\n if not os.path.isfile(css_path):\n error_hint = \"\"\"\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see http://docs.wagtail.io/en/latest/contributing/developing.html\n\n File not found: %s\n \"\"\" % css_path\n\n errors.append(\n Warning(\n \"CSS for the Wagtail admin is missing\",\n hint=error_hint,\n id='wagtailadmin.W001',\n )\n )\n return errors\n\n\n@register()\ndef base_form_class_check(app_configs, **kwargs):\n from wagtail.wagtailadmin.forms import WagtailAdminPageForm\n from wagtail.wagtailcore.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n if not issubclass(cls.base_form_class, WagtailAdminPageForm):\n errors.append(Error(\n \"{}.base_form_class does not extend WagtailAdminPageForm\".format(\n cls.__name__),\n hint=\"Ensure that {}.{} extends WagtailAdminPageForm\".format(\n cls.base_form_class.__module__,\n cls.base_form_class.__name__),\n obj=cls,\n id='wagtailadmin.E001'))\n\n return errors\n\n\n@register()\ndef get_form_class_check(app_configs, **kwargs):\n from wagtail.wagtailadmin.forms import WagtailAdminPageForm\n from wagtail.wagtailcore.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n edit_handler = cls.get_edit_handler()\n if not issubclass(edit_handler.get_form_class(cls), WagtailAdminPageForm):\n errors.append(Error(\n \"{cls}.get_edit_handler().get_form_class({cls}) does not extend WagtailAdminPageForm\".format(\n cls=cls.__name__),\n hint=\"Ensure that the EditHandler for {cls} creates a subclass of WagtailAdminPageForm\".format(\n cls=cls.__name__),\n obj=cls,\n id='wagtailadmin.E002'))\n\n return errors\n","repo_name":"zhl2008/awd-platform","sub_path":"web_hxb2/lib/python3.5/site-packages/wagtail_bak/wagtailadmin/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"79"}
+{"seq_id":"32408387862","text":"from flask import Flask, request, redirect\nfrom espeak import espeak\nimport twilio.twiml\nimport urllib.parse, pycurl, os\nimport collections\nimport re\nimport subprocess\n\ndef getPhrase(phrase):\n\ttextPhrase = \"\"\n\tparameters = {\"\": phrase}\n\tdata = urllib.parse.urlencode(parameters)\n\ttextPhrase = \"%s%s\" % (textPhrase,data)\n\treturn textPhrase\n\ndef speakSpeechFromText(phrase):\n\tphrase = getPhrase(phrase)\n\tespeak.synth(phrase)\n\tprint(\"Espeak on\")\napp = Flask(__name__)\n@app.route(\"/\", methods=['GET', 'POST'])\ndef hello_monkey():\n    \"\"\"Respond to an incoming SMS by speaking its body and returning an empty TwiML reply.\"\"\"\n    # request.values covers both GET query args and Twilio's form-encoded POST fields\n    sms = request.values.get('Body')\n\n    if sms:\n        speakSpeechFromText(sms)\n    resp = twilio.twiml.Response()\n    return str(resp)\n\nif __name__ == \"__main__\":\n\tprint (\"Hello twilio\")\n\tapp.run( host='0.0.0.0', debug=True, port = 80)\n","repo_name":"ferzeuz/SMStoSpeech","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
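A quick way to exercise the webhook above without Twilio, assuming the Flask app is running locally on port 80; the URL and message body are made-up test values:

import requests
# simulate Twilio's form-encoded POST that carries the SMS text in 'Body'
print(requests.post('http://localhost:80/', data={'Body': 'hello from a test'}).text)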
+{"seq_id":"2011110721","text":"from django.urls import path, include\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf.urls.i18n import i18n_patterns\n\n\nfrom rest_framework_swagger.views import get_swagger_view\nfrom rest_framework_simplejwt.views import (\n TokenVerifyView,\n TokenObtainPairView,\n TokenRefreshView,\n)\n\nfrom main import settings\nfrom main.yasg import urlpatterns as doc_urls\n\n\nschema_view = get_swagger_view(title='Pastebin API')\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('restframework/', include('rest_framework.urls')),\n path('api/token/access/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),\n path('i18n/', include('django.conf.urls.i18n')),\n # APPS\n path('user/', include('apps.users.urls')),\n path('main_page/', include('apps.main_page.urls')),\n path('investor/', include('apps.investor.urls')),\n path('feedback/', include('apps.feedback.urls')),\n path('other/', include('apps.other.urls')),\n path('trade_zone/', include('apps.trade_zone.urls')),\n path('invest_zone/', include('apps.invest_zone.urls')),\n path('food_zone/', include('apps.food_zone.urls')),\n path('fashion_zone/', include('apps.fashion_zone.urls')),\n path('b2b_meeting/', include('apps.b2b_meeting.urls')),\n path('tickets/', include('apps.ticket.urls')),\n path('profile_visit/', include('apps.profile_visit.urls')),\n path('chat/', include('apps.chat.urls')),\n]\n\nurlpatterns += i18n_patterns(\n path('user/', include('apps.users.urls')),\n path('main_page/', include('apps.main_page.urls')),\n path('investor/', include('apps.investor.urls')),\n path('feedback/', include('apps.feedback.urls')),\n path('other/', include('apps.other.urls')),\n path('trade_zone/', include('apps.trade_zone.urls')),\n path('invest_zone/', include('apps.invest_zone.urls')),\n path('fashion_zone/', include('apps.fashion_zone.urls')),\n path('b2b_meeting/', include('apps.b2b_meeting.urls')),\n)\n\nurlpatterns += doc_urls\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Bilalchik/hit_expo","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71905215936","text":"from odoo import _, api, fields, models\nfrom odoo.exceptions import UserError\n\n\nclass AccountMove(models.Model):\n _inherit = \"account.move\"\n _description = \"Account Entry\"\n\n asset_id = fields.Many2one(\n comodel_name='account.asset',\n help='Asset')\n schedule_date = fields.Date(\n string='Schedule Date',\n help='Rent Schedule Date.')\n source = fields.Char(\n string='Account Source',\n help='Source from where account move created.')\n\n def assert_balanced(self):\n prec = self.env['decimal.precision'].precision_get('Account')\n if self.ids:\n self._cr.execute(\"\"\"\n SELECT move_id FROM account_move_line WHERE move_id in %s\n GROUP BY move_id HAVING abs(sum(debit) - sum(credit)) > %s\n \"\"\", (tuple(self.ids), 10 ** (-max(5, prec))))\n if self._cr.fetchall():\n raise UserError(_(\"Cannot create unbalanced journal entry.\"))\n return True\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n\n\nclass AccountPaymentRegister(models.TransientModel):\n _inherit = 'account.payment.register'\n\n tenancy_id = fields.Many2one(\n comodel_name='account.analytic.account',\n string='Tenancy',\n help='Tenancy Name.')\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n\n @api.model\n def default_get(self, fields_list):\n # OVERRIDE\n res = super().default_get(fields_list)\n context = dict(self._context) or {}\n active_id = self.env[context.get('active_model')].browse(\n context.get('active_id'))\n if active_id:\n res['property_id'] = active_id.property_id.id or False\n res['tenancy_id'] = active_id.new_tenancy_id.id or False\n return res\n\n def action_create_payments(self):\n res = super(AccountPaymentRegister, self).action_create_payments()\n context = dict(self._context) or {}\n if self._context.get('asset') or self._context.get('openinvoice'):\n schedule_obj = self.env['tenancy.rent.schedule']\n invoice_id = context.get('active_id')\n for schedule in schedule_obj.search([('invc_id', '=', invoice_id)]):\n amount = 0.0\n if schedule.invc_id.state == 'paid':\n schedule.paid = True\n schedule.move_check = True\n if schedule.invc_id:\n amount = schedule.invc_id.amount_residual\n schedule.write({'pen_amt': amount})\n return res\n\n def _create_payment_vals_from_wizard(self):\n res = super()._create_payment_vals_from_wizard()\n res.update({'asset_id': self.property_id.id,\n 'property_id': self.property_id.id, 'tenancy_id': self.tenancy_id.id})\n return res\n\n\nclass AccountPayment(models.Model):\n _inherit = 'account.payment'\n\n tenancy_id = fields.Many2one(\n comodel_name='account.analytic.account',\n string='Tenancy',\n help='Tenancy Name.')\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n amount_due = fields.Monetary(\n comodel_name='res.partner',\n related='partner_id.credit',\n readonly=True,\n default=0.0,\n help='Display Due amount of Customer')\n\n def action_post(self):\n res = super(AccountPayment, self).action_post()\n invoice_obj = self.env['account.move']\n context = dict(self._context or {})\n for rec in self:\n if context.get('return'):\n invoice_browse = invoice_obj.browse(\n context.get('active_id')).new_tenancy_id\n invoice_browse.write({'amount_return': rec.amount})\n if context.get('deposite_received'):\n tenancy_active_id = self.env[\n 
'account.analytic.account'].browse(context.get('active_id'))\n tenancy_active_id.write({'amount_return': rec.amount})\n return res\n\n @api.model\n def create(self, vals):\n res = super(AccountPayment, self).create(vals)\n if res and res.id and res.tenancy_id and res.tenancy_id.id:\n if res.payment_type == 'inbound':\n res.tenancy_id.write({'acc_pay_dep_rec_id': res.id})\n if res.payment_type == 'outbound':\n res.tenancy_id.write({'acc_pay_dep_ret_id': res.id})\n return res\n\n def _prepare_move_line_default_vals(self, write_off_line_vals):\n result = super()._prepare_move_line_default_vals(write_off_line_vals)\n context = dict(self._context) or {}\n for line in result:\n if not self.move_id.asset_id:\n self.move_id.asset_id = self.property_id.id or False\n if context.get('account_deposit_received') and line.get('debit') > 0 and self.tenancy_id.id:\n if self.payment_type in ('inbound', 'outbound'):\n line.update({\n 'analytic_account_id': self.tenancy_id.id,\n 'property_id': self.property_id.id\n })\n return result\n\n def _seek_for_lines(self):\n rec = super(AccountPayment, self)._seek_for_lines()\n if rec and rec[0] and self.tenancy_id and self.tenancy_id.id:\n if self.payment_type in ('inbound', 'outbound'):\n rec[0].update({'analytic_account_id': self.tenancy_id.id, 'property_id': self.property_id.id})\n return rec\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.move\"\n\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n new_tenancy_id = fields.Many2one(\n comodel_name='account.analytic.account',\n string='Tenancy ')\n","repo_name":"hassanshah9586/Mishhin","sub_path":"Mishhin-production/property_management_ee/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"36413776343","text":"from models.updown import UpDown\nfrom models.xlan import XLAN\nfrom models.xtransformer import XTransformer\nfrom models.transformer import Transformer\nfrom models.btoformer import Btoformer, Objformer\n\n__factory = {\n 'UpDown': UpDown,\n 'XLAN': XLAN,\n 'XTransformer': XTransformer,\n 'Transformer': Transformer,\n 'Btoformer': Btoformer,\n 'Objformer': Objformer\n}\n\ndef names():\n return sorted(__factory.keys())\n\ndef create(name, *args, **kwargs):\n if name not in __factory:\n raise KeyError(\"Unknown caption model:\", name)\n return __factory[name](*args, **kwargs)","repo_name":"YehLi/BTO-Net","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"37371233254","text":"#!/usr/bin/python3\n\n# Example of receiving and processing data using textfsm\n\nimport yaml\nimport textfsm\nimport myworkfuncs\nfrom tabulate import tabulate\n\nif __name__ == '__main__':\n\n    devices = yaml.safe_load(open('mydevices.yaml'))\n    all_done = myworkfuncs.threads_conn('connect_ssh', devices['routers'], command='sh ver')\n\n    with open(\"cisco_ios_sh_ver_custom.textfsm\") as f:\n        re_table = textfsm.TextFSM(f)\n        header = re_table.header\n\n    for item in all_done:\n        for crouter in item:\n            print(item[crouter])\n            re_table.Reset()  # clear FSM state/results left over from the previous device's output\n            result = re_table.ParseText(item[crouter])\n            print(tabulate(result, headers=header))\n            print()\n","repo_name":"DmitriyPanteleev/my-network-automation","sub_path":"some_netinfo_parsers/parse_w_textfsm.py","file_name":"parse_w_textfsm.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"42141133281","text":"from copy import deepcopy\nimport sudoku\n\n\n# single solution\ninput1 = [\n    [0, 0, 6, 1, 0, 0, 0, 0, 8], \n    [0, 8, 0, 0, 9, 0, 0, 3, 0], \n    [2, 0, 0, 0, 0, 5, 4, 0, 0], \n    [4, 0, 0, 0, 0, 1, 8, 0, 0], \n    [0, 3, 0, 0, 7, 0, 0, 4, 0], \n    [0, 0, 7, 9, 0, 0, 0, 0, 3], \n    [0, 0, 8, 4, 0, 0, 0, 0, 6], \n    [0, 2, 0, 0, 5, 0, 0, 8, 0], \n    [1, 0, 0, 0, 0, 2, 5, 0, 0],\n]\n\n# multiple solutions\ninput2 = [\n    [9, 0, 3, 0, 0, 0, 0, 5, 0],\n    [0, 0, 8, 0, 0, 0, 3, 0, 1],\n    [0, 0, 0, 1, 0, 0, 0, 0, 0],\n    [2, 0, 7, 0, 0, 0, 1, 4, 8],\n    [0, 6, 1, 0, 4, 0, 9, 0, 0],\n    [0, 9, 4, 2, 7, 0, 0, 6, 0],\n    [4, 2, 5, 3, 0, 6, 8, 7, 0],\n    [0, 0, 6, 9, 5, 0, 4, 3, 0],\n    [0, 0, 9, 0, 0, 0, 0, 1, 5],\n]\n\n\n\ndef is_single_solution(iterations:int=50, board:list[list[int]]=None, difficulty:str='easy'):\n\n    solutions = set()\n    if board is None:\n        sb = sudoku.SudokuBoard(difficulty=difficulty)\n    else:\n        # wrap the raw grid in a SudokuBoard so it exposes .board and .solve_board()\n        sb = sudoku.SudokuBoard(board=board)\n    for i in range(iterations):\n        sb_copy = deepcopy(sb)\n        print(f'solving #{i}....')\n        sb_copy.solve_board()\n        solutions.add(''.join([ str(num) for row in sb_copy.board for num in row]))\n\n    for solution_str in solutions:\n        solution = [list(solution_str[i*9 : i*9 + 9]) for i in range(0, 9)]\n        for row in solution:\n            print([int(num) for num in row])\n        print()\n    print()\n\n    print('Original puzzle:')\n    for row in sb.board:\n        print(row)\n\n    print()\n    print('Unique solutions:', len(list(solutions)))\n    print()\n\nis_single_solution()\n\n# sb = sudoku.SudokuBoard(board=input2)\n# sb.solve_board()\n# for row in sb.board:\n#     print(row)","repo_name":"FirstFlush/sudoku","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"1790961190","text":"#!/usr/bin/env python\n#\n# windyworld.py\n#\n\nimport os\nimport shutil\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.rcParams['backend'] = 'TkAgg'\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport copy\nfrom datetime import datetime as dt\n\n\nclass Env:\n\n def __init__(self, action_size=4, stochastic_wind=False):\n\n self.action_size = action_size # 4 for four move; 8 for king's move\n self.stochastic_wind = stochastic_wind\n random.seed(0)\n self.dim = (10, 7)\n self.start = [0, 3]\n self.goal = [7, 3]\n self.wind = [0,0,0,1,1,1,2,2,1,0]\n self.FOURMOVE = {\n 0: (0, 1), # NORTH\n 1: (1, 0), # EAST\n 2: (0, -1), # SOUTH\n 3: (-1, 0)} # WEST\n self.KINGSMOVE = {\n 0: (0, 1), # NORTH\n 1: (1, 1), # NORTHEAST\n 2: (1, 0), # EAST\n 3: (1, -1), # SOUTHEAST\n 4: (0, -1), # SOUTH\n 5: (-1, -1), # SOUTHWEST\n 6: (-1, 0), # WEST\n 7: (-1, 1), # NORTHWEST\n 8: (0, 0)} # STAY\n\n def reset(self):\n\n self.state = self.start.copy()\n self.map = np.full(self.dim, 9)\n\n \"\"\"\nmove 0:north, 1:east, 2:south, 3:west \n \"\"\"\n def act(self, move):\n\n ### set action ID to map ###\n state0 = self.state.copy()\n self.map[state0[0], state0[1]] = move\n\n if self.action_size == 4:\n x, y = self.FOURMOVE[move]\n elif self.action_size in [8, 9]: # King's move w/o a nith move\n x, y = self.KINGSMOVE[move]\n\n self.state[0] += x\n self.state[1] += y + self.wind[state0[0]] # plus wind\n\n ### STOCHASTIC WIND IF THERE IS WIND ###\n # above in 1/3, below in 1/3, and no effect in 1/3\n if self.stochastic_wind and self.wind[state0[0]] >= 1:\n self.state[1] += random.randint(0, 2) - 1\n\n r = -1\n is_goal = False\n if self.state == self.goal:\n is_goal = True\n r = 0\n if self.state[0] < 0:\n self.state[0] = 0\n elif self.state[0] >= self.dim[0]:\n self.state[0] = self.dim[0] - 1\n if self.state[1] < 0:\n self.state[1] = 0\n elif self.state[1] >= self.dim[1]:\n self.state[1] = self.dim[1] - 1\n return r, is_goal\n\n def show_map(self):\n\n print (np.flipud(np.transpose(env.map)))\n\n\nclass AbstractAgent:\n\n def __init__(self, dim, epsilon, initializer='random'):\n\n self.epsilon = epsilon\n self.initialize(dim, initializer)\n\n def initialize(self):\n\n if self.initializer == 'zero':\n self.q = np.zeros((self.dim[0], self.dim[1], self.action_size))\n elif self.initializer == 'random':\n self.q = np.random.rand(self.dim[0], self.dim[1], self.action_size)\n\n def e_greedy(self, state):\n\n if random.random() < self.epsilon: # RANDOM\n return random.randint(0, self.action_size - 1)\n else:\n return np.argmax(self.q[state[0], state[1], :])\n\n def max_q(self, state):\n\n return max(self.q[state[0], state[1], :])\n\n def __getitem__(self, s, a):\n\n return self.q[s[0], s[1], a]\n\n def get_q(self, s, a):\n\n return self.q[s[0], s[1], a]\n\n '''\n def get_prob(self, s):\n\n print (self.q[s[0], s[1], :])\n return None\n '''\n\n def show_value(self, png_file):\n\n m = np.max(self.q, axis=2)\n sns.heatmap(m.transpose())\n plt.savefig(png_file)\n plt.close('all')\n\n def show_arrow(self):\n\n m = np.argmax(self.q, axis=2)\n arrow = list(map(lambda x: ' '.join([self.ARROW[x] for x in x]), m.transpose()))\n for a in reversed(arrow):\n print (a)\n\n def get_action_str(self, a_list):\n\n if self.action_size == 4:\n delimiter = ''\n elif self.action_size in [8,9]:\n delimiter = ' '\n return delimiter.join([self.DIRECTION[a] for a in a_list])\n\n def find_policy(self):\n\n m = ([[np.argmax(self.q[i,j,:]) for i in range(self.dim[0])] for j 
in range(self.dim[1])])\n        print (np.flipud(np.array(m)))\n\n\ndef softmax(x):\n\n    return np.exp(x) / np.sum(np.exp(x))\n\n\nclass FourMoveAgent(AbstractAgent):\n\n    def __init__(self, dim, epsilon, initializer='random'):\n\n        self.dim = dim\n        self.action_size = 4\n        self.epsilon = epsilon\n        self.initializer = initializer\n        self.AGENTTYPE = 4\n        self.DIRECTION = {0: 'U', 1:'R', 2:'D', 3:'L'}\n        self.ARROW = {0: '^', 1:'>', 2:'v', 3:'<'}\n        self.initialize()\n    \n\nclass KingsMoveAgent(AbstractAgent):\n\n    def __init__(self, epsilon, action_size=8, initializer='random', dim=(10, 7)):\n\n        self.dim = dim\n        self.action_size = action_size\n        self.epsilon = epsilon\n        self.initializer = initializer\n        #self.DIRECTION = {0: 'U', 1:'r', 2:'R', 3:'e', 4: 'D', 5:'w', 6:'L', 7:'l'}\n        self.DIRECTION = {0: '⬆', 1:'↗', 2:'➡', 3:'↘', 4: '⬇', 5:'↙', 6:'⬅', 7:'↖', 8:'🔄'}\n        self.ARROW = {0: '⬆', 1:'↗', 2:'➡', 3:'↘', 4: '⬇', 5:'↙', 6:'⬅', 7:'↖', 8:'🔄'}\n        self.initialize()\n        # Q-table for the king's-move agent starts from zeros\n        self.q = np.zeros((self.dim[0], self.dim[1], self.action_size))\n    \n    \nclass ActorCriticAgent:\n\n    def __init__(self, dim, epsilon):\n\n        self.epsilon = epsilon\n        self.value = np.zeros(dim)\n        self.policy = np.zeros((dim[0], dim[1], 4))\n\n    def e_greedy(self, state):\n\n        if random.random() < self.epsilon: # RANDOM\n            return random.randint(0, 3)\n        else:\n            return np.argmax(self.policy[state[0], state[1], :])\n\n\ndef sarsa(env, agent, alpha, gamma):\n\n    ### SARSA ###\n    env.reset()\n    a = agent.e_greedy(env.state)\n    a_list = []\n    r = -1\n    R = 0\n    i = 0\n    is_goal = False\n    while not is_goal: # AN EPISODE\n        a_list.append(a)\n        i += 1\n        s0 = env.state.copy()\n        r, is_goal = env.act(a)\n        R += r\n        a1 = agent.e_greedy(env.state)\n        value = agent.get_q(s0, a)\n        agent.q[s0[0], s0[1], a] = agent.get_q(s0, a) \\\n            + alpha * (r + gamma * agent.get_q(env.state, a1) - agent.get_q(s0, a))\n        a = copy.copy(a1)\n    a_list.append(a)\n    return i, R, a_list\n\n\ndef q_learn(env, agent, alpha, gamma):\n\n    ### Q-LEARNING ###\n    env.reset()\n    R = 0\n    i = 0\n    r = -1\n    is_goal = False\n    while not is_goal:\n        i += 1\n        s0 = env.state.copy()\n        a = agent.e_greedy(env.state)\n        r, is_goal = env.act(a)\n        R += r\n        value = agent.q[s0[0], s0[1], a]\n        agent.q[s0[0], s0[1], a] = agent.get_q(s0, a) + alpha * (r + gamma * agent.max_q(env.state) - agent.get_q(s0, a))\n    return i, R\n    \n\ndef actor_critic(env, agent, alpha, gamma):\n\n    ### ACTOR-CRITIC ###\n    env.reset()\n    R = 0\n    i = 0\n    r = -1\n    is_goal = False\n    while not is_goal:\n        i += 1\n        s0 = env.state.copy()\n        a = agent.e_greedy(env.state) \n        r, is_goal = env.act(a)\n        R += r\n        value = agent.value[s0[0], s0[1]]\n        policy = agent.policy[s0[0], s0[1], a]\n        delta = r + gamma * agent.value[env.state[0], env.state[1]] - value\n        agent.value[s0[0], s0[1]] += delta\n        agent.policy[s0[0], s0[1], a] += delta\n        if is_goal:\n            break\n    return i, R\n\n\ndef show_step_graph(step_list, std_list, png_file):\n\n    plt.plot(step_list, label='#steps')\n    plt.plot(std_list, label='SD')\n    plt.yscale('log')\n    plt.savefig(png_file)\n    plt.close('all')\n    return\n\n\n###\nif __name__ == '__main__':\n\n    epsilon = 0.1\n    alpha = 0.5\n    #alpha = 0.1\n    #alpha = 0.01\n    gamma = 1.0\n    dim = (10, 7)\n    num = 1000\n    slide = 20\n    stochastic_wind = True\n\n    now = dt.now()\n\n    #agent = FourMoveAgent(epsilon)\n    #ql_agent = FourMoveAgent(epsilon)\n\n    #agent = KingsMoveAgent(epsilon)\n    #ql_agent = KingsMoveAgent(epsilon)\n    agent = KingsMoveAgent(epsilon, 8)\n    ql_agent = KingsMoveAgent(epsilon, 8)\n    #agent = KingsMoveAgent(epsilon, 9)\n    #ql_agent = KingsMoveAgent(epsilon, 9)\n    ac_agent = ActorCriticAgent(dim, epsilon)\n\n    if stochastic_wind:\n        
sw_tag = '-sw'\n else:\n sw_tag = ''\n png_dir = '%s-%s%s' % (now.strftime('png-%y%m%d-%H%M%S'), agent.action_size, sw_tag)\n env = Env(agent.action_size, stochastic_wind)\n w = []\n s_step_list, step_std_list = [], []\n\n if os.path.isdir(png_dir):\n shutil.rmtree(png_dir)\n os.mkdir(png_dir)\n step_graph_file = '%s/step_list.png' % png_dir\n\n for n in range(num):\n s_step, s_r, s_a = sarsa(env, agent, alpha, gamma)\n #ql_step, ql_r = q_learn(env, ql_agent, alpha, gamma)\n #ac_step, ac_r = actor_critic(env, ac_agent, alpha, gamma)\n #w.append([n + 1, s_step, s_r, ql_step, ql_r, ac_step, ac_r])\n w.append([n + 1, s_step, s_r])\n s_step_list.append(s_step)\n step_slide = np.array(s_step_list[-slide:])\n step_std_list.append(step_slide.mean())\n s_a_str = agent.get_action_str(s_a)\n print ('%3d %3d %2.2f %2.2f' % (n+1, s_step, step_slide.mean(), step_slide.std()), s_a_str)\n if (n+1) % 10 == 0:\n png_file = '%s/value-%03d.png' % (png_dir, n+1)\n agent.show_value(png_file)\n agent.show_arrow()\n show_step_graph(s_step_list, step_std_list, step_graph_file)\n","repo_name":"kawagashira/sutton","sub_path":"windyworld/windyworld.py","file_name":"windyworld.py","file_ext":"py","file_size_in_byte":9626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
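A worked instance of the SARSA update used in sarsa() above; the numbers are illustrative only:

alpha, gamma = 0.5, 1.0
q_sa, r, q_s1a1 = -3.0, -1, -2.0              # current Q(s,a), reward, Q(s',a')
q_sa = q_sa + alpha * (r + gamma * q_s1a1 - q_sa)
print(q_sa)                                   # -3.0: the TD error is zero here, so Q(s,a) is unchanged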
+{"seq_id":"10056494531","text":"\"\"\"\nVector2 that handles point screen coordinates\nTransformations related to the game position & game size happen here\n\"\"\"\n\n\nclass Vec2:\n \"Vector 2 class that has methods to scale screen coordinates\"\n\n screen_x_offset: int = 0\n screen_y_offset: int = 0\n screen_x_scale: int = 1\n screen_y_scale: int = 1\n\n def __init__(self, x_pos, y_pos, use_screen_offset: bool = True) -> None:\n self.x_pos = x_pos\n self.y_pos = y_pos\n self.use_screen_offset: bool = use_screen_offset\n\n def get_coords(self) -> tuple:\n \"\"\"Returns screen coordinates with transformations\"\"\"\n x_pos = self.x_pos * Vec2.screen_x_scale\n y_pos = self.y_pos * Vec2.screen_y_scale\n\n if self.use_screen_offset:\n return (round(x_pos + Vec2.screen_x_offset),\n round(y_pos + Vec2.screen_y_offset))\n\n return (round(x_pos), round(y_pos))\n\n @classmethod\n def setup_screen(cls, x_pos: int, y_pos: int, width: int, height: int) -> None:\n \"\"\"Setup for screen coordinate offset and scale\"\"\"\n Vec2.screen_x_offset = x_pos\n Vec2.screen_y_offset = y_pos\n Vec2.screen_x_scale = width / 1920\n Vec2.screen_y_scale = height / 1080\n","repo_name":"jfd02/TFT-OCR-BOT","sub_path":"vec2.py","file_name":"vec2.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"79"}
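A short usage sketch for the Vec2 class above; the screen geometry is a made-up example (a 1280x720 window placed at offset (100, 50)):

from vec2 import Vec2
Vec2.setup_screen(x_pos=100, y_pos=50, width=1280, height=720)
# 960 * 1280/1920 + 100 = 740 and 540 * 720/1080 + 50 = 410
print(Vec2(960, 540).get_coords())   # (740, 410)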
+{"seq_id":"23865079507","text":"import logging\nimport os\nfrom random import choice\nfrom argparse import ArgumentParser\nfrom urllib.parse import urlparse\n\nfrom notion.client import NotionClient\nfrom notion.block import Block, PageBlock, CollectionViewBlock\nfrom emoji import EMOJI_UNICODE\nimport frontmatter\n\nfrom .markdown import convert\n\ntry:\n from dotenv import load_dotenv\n load_dotenv()\nexcept:\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef random_emoji():\n # Don't allow people, hands, or fingers.\n forbidden_emoji_patterns = ['child', 'skin_tone', 'person', 'hand', 'finger']\n\n emoji_key = None\n while not emoji_key:\n emoji_key = choice(list(EMOJI_UNICODE.keys()))\n\n for pattern in forbidden_emoji_patterns:\n if pattern in emoji_key:\n emoji_key = None\n break\n\n return EMOJI_UNICODE[emoji_key]\n\n\ndef infer_block(root_block, path) -> Block:\n name, ext = os.path.splitext(path)\n\n if name == 'index':\n return root_block\n\n if ext != '.md' and ext != '':\n return None\n\n title = name.replace('-', ' ').replace('_', ' ').capitalize()\n\n for block in root_block.children:\n if block.type != 'page':\n continue\n\n if block.title != title:\n continue\n\n return block\n\n # Create a new page block\n\n return root_block.children.add_new(PageBlock, title=title)\n\n\ndef move_pages_to_end(block):\n # Move pages to the end of the document if they aren't already\n pages_to_move = []\n pages_seen = []\n\n for c in block.children:\n if c.type == 'page':\n pages_seen.append(c)\n else:\n pages_to_move.extend(pages_seen)\n pages_seen.clear()\n\n for page in pages_to_move:\n logger.info(f\"Moving page {page.id} to end of {block.id}\")\n page.move_to(block, 'last-child')\n\n\ndef block_matches_markdown_block(block, markdown_block_type, **markdown_block):\n if markdown_block_type != type(block):\n return False\n\n for key, value in markdown_block.items():\n if key in ['type', 'schema', 'rows']:\n continue\n\n block_attr = getattr(block, key)\n\n if block_attr != value:\n return False\n\n return True\n\n\ndef sync_collection_schema(collection, expected_schema):\n existing_schema = collection.get('schema')\n\n # The schemas must match!\n if existing_schema == expected_schema:\n return\n\n logger.info(f\"Updating schema of {collection.id}\")\n\n # If they don't, try to make them match.\n collection.set('schema', expected_schema)\n\n\ndef sync_collection_rows(block, collection_schema, collection_rows):\n if block.collection is None:\n logger.info(f\"Creating a new collection for {block.id}\")\n # We should have generated a schema and rows for this one\n client = block._client # Hacky internals stuff...\n block.collection = client.get_collection(\n # Low-level use of the API\n # TODO: Update when notion-py provides a better interface for this\n client.create_record(\"collection\", parent=block, schema={\"title\": {\"text\": \"_\", \"type\": \"text\"}})\n )\n\n block.views.add_new(view_type=\"table\")\n\n collection_schema_ids = ['title']\n\n for i in range(len(collection_schema) - 1):\n collection_schema_ids.append('x' + format(i, '0>4x'))\n\n sync_collection_schema(block.collection, dict(zip(collection_schema_ids, collection_schema)))\n\n existing_rows = block.collection.get_rows()\n\n for extra_row in existing_rows[len(collection_rows):]:\n extra_row.remove()\n\n existing_rows_iter = iter(existing_rows)\n\n for row in collection_rows:\n try:\n row_block = next(existing_rows_iter)\n except StopIteration:\n row_block = block.collection.add_row()\n\n if len(row) > 
len(collection_schema_ids):\n row = row[:len(collection_schema_ids)]\n\n row = zip(collection_schema_ids, row)\n\n for schema_id, prop_value in row:\n if row_block.get_property(schema_id) != prop_value:\n row_block.set_property(schema_id, prop_value)\n\n\ndef sync_markdown_blocks_to_block(markdown_blocks, block):\n touched_blocks = set()\n children_iter = iter(block.children)\n\n for markdown_block in markdown_blocks:\n markdown_block_class = markdown_block[\"type\"]\n del markdown_block[\"type\"]\n\n markdown_contents = markdown_block.pop(\"title\", None)\n collection_schema = markdown_block.pop(\"schema\", None)\n collection_rows = markdown_block.pop(\"rows\", None)\n block_children = markdown_block.pop(\"children\", None)\n\n try:\n child_block = next(children_iter)\n while not block_matches_markdown_block(child_block, markdown_block_class, **markdown_block):\n child_block = next(children_iter)\n logger.info(f\"Using existing markdown block {child_block.id} in {block.id}\")\n except StopIteration:\n # If we've hit the end of the children create a new child.\n child_block = block.children.add_new(markdown_block_class, **markdown_block)\n logger.info(f\"Creating new markdown block {child_block.id} in {block.id}\")\n\n if markdown_contents is not None:\n # Manually set the title property to bypass the `markdown_to_notion` in `notion-py`\n # This is because it chokes up on URLs and really we just don't need this 'cause\n # we're parsing the markdown ourselves.\n if child_block.get([\"properties\", \"title\"]) != markdown_contents:\n child_block.set([\"properties\", \"title\"], markdown_contents)\n\n touched_blocks.add(child_block.id)\n\n if isinstance(child_block, CollectionViewBlock):\n sync_collection_rows(child_block, collection_schema, collection_rows)\n\n if block_children:\n sync_markdown_blocks_to_block(block_children, child_block)\n elif len(child_block.get(child_block.child_list_key, [])) > 0:\n # If no children should exist but there are children attached to this block\n # (a list, etc) we should remove them as they're no longer needed!\n for c in child_block.children:\n c.remove()\n\n\n for c in block.children:\n if c.type != 'page' and c.id not in touched_blocks:\n logger.info(f\"Removing child block {c.id} from {block.id}\")\n c.remove()\n\n\ndef sync_file_to_block(filename, block, links : dict={}):\n logger.info(f\"Syncing {filename} to block {block.id}\")\n\n with open(filename) as markdown_fd:\n contents = markdown_fd.read()\n\n post = frontmatter.loads(contents)\n\n def resolve_link(target):\n try:\n parsed = urlparse(target)\n\n if parsed.scheme:\n return target\n except:\n pass\n\n target_path = os.path.realpath(os.path.join(os.path.dirname(filename), target))\n\n block = links.get(target_path)\n\n if not block:\n return target\n\n return block.get_browseable_url()\n\n markdown_blocks = convert(str(post), link_resolver=resolve_link)\n\n sync_markdown_blocks_to_block(markdown_blocks, block)\n\n\ndef create_page_structure(directory, root_block):\n touched_pages = set()\n\n files_to_pages = dict()\n\n index_path = os.path.realpath(os.path.join(directory, \"index.md\"))\n readme_path = os.path.realpath(os.path.join(directory, \"README.md\"))\n readme_lower_path = os.path.realpath(os.path.join(directory, \"README.md\"))\n\n # Do the index/readme first to ensure the correct sort order.\n if os.path.isfile(index_path):\n files_to_pages[index_path] = root_block\n elif os.path.isfile(readme_path):\n files_to_pages[readme_path] = root_block\n elif 
os.path.isfile(readme_lower_path):\n files_to_pages[readme_lower_path] = root_block\n\n for path in os.listdir(directory):\n if path.startswith('.'):\n # Skip any \"private\" files / directories\n continue\n\n if path.lower() == 'index.md' or path.lower() == 'readme.md':\n # Skip because we had a special case for this above.\n continue\n\n block = infer_block(root_block, path)\n\n if not block:\n continue\n\n full_path = os.path.realpath(os.path.join(directory, path))\n\n touched_pages.add(block.id)\n\n if os.path.isdir(full_path):\n files_to_pages.update(create_page_structure(full_path, block))\n elif os.path.splitext(full_path)[1].lower() == '.md':\n files_to_pages[full_path] = block\n\n return files_to_pages\n\n\ndef sync_directory_to_block(directory, root_block):\n # Do Two Passes: First, create blocks for all files that need them\n # Keep track of absolute file path -> block\n logger.info(\"Creating page structure..\")\n files_to_pages = create_page_structure(os.path.realpath(directory), root_block)\n\n touched_pages = set(block.id for block in files_to_pages.values())\n\n # Then, for iterate through every single page block created and:\n for full_path, block in files_to_pages.items():\n # Lock it\n if not block.get(['format', 'block_locked'], default=False):\n block.set(['format', 'block_locked'], True)\n\n if block.icon is None:\n block.icon = random_emoji()\n\n # Sync it.\n sync_file_to_block(full_path, block, links=files_to_pages)\n\n # Sort it.\n move_pages_to_end(block)\n\n # Clean it.\n for child in block.children:\n # Any children that are pages under block but aren't in touched_pages should be pruned\n if child.type == 'page' and child.id not in touched_pages:\n child.remove()\n\n # Technologic.\n\ndef main():\n import sys\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.INFO)\n\n parser = ArgumentParser()\n\n parser.add_argument('--notion-token', type=str, default=os.environ.get('NOTION_TOKEN'))\n parser.add_argument('docs_path', type=str)\n parser.add_argument('notion_url', type=str)\n\n args = parser.parse_args()\n\n token = args.notion_token\n root_url = args.notion_url\n docs_path = args.docs_path\n\n # add row to notion collection and add a text block with link to the new card\n client = NotionClient(token_v2=token)\n root_block = client.get_block(root_url)\n\n sync_directory_to_block(docs_path, root_block)\n","repo_name":"imnotjames/notion-docs-sync","sub_path":"notion_docs_sync/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"79"}
+{"seq_id":"15201200233","text":"import sys\nfrom pathlib import Path\nfrom typing import Dict, Any, List\n\nimport numpy as np\nimport pyrallis\nimport torch\nfrom tqdm import tqdm\n\nsys.path.append(\".\")\nsys.path.append(\"..\")\n\nfrom inversion.options.train_options import TrainOptions\nfrom inversion.video.generate_videos import generate_reconstruction_videos\nfrom prepare_data.landmarks_handler import LandmarksHandler\nfrom inversion.video.post_processing import postprocess_and_smooth_inversions\nfrom inversion.video.video_config import VideoConfig\nfrom inversion.video.video_editor import InterFaceGANVideoEditor, StyleCLIPVideoEditor\nfrom inversion.video.video_handler import VideoHandler\nfrom utils.common import tensor2im\nfrom utils.inference_utils import get_average_image, run_on_batch, load_encoder, IMAGE_TRANSFORMS\n\n\n@pyrallis.wrap()\ndef run_inference_on_video(video_opts: VideoConfig):\n # prepare all the output paths\n video_opts.output_path.mkdir(exist_ok=True, parents=True)\n\n # parse video\n video_handler = VideoHandler(video_path=video_opts.video_path,\n output_path=video_opts.output_path,\n raw_frames_path=video_opts.raw_frames_path,\n aligned_frames_path=video_opts.aligned_frames_path,\n cropped_frames_path=video_opts.cropped_frames_path)\n video_handler.parse_video()\n\n aligned_paths, cropped_paths = video_handler.get_input_paths()\n input_images = video_handler.load_images(aligned_paths)\n cropped_images = video_handler.load_images(cropped_paths)\n if video_opts.max_images is not None:\n aligned_paths = aligned_paths[:video_opts.max_images]\n input_images = input_images[:video_opts.max_images]\n cropped_images = cropped_images[:video_opts.max_images]\n\n # load pretrained encoder\n net, opts = load_encoder(video_opts.checkpoint_path, test_opts=video_opts, generator_path=video_opts.generator_path)\n\n # loads/computes landmarks transforms for the video frames\n landmarks_handler = LandmarksHandler(output_path=video_opts.output_path,\n landmarks_transforms_path=video_opts.landmarks_transforms_path)\n video_opts.landmarks_transforms_path = landmarks_handler.landmarks_transforms_path\n landmarks_transforms = landmarks_handler.get_landmarks_transforms(input_paths=aligned_paths,\n cropped_frames_path=video_handler.cropped_frames_path,\n aligned_frames_path=video_handler.aligned_frames_path)\n\n # run inference\n results = run_inference(input_paths=aligned_paths,\n input_images=input_images,\n landmarks_transforms=landmarks_transforms,\n net=net,\n opts=opts)\n\n # save inverted latents (can be used for editing, pti, etc)\n results_latents_path = opts.output_path / \"latents.npy\"\n np.save(results_latents_path, np.array(results[\"result_latents\"]))\n\n result_images = [np.array(tensor2im(im)) for im in results[\"result_images\"]]\n result_latents = np.array(list(results[\"result_latents\"].values()))\n landmarks_transforms = np.array(list(results[\"landmarks_transforms\"]))\n\n result_images_smoothed = postprocess_and_smooth_inversions(results, net, video_opts)\n\n # get video reconstruction\n generate_reconstruction_videos(input_images=cropped_images,\n result_images=result_images,\n result_images_smoothed=result_images_smoothed,\n video_handler=video_handler,\n opts=video_opts)\n\n if opts.interfacegan_directions is not None:\n editor = InterFaceGANVideoEditor(generator=net.decoder, opts=video_opts)\n for interfacegan_edit in video_opts.interfacegan_edits:\n edit_images_start, edit_images_end, edit_latents_start, edit_latents_end = editor.edit(\n 
edit_direction=interfacegan_edit.direction,\n start=interfacegan_edit.start,\n end=interfacegan_edit.end,\n result_latents=result_latents,\n landmarks_transforms=landmarks_transforms\n )\n edited_images_start_smoothed = editor.postprocess_and_smooth_edits(results, edit_latents_start, video_opts)\n edited_images_end_smoothed = editor.postprocess_and_smooth_edits(results, edit_latents_end, video_opts)\n editor.generate_edited_video(input_images=cropped_images,\n result_images_smoothed=result_images_smoothed,\n edited_images_smoothed=edited_images_start_smoothed,\n video_handler=video_handler,\n save_name=f\"edited_video_{interfacegan_edit.direction}_start\")\n editor.generate_edited_video(input_images=cropped_images,\n result_images_smoothed=result_images_smoothed,\n edited_images_smoothed=edited_images_end_smoothed,\n video_handler=video_handler,\n save_name=f\"edited_video_{interfacegan_edit.direction}_end\")\n\n if opts.styleclip_directions is not None:\n editor = StyleCLIPVideoEditor(generator=net.decoder, opts=video_opts)\n for styleclip_edit in video_opts.styleclip_edits:\n edited_images, edited_latents = editor.edit(edit_direction=styleclip_edit.target_text,\n alpha=styleclip_edit.alpha,\n beta=styleclip_edit.beta,\n result_latents=result_latents,\n landmarks_transforms=landmarks_transforms)\n edited_images_smoothed = editor.postprocess_and_smooth_edits(results, edited_latents, video_opts)\n editor.generate_edited_video(input_images=cropped_images,\n result_images_smoothed=result_images_smoothed,\n edited_images_smoothed=edited_images_smoothed,\n video_handler=video_handler,\n save_name=styleclip_edit.save_name)\n\n\ndef run_inference(input_paths: List[Path], input_images: List, landmarks_transforms: Dict[str, Any], net,\n opts: TrainOptions):\n results = {\"source_images\": [], \"result_images\": [], \"result_latents\": {}, \"landmarks_transforms\": []}\n with torch.no_grad():\n avg_image = get_average_image(net)\n # run inference one frame at a time (technically can be run in batches, but done for simplicity)\n for input_image, input_path in tqdm(zip(input_images, input_paths)):\n results[\"source_images\"].append(input_image)\n image_name = input_path.name\n if landmarks_transforms is not None:\n if image_name not in landmarks_transforms:\n continue\n image_landmarks_transform = torch.from_numpy(landmarks_transforms[image_name][-1]).cuda()\n else:\n image_landmarks_transform = None\n with torch.no_grad():\n transformed_image = IMAGE_TRANSFORMS(input_image)\n result_batch, latents = run_on_batch(inputs=transformed_image.unsqueeze(0).cuda(),\n net=net,\n opts=opts,\n avg_image=avg_image,\n landmarks_transform=image_landmarks_transform)\n # we'll save the last inversion and latent code\n results[\"result_images\"].append(result_batch[0][-1])\n results[\"result_latents\"][image_name] = latents[0][-1]\n results[\"landmarks_transforms\"].append(image_landmarks_transform)\n return results\n\n\nif __name__ == '__main__':\n run_inference_on_video()\n","repo_name":"yuval-alaluf/stylegan3-editing","sub_path":"inversion/video/inference_on_video.py","file_name":"inference_on_video.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":622,"dataset":"github-code","pt":"79"}
+{"seq_id":"28683065973","text":"class Dog:\n\n # Class object attribute\n species = 'mammal'\n\n def __init__(self, breed, name, has_spots):\n self.breed = breed\n self.name = name\n self.has_spots = has_spots\n\n def bark(self):\n print(\"WOOF !!!\")\n\n\nmy_dog = Dog(breed='Lab', name='Dock', has_spots=True)\nprint(\"type(my_object): \", type(my_dog))\nprint(\"type(my_object): \", my_dog.breed)\nmy_dog.bark()\n\n\nclass Circle:\n\n pi = 3.14\n\n def __init__(self, radius=10):\n self.radius = radius\n self.area = Circle.pi * radius ** 2\n\n def circumference(self):\n return 2 * Circle.pi * self.radius\n\n\ncircle = Circle(5)\nprint(\"circle.area: \", circle.area)\nprint(\"circle.circumference: \", circle.circumference())\n\n\n","repo_name":"thbaymet/python-intro","sub_path":"alphabet/aap_classes.py","file_name":"aap_classes.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"40825276743","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 21 23:12:48 2022\n\n@author: gyzdm\n\"\"\"\n\nimport yfinance as yf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport os\n\n\nclass StockRadar:\n def __init__(self, watch_list, backtrack_output, data_input, start_date):\n self.watch_list = watch_list\n self.data = None\n self.sma = None\n self.backtrack_list = []\n self.transactions = []\n self.backtrack_output = backtrack_output\n self.data_input = data_input\n self.start_date = start_date\n self.__load_data()\n print(os.getcwd())\n \n def __load_data(self):\n watch_list_string = \" \".join(self.watch_list)\n if self.data_input:\n if os.path.exists(self.data_input):\n self.data = pd.read_pickle(self.data_input)\n else:\n self.data = yf.download(watch_list_string, start=self.start_date)\n self.data.to_pickle(self.data_input)\n else:\n self.data = yf.download(watch_list_string, start=self.start_date)\n #self.data.to_csv(self.backtrack_output+'data.csv') \n return\n \n def getMovingAverage(self):\n self.sma_window_sizes = [5,10,20,30,50,100,200]\n self.sma_tokens = [\"SMA{}\".format(window_size) for window_size in self.sma_window_sizes]\n #self.sma = self.data.loc[:,([\"Close\"],self.watch_list)]\n columns = pd.MultiIndex.from_product([self.sma_tokens, self.watch_list], names=['sma_type','token'])\n self.sma = pd.DataFrame(columns = columns)\n for window_size in self.sma_window_sizes:\n sma_token = \"SMA{}\".format(window_size)\n for stock_token in self.watch_list:\n stock_close_prices = self.data[\"Close\"][stock_token].to_frame()\n sma_df = stock_close_prices[stock_token].rolling(window_size).mean()\n self.sma.loc[:,(sma_token,stock_token)] = sma_df\n #self.sma.dropna(inplace=True)\n #self.sma.loc[:,(slice(None),['SPY'])].plot()\n #plt.show()\n print(\"{0} Moving Average Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return self.sma\n \n def checkSMACrossing(self):\n if not self.sma:\n self.getMovingAverage()\n stock_crossing_tag = []\n data_365day = self.data[\"Close\"]\n data_30day = data_365day[-30:-1]\n for stock_token in self.watch_list:\n close_today = self.data[\"Close\"][stock_token][-1]\n close_yesterday = self.data[\"Close\"][stock_token][-2]\n high_today = self.data[\"High\"][stock_token][-1]\n low_today = self.data[\"Low\"][stock_token][-1]\n change = close_today/close_yesterday-1\n data_365day = self.data[\"Close\"][stock_token]\n data_30day = data_365day[-30:-1]\n rank365 = data_365day.rank(pct=True)\n rank30 = data_30day.rank(pct=True)\n for sma_token in self.sma_tokens:\n sma_today = self.sma[sma_token][stock_token][-1]\n sma_yesterday = self.sma[sma_token][stock_token][-2]\n if close_today > sma_today and close_yesterday < sma_yesterday:\n stock_crossing_tag.append(\"{0} Up Crossing {1} change:{2:+.1%} rank30:{3:.1%} rank365:{4:.1%}\".format(stock_token,sma_token, change,rank30[-1],rank365[-1])) \n elif close_today < sma_today and close_yesterday > sma_yesterday:\n stock_crossing_tag.append(\"{0} Down Crossing {1} change:{2:+.1%} rank30:{3:.1%} rank365:{4:.1%}\".format(stock_token,sma_token, change,rank30[-1],rank365[-1]))\n elif high_today > sma_today and close_yesterday < sma_yesterday:\n stock_crossing_tag.append(\"{} Failed Up Crossing {}\".format(stock_token,sma_token))\n elif low_today < sma_today and close_yesterday > sma_yesterday:\n stock_crossing_tag.append(\"{} Failed Down Crossing {}\".format(stock_token,sma_token))\n print(\"{0} SMA 
Crossing Checking Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return stock_crossing_tag\n \n def backtrack_sma(self):\n if self.sma is None:\n self.getMovingAverage()\n print(\"{0} Start SMA Backtracking...\".format(datetime.now().strftime(\"%H:%M:%S\")))\n for stock_token in self.watch_list:\n close_prices = self.data[\"Close\"][stock_token]\n initial_balance = 10000\n # SMA strategy\n for window_size in self.sma_window_sizes:\n sma_token = \"SMA{}\".format(window_size)\n print(\"Working on {1} Backtracking {0}\".format(stock_token,sma_token))\n shares = 0\n balance = 0\n next_year = True\n for row in range(close_prices.shape[0]):\n sma_today = self.sma[sma_token][stock_token][row]\n sma_yesterday = self.sma[sma_token][stock_token][row-1]\n if pd.isna(sma_today) or pd.isna(sma_yesterday):\n continue\n close_today = close_prices[row]\n close_yesterday = close_prices[row-1]\n if next_year:\n year = close_prices.index[row].year\n next_year = False\n if window_size>=100 and close_today > sma_today:\n shares_to_buy = initial_balance/close_prices[row]\n shares += shares_to_buy\n balance = 0\n total_asset = shares*close_prices[row]+balance\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',shares_to_buy,close_today,\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")]) \n else:\n balance += initial_balance\n year_start_asset = shares*close_prices[row] + balance\n if close_today > sma_today and close_yesterday < sma_yesterday:\n if balance > 0:\n shares_to_buy = balance/close_today\n shares += shares_to_buy\n total_asset = shares*close_today\n balance = 0\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',shares_to_buy,close_today,\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")]) \n elif close_today < sma_today and close_yesterday > sma_yesterday:\n if shares>0:\n shares_to_sell = shares\n balance_credits = shares*close_today\n shares =0\n balance+=balance_credits\n total_asset = balance\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'sell',shares_to_sell,close_today,\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")]) \n if row == close_prices.shape[0]-1 or close_prices.index[row+1].year>year:\n total_asset = shares*close_prices[row]+balance\n performance = total_asset/year_start_asset - 1\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'hold',shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n self.backtrack_list.append([stock_token,sma_token,year,performance]) \n next_year = True\n print(\"{0} Moving Average Backtrack Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n \n def backtrack_all_in(self):\n sma_token = 'All_In'\n initial_balance = 10000\n for stock_token in self.watch_list:\n print(\"Working on {1} Backtracking {0}\".format(stock_token,sma_token))\n close_prices = self.data[\"Close\"][stock_token]\n shares = 0\n balance = initial_balance\n for row in range(close_prices.shape[0]):\n if pd.isna(close_prices[row]):\n continue\n if shares == 0:\n shares = balance/close_prices[row]\n year = close_prices.index[row].year\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',shares,close_prices[row],\n 0,balance,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n balance = 0\n if row == close_prices.shape[0]-1 or close_prices.index[row+1].year>year:\n total_asset = 
shares*close_prices[row]+balance\n performance = total_asset/initial_balance - 1\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'hold',shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n self.backtrack_list.append([stock_token,sma_token,year,performance]) \n shares = 0\n balance = initial_balance\n print(\"{0} All In Backtrack Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n \n def backtrack_automatic(self):\n sma_tokens = [('Automatic_Daily',0),('Automatic_Monthly',12),('Automatic_Biweekly',24)]\n initial_balance = 10000\n for sma_token,frequency in sma_tokens:\n for stock_token in self.watch_list:\n print(\"Working on {1} Backtracking {0}\".format(stock_token,sma_token))\n close_prices = self.data[\"Close\"][stock_token]\n shares = 0\n next_year = True\n for row in range(close_prices.shape[0]):\n if pd.isna(close_prices[row]):\n continue\n if next_year:\n year = close_prices.index[row].year\n next_year = False\n ndays = len(close_prices[close_prices.index.year == year])\n n_interval = ndays if frequency == 0 else frequency\n period = ndays//n_interval\n balance = initial_balance\n periodic_invest_fund = initial_balance/n_interval\n year_start_asset = shares*close_prices[row] + balance\n if row % period == 0 and balance*1.1>=periodic_invest_fund:\n new_shares=periodic_invest_fund/close_prices[row]\n shares+=new_shares\n balance-=periodic_invest_fund\n total_asset = shares*close_prices[row] + balance\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',new_shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n if row == close_prices.shape[0]-1 or close_prices.index[row+1].year>year:\n total_asset = shares*close_prices[row] + balance\n performance = total_asset/year_start_asset - 1\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'hold',new_shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n self.backtrack_list.append([stock_token,sma_token,year,performance]) \n next_year = True\n print(\"{0} Automatic Backtrack Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n \n def backtrack(self):\n # Automatic Strategy\n self.backtrack_automatic()\n # SMA strategy\n self.backtrack_sma()\n # All in strategy\n self.backtrack_all_in()\n backtrack_df = pd.DataFrame(data=self.backtrack_list,columns = ['Stock','Strategy','Year','Performance'])\n backtrack_df.to_csv(self.backtrack_output+'performance.csv')\n transaction_df = pd.DataFrame(data=self.transactions,columns = ['Strategy','Stock','Year','Transaction','Shares','Price',\n 'Balance','Total Asset','Date'])\n transaction_df.to_csv(self.backtrack_output+'transaction.csv')\n print(\"{0} Backtrack results wirting Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n\ndef main_old():\n data = yf.download(\"SPY AAPL\", start=\"2017-01-01\", end=\"2017-04-30\")\n apple = data[\"Close\"][\"AAPL\"]\n msft = yf.Ticker(\"MSFT\")\n # get stock info\n info=msft.info\n \n # get historical market data\n hist = msft.history(period=\"max\")\n \n # show actions (dividends, splits)\n actions = msft.actions\n \n # show sustainability\n sustainability= msft.sustainability\n \n # show analysts recommendations\n recommendations = msft.recommendations\n \n # show news\n news = msft.news\n \n a = 0\n return\n\ndef main():\n #watch_list=[\"SPY\",\"AAPL\"]\n watch_list = 
[\"AAPL\",\"ADBE\",\"AMD\",\"AMZN\",\"ARKK\",\"ATVI\",\"BABA\",\"BIDU\",\"BILI\",\n \"CRM\",\"DIDIY\",\"DIS\",\"DOCU\",\"EA\",\"EDU\",\"ENPH\",\"FDX\",\"GILD\",\n \"GOOG\",\"HUYA\",\"IAU\",\"JD\",\"JNJ\",\"MA\",\"META\",\"MSFT\",\"MU\",\"NFLX\",\n \"NIO\",\"NTES\",\"NVDA\",\"PARA\",\"PDD\",\"PFSI\",\"PINS\",\"PYPL\",\"QQQ\",\n \"SNAP\",\"SPY\",\"T\",\"TAL\",\"TCEHY\",\"TME\",\"TSLA\",\"TWLO\",\"U\",\"UBER\",\n \"V\",\"VRTX\",\"VXX\",\"VZ\",\"WMT\",\"ZM\"]\n sr = StockRadar(watch_list,r\"C:\\\\Dropbox\\\\Share for Gary\\\\Investment\\\\\",\".\\\\data\\\\data2000.pkl\",\"2000-01-01\")\n sr.backtrack()\n #sma = sr.checkSMACrossing()\n \n \n \n return\n\n\nif __name__ == '__main__':\n main()","repo_name":"gyzdmgqy/stock","sub_path":"Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":14445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29000286814","text":"import sys\nimport numpy as np\n\ndef parse_data():\n \"\"\"Parse data into a 2-D numpy array for vector calculations\"\"\"\n return np.array([_make_array(line.strip()) for line in sys.stdin.readlines()])\n\n\ndef gamma_rate(vector_data):\n \"\"\"Return gamma rate as decimal string (rounding up 0.5 to 1)\"\"\"\n length, _ = vector_data.shape\n gamma_vector = vector_data.sum(axis=0) / length\n\n return ''.join([str(int(i)) for i in np.rint(np.nextafter(gamma_vector, gamma_vector + 1))])\n\n\ndef epsilon_rate(binary_string):\n \"\"\"Return corresponding epsilon rate as binary string (bit-wise complement)\"\"\"\n return ''.join([str(int(not int(i))) for i in binary_string])\n\n\ndef multiply_gamma_epsilon(gamma, epsilon):\n \"\"\"Return decimal product of binary strings gamma and epsilon\"\"\"\n return int(gamma, 2) * int(epsilon, 2)\n\n\ndef oxygen_rate(vector_data):\n position = 0\n while vector_data.shape[0] > 1:\n criterion = gamma_rate(vector_data)[position]\n\n # delete rows from data where bit is not matching criterion\n rows_to_delete = np.where(vector_data[:, position] != int(criterion))[0]\n vector_data = np.delete(vector_data, rows_to_delete, axis=0)\n position += 1\n return ''.join([str(int(i)) for i in vector_data[0]])\n\n\ndef co2_rate(vector_data):\n position = 0\n while vector_data.shape[0] > 1:\n criterion = gamma_rate(vector_data)[position]\n\n # delete rows from data where bit is not matching co2 criterion\n # i.e. where it is matching ox criterion\n rows_to_delete = np.where(vector_data[:, position] == int(criterion))[0]\n vector_data = np.delete(vector_data, rows_to_delete, axis=0)\n position += 1\n return ''.join([str(int(i)) for i in vector_data[0]])\n\n\ndef life_support_rating(vector_data):\n \"\"\"Return decimal product of oxygen_rate and co2_rate binary strings\"\"\"\n ox = oxygen_rate(vector_data)\n co2 = co2_rate(vector_data)\n return int(ox, 2) * int(co2, 2)\n\n\ndef _make_array(string_input):\n return np.array([int(i) for i in string_input])\n\n\nif __name__ == '__main__':\n\n data = parse_data()\n\n # Part 1\n gamma = gamma_rate(data)\n epsilon = epsilon_rate(gamma)\n\n solution = multiply_gamma_epsilon(gamma, epsilon)\n print(solution)\n\n # Part 2\n life_support = life_support_rating(data)\n print(life_support)\n","repo_name":"annplaube/aoc_2021","sub_path":"3/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"23966842299","text":"from django import forms\nfrom filer.models.filemodels import File, Folder\n\n\nclass FileForm(forms.ModelForm):\n class Meta:\n fields = ('name', 'file')\n model = File\n\n def __init__(self, *args, **kwargs):\n self.folder_name = kwargs.pop(\"folder_name\", \"Temp\")\n super(FileForm, self).__init__(*args, **kwargs)\n self.fields['name'].required = True\n self.fields['file'].required = True\n\n def save(self, commit=True):\n object = super(FileForm, self).save(commit=False)\n folder, created = Folder.objects.get_or_create(name=self.folder_name)\n object.folder = folder\n object.save()\n return object\n","repo_name":"SmallsLIVE/smallslive","sub_path":"smallslive/oscar_apps/dashboard/files/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"}
+{"seq_id":"16972444090","text":"from typing import Dict, List, Union\n\nfrom cachetools import cached, TTLCache\n\nfrom app.chain import ChainBase\nfrom app.core.config import settings\nfrom app.core.context import TorrentInfo, Context, MediaInfo\nfrom app.core.metainfo import MetaInfo\nfrom app.db import SessionFactory\nfrom app.db.systemconfig_oper import SystemConfigOper\nfrom app.helper.sites import SitesHelper\nfrom app.log import logger\nfrom app.schemas import Notification\nfrom app.schemas.types import SystemConfigKey, MessageChannel\nfrom app.utils.singleton import Singleton\nfrom app.utils.string import StringUtils\n\n\nclass TorrentsChain(ChainBase, metaclass=Singleton):\n \"\"\"\n 站点首页种子处理链,服务于订阅、刷流等\n \"\"\"\n\n _cache_file = \"__torrents_cache__\"\n\n def __init__(self):\n self._db = SessionFactory()\n super().__init__(self._db)\n self.siteshelper = SitesHelper()\n self.systemconfig = SystemConfigOper()\n\n def remote_refresh(self, channel: MessageChannel, userid: Union[str, int] = None):\n \"\"\"\n 远程刷新订阅,发送消息\n \"\"\"\n self.post_message(Notification(channel=channel,\n title=f\"开始刷新种子 ...\", userid=userid))\n self.refresh()\n self.post_message(Notification(channel=channel,\n title=f\"种子刷新完成!\", userid=userid))\n\n def get_torrents(self) -> Dict[str, List[Context]]:\n \"\"\"\n 获取当前缓存的种子\n \"\"\"\n # 读取缓存\n return self.load_cache(self._cache_file) or {}\n\n @cached(cache=TTLCache(maxsize=128, ttl=600))\n def browse(self, domain: str) -> List[TorrentInfo]:\n \"\"\"\n 浏览站点首页内容,返回种子清单,TTL缓存10分钟\n :param domain: 站点域名\n \"\"\"\n logger.info(f'开始获取站点 {domain} 最新种子 ...')\n site = self.siteshelper.get_indexer(domain)\n if not site:\n logger.error(f'站点 {domain} 不存在!')\n return []\n return self.refresh_torrents(site=site)\n\n def refresh(self) -> Dict[str, List[Context]]:\n \"\"\"\n 刷新站点最新资源,识别并缓存起来\n \"\"\"\n\n # 读取缓存\n torrents_cache = self.get_torrents()\n\n # 所有站点索引\n indexers = self.siteshelper.get_indexers()\n # 配置的Rss站点\n config_indexers = [str(sid) for sid in self.systemconfig.get(SystemConfigKey.RssSites) or []]\n # 遍历站点缓存资源\n for indexer in indexers:\n # 未开启的站点不搜索\n if config_indexers and str(indexer.get(\"id\")) not in config_indexers:\n continue\n domain = StringUtils.get_url_domain(indexer.get(\"domain\"))\n torrents: List[TorrentInfo] = self.browse(domain=domain)\n # 按pubdate降序排列\n torrents.sort(key=lambda x: x.pubdate or '', reverse=True)\n # 取前N条\n torrents = torrents[:settings.CACHE_CONF.get('refresh')]\n if torrents:\n # 过滤出没有处理过的种子\n torrents = [torrent for torrent in torrents\n if f'{torrent.title}{torrent.description}'\n not in [f'{t.torrent_info.title}{t.torrent_info.description}'\n for t in torrents_cache.get(domain) or []]]\n if torrents:\n logger.info(f'{indexer.get(\"name\")} 有 {len(torrents)} 个新种子')\n else:\n logger.info(f'{indexer.get(\"name\")} 没有新种子')\n continue\n for torrent in torrents:\n logger.info(f'处理资源:{torrent.title} ...')\n # 识别\n meta = MetaInfo(title=torrent.title, subtitle=torrent.description)\n # 识别媒体信息\n mediainfo: MediaInfo = self.recognize_media(meta=meta)\n if not mediainfo:\n logger.warn(f'未识别到媒体信息,标题:{torrent.title}')\n # 存储空的媒体信息\n mediainfo = MediaInfo()\n # 清理多余数据\n mediainfo.clear()\n # 上下文\n context = Context(meta_info=meta, media_info=mediainfo, torrent_info=torrent)\n # 添加到缓存\n if not torrents_cache.get(domain):\n torrents_cache[domain] = [context]\n else:\n torrents_cache[domain].append(context)\n # 如果超过了限制条数则移除掉前面的\n if len(torrents_cache[domain]) > settings.CACHE_CONF.get('torrents'):\n torrents_cache[domain] = 
torrents_cache[domain][-settings.CACHE_CONF.get('torrents'):]\n # 回收资源\n del torrents\n else:\n logger.info(f'{indexer.get(\"name\")} 没有获取到种子')\n # 保存缓存到本地\n self.save_cache(torrents_cache, self._cache_file)\n # 返回\n return torrents_cache\n","repo_name":"2xx8/MoviePilot","sub_path":"app/chain/torrents.py","file_name":"torrents.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"}
+{"seq_id":"24430622414","text":"import textwrap\n\nempty_char = '_'\nx_char = 'X'\no_char = 'O'\nnumber_of_spaces = 9\nwin_count_dict = {x_char: 0, o_char: 0}\ngrid_string = number_of_spaces * empty_char\nmove_count = 0\n\n\ndef print_grid():\n grid = [list(row) for row in textwrap.wrap(grid_string, 3)]\n\n print('---------')\n for row in grid:\n row_string = ' '.join(row)\n print(f\"| {row_string} |\")\n print('---------')\n\n\ndef grid_filled():\n return True if grid_string.count(empty_char) == 0 else False\n\n\ndef number_of_turns(player_char):\n return grid_string.count(player_char)\n\n\ndef count_wins():\n top = grid_string[0:3]\n middle = grid_string[3:6]\n bottom = grid_string[6:9]\n left = grid_string[0::3]\n center = grid_string[1::3]\n right = grid_string[2::3]\n diagonal_1_to_9 = grid_string[0::4]\n diagonal_7_to_3 = grid_string[2:8:2]\n\n for char in list(win_count_dict):\n win_count_dict[char] = [top, middle, bottom, left, center, right,\n diagonal_1_to_9, diagonal_7_to_3\n ].count(char * 3)\n\n\ndef should_the_game_continue():\n count_wins()\n if (win_count_dict[x_char] > 0 and win_count_dict[o_char] > 0) or\\\n (abs(number_of_turns(x_char) - number_of_turns(o_char)) >= 2):\n state = 'Impossible'\n elif grid_filled() and win_count_dict[x_char] == 0 and win_count_dict[o_char] == 0:\n state = 'Draw'\n elif win_count_dict[x_char] > 0:\n state = 'X wins'\n elif win_count_dict[o_char] > 0:\n state = 'O wins'\n else:\n # No End State has been triggered, the game should continue\n return True\n\n # An End State has been triggered, the game should NOT continue\n print(state)\n return False\n\n\ndef make_move(char):\n index = None\n\n while True:\n try:\n # Attempts to get the input and convert the string into integers\n coordinates = [int(string_input) for string_input in input().split(' ')]\n except ValueError:\n print('You should enter numbers!')\n continue\n\n # Validates Input is in the correct range\n if coordinates[0] < 1 or coordinates[0] > 3 or coordinates[1] < 1 or coordinates[1] > 3:\n print('Coordinates should be from 1 to 3!')\n continue\n\n # Converts the pass 2 integer input into the index of the grid_string\n index = (3 * (coordinates[0] - 1)) + (coordinates[1] - 1)\n\n if grid_string[index] != empty_char:\n print('This cell is occupied! Choose another one!')\n continue\n else:\n break\n\n grid_list = list(grid_string)\n grid_list[index] = char\n return ''.join(grid_list)\n\n\n# Print the empty grid and start the Game\nprint_grid()\n\nwhile should_the_game_continue():\n if move_count % 2 == 0:\n grid_string = make_move(x_char)\n else:\n grid_string = make_move(o_char)\n move_count += 1\n print_grid()\n\n","repo_name":"notdevinclark/Simple-Tic-Tac-Toe-Python","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"23110908582","text":"from five import grok\n\nfrom zope.component import getUtility\nfrom zope import schema\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zope.schema.vocabulary import SimpleTerm, SimpleVocabulary\n\nfrom Products.SilvaMetadata.interfaces import IMetadataService\n\nfrom silva.core.interfaces import IAutoTOC\nfrom silva.core.views import views as silvaviews\nfrom silva.core.conf.interfaces import ITitledContent\nfrom silva.core.interfaces import IAddableContents, IPublishable\nfrom silva.translations import translate as _\nfrom zeam.form import silva as silvaforms\n\n\n@apply\ndef sort_order_source():\n orders = []\n for key, title in [\n ('silva', _(u'Silva folder order')),\n ('alpha', _(u'Alphabetically')),\n ('reversealpha', _(u'Reverse alphabetically')),\n ('chronmod', _(u'Chronologically by modification date')),\n ('rchronmod', _(u'Reverse chronologically by modification date'))]:\n orders.append(SimpleTerm(value=key, token=key, title=title))\n return SimpleVocabulary(orders)\n\n\n@grok.provider(IContextSourceBinder)\ndef silva_content_types(context):\n contents = []\n container = context.get_container()\n addables = IAddableContents(container)\n for addable in addables.get_container_addables(IPublishable):\n contents.append(SimpleTerm(\n value=addable,\n token=addable,\n title=addable))\n return SimpleVocabulary(contents)\n\n\nclass IAutoTOCSchema(ITitledContent):\n _local_types = schema.Set(\n title=_(u\"Types to list\"),\n description=_(\n u\"Select here the content types you wish to see in \"\n u\"the table of content. You need to selected container types \"\n u\"(e.g. Folder and Publication) in order for the TOC to \"\n u\"display their contents.\"),\n value_type=schema.Choice(source=silva_content_types),\n default=set(['Silva Document', 'Silva Folder', 'Silva Publication']),\n required=True)\n _toc_depth = schema.Int(\n title=_(u\"Depth\"),\n description=_(\n u\"The depth to which the Table of Contents will be rendered \"\n u\"(-1 means unlimited depth.)\"),\n default=-1,\n min=-1,\n max=99,\n required=True)\n _display_desc_flag = schema.Bool(\n title=_(u\"Display description\"),\n description=_(\n u\"If selected, each item displayed will include its title \"\n u\"and metadata description, if available. \"),\n default=False,\n required=True)\n _show_icon = schema.Bool(\n title=_(\"Show icon\"),\n description=_(\n u\"If selected, each item displayed will include its icon. 
\"),\n default=False,\n required=True)\n _show_container_link = schema.Bool(\n title=_(\"Show container link\"),\n description=_(\n u\"If selected, there will be a link to the container \"\n u\"(as an H3) before the TOC list.\"),\n default=False,\n required=True)\n _sort_order = schema.Choice(\n title=_(u\"Sort order\"),\n description=_(u\"The order items in a container will be sorted\"),\n source=sort_order_source,\n default='silva',\n required=True)\n\n\n@silvaforms.customize(name='_toc_depth', schema=IAutoTOCSchema)\ndef customize_toc_depth(field):\n field.htmlAttributes['style'] = 'width: 4em;'\n\n\nclass AutoTOCAddForm(silvaforms.SMIAddForm):\n \"\"\"Add an Auto TOC.\n \"\"\"\n grok.context(IAutoTOC)\n grok.name(u'Silva AutoTOC')\n\n fields = silvaforms.Fields(IAutoTOCSchema)\n\n\nclass AutoTOCEditForm(silvaforms.SMIEditForm):\n \"\"\"Add an Auto TOC.\n \"\"\"\n grok.context(IAutoTOC)\n\n fields = silvaforms.Fields(IAutoTOCSchema).omit('id')\n\n\nclass AutoTOCView(silvaviews.View):\n grok.context(IAutoTOC)\n\n def update(self):\n metadata = getUtility(IMetadataService)\n self.description = metadata.getMetadataValue(\n self.context, 'silva-extra', 'content_description', acquire=0)\n","repo_name":"silvacms/Products.Silva","sub_path":"Products/Silva/AutoTOC/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"16494065337","text":"import argparse\nimport yaml\nfrom typing import Dict, List\nimport numpy as np\nimport json\n\nfrom sklearn.gaussian_process import GaussianProcessRegressor, kernels\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.pipeline import Pipeline\n\nfrom proxystore.store import get_store\n\nimport lifecycle\n\n\ndef reprioritize_queue(training_data: List[List],\n pred_data: List[np.array],\n gpr: GaussianProcessRegressor,\n opt_delay: float = 0.5) -> np.ndarray:\n \"\"\"Determine an optimal order in which to excecute a task queue\n\n Args:\n database: Inputs and outputs of completed simulations\n gpr: Gaussian-process regression model\n queue: Existing task queue\n opt_delay: Minimum run time of this function\n Returns:\n Re-ordered priorities of queue\n \"\"\"\n # can be called via funcx so imports\n import time\n import numpy as np\n import scipy\n import datetime\n\n start = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()\n time.sleep(opt_delay)\n\n # Update the GPR with the available training data\n train_X, train_y = zip(*training_data)\n gpr.fit(np.vstack(train_X), train_y)\n\n # Run GPR on the existing task queue\n pred_y, pred_std = gpr.predict(pred_data, return_std=True)\n best_so_far = np.min(train_y)\n # MB: FIXED\n # ei = (best_so_far - pred_y) / pred_std\n ei = (best_so_far - pred_y) * scipy.stats.norm(0, 1).cdf((best_so_far - pred_y) / pred_std) + pred_std * scipy.stats.norm(0, 1).pdf((best_so_far - pred_y) / pred_std)\n\n # Argument sort the EI score, ordered with largest tasks first\n end = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()\n return start, end, np.argsort(-1 * ei)\n\n\ndef reprioritize_fx(fx, completed, pred_data, gpr):\n store = get_store('globus')\n gpr_proxy = store.proxy(gpr)\n ft = fx.submit(reprioritize_queue, completed, pred_data, gpr_proxy)\n return ft.result()\n\n\ndef reprioritize(task_queue, fx, database: Dict[int, List], output_file=None):\n completed = [x[1:] for x in filter(lambda x: x[2] is not None, database.values())]\n uncompleted = [x[:2] for x in filter(lambda x: x[2] is None, database.values())]\n if len(uncompleted) > 0:\n gpr = Pipeline([('scale', MinMaxScaler(feature_range=(-1, 1))),\n ('gpr', GaussianProcessRegressor(normalize_y=True, kernel=kernels.RBF() * kernels.ConstantKernel()))\n ])\n # x[1] is input array\n # start_t, end_t, new_order = reprioritize_queue(completed, [x[1] for x in uncompleted], gpr=gpr)\n start_t, end_t, new_order = reprioritize_fx(fx, completed, [x[1] for x in uncompleted], gpr=gpr)\n\n fts = []\n priorities = []\n max_priority = len(uncompleted)\n for i, idx in enumerate(new_order):\n ft = uncompleted[idx][0]\n priority = max_priority - i\n fts.append(ft)\n priorities.append(priority)\n\n if output_file is not None:\n with open(output_file, 'a') as f_out:\n f_out.write(f'R START: {start_t}\\n')\n f_out.write(f'R END: {end_t}\\n')\n for i, ft in enumerate(fts):\n f_out.write(f'P UPDATE: {ft.eq_task_id} {ft.priority} {priorities[i]}\\n')\n\n task_queue.update_priorities(fts, priorities)\n\n\ndef submit_initial_tasks(task_queue, exp_id, params: Dict):\n search_space_size = params['search_space_size']\n dim = params['sample_dimensions']\n sampled_space = np.random.uniform(size=(search_space_size, dim), low=-32.768, high=32.768)\n\n task_type = 0\n mean_rt = params['runtime']\n std_rt = params['runtime_var']\n\n payloads = []\n for sample in sampled_space:\n payload = json.dumps({'x': list(sample), 'mean_rt': mean_rt, 'std_rt': std_rt})\n 
payloads.append(payload)\n fts = task_queue.submit_tasks(exp_id, eq_type=task_type, payload=payloads)\n\n database = {}\n for i, ft in enumerate(fts):\n database[ft.eq_task_id] = [ft, sampled_space[i], None]\n\n return database\n\n\ndef run(exp_id, params: Dict):\n output_file = f'./output/{exp_id}_output.txt'\n # To avoid errors in finally\n task_queues = pools = dbs = fx_executors = {}\n try:\n fx_endpoints, db_names, pool_names = lifecycle.find_active_elements(params)\n repro_endpoint = params['reprioritize_endpoint']\n if repro_endpoint not in fx_endpoints:\n fx_endpoints.append(repro_endpoint)\n\n fx_executors = lifecycle.initialize_fx_endpoints(fx_endpoints, params)\n dbs = lifecycle.initialize_dbs(db_names, fx_executors, params)\n task_queues = lifecycle.initialize_task_queues(fx_executors, dbs, params)\n task_queue = task_queues['sim']\n database = submit_initial_tasks(task_queue, exp_id, params)\n # launch after submitting so pool has full data\n pools = lifecycle.initialize_worker_pools(exp_id, pool_names, fx_executors,\n dbs, params)\n lifecycle.initialize_proxystore(params)\n\n num_guesses = params['num_guesses']\n retrain_after = params['retrain_after']\n # next_retrain = retrain_after\n tasks_completed = 0\n fts = [v[0] for _, v in database.items()]\n print(f'NUM GUESSES: {num_guesses}')\n print(f'RETRAIN AFTER: {retrain_after}')\n print(f'FTS: {len(fts)}')\n num_repro = 0\n while tasks_completed < num_guesses:\n completed_fts = task_queue.pop_completed(fts, n=retrain_after)\n for ft in completed_fts:\n _, result = ft.result()\n database[ft.eq_task_id][2] = float(result)\n tasks_completed += 1\n\n print(f\"tasks completed: {tasks_completed}\")\n reprioritize(task_queue, fx_executors[repro_endpoint], database, output_file=output_file)\n num_repro += 1\n if num_repro == 2:\n # pool_names = 'bebop2', add 'bebop2' to params with params['tasks'][0]['pools'].append()\n params['tasks'][0]['pools'].append('bebop2')\n p = lifecycle.initialize_worker_pools(exp_id, ['bebop2'], fx_executors,\n dbs, params)\n pools.update(p)\n print(pools)\n elif num_repro == 4:\n params['tasks'][0]['pools'].append('bebop3')\n p = lifecycle.initialize_worker_pools(exp_id, ['bebop3'], fx_executors,\n dbs, params)\n pools.update(p)\n\n finally:\n for task_queue in task_queues.values():\n task_queue.shutdown()\n for db in dbs.values():\n db.shutdown()\n for pool in pools.values():\n pool.shutdown()\n for fx in fx_executors.values():\n fx.shutdown()\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('exp_id', help='experiment id')\n parser.add_argument('config_file', help=\"yaml format configuration file\")\n return parser\n\n\nif __name__ == '__main__':\n parser = create_parser()\n args = parser.parse_args()\n with open(args.config_file) as fin:\n params = yaml.safe_load(fin)\n\n # launch.launch_dbs(params)\n # launch.launch_worker_pools(args.exp_id, params)\n # launch.stop_dbs(params)\n\n run(args.exp_id, params)\n","repo_name":"NSF-RESUME/2023_ParSocial_OSPREY_example","sub_path":"python/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73177888254","text":"import os\r\nfrom views import designDrawSchemes, styles_and_animation\r\nfrom helpers import validate\r\nfrom creators import draw_schemes\r\nfrom os.path import isfile, join\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtGui import QIcon, QTextCursor\r\nfrom PyQt5.QtCore import QSize, QTimer, QThread, pyqtSignal\r\n\r\nclass DrawOne(QThread):\r\n change_value = pyqtSignal(str)\r\n def __init__(self, draw_params, gost_frame_params, many_schemes):\r\n super().__init__()\r\n self.draw_params = draw_params\r\n self.many_schemes = many_schemes\r\n self.gost_frame_params = gost_frame_params\r\n self.modules = 0\r\n self.chains = 0\r\n\r\n def run(self):\r\n fp_invertor = 'Data/Schemes/Invertor/'\r\n files_in_invertor = [f for f in os.listdir(fp_invertor) if isfile(join(fp_invertor, f))]\r\n try:\r\n if len(files_in_invertor) != 0:\r\n for file in files_in_invertor:\r\n os.remove(fp_invertor + f\"/{file}\")\r\n except PermissionError:\r\n self.statusBar.showMessage('Открыт pdf файл, закройте его и повторите попытку', 4000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n QTimer.singleShot(4000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return 1 \r\n\r\n config_keys = [] \r\n for key in self.draw_params.keys():\r\n if 'inv_' in key:\r\n config_keys.append(key)\r\n\r\n numbr = 0\r\n for config in config_keys:\r\n counts = int(self.draw_params[config]['count'])\r\n if self.many_schemes == True:\r\n for num in range(counts):\r\n numbr += 1\r\n self.num_error = draw_schemes.draw(self.draw_params, numbr, self.gost_frame_params, config)\r\n if self.num_error['error'] != 0: return \r\n self.modules += self.num_error['modules']\r\n self.chains += self.num_error['chains']\r\n self.change_value.emit(f\"{numbr} из {self.draw_params['count_invertor']}\")\r\n else:\r\n start_num = numbr\r\n numbr += counts\r\n if counts > 1:\r\n if start_num == 0:\r\n nums = f\"{1}-{numbr}\"\r\n else:\r\n nums = f\"{start_num}-{numbr}\"\r\n else:\r\n nums = numbr\r\n \r\n self.num_error = draw_schemes.draw(self.draw_params, nums, self.gost_frame_params, config)\r\n if self.num_error['error'] != 0: return \r\n self.modules += self.num_error['modules'] * counts\r\n self.chains += self.num_error['chains'] * counts\r\n self.change_value.emit(f\"{numbr} из {self.draw_params['count_invertor']}\")\r\n\r\nclass WindowDraw(QtWidgets.QMainWindow, designDrawSchemes.Ui_WindowDrawSchemes):\r\n def __init__(self, instance_of_main_window):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.input_data()\r\n validate.validate_number(self.fields_text)\r\n self.main_window = instance_of_main_window\r\n self.btnDraw.clicked.connect(self.draw)\r\n self.btnOpenScheme.clicked.connect(self.open_scheme)\r\n self.btnAddConfigInvertor.clicked.connect(self.add_invertor)\r\n self.btnDelConfigInvertor.clicked.connect(self.del_invertor)\r\n self.btnAddMPPT.clicked.connect(self.add_config)\r\n self.btnDelMPPT.clicked.connect(self.del_config)\r\n self.btnUpdateConsole.clicked.connect(self.update_console)\r\n self.btnSaveConfig.clicked.connect(self.save_config)\r\n self.checkUse_5or4_line.clicked.connect(self.show_and_hide_color_line_because_phase)\r\n self.inputCount_mppt.textChanged.connect(self.validate_input)\r\n self.inputAll_chain.textChanged.connect(self.validate_input)\r\n self.inputCount_input_mppt.textChanged.connect(self.validate_input)\r\n self.checkUse_three_phase.stateChanged.connect(self.show_and_hide_color_line_because_phase)\r\n 
self.checkUse_y_connector.stateChanged.connect(self.validate_input)\r\n self.checkUse_all_mppt.stateChanged.connect(self.validate_input)\r\n self.spinBox_maxY.valueChanged.connect(self.validate_input)\r\n self.spinBox_numInvertor.valueChanged.connect(self.up_down_invertor_selection)\r\n self.spinBoxConfigInvertor.valueChanged.connect(self.spin_config)\r\n self.spinBoxMPPT.valueChanged.connect(self.spin_config)\r\n self.checkDifferentMPPT.stateChanged.connect(self.show_and_hide_spinBox_mppt)\r\n\r\n def input_data(self):\r\n self.spinBox_numInvertor.setMinimum(1)\r\n self.spinBox_numInvertor.setEnabled(False)\r\n self.spinBoxConfigInvertor.setMinimum(1)\r\n self.spinBoxConfigInvertor.setMaximum(1)\r\n self.spinBoxMPPT.setMinimum(1)\r\n self.spinBoxMPPT.setMaximum(1)\r\n self.btnOpenScheme.hide()\r\n self.btnDelConfigInvertor.hide()\r\n self.btnDelMPPT.hide()\r\n self.progressBar.hide()\r\n self.spinBox_CloneInvertor.setMinimum(1)\r\n self.show_and_hide_color_line_because_phase()\r\n self.show_and_hide_spinBox_mppt()\r\n self.btnSaveConfig.setIcon(QIcon('Data/System/Icons/save.png'))\r\n self.btnSaveConfig.setIconSize(QSize(30, 30))\r\n self.draw_params = {}\r\n self.fields_text = [self.inputCount_mppt, self.inputCount_input_mppt, self.inputSolar_count_on_the_chain, self.inputAll_chain]\r\n\r\n def open_scheme(self):\r\n self.path_structural_schemes = [QtWidgets.QFileDialog.getOpenFileName(self, 'Выберите файл структурной схемы', \r\n 'Data/Schemes/Invertor', \"*.pdf\")[0]]\r\n if len(self.path_structural_schemes[0]) != 0:\r\n os.startfile(self.path_structural_schemes[0])\r\n\r\n def reset(self):\r\n self.inputCount_mppt.clear()\r\n self.inputCount_input_mppt.clear()\r\n self.inputSolar_count_on_the_chain.clear()\r\n self.inputAll_chain.clear()\r\n self.checkUse_y_connector.setCheckState(0)\r\n self.checkUse_all_mppt.setCheckState(0)\r\n self.checkUse_three_phase.setCheckState(0)\r\n self.checkUse_5or4_line.setCheckState(0)\r\n self.checkUse_5or4_line.setEnabled(False)\r\n self.textConsoleDraw.clear()\r\n self.textConsoleCurrent.clear()\r\n self.spinBox_numInvertor.setValue(1)\r\n self.spinBox_numInvertor.setEnabled(False)\r\n self.spinBoxConfigInvertor.setMinimum(1)\r\n self.spinBoxConfigInvertor.setMaximum(1)\r\n self.spinBox_CloneInvertor.setValue(1)\r\n self.btnOpenScheme.hide()\r\n self.inputName_invertor.clear()\r\n self.inputNumber_invertor.clear()\r\n self.inputTitle_grid_line.clear()\r\n self.inputTitle_grid_line_length.clear()\r\n self.inputTitle_grid_top.clear()\r\n self.inputTitle_grid_switch.clear()\r\n self.inputCountAllInvertors.clear()\r\n\r\n def invertor_and_config_keys(self):\r\n invertors = self.main_window.invertors\r\n self.spinBox_numInvertor.setMaximum(len(invertors))\r\n self.spinBox_numInvertor.setEnabled(True)\r\n\r\n spinbox_val = self.spinBox_numInvertor.value() - 1\r\n self.invertor = invertors[f'found_invertor_{spinbox_val}']\r\n\r\n self.config_keys = []\r\n for key in self.invertor.keys():\r\n if 'inv_' in key:\r\n self.config_keys.append(key) \r\n self.spinBoxConfigInvertor.setMaximum(len(self.config_keys))\r\n\r\n def up_down_invertor_selection(self):\r\n self.invertor_and_config_keys()\r\n if self.invertor['broken_file'] != True:\r\n self.inputName_invertor.setText(f'{self.invertor[\"module\"]}')\r\n self.inputName_invertor.setCursorPosition(0)\r\n self.inputCount_mppt.setText(f'{self.invertor[\"mppt\"]}')\r\n self.inputCountMpptOnParams.setText(f'{self.invertor[\"mppt\"]}')\r\n self.inputCount_input_mppt.setText(f'{self.invertor[\"inputs\"]}')\r\n 
self.inputSolar_count_on_the_chain.setText(str(0))\r\n self.inputAll_chain.setText(str(0))\r\n self.spinBox_maxY.setMinimum(self.invertor['inputs'])\r\n self.spinBox_maxY.setMaximum(self.invertor['inputs'] * 2)\r\n self.spinBox_maxY.setValue(self.invertor['inputs'] * 2)\r\n if self.invertor['phase'] == 3:\r\n self.checkUse_three_phase.setCheckState(2)\r\n elif self.invertor['phase'] == 1:\r\n self.checkUse_three_phase.setCheckState(0)\r\n self.inputNumber_invertor.setText(f\"{self.invertor['type_inv']}\")\r\n self.inputTitle_grid_line.setText(f\"{self.invertor['title_grid_line']}\")\r\n self.inputTitle_grid_line_length.setText(f\"{self.invertor['title_grid_line_length']}\")\r\n self.inputTitle_grid_top.setText(f\"{self.invertor['title_grid_top']}\")\r\n self.inputTitle_grid_switch.setText(f\"{self.invertor['title_grid_switch']}\")\r\n self.checkUse_5or4_line.setCheckState(2 if self.invertor['use_5or4_line'] == True else 0) \r\n self.inputCountAllInvertors.setText(f\"{int(self.invertor['count_invertor'])}\")\r\n self.spin_config()\r\n self.draw_invertor_config_in_console()\r\n # self.show_and_hide_different_mppt(False)\r\n\r\n def spin_config(self):\r\n if len(self.config_keys) != 0:\r\n if len(self.config_keys) > 1:\r\n self.btnDelConfigInvertor.show()\r\n else:\r\n self.btnDelConfigInvertor.hide()\r\n self.spinBoxConfigInvertor.show()\r\n\r\n config_index = self.spinBoxConfigInvertor.value() - 1\r\n # print(invertor[config_keys[config_index]])\r\n count_params = len(self.invertor[self.config_keys[config_index]]['params'])\r\n diff_index = 0\r\n self.checkDifferentMPPT.setCheckState(0)\r\n if count_params > 1:\r\n self.btnDelMPPT.show()\r\n self.spinBoxMPPT.show()\r\n self.spinBoxMPPT.setMaximum(count_params)\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n self.checkDifferentMPPT.setCheckState(2)\r\n else:\r\n self.btnDelMPPT.hide()\r\n self.spinBoxMPPT.setMaximum(1)\r\n\r\n max_y = self.invertor[self.config_keys[config_index]]['params'][diff_index]['max_y']\r\n self.spinBox_maxY.setValue(max_y)\r\n self.spinBox_CloneInvertor.setValue(int(self.invertor[self.config_keys[config_index]]['count']))\r\n self.inputSolar_count_on_the_chain.setText(str(self.invertor[self.config_keys[config_index]]['params'][diff_index]['pvs']))\r\n self.inputCount_mppt.setText(str(self.invertor[self.config_keys[config_index]]['params'][diff_index]['mppts']))\r\n self.inputAll_chain.setText(str(int(self.invertor[self.config_keys[config_index]]['params'][diff_index]['chains'])))\r\n self.checkUse_y_connector.setCheckState(2 if self.invertor[self.config_keys[config_index]]['params'][diff_index]['y'] == True else 0)\r\n self.validate_input()\r\n \r\n def draw_invertor_config_in_console(self):\r\n self.textConsoleDraw.clear()\r\n self.textConsoleDraw.moveCursor(QTextCursor.Start)\r\n total_pvs = 0\r\n for index in range(len(self.config_keys)):\r\n count_inv = int(self.invertor[self.config_keys[index]]['count'])\r\n self.textConsoleDraw.append(f\" {index + 1} КОНФИГУРАЦИЯ {count_inv} ИНВ. \")\r\n count_params = len(self.invertor[self.config_keys[index]]['params'])\r\n pvs_on_conf = 0\r\n for i in range(count_params):\r\n pvs = self.invertor[self.config_keys[index]]['params'][i]['pvs']\r\n chains = self.invertor[self.config_keys[index]]['params'][i]['chains']\r\n y = '| Y' if self.invertor[self.config_keys[index]]['params'][i]['y'] == True else ''\r\n self.textConsoleDraw.append(f\" {self.invertor[self.config_keys[index]]['params'][i]['mppts']} MPPT | {chains} цеп. 
| {pvs} ФЭМ {y} \")\r\n pvs_on_conf += pvs * chains\r\n total_pvs += pvs_on_conf * count_inv\r\n self.textConsoleDraw.append(f\" ИТОГО\")\r\n self.textConsoleDraw.append(f\" {int(self.invertor['count_invertor'])} Инверторов\")\r\n self.textConsoleDraw.append(f\" {int(total_pvs)} ФЭМ\")\r\n\r\n def show_and_hide_color_line_because_phase(self):\r\n if self.checkUse_three_phase.isChecked():\r\n self.checkUse_5or4_line.setEnabled(True)\r\n else:\r\n self.checkUse_5or4_line.setEnabled(False)\r\n self.checkUse_5or4_line.setCheckState(0)\r\n\r\n def show_and_hide_spinBox_mppt(self):\r\n if self.checkDifferentMPPT.isChecked():\r\n self.spinBoxMPPT.setEnabled(True)\r\n self.btnAddMPPT.setEnabled(True)\r\n self.btnDelMPPT.setEnabled(True)\r\n else:\r\n self.spinBoxMPPT.setEnabled(False)\r\n self.btnAddMPPT.setEnabled(False)\r\n self.btnDelMPPT.setEnabled(False)\r\n\r\n def validate_input(self): #валидация вводимых данных\r\n false_value = ['Н/Д', '']\r\n self.opacity_effect = QtWidgets.QGraphicsOpacityEffect()\r\n self.opacity_effect.setOpacity(0.6)\r\n config_index = self.spinBoxConfigInvertor.value() - 1\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n use_all_mppt = True if self.checkUse_all_mppt.isChecked() else False\r\n use_y_connector = True if self.checkUse_y_connector.isChecked() else False \r\n \r\n if not self.inputCount_mppt.text() in false_value and not self.inputCount_input_mppt.text() in false_value:\r\n count_input_mppt = int(self.inputCount_input_mppt.text())\r\n self.count_mppt = int(self.inputCount_mppt.text()) \r\n self.textConsoleCurrent.clear() \r\n max_y = self.spinBox_maxY.value() \r\n max_input = count_input_mppt * self.count_mppt\r\n max_input_y = max_y * self.count_mppt\r\n self.textConsoleCurrent.append(f\"Макс. кол-во входов без Y коннектора: {max_input}\")\r\n self.textConsoleCurrent.append(f\"Макс. 
кол-во входов c Y коннектором: {max_input_y}\")\r\n total_mppts = 0\r\n if len(self.config_keys) != 0:\r\n count_params = len(self.invertor[self.config_keys[config_index]]['params'])\r\n for i in range(count_params):\r\n total_mppts += self.invertor[self.config_keys[config_index]]['params'][i]['mppts']\r\n total_mppts -= self.invertor[self.config_keys[config_index]]['params'][diff_index]['mppts']\r\n total_mppts += self.count_mppt\r\n \r\n if not self.inputAll_chain.text() in false_value:\r\n self.all_chain = int(self.inputAll_chain.text())\r\n if self.all_chain < self.count_mppt and use_all_mppt == True:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Невозможно распределить по всем MPPT\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: Увеличьте кол-во цепочек\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif self.all_chain > max_input and use_y_connector == False:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во цепочек не вмещается\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: примените Y коннекторы / уменьшите кол-во цепочек\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif self.all_chain <= max_input and use_y_connector == True and use_all_mppt == True:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во цепочек слишком мало, чтобы распределить по всем MPPT с Y коннекторами\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: уберите Y коннекторы\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif self.all_chain > max_input_y:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во цепочек слишком большое для данной конфигурации\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: уменьшите кол-во цепочек / измените конфигурацию\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif total_mppts > self.invertor[\"mppt\"]:\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во MPPT выходит за рамки параметров инвертора\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n else:\r\n self.btnDraw.setEnabled(True)\r\n self.btnSaveConfig.setEnabled(True)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect.setOpacity(1))\r\n return 0\r\n else:\r\n self.textConsoleCurrent.clear() \r\n \r\n def check_imput_params(self):\r\n self.set_style_default()\r\n if self.inputCount_mppt.text() == '':\r\n styles_and_animation.no_fill_field(self, self.inputCount_mppt)\r\n return 1\r\n elif self.inputCount_input_mppt.text() == '':\r\n styles_and_animation.no_fill_field(self, self.inputCount_input_mppt)\r\n return 1\r\n elif self.inputSolar_count_on_the_chain.text() == '':\r\n styles_and_animation.no_fill_field(self, self.inputSolar_count_on_the_chain)\r\n return 1\r\n elif self.inputAll_chain.text() == '':\r\n styles_and_animation.no_fill_field(self, 
self.inputAll_chain)\r\n return 1\r\n else:\r\n return 0\r\n\r\n def set_style_default(self):\r\n self.inputCount_mppt.setStyleSheet(styles_and_animation.default_style_input)\r\n self.inputCount_input_mppt.setStyleSheet(styles_and_animation.default_style_input)\r\n self.inputSolar_count_on_the_chain.setStyleSheet(styles_and_animation.default_style_input)\r\n self.inputAll_chain.setStyleSheet(styles_and_animation.default_style_input)\r\n\r\n self.statusBar.setStyleSheet(styles_and_animation.status_white)\r\n self.statusBar.showMessage('', 100)\r\n\r\n def show_and_hide_different_mppt(self, status):\r\n if status == True:\r\n self.spinBoxConfigInvertor.show()\r\n if self.spinBoxConfigInvertor.value() > 1:\r\n self.btnDelConfigInvertor.show()\r\n else:\r\n self.btnDelConfigInvertor.hide()\r\n else:\r\n self.spinBoxConfigInvertor.hide()\r\n self.btnDelConfigInvertor.hide()\r\n\r\n def update_console(self):\r\n self.textConsoleDraw.clear()\r\n\r\n def update_total_count_invertors(self):\r\n count_invertor = 0\r\n for key in self.config_keys:\r\n count_invertor += int(self.invertor[key]['count'])\r\n self.invertor['count_invertor'] = int(count_invertor)\r\n self.main_window.w4.up_down_invertor_selection()\r\n self.inputCountAllInvertors.setText(f\"{int(self.invertor['count_invertor'])}\")\r\n\r\n def save_config(self):\r\n if self.check_imput_params() != 0:\r\n return 1\r\n config_index = self.spinBoxConfigInvertor.value() - 1\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n\r\n self.invertor['module'] = str(self.inputName_invertor.text())\r\n self.invertor['type_inv'] = str(self.inputNumber_invertor.text())\r\n self.invertor['title_grid_line'] = str(self.inputTitle_grid_line.text())\r\n self.invertor['title_grid_line_length'] = str(self.inputTitle_grid_line_length.text())\r\n self.invertor['title_grid_top'] = str(self.inputTitle_grid_top.text())\r\n self.invertor['title_grid_switch'] = str(self.inputTitle_grid_switch.text())\r\n self.invertor['phase'] = 3 if self.checkUse_three_phase.isChecked() else 1\r\n self.invertor['use_5or4_line'] = True if self.checkUse_5or4_line.isChecked() else False\r\n self.invertor['inputs'] = int(self.inputCount_input_mppt.text())\r\n \r\n if not self.config_keys:\r\n self.add_invertor()\r\n else:\r\n self.invertor[self.config_keys[config_index]]['count'] = self.spinBox_CloneInvertor.value()\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['mppts'] = int(self.inputCount_mppt.text())\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['chains'] = int(self.inputAll_chain.text())\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['pvs'] = int(self.inputSolar_count_on_the_chain.text())\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['y'] = True if self.checkUse_y_connector.isChecked() else False\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['max_y'] = self.spinBox_maxY.value()\r\n \r\n total_chains = 0\r\n count_params = len(self.invertor[self.config_keys[config_index]]['params'])\r\n for i in range(count_params):\r\n total_chains += self.invertor[self.config_keys[config_index]]['params'][i]['chains']\r\n self.invertor[self.config_keys[config_index]]['total_chains'] = int(total_chains)\r\n self.update_total_count_invertors()\r\n \r\n self.main_window.w6.up_down_invertor_selection()\r\n self.up_down_invertor_selection()\r\n self.statusBar.showMessage('Параметры сохранены', 2000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_green)\r\n 
QTimer.singleShot(2000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n\r\n def add_invertor(self):\r\n if self.check_imput_params() != 0:\r\n return 1\r\n mppts = int(self.inputCount_mppt.text())\r\n count_inv = int(self.spinBox_CloneInvertor.value())\r\n pvs = int(self.inputSolar_count_on_the_chain.text())\r\n chains = int(self.inputAll_chain.text())\r\n y_connector = True if self.checkUse_y_connector.isChecked() else False\r\n max_y = self.spinBox_maxY.value()\r\n\r\n if not self.config_keys:\r\n name = 'inv_0'\r\n else:\r\n name = f'inv_{len(self.config_keys)}'\r\n\r\n self.invertor[name] = {'controller': False, 'commutator': False, 'left_yzip': False, 'right_yzip': False, \r\n 'title_other_device': 'УЗИП', 'count': count_inv, 'total_chains': chains, \r\n 'params': [{'mppts': mppts, 'chains': chains, 'pvs': pvs, 'count': 'piece', 'y': y_connector, 'max_y': max_y}]}\r\n\r\n self.invertor_and_config_keys()\r\n self.update_total_count_invertors()\r\n self.draw_invertor_config_in_console()\r\n self.spinBoxConfigInvertor.setValue(len(self.config_keys))\r\n\r\n def del_invertor(self):\r\n current_config_index = self.spinBoxConfigInvertor.value() - 1\r\n del self.invertor[self.config_keys[current_config_index]]\r\n self.invertor_and_config_keys()\r\n index = 0\r\n for key in self.config_keys:\r\n self.invertor[f'inv_{index}'] = self.invertor.pop(key)\r\n index += 1\r\n self.invertor_and_config_keys()\r\n self.update_total_count_invertors()\r\n self.draw_invertor_config_in_console()\r\n self.spin_config()\r\n\r\n def add_config(self):\r\n if self.check_imput_params() != 0:\r\n return 1\r\n current_config_index = self.spinBoxConfigInvertor.value() - 1\r\n mppts = int(self.inputCount_mppt.text())\r\n pvs = int(self.inputSolar_count_on_the_chain.text())\r\n chains = int(self.inputAll_chain.text())\r\n y_connector = True if self.checkUse_y_connector.isChecked() else False\r\n max_y = self.spinBox_maxY.value()\r\n\r\n current_params = self.invertor[self.config_keys[current_config_index]]['params']\r\n current_params.append({'mppts': mppts, 'chains': chains, 'pvs': pvs, 'count': 'piece', 'y': y_connector, 'max_y': max_y})\r\n self.draw_invertor_config_in_console()\r\n self.spin_config()\r\n self.spinBoxMPPT.setValue(len(current_params))\r\n\r\n def del_config(self):\r\n current_config_index = self.spinBoxConfigInvertor.value() - 1\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n del self.invertor[self.config_keys[current_config_index]]['params'][diff_index]\r\n self.draw_invertor_config_in_console()\r\n self.spin_config()\r\n\r\n def out_params(self):\r\n title_project = self.main_window.inputTitleProject.text()\r\n code_project = self.main_window.inputCodeProject.text() \r\n code_project = self.main_window.inputCodeProject.text() \r\n self.many_schemes = True if self.checkManySchemes.isChecked() else False\r\n \r\n self.gost_frame_params = {'title_project': title_project, 'code_project': code_project}\r\n \r\n def draw(self):\r\n try:\r\n fp_invertors = \"Data/Schemes/Invertor\"\r\n files_invertors = [f for f in os.listdir(fp_invertors) if isfile(join(fp_invertors, f))]\r\n if len(files_invertors) != 0:\r\n for i in range(len(files_invertors)):\r\n with open(fp_invertors + f\"/{files_invertors[i]}\", 'w') as image_fd: \r\n pass\r\n except PermissionError:\r\n self.statusBar.showMessage('Открыт pdf файл схемы, перед построением закройте его и повторите попытку', 4000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n QTimer.singleShot(4000, lambda: 
self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return\r\n \r\n if not self.config_keys:\r\n self.statusBar.showMessage('Суохраните параметры', 2000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_yellow)\r\n QTimer.singleShot(2000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return \r\n \r\n\r\n for num in range(1, len(self.config_keys)):\r\n self.spinBoxConfigInvertor.setValue(num)\r\n self.spin_config()\r\n if self.validate_input() != 0:\r\n self.statusBar.showMessage('Неверная конфигурация MPPT', 4000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_yellow)\r\n QTimer.singleShot(4000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return\r\n \r\n self.out_params()\r\n\r\n self.btnOpenScheme.hide()\r\n self.btnDraw.setEnabled(False)\r\n self.btnDraw.setText(f\"Cоздано 0 из {self.invertor['count_invertor']}\")\r\n self.progressBar.show()\r\n self.progressBar.setMaximum(int(self.invertor['count_invertor']))\r\n self.progressBar.setValue(0)\r\n \r\n self.painter_draw_one = DrawOne(self.invertor, self.gost_frame_params, self.many_schemes)\r\n self.painter_draw_one.change_value.connect(self.setProgressVal)\r\n self.painter_draw_one.finished.connect(self.drawFinished)\r\n self.painter_draw_one.start()\r\n\r\n def setProgressVal(self, val):\r\n self.progressBar.setValue(int(val.split(' ')[0]))\r\n self.btnDraw.setText(f\"Cоздано {val}\")\r\n\r\n def drawFinished(self):\r\n if hasattr(self.painter_draw_one, 'num_error'):\r\n if self.painter_draw_one.num_error['error'] == 0:\r\n self.textConsoleDraw.append(\"----------------------------\")\r\n self.textConsoleDraw.append(\"РЕЗУЛЬТАТЫ:\")\r\n self.textConsoleDraw.append(f\" Всего цепочек: {self.painter_draw_one.chains}\")\r\n self.textConsoleDraw.append(f\" Всего модулей: {self.painter_draw_one.modules}\")\r\n self.statusBar.showMessage('Формирование схем завершено успешно', 6000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_green)\r\n QTimer.singleShot(6000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n self.btnOpenScheme.show()\r\n elif self.painter_draw_one.num_error['error'] == 1:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Кол-во цепочек меньше числа MPPT, невозможно заполгнить все MPPT\")\r\n self.textConsoleDraw.append(\"---\")\r\n self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n elif self.painter_draw_one.num_error['error'] == 3:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Данное количесво цепочек не вмещается, примените Y коннекторы, либо измените конфигурацию MPPT\")\r\n self.textConsoleDraw.append(\"---\")\r\n self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n elif self.painter_draw_one.num_error['error'] == 4:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Данное количесво цепочек слишком мало чтобы заполнить все MPPT применяя Y коннекторы, уберите Y коннекторы или полное заполнение\")\r\n self.textConsoleDraw.append(\"---\")\r\n self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n elif self.painter_draw_one.num_error['error'] == 5:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Слишком большое количество цепочек\")\r\n self.textConsoleDraw.append(\"---\")\r\n 
self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n else:\r\n self.statusBar.showMessage(\"Внимание! При построении схемы возникла проблема\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n self.btnDraw.setEnabled(True)\r\n self.btnDraw.setText('Построить')\r\n self.progressBar.hide()\r\n del self.painter_draw_one\r\n \r\n ","repo_name":"Croud9/Larso","sub_path":"app/logic/logicUIOneScheme.py","file_name":"logicUIOneScheme.py","file_ext":"py","file_size_in_byte":32368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"17611580182","text":"import pytest\n\n\nclass TestBackends(object):\n\n @pytest.fixture\n def simple_payload(self):\n return {'name': 'test.simple', 'metric': 'simple_payload', 'value': 1, 'host': 'test'}\n\n @pytest.fixture\n def structured_payload(self):\n return {'name': 'test.structured', 'metric': 'structured_payload', 'val0': 1, 'val2': 'str', 'val3': [1, 2],\n 'host': 'test', 'tags': ['tag1', 'tags2']}\n\n def test_base_backend_simple_payload(self, mocker, dummy_backend, simple_payload):\n mock_gethostname = mocker.patch('socket.gethostname')\n mock_gethostname.return_value = 'test'\n dummy_backend.report(name='test.simple', metric='simple_payload', value=1, tags=None)\n reported_data = dummy_backend.reported_data['test.simple']\n assert reported_data == simple_payload\n\n def test_base_backend_structured_payload(self, mocker, dummy_backend, structured_payload):\n mock_gethostname = mocker.patch('socket.gethostname')\n mock_gethostname.return_value = 'test'\n dummy_backend.report(name='test.structured', metric='structured_payload',\n value={'val0': 1, 'val2': 'str', 'val3': [1, 2]}, tags=['tag1', 'tags2'])\n reported_data = dummy_backend.reported_data['test.structured']\n assert reported_data == structured_payload\n","repo_name":"APSL/kaneda","sub_path":"tests/unit/test_backends.py","file_name":"test_backends.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"79"}
+{"seq_id":"224641372","text":"\nfrom pyramid_beaker import session_factory_from_settings\nfrom pyramid.config import Configurator\n# from pyramid.session import UnencryptedCookieSessionFactoryConfig\n# my_session_factory = UnencryptedCookieSessionFactoryConfig('not-really-secret')\n\n\"\"\" The docs have a charming parallel to the way `apt-get remove perl` used to\n make you type out 'I know that what I am doing is wrong':\n\n > Note the very long, very explicit name for\n > UnencryptedCookieSessionFactoryConfig. It's trying to tell you that this\n > implementation is, by default, *unencrypted*. You should not use it when\n > you keep sensitive information in the session object, as the information\n > can be easily read by both users of your application and third parties\n > who have access to your users' network traffic. Use a different session\n > factory implementation (preferably one which keeps session data on the\n > server) for anything but the most basic of applications where \"session\n > security doesn't matter\".\n\"\"\"\n\nfrom sqlalchemy import engine_from_config\nfrom .models import DBSession\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n session_factory = session_factory_from_settings(settings)\n config = Configurator(settings=settings)\n config.set_session_factory(session_factory)\n # config = Configurator(session_factory=my_session_factory, settings=settings)\n config.add_static_view('static', 'static', cache_max_age=3600)\n\n # \"Show me your deck list.\"\n config.add_route('give_deck', '/')\n # \"Did I parse your deck list correctly?\"\n config.add_route('check_deck', '/check')\n # \"Okay, I'm asking you questions about your deck.\"\n config.add_route('show_question', '/ask')\n # \"This is my answer to the question.\"\n config.add_route('check_answer', '/answer')\n # /answer should be POSTed to, and leads back to /ask with a flash message\n # telling you whether you were right or wrong.\n\n config.scan()\n return config.make_wsgi_app()\n","repo_name":"seanmcd/VexingArcanix","sub_path":"vexingarcanix/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"7957498194","text":"import sys\nsys.stdin = open('input.txt')\n\nn = int(input())\n\nfor k in range(1,n + 1):\n N = int(input())\n arr = list(map(int, input().split()))\n\n minv = maxv = arr[0]\n for i in range(N):\n if minv > arr[i]:\n minv = arr[i]\n if maxv < arr[i]:\n maxv = arr[i]\n\n result = maxv - minv\n print('#{} {}'.format(k, result))","repo_name":"ggpp0909/problem_solving","sub_path":"Python/SWEA/0810/4828_min_max/4828_min_max.py","file_name":"4828_min_max.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"26619464747","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.patches import Rectangle\r\nfrom matplotlib.widgets import Slider, Button\r\n\r\n# Width of rectangle:\r\nL = 2\r\n\r\ndef collision_data_nospin(N, x0, y0, alpha0):\r\n \r\n t = np.zeros(N) # times at collision\r\n x = np.zeros(N) # x-values at collision\r\n y = np.zeros(N) # y-values at collision\r\n alpha = np.zeros(N) # alpha values at collision\r\n theta1 = np.zeros(N) # theta1 values at collision\r\n theta2 = np.zeros(N) # theta2 values at collision\r\n theta3 = np.zeros(N) # theta3 values at collision\r\n theta4 = np.zeros(N) # theta4 values at collision\r\n \r\n # Initial values:\r\n t[0] = 0\r\n x[0] = x0\r\n y[0] = y0\r\n alpha[0] = alpha0\r\n theta1[0] = np.arctan2(1 - y[0], L - x[0])\r\n theta2[0] = np.arctan2(1 - y[0], -L - x[0])\r\n theta3[0] = np.arctan2(-1 - y[0], -L - x[0])\r\n theta4[0] = np.arctan2( -1 - y[0], L - x[0])\r\n\r\n # Update formula:\r\n for i in range(1, N):\r\n if (alpha[i - 1] - theta1[i - 1])%(2*np.pi) < (theta2[i - 1] - theta1[i - 1])%(2*np.pi):\r\n t[i] = (1 - y[i - 1])/np.sin(alpha[i - 1])\r\n x[i] = x[i - 1] + t[i]*np.cos(alpha[i - 1])\r\n y[i] = 1\r\n alpha[i] = -alpha[i - 1]\r\n if (alpha[i - 1] - theta2[i - 1])%(2*np.pi) < (theta3[i - 1] - theta2[i - 1])%(2*np.pi):\r\n t[i] = (-L - x[i - 1])/np.cos(alpha[i - 1])\r\n x[i] = -L\r\n y[i] = y[i - 1] + t[i]*np.sin(alpha[i - 1])\r\n alpha[i] = np.pi - alpha[i - 1]\r\n if (alpha[i - 1] - theta3[i - 1])%(2*np.pi) < (theta4[i - 1] - theta3[i - 1])%(2*np.pi):\r\n t[i] = (-1 - y[i - 1])/np.sin(alpha[i - 1])\r\n x[i] = x[i - 1] + t[i]*np.cos(alpha[i - 1])\r\n y[i] = -1\r\n alpha[i] = -alpha[i - 1]\r\n if (alpha[i - 1] - theta4[i - 1])%(2*np.pi) < (theta1[i - 1] - theta4[i - 1])%(2*np.pi):\r\n t[i] = (L - x[i - 1])/np.cos(alpha[i - 1])\r\n x[i] = L\r\n y[i] = y[i - 1] + t[i]*np.sin(alpha[i - 1])\r\n alpha[i] = np.pi - alpha[i - 1]\r\n theta1[i] = np.arctan2(1 - y[i], L - x[i])\r\n theta2[i] = np.arctan2(1 - y[i], -L - x[i])\r\n theta3[i] = np.arctan2(-1 - y[i], -L - x[i])\r\n theta4[i] = np.arctan2( -1 - y[i], L - x[i])\r\n \r\n return x, y, alpha\r\n\r\ndef collision_data(N, x0, y0, alpha0, u0, MI_coeff):\r\n \r\n vx0 = np.cos(alpha0)\r\n vy0 = np.sin(alpha0)\r\n \r\n t = np.zeros(N) # times at collision\r\n x = np.zeros(N) # x-values at collision\r\n y = np.zeros(N) # y-values at collision\r\n vx = np.zeros(N) # x-component of veolcity at collision\r\n vy = np.zeros(N) # y-component of veolcity at collision\r\n u = np.zeros(N) # spin values at collision\r\n alpha = np.zeros(N) # alpha values at collision\r\n theta1 = np.zeros(N) # theta1 values at collision\r\n theta2 = np.zeros(N) # theta2 values at collision\r\n theta3 = np.zeros(N) # theta3 values at collision\r\n theta4 = np.zeros(N) # theta4 values at collision\r\n \r\n # Initial values:\r\n t[0] = 0\r\n x[0] = x0\r\n y[0] = y0\r\n vx[0] = vx0\r\n vy[0] = vy0\r\n u[0] = u0\r\n alpha[0] = alpha0\r\n theta1[0] = np.arctan2(1 - y[0], L - x[0])\r\n theta2[0] = np.arctan2(1 - y[0], -L - x[0])\r\n theta3[0] = np.arctan2(-1 - y[0], -L - x[0])\r\n theta4[0] = np.arctan2( -1 - y[0], L - x[0])\r\n\r\n # Update formula:\r\n for i in range(1, N):\r\n if (alpha[i - 1] - theta1[i - 1])%(2*np.pi) < (theta2[i - 1] - theta1[i - 1])%(2*np.pi):\r\n t[i] = (1 - y[i - 1])/vy[i - 1]\r\n x[i] = x[i - 1] + t[i]*vx[i - 1]\r\n y[i] = 1\r\n \r\n vT = -vx[i - 1]\r\n vn = -vy[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp 
= -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = -vparr\r\n vy[i] = -vperp\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n \r\n if (alpha[i - 1] - theta2[i - 1])%(2*np.pi) < (theta3[i - 1] - theta2[i - 1])%(2*np.pi):\r\n t[i] = (-L - x[i - 1])/vx[i - 1]\r\n x[i] = -L\r\n y[i] = y[i - 1] + t[i]*vy[i - 1]\r\n \r\n vT = -vy[i - 1]\r\n vn = vx[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = vperp\r\n vy[i] = -vparr\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n if (alpha[i - 1] - theta3[i - 1])%(2*np.pi) < (theta4[i - 1] - theta3[i - 1])%(2*np.pi):\r\n t[i] = (-1 - y[i - 1])/vy[i - 1]\r\n x[i] = x[i - 1] + t[i]*vx[i - 1]\r\n y[i] = -1\r\n \r\n vT = vx[i - 1]\r\n vn = vy[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = vparr\r\n vy[i] = vperp\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n \r\n if (alpha[i - 1] - theta4[i - 1])%(2*np.pi) < (theta1[i - 1] - theta4[i - 1])%(2*np.pi):\r\n t[i] = (L - x[i - 1])/vx[i - 1]\r\n x[i] = L\r\n y[i] = y[i - 1] + t[i]*vy[i - 1]\r\n vT = vy[i - 1]\r\n vn = -vx[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = -vperp\r\n vy[i] = vparr\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n theta1[i] = np.arctan2(1 - y[i], L - x[i])\r\n theta2[i] = np.arctan2(1 - y[i], -L - x[i])\r\n theta3[i] = np.arctan2(-1 - y[i], -L - x[i])\r\n theta4[i] = np.arctan2( -1 - y[i], L - x[i])\r\n \r\n return x, y, alpha, vx, vy, u\r\n\r\n# Define initial parameters\r\ninit_MI_coeff = 1/2\r\ninit_x = 0\r\ninit_y = 0.25\r\ninit_theta = np.pi/4\r\ninit_u = 0\r\ninit_N = 50\r\n\r\nx_spin, y_spin, alpha_spin, vx_spin, vy_spin, u_spin = collision_data(init_N, init_x, init_y, init_theta, init_u, init_MI_coeff)\r\n\r\nfig, ax = plt.subplots()\r\nline, = ax.plot(x_spin, y_spin, lw=2, c='red')\r\n\r\n# May be uncommented to save collision data:\r\n#np.savetxt('rectangle_edges.txt',np.transpose(np.array([x_spin,y_spin,vx_spin,vy_spin,u_spin])))\r\n\r\nplt.gca().add_patch(Rectangle((-L,-1),2*L,2,\r\n edgecolor='black',\r\n facecolor='none'))\r\nax = plt.gca()\r\nax.set_aspect('equal', adjustable='box')\r\n\r\n# adjust the main plot to make room for the sliders\r\nfig.subplots_adjust(left=0.25, bottom=0.25)\r\n\r\naxMI_coeff = fig.add_axes([0.25, 0.1, 0.65, 0.03])\r\nMI_coeff_slider = Slider(\r\n ax=axMI_coeff,\r\n label='alpha',\r\n valmin=0,\r\n valmax=1,\r\n valinit=init_MI_coeff,\r\n)\r\n\r\nax_x = fig.add_axes([0.25, 0.2, 0.65, 0.03])\r\nx_slider = Slider(\r\n ax=ax_x,\r\n label='x0',\r\n valmin=-L,\r\n valmax=L,\r\n valinit=init_x,\r\n)\r\n\r\nax_y = fig.add_axes([0.25, 0.15, 0.65, 0.03])\r\ny_slider = Slider(\r\n ax=ax_y,\r\n label='y0',\r\n valmin=-1,\r\n valmax=1,\r\n valinit=init_y,\r\n)\r\n\r\nax_theta = fig.add_axes([0.25, 0.25, 0.65, 0.03])\r\ntheta_slider = Slider(\r\n ax=ax_theta,\r\n label='theta0',\r\n valmin=0,\r\n valmax=2*np.pi,\r\n valinit=init_theta,\r\n)\r\n\r\nax_u = fig.add_axes([0.05, 0.25, 0.0225, 0.63])\r\nu_slider = Slider(\r\n ax=ax_u,\r\n label=\"u\",\r\n valmin=0,\r\n valmax=10,\r\n valinit=init_u,\r\n orientation=\"vertical\"\r\n)\r\n\r\nax_N = fig.add_axes([0.1, 0.25, 
0.0225, 0.63])\r\nN_slider = Slider(\r\n ax=ax_N,\r\n label=\"N\",\r\n valmin=1,\r\n valmax=100,\r\n valinit=init_N,\r\n orientation=\"vertical\",\r\n valfmt='%0.0f'\r\n)\r\n\r\n\r\n# The function to be called anytime a slider's value changes\r\ndef update(val):\r\n x_spin, y_spin, alpha_spin, vx_spin, vy_spin, u_spin = collision_data(int(N_slider.val), x_slider.val, y_slider.val, theta_slider.val, u_slider.val, MI_coeff_slider.val)\r\n line.set_xdata(x_spin)\r\n line.set_ydata(y_spin)\r\n fig.canvas.draw_idle()\r\n \r\nMI_coeff_slider.on_changed(update)\r\nx_slider.on_changed(update)\r\ny_slider.on_changed(update)\r\ntheta_slider.on_changed(update)\r\nu_slider.on_changed(update)\r\nN_slider.on_changed(update)\r\n\r\nresetax = fig.add_axes([0.8, 0.025, 0.1, 0.04])\r\nbutton = Button(resetax, 'Reset', hovercolor='0.975')\r\n\r\n\r\ndef reset(event):\r\n MI_coeff_slider.reset()\r\n x_slider.reset()\r\n y_slider.reset()\r\n theta_slider.reset()\r\n u_slider.reset()\r\n N_slider.reset()\r\nbutton.on_clicked(reset)\r\n\r\nplt.show()","repo_name":"WarlicTheWizard/billiards","sub_path":"rectangle with sliders.py","file_name":"rectangle with sliders.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"6412488724","text":"from sqlalchemy import (\n BigInteger,\n Boolean,\n Column,\n LargeBinary,\n Numeric,\n String,\n Integer,\n UnicodeText,\n)\nfrom Db import SESSION, Base\nimport os\n\n\nclass AutoReply(Base):\n __tablename__ = \"AutoReply\"\n id = Column(Integer, autoincrement=True, primary_key=True)\n text = Column(String)\n file_id = Column(String)\n msg_type = Column(String)\n msg_content = Column(String)\n\n def __init__(self, text, msg_type, msg_content, file_id, id=None):\n self.id = id\n self.msg_type = msg_type\n self.file_id = file_id\n self.text = text\n self.msg_content = msg_content\n\n\nAutoReply.__table__.create(checkfirst=True)\n\n\ndef getAutoReply(text):\n try:\n return SESSION.query(AutoReply).filter(AutoReply.text == text).one()\n except:\n return None\n finally:\n SESSION.close()\n\n\ndef getAllAutoReply():\n try:\n return SESSION.query(AutoReply).all()\n except:\n return None\n finally:\n SESSION.close()\n\n\ndef addAutoReply(text, msg_type, msg_content=\"\", file_id=\"\"):\n try:\n addRep = SESSION.query(AutoReply).filter(AutoReply.text == text).one()\n except Exception as e:\n addRep = None\n print(str(\"error : togglepropsetting : %s\" % (e)))\n\n if addRep:\n addRep.msg_type = msg_type\n addRep.msg_content = msg_content\n try:\n os.remove(addRep.file_id)\n except Exception as e:\n print(\"addAutoReplySetting : %s\" % (e))\n addRep.file_id = file_id\n else:\n addRep = AutoReply(text, msg_type, msg_content, file_id)\n SESSION.add(addRep)\n SESSION.commit()\n\n\ndef remAutoReplySetting(text):\n try:\n remrep = SESSION.query(AutoReply).filter(AutoReply.text == text).one()\n if remrep:\n SESSION.delete(remrep)\n SESSION.commit()\n return True\n except:\n return False\n\n","repo_name":"micodev/botShell","sub_path":"Db/autoReply_sql.py","file_name":"autoReply_sql.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"79"}
+{"seq_id":"18918018871","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.4.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\n# # !pip install xlrd\n# -\n\nimport xlrd\nimport numpy as np\nimport matplotlib.pylab as plt\n\n# # Donnée météorologiques RT2012\n#\n# documentation à propos des données: https://www.rt-batiment.fr/batiments-neufs/reglementation-thermique-2012/donnees-meteorologiques.html\n\nfilename = './FichiersMeteo_RT2012.xls'\nweather_data = xlrd.open_workbook(filename)\n\nprint('sheets:', ', '.join(weather_data.sheet_names()))\n\n# +\ndescriptif = weather_data.sheet_by_name('Descriptif')\n\nprint('Descriptif')\nprint('==========')\nfor row in descriptif.get_rows():\n r = [r.value for r in row]\n print('\\t'.join(r))\n# -\n\nvilles = {'H1a': 'Trappes',\n 'H1b': 'Nancy',\n 'H1c': 'Macon',\n 'H2a': 'Rennes',\n 'H2b': 'La Rochelle',\n 'H2c': 'Agen',\n 'H2d': 'Carpentras',\n 'H3' : 'Nice'}\n\nk = 0\nzc = list(villes.keys())\n\n# +\nzone_climatique = 'H1c'\n#zone_climatique = zc[k]\n#print(k, zone_climatique)\n#k += 1\n\n# Reads columns\nvariables = ['Htsmd', 'te0', 'we0', 'dirN', 'diff', 'Teciel', 'Vent', 'Teau', 'Gamma', 'Psi']\ndatazone = weather_data.sheet_by_name(zone_climatique)\ndata = {var:np.array([cell.value for cell in datazone.col(c, start_rowx=1)])\n for c, var in enumerate(variables)}\n\nfig = plt.figure(figsize=(12, 10))\nnbr_graph = 3\n\n# === Temperature ===\nax1 = plt.subplot(nbr_graph, 1, 1)\n\nT_ext_grid = data['te0'].reshape(-1, 24).T\nT_ciel_grid = data['Teciel'].reshape(-1, 24).T\nT_eau_grid = data['Teau'].reshape(-1, 24).T\n\nplt.axhline(y=0, linewidth=1, color='black');\nplt.axhline(y=20, linewidth=1, linestyle=':', color='black');\n\n# T_ext\nplt.plot(T_ext_grid.mean(axis=0), color='r', label='T° ext.')\nx = np.arange(T_ext_grid.shape[1])\nplt.fill_between(x, T_ext_grid.min(axis=0), T_ext_grid.max(axis=0), color='red', alpha=0.1);\n\n# T_ciel\n#plt.plot(T_ciel_grid.max(axis=0), color='skyblue', label='T° eau (1m sol)')\nplt.plot(T_ciel_grid.mean(axis=0), color='darkblue', label='T° rayonnement ciel', alpha=0.2)\n#plt.plot(T_ciel_grid.min(axis=0), color='skyblue', label='T° eau (1m sol)')\n\n# T_eau\nplt.plot(T_eau_grid.mean(axis=0), color='skyblue', label='T° eau (1m sol)')\n\nplt.xlim(0, T_ext_grid.shape[1]); #plt.title(\"Température extérieure (°C)\");\nplt.ylabel(\"Température extérieure (°C)\");\nplt.legend(); plt.xlabel(\"jour de l'année\");\nplt.title(\"Température extérieure (°C)\");\nplt.ylim((-10, 35))\n\n# === Vent ===\nax1 = plt.subplot(nbr_graph, 1, 2, sharex=ax1)\nvent_grid = data['Vent'].reshape(-1, 24).T\n\nplt.plot(vent_grid.mean(axis=0), color='cadetblue', label='vitesse vent moy.')\nx = np.arange(T_ext_grid.shape[1])\nplt.fill_between(x, vent_grid.min(axis=0), vent_grid.max(axis=0), color='cadetblue', alpha=0.1);\n\nplt.xlim(0, T_ext_grid.shape[1]); plt.title(\"vitesse moyenne du vent (m/s)\");\nplt.ylabel(\"vitesse vent (m/s)\"); plt.xlabel(\"jour de l'année\");\nplt.legend();\nplt.ylim((0, 15))\n\n# === Soleil ===\nax2 = plt.subplot(nbr_graph, 1, 3, sharex=ax1)\nax2.set_title(f'{zone_climatique} {villes[zone_climatique]}')\n\ndirN_grid = data['dirN'].reshape(-1, 24).T\ndiff_grid = data['diff'].reshape(-1, 24).T\nplt.plot(dirN_grid.sum(axis=0), color='darkorange', label='directe')\nplt.fill_between(x, np.zeros_like(x), 
dirN_grid.sum(axis=0)/24, color='darkorange', alpha=0.1);\n\nplt.plot(diff_grid.sum(axis=0), color='lightslategray', label='diffus')\n#plt.fill_between(x, np.zeros_like(x), diff_grid.sum(axis=0)/24, color='lightslategray', alpha=0.1);\n\nplt.legend();\nplt.xlim(0, T_ext_grid.shape[1]); plt.title(\"Energie solaire directe par jour (Wh/m2)\");\nplt.ylabel(\"Energie solaire directe par jour (Wh/m2)\");\nplt.ylim((0, 400*24))\n\n\n#plt.fill_between(x, np.zeros_like(x), dirN_grid.sum(axis=0)/24, color='darkorange', alpha=0.1);\n\n\n\nplt.xlabel(\"jour de l'année\");\nfig.suptitle(f'zone {zone_climatique} - {villes[zone_climatique]}', fontsize=16)\n\nplt.tight_layout(rect=(0, 0, 1, 0.97))\nfilename = f'{zone_climatique}_{villes[zone_climatique]}.svg'\nplt.savefig(filename)\n\n# +\n# == Heat map == \nT_ext_grid = np.array([h.value for h in datazone.col(1, start_rowx=1)]).reshape(-1, 24).T\n\nplt.figure(figsize=(15, 4))\nplt.pcolormesh(T_ext_grid, shading='flat'); plt.colorbar();\nplt.title(\"Température extérieure (°C)\")\nplt.xlabel(\"jour de l'année\"); plt.ylabel('heure');\n# -\n\nplt.figure(figsize=(15, 4))\nplt.pcolormesh(dirN_grid, shading='flat'); plt.colorbar();\n\nprint( list(data.keys()) )\n\n# +\n# Export to csv\nzone_climatique = 'H1c'\n\ncolumns_to_export = ['Htsmd', 'te0', 'dirN']\ndataarray = np.stack([data[c] for c in columns_to_export], axis=-1)\n\nfilename = f'{zone_climatique}_{villes[zone_climatique]}.csv'\nnp.savetxt(filename, dataarray, fmt='%.18e', delimiter=';', header=';'.join(columns_to_export))\n# -\n\n# # Look at Correlations\n\nplt.plot(data['te0'], data['dirN'], '.')\n\nplt.plot(dirN_grid.max(axis=0), T_ciel_grid.mean(axis=0), '.')\n\nplt.plot(data['te0'], data['Teciel'], '.')\n\n# https://physics.stackexchange.com/a/153947/105894\n# https://github.com/xdze2/thermique_appart/blob/master/Model02_tuile.ipynb\n#\n# It's much closer to 273 K than 2.73 K. The answer depends on the surface temperature, the humidity, the temperature gradient through the atmosphere, and what exactly you mean by \"the temperature of the clear night sky\".\n#\n# The Swinbank formula provides an ad hoc expression for the power radiated by the night sky. A modified version of this formula from Goforth et al. is $$P_{\\text{thermal}} = (1+KC^2)8.78\\times 10^{-13}\\,T^{5.852}\\,{RH}^{0.07195}$$ where\n#\n# $K$ is a scale factor based on cloud height, ranging from 0.34 for very low clouds to 0.06 for very high clouds,\n# $C$ is the fraction of the sky covered by clouds,\n# $T$ is the surface temperature, in kelvins,\n# $RH$ is the surface relative humidity, as a percentage (e.g., $RH$ would be 25 in the case of 25% relative humidity), and\n# $P_{\\text{thermal}}$ is the night sky radiation, in watts per square meter.\n#\n# This can be converted to an effective temperature via the Stefan-Boltzmann law. Now the question arises as to whether you are asking about the effective black body temperature or effective gray body temperature of the night sky. In the first case the Stefan-Boltzmann law yields $T = (P/\\sigma)^{1/4}$. Taking emissivity into account yields $T = (P/(\\epsilon \\sigma))^{1/4}$, where $\\epsilon\\approx 0.74$ is the emissivity of the atmosphere.\n#\n# A couple of examples:\n#\n# A cool clear night in the desert, with a temperature of 5°C and a relative humidity of 5%. 
The modified Swinbank formula yields a flux of 198 w/m2, which in turn corresponds to a black body temperature of -29.9°C or a gray body temperature of -10.9°C.\n#\n# A warm clear night in the countryside, with a temperature of 15°C and a relative humidity of 25%. The modified Swinbank formula in this case yields a flux of 274 w/m2, which in turn corresponds to a black body temperature of -9.5°C or a gray body temperature of 11.1°C.\n#\n\n\n","repo_name":"xdze2/simuthermique","sub_path":"weather_api/Fichiers_Meteo_RT2012/viz_yearly_weather_data.py","file_name":"viz_yearly_weather_data.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"21688667754","text":"import unittest\nfrom Utils.logger import *\nfrom selenium import webdriver\n\nfrom Utils.utility import *\nfrom classes.DriverHelpers.DriverHelper import DriverHelper\nfrom Utils.Constants import *\nfrom Utils.SetUp import *\nfrom classes.Pages.NFPageClass import *\nfrom classes.Pages.QuickTrendsPageClass import *\nfrom classes.Pages.GenerateReportsPopClass import *\nfrom classes.Pages.ReportsModuleClass import *\nfrom classes.Pages.ConfigurationPageClass import *\n\nsetup = SetUp()\n\nlogin(setup, \"admin\", \"Admin@123\")\nexploreScreenInstance = ExplorePageClass(setup.d)\nexploreHandle = getHandle(setup,\"explore_Screen\")\n\n# exploreScreenInstance.exploreList.launchScreen(exploreHandle,\"exploreList\",\"nf_Screen\")\n\nexploreScreenInstance.exploreList.switchApp(exploreHandle,1)\n\ntime.sleep(4)\nsetup.d.switch_to.window(setup.d.window_handles[1])\nconfScreenInstance = ConfigurationPageClass(setup.d)\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\nconfScreenInstance.leftColumn.select(1,confScreenHandle)\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\n\nconfScreenInstance.dummyelement.doSelection(confScreenHandle,\"Name\",'searchSelector','select')\nconfScreenInstance.dummyelement.doSelection(confScreenHandle,\"NetworkElement1\",'searchSelector','select')\nconfScreenInstance.dummyelement.doSelection(confScreenHandle,\"NetworkElement2\",'searchSelector','select')\nconfScreenInstance.dummyelement.doSelection(confScreenHandle,\"Port\",'searchSelector','select')\nconfScreenInstance.dummyelement.doSelection(confScreenHandle,\"Protocol\",'searchSelector','select')\n\n\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][0])\n\ncreatePopInstance = GenerateReportsPopClass(setup.d)\ncreatePopHandle = getHandle(setup, \"config_popup\")\n\n# Bulk Upload ##\ncreatePopInstance.switcher.switchTo(1,createPopHandle,'createdialog','switcher')\ncreatePopHandle = getHandle(setup, \"config_popup\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['choosefile'],\"/Users/deepanshu.ahuja/Documents/nfib.csv\")\ncreatePopInstance.dropdown.customClick(createPopHandle['createdialog']['upload'])\n\n\n\n\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][0])\ncreatePopInstance = GenerateReportsPopClass(setup.d)\ncreatePopHandle = getHandle(setup, \"config_popup\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['nfName'],\"nfautomation\")\ncreatePopInstance.dropdown.doSelection(createPopHandle,\"FA\",'createdialog','networkElement1')\ncreatePopInstance.dropdown.doSelection(createPopHandle,\"HA\",'createdialog','networkElement2')\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['port'],\"12\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['protocol'],\"23\")\ncreatePopInstance.dropdown.customClick(createPopHandle['createdialog']['submit'])\n\n\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\n\n\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][3])\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][4])\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\n# Delete is not working 
now\nconfScreenInstance.table.setSelection1(3,confScreenHandle,\"table\")\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][1])\n\n\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\nconfScreenInstance.table.setSelection1(1,confScreenHandle,\"table\")\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][2])\ncreatePopInstance = GenerateReportsPopClass(setup.d)\ncreatePopHandle = getHandle(setup, \"config_popup\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['nfName'],\"nfautomationHost1\")\ncreatePopInstance.dropdown.doSelection(createPopHandle,\"FA\",'createdialog','networkElement1')\ncreatePopInstance.dropdown.doSelection(createPopHandle,\"HA\",'createdialog','networkElement2')\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['port'],\"12\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['protocol'],\"23\")\ncreatePopInstance.dropdown.customClick(createPopHandle['createdialog']['submit'])\n\n# confScreenHandle = getHandle(setup,\"configuration_Screen\")\n\n\n\n\nsetup.d.close()","repo_name":"mayankmahajan/html5auto","sub_path":"suite_ibconfiguration/ibnetwork.py","file_name":"ibnetwork.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"14967167538","text":"from hamcrest import (\n assert_that,\n equal_to\n)\n\nfrom pynformatics.testutils import TestCase\nfrom pynformatics.utils.context import Context\n\n\nclass TestUtils__context_encode(TestCase):\n def test_simple(self):\n context = Context(\n user_id=1,\n problem_id=2,\n statement_id=None,\n )\n assert_that(\n context.encode(),\n equal_to({\n 'user_id': 1,\n 'problem_id': 2,\n 'statement_id': None,\n })\n )\n","repo_name":"riskingh/informatics-mccme-ru","sub_path":"pynformatics/tests/unit/utils/context/encode/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"}
+{"seq_id":"73876346495","text":"\ndef get_pisano_period(m=10):\n prev, curr = 0, 1\n for i in range(0, m * m):\n prev, curr = curr, (prev + curr) % m\n\n # pisano number starts with 01\n if prev == 0 and curr == 1:\n return i+1\n return 60\n \ndef fib_sum(n):\n pp = get_pisano_period(10)\n n = n % pp\n\n if n <= 1:\n return n\n\n prev, cur, sum = 0, 1, 1\n for _ in range(2, n+1):\n prev, cur = cur, (prev + cur) % 10\n sum += cur\n return sum % 10\n\nif __name__ == '__main__':\n input_n = int(input())\n # input_n = 100\n # input_n = 240\n # input_n = 832564823476\n print(fib_sum(input_n))\n","repo_name":"sakshamsds/data-structures-and-algorithms","sub_path":"ucsd_specialization/01_Algorithmic_Toolbox/week2/2_6_fib_sum.py","file_name":"2_6_fib_sum.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74287780096","text":"\"\"\"\n Remember to update MAVLink dialect with:\n cp dialects/* .venv/lib/python3.7/site-packages/message_definitions/v1.0/\n\"\"\"\n\nimport time\nimport serial\nimport logging\n\nimport settings\n\nfrom pymavlink import mavutil\n\n\nlogging.basicConfig(**settings.LOGGING_KWARGS)\n\n\ndef connect_vehicle():\n while True:\n try:\n link = mavutil.mavlink_connection(**settings.MAVLINK_DAEMON)\n logging.info(\n f\"Vehicle connected at {settings.MAVLINK_DAEMON['device']}\")\n break\n except Exception as e:\n logging.error(f\"Vehicle connection error: {e}\")\n time.sleep(1)\n\n return link\n\n\nvehicle_link = connect_vehicle()\n\ntry:\n ground_link = mavutil.mavlink_connection(\n input=False,\n **settings.MAVLINK_GROUND\n )\n logging.info(f\"Ground at {settings.MAVLINK_GROUND['device']}\")\nexcept serial.SerialException:\n ground_link = None\n logging.warning(f\"NO GROUND LINK at {settings.MAVLINK_GROUND}\")\n\n\ntukano_link = mavutil.mavlink_connection(\n input=False,\n **settings.MAVLINK_TUKANO\n)\nlogging.info(f\"MAVLink tukano at {settings.MAVLINK_TUKANO['device']}\")\n\nlogging.info(\"Waiting for vehicle hearbeat\")\nvehicle_link.wait_heartbeat()\nlogging.info(\"Vehicle hearbeat received!\")\n\n\nwhile True:\n\n # From vehicle to ground/tukano\n try:\n vehicle_m = vehicle_link.recv()\n except ConnectionResetError as e:\n logging.error(f\"MAVLINK VEHICLE ERROR: {e}\")\n vehicle_link = connect_vehicle()\n continue\n\n vehicle_msgs = vehicle_link.mav.parse_buffer(vehicle_m)\n if vehicle_msgs:\n for vehicle_msg in vehicle_msgs:\n logging.debug(f\"(VEHICLE_MSG) {vehicle_msg}\")\n if ground_link:\n ground_link.write(vehicle_msg.get_msgbuf())\n\n if tukano_link:\n tukano_link.write(vehicle_msg.get_msgbuf())\n\n # From ground to vehicle\n if ground_link:\n ground_m = ground_link.recv()\n ground_msgs = ground_link.mav.parse_buffer(ground_m)\n if ground_msgs:\n for ground_msg in ground_msgs:\n logging.info(f\"(GROUND_MSG) {ground_msg}\")\n vehicle_link.write(ground_msg.get_msgbuf())\n\n # From tukano to vehicle\n tukano_m = tukano_link.recv()\n tukano_msgs = tukano_link.mav.parse_buffer(tukano_m)\n if tukano_msgs:\n for tukano_msg in tukano_msgs:\n logging.info(f\"(TUKANO_MSG) {tukano_msg}\")\n vehicle_link.write(tukano_msg.get_msgbuf())\n\n time.sleep(settings.SLEEPING_TIME)\n","repo_name":"josezy/tukano","sub_path":"src/deprecated_mavlink_daemon.py","file_name":"deprecated_mavlink_daemon.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"31414642519","text":"\nclass Solution:\n def is_prime(self,n):\n if n == 2 or n == 3: return True\n if n < 2 or n%2 == 0: return False\n if n < 9: return True\n if n%3 == 0: return False\n r = int(n**0.5)\n # since all primes > 3 are of the form 6n ± 1\n # start with f=5 (which is prime)\n # and test f, f+2 for being prime\n # then loop by 6. \n f = 5\n while f <= r:\n \n if n % f == 0: return False\n if n % (f+2) == 0: return False\n f += 6\n return True\n \n def isUgly(self, n: int) -> bool:\n if n == 1 or n == 0:\n return True\n n = abs(n)\n # in an iteration, check find its factors. While finding, check whether each factor is prime or not\n # we dont need to iterate from 1 to n. From 1 to sqrt(n) is sufficient\n for i in range(7,n):\n # if i is a factor of n and bigger then 5, check if it s prime or not. Else, do nothing, so there is no else\n if n % i == 0:\n print(\"one of the factor is %s\" % i)\n # check if i is prime or not\n if self.is_prime(i):\n return False\n return True\n\nepsi = Solution()\nprint(epsi.isUgly(-2147483648))\n","repo_name":"HuachenZH/Python_leet","sub_path":"Math/263. Ugly Number/263.py","file_name":"263.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39759816065","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def pathSum(self, root: Optional[TreeNode], targetSum: int) -> int:\n prefix_sum = defaultdict(int)\n prefix_sum[0] = 1\n \n path_sum = 0\n def dfs(node, psum, prefix_dict):\n nonlocal path_sum\n if not node:\n return\n \n psum += node.val\n prefix_to_delete = psum - targetSum \n if prefix_to_delete in prefix_dict:\n path_sum += prefix_dict[prefix_to_delete]\n prefix_dict[psum] += 1\n dfs(node.left ,psum , prefix_dict)\n dfs(node.right , psum , prefix_dict)\n \n prefix_dict[psum] -= 1\n dfs(root, 0 , prefix_sum)\n return path_sum\n \n \n \n \n ","repo_name":"Natnael16/competitiveprogramming","sub_path":"0437-path-sum-iii/0437-path-sum-iii.py","file_name":"0437-path-sum-iii.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"25440677068","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 18 14:53:48 2023\n\n@author: edu_c\n\"\"\"\n\ndef isYearLeap(year):\n if (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):\n return True\n else:\n return False\n \n\ndef daysInMonth(year, month):\n meses_31 = [1,3,5,7,8,10,12]\n meses_30 = [4,6,9,11]\n if (month in meses_31):\n return 31 \n elif (month in meses_30):\n return 30\n elif (month == 2):\n if (isYearLeap(year)):\n return 29\n else:\n return 28 \n else:\n return None\n\ndef dias_del_anio(year, month, day):\n dias = 0\n if not((month > 0 and month <12) and (year > 0) and (day > 0 and day <= (daysInMonth(year, month)))):\n return None\n for i in range (1, month):\n dias += daysInMonth(year, i)\n dias += day\n return dias\n\n\n\nprint(dias_del_anio(2023,5,19)) #debe imprimir 139\nprint(dias_del_anio(2023,2,29)) #debe imprimir none","repo_name":"educeav/python_essentials","sub_path":"ejercicio4_dias_correspondientes_de__.py","file_name":"ejercicio4_dias_correspondientes_de__.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"5077011232","text":"# # Python example - Fourier transform using numpy.fft method\n# import numpy as np\n# import pandas as pd\n# import matplotlib.pyplot as plt\n\n# df = pd.read_csv('E:\\\\Django_proj\\\\mysite\\\\media\\\\Acc_time.csv')\n# length = 40960\n\n# # How many time points are needed i,e., Sampling Frequency\n# samplingFrequency = length;\n\n# # At what intervals time points are sampled\n# samplingInterval = 1 / samplingFrequency;\n\n# # # Create subplot\n# # figure, axis = plotter.subplots(4, 1)\n# # plotter.subplots_adjust(hspace=1)\n\n# # Time points\n# time = df['time']\n# amplitude = df['amplitude']\n\n# # Frequency domain representation\n# fourierTransform = np.fft.fft(amplitude)/len(amplitude) # Normalize amplitude\n# fourierTransform = fourierTransform[range(int(len(amplitude)/2))] # Exclude sampling frequency\n# tpCount = len(amplitude)\n# values = np.arange(int(tpCount/2))\n# timePeriod = tpCount/samplingFrequency\n# frequencies = values/timePeriod\n\n# # Frequency domain representation\n\n# plt.title('Fourier transform depicting the frequency components')\n# plt.plot(frequencies, abs(fourierTransform))\n# plt.xlabel('Frequency')\n# plt.ylabel('Amplitude')\n# plt.show()\n\n\n\nimport csv\nimport pandas as pd\n# import numpy as np\n\nfile = (\"E:\\\\Django_proj\\\\restapi\\\\media\\\\Acc_time_ext.csv\")\n# csv = pd.read_csv(file)\n# csv = pd.read_csv(file, header=0, nrows=0).columns.tolist()\n# first = csv.index('time')\n# second = csv.index('amplitude')\n# if csv != first:\n# print('yes')\n# else:\n# print('no')\n# print(csv)\n# print(second)\n\nfile = (\"E:\\\\Django_proj\\\\restapi\\\\media\\\\Acc_time_ext.csv\")\n\ndf=pd.read_csv(file)\ncol = df.columns.tolist()\nn = len(col)\nprint(n)\nif col[0] != 'time' or col[1] != 'amplitude':\n print('column')\nelse:\n print('column')\n\n# time = len(csv[0])\n# num = csv['time']. iloc[1]\n# sf = int((time/num)*1000)\n# print(sf)\n# val = len(file.columns)\n# print(time)\n\n","repo_name":"paranormman/TEAL_project","sub_path":"visual/fft.py","file_name":"fft.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11839964599","text":"import string\n\nletters = string.ascii_uppercase\n\ndef get_result():\n total = 0\n with open('./files/p022_names.txt', 'r') as f:\n names = list(f.read().replace('\"','').split(','))\n names.sort()\n print(names)\n for pos in range(len(names)):\n score = 0\n for letter in names[pos]:\n score += letters.index(letter) + 1\n total += score * (pos + 1)\n return total","repo_name":"bruno-zaccariello/usefull","sub_path":"EulerProject/euler_22.py","file_name":"euler_22.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"7342725685","text":"import random\n\n#The code for running the dice \ndef game():\n rand = random.randint(1, 6)\n print(\"Your dice rool is \" + str(rand))\n restart = input(\"do u want to get another dice rool?(y/n): \")\n if restart == 'y':\n game()\n else:\n print(\"Thanks for joining!\")\n\ndef again():\n t = True\n count = 0\n while t :\n rand = random.randint(1, 6)\n print(\"Your dice rools are: \")\n print(rand)\n if count == 99:\n break\n else:\n count += 1\n continue\n \n#Taking input from input for starting the game!\nprint(\"If you want 100 dice rools type '100': \")\nstart = input(\"Are you ready? (y/n/100): \")\nif start == \"y\":\n print(\"\")\n game()\nelif start == '100':\n again()\n\nelse:\n print(\"Thanks for joining us\")\n\n \n\n","repo_name":"AbhinavSilwal/dice-rolling-simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"37835021704","text":"import pyttsx3\r\nimport PyPDF2\r\nfrom tkinter import * # Importing the GUI named tkinter\r\nfrom tkinter.filedialog import *\r\nfrom page_range import *\r\nengine = pyttsx3.init() # Object creation\r\naudiotabclose = ''\r\n\r\ndef close_window_a():\r\n engine.stop()\r\n audiotabclose.destroy()\r\n\r\n\r\ndef audio(pageRange, audiotab):\r\n # Create a window\r\n audiotabclose = audiotab\r\n\r\n # Set Title as Image Loader\r\n audiotab.title(\"AudioBook\")\r\n\r\n # Set the resolution of window\r\n audiotab.geometry(\"1000x600\")\r\n audiotab.configure(bg = \"#FFFFFF\")\r\n canvas = Canvas(\r\n audiotab,\r\n bg = \"#FFFFFF\",\r\n height = 600,\r\n width = 1000,\r\n bd = 0,\r\n highlightthickness = 0,\r\n relief = \"ridge\")\r\n canvas.place(x = 0, y = 0)\r\n\r\n background_img = PhotoImage(file = f\"background 2.png\")\r\n background = canvas.create_image(\r\n 534.5, 310.5,\r\n image=background_img)\r\n\r\n img0 = PhotoImage(file = f\"stop 2.png\")\r\n b0 = Button(\r\n image = img0,\r\n borderwidth = 0,\r\n highlightthickness = 0,\r\n command = close_window_a,\r\n relief = \"flat\")\r\n\r\n b0.place(\r\n x = 386, y = 326,\r\n width = 249,\r\n height = 78)\r\n\r\n # Allow Window to be resizable\r\n \r\n frame = Frame(audiotab)\r\n frame.pack()\r\n \r\n \r\n\r\n\r\n rate = engine.getProperty('rate')\r\n print (rate) # Printing the current voice rate\r\n engine.setProperty('rate', 165) # Setting up the new voice rate\r\n volume = engine.getProperty('volume')\r\n print (volume) # Printing the current volume level\r\n engine.setProperty('volume',1.0) # Setting up the volume level between 0 and 1\r\n voices = engine.getProperty('voices')\r\n engine.setProperty('voice', voices[1].id)\r\n \r\n book=askopenfilename()\r\n pdfreader=PyPDF2.PdfFileReader(book)\r\n pages=pdfreader.numPages\r\n try:\r\n a , b = get_text(pageRange)\r\n for num in range(a, b):\r\n page=pdfreader.getPage(num)\r\n text=page.extractText()\r\n player=pyttsx3.init()\r\n player.say(text)\r\n player.runAndWait()\r\n except:\r\n for num in range(0,pages):\r\n page=pdfreader.getPage(num)\r\n text=page.extractText()\r\n player=pyttsx3.init()\r\n player.say(text)\r\n player.runAndWait()\r\n finally:\r\n engine.save_to_file(text, 'audio.mp3') # Saving the voice to a file \r\n engine.runAndWait()\r\n print(\"Your audiobook file has been generated as an mp3 file. Check the project file directory for getting the file.\")\r\n audiotab.mainloop()\r\n\r\n\r\n\r\n \r\n","repo_name":"Lakshminarayana155/Audio-book-using-python-2nd-year-project-","sub_path":"audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"70052074497","text":"from __future__ import with_statement\nimport functools, random\nfrom array import array\nfrom heapq import nsmallest\nfrom operator import itemgetter\nfrom threading import Lock\nfrom time import time\n\nfrom whoosh.compat import iteritems, xrange\n\n\ntry:\n from collections import Counter\nexcept ImportError:\n class Counter(dict):\n def __missing__(self, key):\n return 0\n\n\ndef unbound_cache(func):\n \"\"\"Caching decorator with an unbounded cache size.\n \"\"\"\n\n cache = {}\n\n @functools.wraps(func)\n def caching_wrapper(*args):\n try:\n return cache[args]\n except KeyError:\n result = func(*args)\n cache[args] = result\n return result\n\n return caching_wrapper\n\n\ndef lru_cache(maxsize=100):\n \"\"\"A simple cache that, when the cache is full, deletes the least recently\n used 10% of the cached values.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0] # Hits, misses\n data = {}\n lastused = {}\n\n @functools.wraps(user_function)\n def wrapper(*args):\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n for k, _ in nsmallest(maxsize // 10 or 1,\n iteritems(lastused),\n key=itemgetter(1)):\n del data[k]\n del lastused[k]\n data[args] = user_function(*args)\n result = data[args]\n finally:\n lastused[args] = time()\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n data.clear()\n lastused.clear()\n stats[0] = stats[1] = 0\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n\ndef lfu_cache(maxsize=100):\n \"\"\"A simple cache that, when the cache is full, deletes the least frequently\n used 10% of the cached values.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). 
Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0] # Hits, misses\n data = {}\n usecount = Counter()\n\n @functools.wraps(user_function)\n def wrapper(*args):\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n for k, _ in nsmallest(maxsize // 10 or 1,\n iteritems(usecount),\n key=itemgetter(1)):\n del data[k]\n del usecount[k]\n data[args] = user_function(*args)\n result = data[args]\n finally:\n usecount[args] += 1\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n data.clear()\n usecount.clear()\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n\ndef random_cache(maxsize=100):\n \"\"\"A very simple cache that, when the cache is filled, deletes 10% of the\n cached values AT RANDOM.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0] # hits, misses\n data = {}\n\n @functools.wraps(user_function)\n def wrapper(*args):\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n keys = data.keys()\n for i in xrange(maxsize // 10 or 1):\n n = random.randint(0, len(keys) - 1)\n k = keys.pop(n)\n del data[k]\n data[args] = user_function(*args)\n result = data[args]\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n data.clear()\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n\ndef db_lru_cache(maxsize=100):\n \"\"\"Double-barrel least-recently-used cache decorator. This is a simple\n LRU algorithm that keeps a primary and secondary dict. Keys are checked\n in the primary dict, and then the secondary. Once the primary dict fills\n up, the secondary dict is cleared and the two dicts are swapped.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). 
Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n # Cache1, Cache2, Pointer, Hits, Misses\n stats = [{}, {}, 0, 0, 0]\n\n @functools.wraps(user_function)\n def wrapper(*args):\n ptr = stats[2]\n a = stats[ptr]\n b = stats[not ptr]\n key = args\n\n if key in a:\n stats[3] += 1 # Hit\n return a[key]\n elif key in b:\n stats[3] += 1 # Hit\n return b[key]\n else:\n stats[4] += 1 # Miss\n result = user_function(*args)\n a[key] = result\n if len(a) >= maxsize:\n stats[2] = not ptr\n b.clear()\n return result\n\n def cache_info():\n return stats[3], stats[4], maxsize, len(stats[0]) + len(stats[1])\n\n def cache_clear():\n \"\"\"Clear the cache and cache statistics\"\"\"\n stats[0].clear()\n stats[1].clear()\n stats[3] = stats[4] = 0\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n\n return wrapper\n return decorating_function\n\n\ndef clockface_lru_cache(maxsize=100):\n \"\"\"Least-recently-used cache decorator.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library, but\n uses the clock face LRU algorithm instead of an ordered dictionary.\n\n If *maxsize* is set to None, the LRU features are disabled and the cache\n can grow without bound.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics named tuple (hits, misses, maxsize, currsize)\n with f.cache_info(). Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0, 0] # hits, misses, hand\n data = {}\n\n if maxsize:\n # The keys at each point on the clock face\n clock_keys = [None] * maxsize\n # The \"referenced\" bits at each point on the clock face\n clock_refs = array(\"B\", (0 for _ in xrange(maxsize)))\n lock = Lock()\n\n @functools.wraps(user_function)\n def wrapper(*args):\n key = args\n try:\n with lock:\n pos, result = data[key]\n # The key is in the cache. Set the key's reference bit\n clock_refs[pos] = 1\n # Record a cache hit\n stats[0] += 1\n except KeyError:\n # Compute the value\n result = user_function(*args)\n with lock:\n # Current position of the clock hand\n hand = stats[2]\n # Remember to stop here after a full revolution\n end = hand\n # Sweep around the clock looking for a position with\n # the reference bit off\n while True:\n hand = (hand + 1) % maxsize\n current_ref = clock_refs[hand]\n if current_ref:\n # This position's \"referenced\" bit is set. Turn\n # the bit off and move on.\n clock_refs[hand] = 0\n elif not current_ref or hand == end:\n # We've either found a position with the\n # \"reference\" bit off or reached the end of the\n # circular cache. 
So we'll replace this\n # position with the new key\n current_key = clock_keys[hand]\n if current_key in data:\n del data[current_key]\n clock_keys[hand] = key\n clock_refs[hand] = 1\n break\n # Put the key and result in the cache\n data[key] = (hand, result)\n # Save the new hand position\n stats[2] = hand\n # Record a cache miss\n stats[1] += 1\n return result\n\n else:\n @functools.wraps(user_function)\n def wrapper(*args):\n key = args\n try:\n result = data[key]\n stats[0] += 1\n except KeyError:\n result = user_function(*args)\n data[key] = result\n stats[1] += 1\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n \"\"\"Clear the cache and cache statistics\"\"\"\n data.clear()\n stats[0] = stats[1] = stats[2] = 0\n for i in xrange(maxsize):\n clock_keys[i] = None\n clock_refs[i] = 0\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n","repo_name":"zhl2008/awd-platform","sub_path":"web_flaskbb/lib/python2.7/site-packages/whoosh/util/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":11852,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"79"}
+{"seq_id":"18041374864","text":"#!/usr/bin/python3\nfrom subprocess import call\nimport os \n\ncall(['git', 'clone', 'https://github.com/CDPS-ETSIT/practica_creativa2.git'])\ncall(['sudo', 'apt-get', 'update'])\ncall(['sudo', 'apt-get', 'install', '-y', 'python3-pip'])\n\ncall(['pip3', 'install', '-r', 'requirements.txt'])\n\nos.chdir('practica_creativa2/bookinfo/src/productpage')\n\nos.environ['GROUP_NUMBER'] = '36'\nnumGrupo = os.environ.get('GROUP_NUMBER')\n\nos.chdir('templates')\ncall(['cp', 'productpage.html', 'productpage_temporal.html'])\nfin = open('productpage_temporal.html', 'r')\nfout = open('productpage.html', 'w')\n\nfor line in fin:\n\tif '{% block title %}Simple Bookstore App{% endblock %}' in line :\n\t\tfout.write(line.replace('{% block title %}Simple Bookstore App{% endblock %}', '{% block title %}Simple Bookstore App [' + numGrupo + ']{% endblock %}'))\n\telse :\n\t\tfout.write(line)\n\nfin.close()\nfout.close()\ncall(['rm', '-f', 'productpage_temporal.html'])\n\nos.chdir('..')\ncall(['python3', 'productpage_monolith.py', '9080'])\n","repo_name":"luis-trave/Creativa2Def","sub_path":"apartado1/apartado1.py","file_name":"apartado1.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14684841275","text":"from istanza import Istanza\nfrom greedy import Greedy\nfrom simulatedAnnealing import SimulatedAnnealing\nfrom pathRelinking import PathRelinking\nimport os\nfrom heapq import nsmallest\nfrom random import choice\n\nclass Menù():\n\tdef __init__(self, config, mainW):\n\t\tself.config = config # Configurazione\n\t\tself.mainW = mainW\t # Interfaccia grafica\n\t\t\n\t\t# Istanze degli algoritmi\n\t\tself.classeIstanza = Istanza(config)\n\t\tself.classeGreedy = Greedy(config)\n\t\tself.classeSimulatedAnnealing = SimulatedAnnealing(config)\n\t\tself.classePathRelinking = PathRelinking(config)\n\t\t\n\t\t# Strutture dati contenenti i contenitori per la grafica\n\t\tself.graficaGreedy = [mainW.greedy_1, mainW.greedy_2]\n\t\tself.graficaSA = [mainW.simulated_annealing_1, mainW.simulated_annealing_2]\n\t\tself.graficaPR = [mainW.path_relinking_1, mainW.path_relinking_2]\n\t\t\n\t\t# Struttura dati contenente le soluzioni create\n\t\tself.istanzaCorrente = None\n\t\tself.listaGreedy = []\n\t\tself.listaSimulatedAnnealing = []\n\t\tself.listaPathRelinking = []\n\t\n\t'''\n\tFunzione eseguita dal thread demone, gestisce l'interfaccia utente.\n\t'''\n\tdef start(self):\n\t\t# Menù contestuale\n\n\t\ttitolo = \"\"\"\n ______ _ _ _ _ \n | ___ \\ | | | | | |(_) \n | |_/ / _ __ ___ __ _ ___ | |_ | |_ ___ __| | _ \n | __/ | '__| / _ \\ / _` | / _ \\| __|| __| / _ \\ / _` || | \n | | | | | (_) || (_| || __/| |_ | |_ | (_) | | (_| || | \n \\_| |_| \\___/ \\__, | \\___| \\__| \\__| \\___/ \\__,_||_| \n __/ | \n |___/ \n______ _ _____ _ _ \n| ___ \\(_) | _ | | | (_) \n| |_/ / _ ___ ___ _ __ ___ __ _ | | | | _ __ ___ _ __ __ _ | |_ _ __ __ __ _ \n| / | | / __| / _ \\| '__| / __| / _` | | | | || '_ \\ / _ \\| '__| / _` || __|| |\\ \\ / / / _` |\n| |\\ \\ | || (__ | __/| | | (__ | (_| | \\ \\_/ /| |_) || __/| | | (_| || |_ | | \\ V / | (_| |\n\\_| \\_||_| \\___| \\___||_| \\___| \\__,_| \\___/ | .__/ \\___||_| \\__,_| \\__||_| \\_/ \\__,_|\n | | \n |_| \"\"\"\n\t\tprint(titolo)\n\n\t\t# Dizionario per gestire la scelta utente\n\t\tscelta = {\n\t\t\t\t\t1 : self.soluzioneAutomatica,\n\t\t\t\t\t2 : self.nuovaIstanza,\n\t\t\t\t\t3 : self.nuovaGreedy,\n\t\t\t\t\t4 : self.nuovoSA,\n\t\t\t\t\t5 : self.nuovoPR,\n\t\t\t\t\t6 : self.visualizzaMigliori,\n\t\t\t\t\t7 : self.visualizzaMigliore,\n\t\t\t\t\t8 : self.config.mostra,\n\t\t\t\t\t9 : self.config.modifica,\n\t\t\t\t\t10 : self.aiuto,\n\t\t\t\t\t11 : self.uscita\n\t\t}\n\t\t\n\t\t# Menù principale\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\trisposta = int(input(\n\"\"\"\\nSelezionare un'opzione:\n\n1) Crea soluzione automatica (istanza + GRASP + PR)\n2) Crea una nuova istanza\n3) Applica un algoritmo Greedy\n4) Applica Simulated Annealing\n5) Applica Path Relinking\n6) Visualizza dati soluzioni migliori per categoria\n7) Visualizza soluzione migliore\n8) Visualizza configurazione\n9) Modifica configurazione\n10) Aiuto\n11) Esci\n\n>: \"\"\"))\n\t\t\t\tif risposta < 1 or risposta > len(scelta):\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\")\n\t\t\telse:\n\t\t\t\tscelta[risposta]()\n\t\n\t'''\n\tFunzione per generare una soluzione ottima utilizzando il metodo GRASP + Path Relinking, tutto automatizzato.\n\t'''\n\tdef soluzioneAutomatica(self):\n\t\tprint(\"\\nGenerazione nuova istanza...\\n\")\n\t\tself.nuovaIstanza()\n\t\tprint(\"Generazione istanza completata.\\n\\n Inizio generazione soluzioni greedy...\\n\")\n\n\t\t# Greedy\n\t\ttipoGreedy = [\"LPT\", \"SPT\", \"FIFO\"]\n\t\tfor i in 
range(self.config.GreedyGenerabili):\n\t\t\tprint(\"Generazione soluzione {} di {}\\n\".format(i + 1, self.config.GreedyGenerabili))\n\t\t\tself.listaGreedy.append(self.classeGreedy.start(self.istanzaCorrente, choice(tipoGreedy)))\n\t\tprint(\"Generazione soluzioni greedy completata.\\n\")\n\t\t# Simulated Annealing\n\t\tprint(\"Inizio generazione soluzioni Simulated Annealing...\\n\")\n\t\tfor i, greedy in enumerate(self.listaGreedy, start=1):\n\t\t\tprint(\"Generazione soluzione {} di {}\\n\".format(i, len(self.listaGreedy)))\n\t\t\tself.listaSimulatedAnnealing.append(self.classeSimulatedAnnealing.start(greedy))\n\t\tprint(\"Generazione soluzioni Simulated Annealing completata.\\n\")\n\t\t\n\t\t# Path Relinking\n\t\tprint(\"Inizio generazione soluzioni Path Relinking...\\n\")\n\t\tfor i in range(self.config.PRGenerabili):\n\t\t\tprint(\"Generazione soluzione {} di {}\\n\".format(i + 1, self.config.PRGenerabili))\n\t\t\tself.listaPathRelinking.append(self.classePathRelinking.start(choice(self.listaSimulatedAnnealing), choice(self.listaSimulatedAnnealing)))\n\t\tprint(\"Generazione soluzioni Path Relinking completata.\")\n\t\t\n\t\t# Ricerca soluzione migliore\n\t\tsoluzioniTotali = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\tsoluzioneMigliore = nsmallest(1, soluzioniTotali, key=lambda x : x.makeSpan)[0]\n\n\t\tif soluzioneMigliore.tipo == \"G\":\n\t\t\tself.graficaGreedy[0].tipo = soluzioneMigliore.tipoGreedy\n\t\t\tself.graficaGreedy[0].popolamentoDati(soluzioneMigliore)\n\t\telif soluzioneMigliore.tipo == \"SA\":\n\t\t\tself.graficaSA[0].popolamentoDati(soluzioneMigliore)\n\t\telse:\n\t\t\tself.graficaPR[0].popolamentoDati(soluzioneMigliore)\n\t\t\n\t\tself.visualizzaSoluzione(soluzioneMigliore)\n\n\t'''\n\tFunzione per creare una nuova istanza del problema e graficarla.\n\t'''\n\tdef nuovaIstanza(self):\n\t\tself.istanzaCorrente = self.classeIstanza.start()\n\n\t\t# Reset completo di grafica e soluzioni\n\t\tself.resetGrafica()\n\t\tself.listaGreedy = []\n\t\tself.listaSimulatedAnnealing = []\n\t\tself.listaPathRelinking = []\n\n\t\tself.mainW.istanza.popolamentoDati(self.istanzaCorrente)\n\t\n\t'''\n\tFunzione per creare una nuova soluzione greedy. Viene richiesto all'utente la tipologia desiderata di greedy, infine viene graficata la soluzione creata.\n\t'''\n\tdef nuovaGreedy(self):\n\t\tif not self.istanzaCorrente:\n\t\t\tprint(\"\\nUna soluzione greedy necessita di una istanza di un problema per poter operare.\\nPrima di creare nuove soluzioni, generare una nuova istanza.\\n\")\n\t\t\tinput(\">: Premere un tasto per continuare\")\n\t\t\treturn\n\t\t\n\t\t# Dizionario per gestire la scelta utente\n\t\tscelta = {\n\t\t\t\t\t1 : \"LPT\",\n\t\t\t\t\t2 : \"SPT\",\n\t\t\t\t\t3 : \"FIFO\",\n\t\t}\n\t\t\n\t\t# Richiesta tipologia greedy iterativa\n\t\tflag = True\n\t\twhile flag:\n\t\t\tflag = False\n\t\t\trisposta = input(\n\"\"\"\\nQuale tipologia greedy utilizzare? 
(premere Invio per annullare):\n\n1) LPT (Longest Processing Time)\n2) SPT (Shortest Processing Time)\n3) FIFO (First In First Out)\n\n>: \"\"\")\n\t\t\tif risposta == \"\":\n\t\t\t\tprint(\"\\nAnnullato\")\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\trisposta = int(risposta)\n\t\t\t\tif risposta < 1 or risposta > len(scelta):\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\\n\\n\")\n\t\t\t\tflag = True\n\t\t\telse:\n\t\t\t\t# Nuova soluzione\n\t\t\t\tnuovaGreedy = self.classeGreedy.start(self.istanzaCorrente, scelta[risposta])\n\t\t\t\t\n\t\t\t\t# Visualizzazione e salvataggio in memoria\n\t\t\t\tself.listaGreedy.append(nuovaGreedy)\n\t\t\t\tself.resetGrafica()\n\t\t\t\tself.graficaGreedy[0].tipo = scelta[risposta] \n\t\t\t\tself.graficaGreedy[0].popolamentoDati(nuovaGreedy)\n\t\t\t\t\n\t\t\t\tself.visualizzaSoluzione(nuovaGreedy)\n\n\t'''\n\tFunzione che genera una nuova soluzione SA a partire da una soluzione greedy. La soluzione viene infine graficata.\n\t'''\n\tdef nuovoSA(self):\n\t\tif len(self.listaGreedy) + len(self.listaSimulatedAnnealing) + len(self.listaPathRelinking) == 0:\n\t\t\tprint(\"\\nSimulated Annealing necessita di una soluzione iniziale.\\nPrima di utilizzare questo algoritmo, generare una nuova soluzione di classe Greedy.\\n\")\n\t\t\tinput(\">: Premere un tasto per continuare\")\n\t\t\treturn\n\t\t\n\t\tflag = True\n\t\twhile flag:\n\t\t\tflag = False\n\t\t\tprint(\"\\nQuale soluzione adottare?\")\n\t\t\tindice = 1\n\t\t\tif len(self.listaGreedy) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Greedy]\\n\")\n\t\t\t\tfor soluzione in self.listaGreedy:\n\t\t\t\t\tprint(str(indice) + \") Tipo: \" + soluzione.tipoGreedy + \" Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaSimulatedAnnealing) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Simulated Annealing]\\n\")\n\t\t\t\tfor soluzione in self.listaSimulatedAnnealing:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaPathRelinking) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Path Relinking]\\n\")\n\t\t\t\tfor soluzione in self.listaPathRelinking:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\t\n\t\t\t# Input utente\n\t\t\trisposta = input(\"\\n(premere Invio per annullare)>: \")\n\t\t\tif risposta == \"\":\n\t\t\t\tprint(\"\\nAnnullato\")\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\trisposta = int(risposta)\n\t\t\t\tif risposta < 1 or risposta > indice - 1:\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\")\n\t\t\t\tflag = True\n\t\t\telse:\n\t\t\t\t# Nuova soluzione\n\t\t\t\tlistaTotale = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\t\t\tsoluzione = listaTotale[risposta - 1]\n\t\t\t\tnuovoSA = self.classeSimulatedAnnealing.start(soluzione)\n\t\t\t\t\n\t\t\t\tself.confrontaSoluzioni(nuovoSA, soluzione)\n\t\t\t\t\n\t\t\t\t# Visualizzazione e salvataggio in memoria\n\t\t\t\tself.listaSimulatedAnnealing.append(nuovoSA)\n\t\t\t\tself.resetGrafica()\n\t\t\t\tself.graficaSA[0].popolamentoDati(nuovoSA)\n\n\t\t\t\t# Visualizzazione soluzione di partenza\n\t\t\t\tif soluzione.tipo == 
\"G\":\n\t\t\t\t\tself.graficaGreedy[0].tipo = soluzione.tipoGreedy\n\t\t\t\t\tself.graficaGreedy[0].popolamentoDati(soluzione)\n\t\t\t\telif soluzione.tipo == \"SA\":\n\t\t\t\t\tself.graficaSA[1].popolamentoDati(soluzione)\n\t\t\t\telse:\n\t\t\t\t\tself.graficaPR[0].popolamentoDati(soluzione)\n\t\n\t'''\n\tFunzione che crea una soluzione Path Relinking partendo da due soluzioni iniziali, definite dall'utente, perciò di qualsiasi classe.\n\t'''\n\tdef nuovoPR(self):\n\t\tif len(self.listaGreedy) + len(self.listaSimulatedAnnealing) < 2:\n\t\t\tprint(\"\\nPath Relinking necessita di due soluzioni iniziali.\\nPrima di utilizzare questo algoritmo, generare due nuove soluzioni di classe Greedy o Simulated Annealing.\\n\")\n\t\t\tinput(\">: Premere un tasto per continuare\")\n\t\t\treturn\n\t\t\n\t\tsoluzioniScelte = []\n\t\tflag = True\n\t\twhile flag:\n\t\t\tflag = False\n\t\t\tprint(\"\\nQuale soluzione adottare?\")\n\t\t\tindice = 1\n\t\t\tif len(self.listaGreedy) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Greedy]\\n\")\n\t\t\t\tfor soluzione in self.listaGreedy:\n\t\t\t\t\tprint(str(indice) + \") Tipo: \" + soluzione.tipoGreedy + \" Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaSimulatedAnnealing) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Simulated Annealing]\\n\")\n\t\t\t\tfor soluzione in self.listaSimulatedAnnealing:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaPathRelinking) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Path Relinking]\\n\")\n\t\t\t\tfor soluzione in self.listaPathRelinking:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\t\n\t\t\t# Input utente\n\t\t\trisposta = input(\"\\n(premere Invio per annullare)>: \")\n\t\t\tif risposta == \"\":\n\t\t\t\tprint(\"\\nAnnullato\")\n\t\t\ttry:\n\t\t\t\trisposta = int(risposta)\n\t\t\t\tif risposta < 1 or risposta > indice - 1:\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\")\n\t\t\t\tflag = True\n\t\t\telse:\n\t\t\t\t# Nuova soluzione\n\t\t\t\tlistaTotale = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\t\t\tsoluzioniScelte.append(listaTotale[risposta - 1])\n\t\t\t\tif len(soluzioniScelte) < 2: # Se non sono state scelte due soluzioni, ne verrà richiesta un'altra\n\t\t\t\t\tflag = True\n\t\t\n\t\t# Avvio algoritmo Path Relinking\n\t\tnuovoPR = self.classePathRelinking.start(soluzioniScelte[0], soluzioniScelte[1])\n\t\n\t\t# Visualizzazione e salvataggio in memoria\n\t\tself.listaPathRelinking.append(nuovoPR)\n\t\tself.resetGrafica()\n\t\tself.graficaPR[0].popolamentoDati(nuovoPR)\n\n\t\t# Stampa delle informazioni delle soluzioni\n\t\tself.confrontaSoluzioni(nuovoPR, soluzioniScelte[0], soluzioniScelte[1])\n\t\t\n\t\t# Visualizzazione soluzioni iniziali\n\t\tindiceG = 0\n\t\tindiceSA = 0\n\t\tindicePR = 1\n\t\tfor soluzione in soluzioniScelte:\n\t\t\tif soluzione.tipo == \"G\":\n\t\t\t\tself.graficaGreedy[indiceG].tipo = soluzione.tipoGreedy\n\t\t\t\tself.graficaGreedy[indiceG].popolamentoDati(soluzione)\n\t\t\t\tindiceG += 1\n\t\t\telif soluzione.tipo == 
\"SA\":\n\t\t\t\tself.graficaSA[indiceSA].popolamentoDati(soluzione)\n\t\t\t\tindiceSA += 1\n\t\t\telse:\n\t\t\t\tself.graficaPR[indicePR].popolamentoDati(soluzione)\n\t\t\t\tindicePR += 1\n\n\t'''\n\tFunzione che mostra le soluzioni migliori ottenute attualmente per ogni classe di algoritmi.\n\t'''\n\tdef visualizzaMigliori(self):\n\t\t# Ricerca heap per visualizzare le soluzioni migliori\n\t\tsolG = nsmallest(2, self.listaGreedy, key= lambda x : x.makeSpan)\n\t\tsolSA = nsmallest(2, self.listaSimulatedAnnealing, key= lambda x : x.makeSpan)\n\t\tsolPR = nsmallest(2, self.listaPathRelinking, key= lambda x : x.makeSpan)\n\t\t\n\t\tself.resetGrafica()\n\t\t\n\t\tindiceG = 0\n\t\tindiceSA = 0\n\t\tindicePR = 0\n\t\tfor soluzione in solG:\n\t\t\tself.visualizzaSoluzione(soluzione)\n\t\t\tself.graficaGreedy[indiceG].popolamentoDati(soluzione)\n\t\t\tself.graficaGreedy[indiceG].tipo = soluzione.tipoGreedy\n\t\t\tindiceG += 1\n\t\tfor soluzione in solSA:\n\t\t\tself.visualizzaSoluzione(soluzione)\n\t\t\tself.graficaSA[indiceSA].popolamentoDati(soluzione)\n\t\t\tindiceSA += 1\n\t\tfor soluzione in solPR:\n\t\t\tself.visualizzaSoluzione(soluzione)\n\t\t\tself.graficaPR[indicePR].popolamentoDati(soluzione)\n\t\t\tindicePR += 1\n\t\n\t'''\n\tFunzione per visualizzare la soluzione migliore trovata finora.\n\t'''\n\tdef visualizzaMigliore(self):\n\t\tlistaCompleta = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\tsoluzione = nsmallest(1, listaCompleta, key=lambda x : x.makeSpan)[0]\n\t\t\n\t\tself.visualizzaSoluzione(soluzione)\n\n\t\t# Per la grafica\n\t\tself.resetGrafica()\n\t\t\n\t\tif soluzione.tipo == \"G\":\n\t\t\tself.graficaGreedy[0].popolamentoDati(soluzione)\n\t\telif soluzione.tipo == \"SA\":\n\t\t\tself.graficaSA[0].popolamentoDati(soluzione)\n\t\telse:\n\t\t\tself.graficaPR[0].popolamentoDati(soluzione)\n\t\n\t'''\n\tFunzione per cancellare tutte le visualizzazioni degli algoritmi.\n\t'''\n\tdef resetGrafica(self):\n\t\tfor grafico in self.graficaGreedy + self.graficaSA + self.graficaPR:\n\t\t\tgrafico.cancellaDati()\n\t\n\t'''\n\tFunzione per la schermata informativa.\n\t'''\n\tdef aiuto(self):\n\t\tprint(\"\"\"\nPremessa:\n\nIl programma gestisce il seguente problema:\n\nLo scenario si compone di un poliambulatorio, composto da tre ambulatori medici identici e cinque medici, ognuno specializzato in un esame medico diverso. In tutto, gli ambulatori possono fornire un totale di cinque esami diversi.\nNel poliambulatorio entrano alcuni pazienti (numero variabile), ognuno può scegliere a quali esami sottoporsi, da un minimo di uno, ad un massimo di cinque. 
Quando un paziente occupa un ambulatorio, deve rimanerci dentro fino alla completa risoluzione di tutti i suoi esami, inoltre egli preclude ad altri la possibilità di utilizzare l'ambulatorio occupato.\nSiccome ogni tipologia di esame può essere eseguita solo da un medico in particolare, nello stesso istante non possono essere in esecuzione esami della stessa natura in ambulatori diversi.\nL'obiettivo del problema è fornire tutte le prestazioni mediche richiete dai pazienti, avendo un makespan minimo.\n\nCaratteristiche:\n\nIl programma permette all'utente di creare un nuovo problema da risolvere, partendo da una configurazione estesa personalizzabile.\nSuccessivamente è possibile creare soluzioni utilizzando diversi algoritmi:\n\n- Greedy: soluzione di partenza in cui è possibile sceglierne la tipologia (LPT, SPT, FIFO) e se utilizzare la randomicità durante la creazione.\n- Simulated Annealing: ricerca locale utilizzata per migliorare una soluzione.\n- Path Relinking: ricerca nello spazio ristretto alle soluzioni simili a quelle di input della procedura\n\nAll'utente viene fornita la possibilità di gestire manualmente la creazione delle soluzioni, oppure di avvalersi di una procedura automatica che, partendo dalla creazione di una nuova istanza del problema e arrivando all'applicazione di Path Relinking, genera una soluzione ottima al problema attuale.\nL'interfaccia grafica prevede una semplice visualizzazione delle soluzioni generate, utile per il confronto manuale da parte dell'utente.\n\t\t\"\"\")\n\t\tinput(\">: Premere un tasto per continuare\")\n\t\n\t'''\n\tFunzione per visualizzare informazioni inerenti la soluzione ottenuta.\n\t'''\n\tdef visualizzaSoluzione(self, soluzione):\n\t\tprint(\"\\nTipologia soluzione: {}\\nMakespan: {}\\nEfficienza: {:.2%}\".format(soluzione.tipo, soluzione.makeSpan, soluzione.efficienza))\n\t\n\t'''\n\tFunzione che mostra eventuali migliorie ottenute con la nuova soluzione. nuovaSoluzione2 è la seconda soluzione utilizzata durante Path Relinking.\n\t'''\n\tdef confrontaSoluzioni(self, nuovaSoluzione, vecchiaSoluzione, vecchiaSoluzione2=None):\n\t\tprint(\"\\nNuova soluzione:\")\n\t\tself.visualizzaSoluzione(nuovaSoluzione)\n\t\tprint(\"------------------\")\n\t\t\n\t\tself.visualizzaSoluzione(vecchiaSoluzione)\n\t\tif vecchiaSoluzione2:\n\t\t\tself.visualizzaSoluzione(vecchiaSoluzione2)\n\t\t\tvecchiaSoluzioneMin = min([vecchiaSoluzione, vecchiaSoluzione2], key=lambda x : x.makeSpan)\n\t\telse:\n\t\t\tvecchiaSoluzioneMin = vecchiaSoluzione\n\t\tprint(\"\\nRisultato finale:\")\n\t\tpercentualeFinale = nuovaSoluzione.makeSpan / vecchiaSoluzioneMin.makeSpan\n\t\t\n\t\tif percentualeFinale > 1:\n\t\t\tprint(\"\\nLa nuova soluzione è peggiorata del {:.2%}.\".format(1 - percentualeFinale))\n\t\telif percentualeFinale == 1:\n\t\t\tprint(\"\\nLa nuova soluzione possiede lo stesso makespan.\\n\")\n\t\telse:\n\t\t\tprint(\"\\nLa nuova soluzione è migliorata del {:.2%}.\".format(1 - percentualeFinale))\n\t'''\n\tFunzione per la gestione dell'uscita dal thread e dal programma.\n\t'''\n\tdef uscita(self):\n\t\tos._exit(1)","repo_name":"MicheleCESO/ROAmbulatori","sub_path":"menù.py","file_name":"menù.py","file_ext":"py","file_size_in_byte":18537,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29349126179","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom backend import * \nimport architectures\nimport sys\nimport numpy as np\n\n\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('sup_per_class', -1,\n 'Number of labeled samples used per class.')\n\nflags.DEFINE_integer('sup_seed', -1,\n 'Integer random seed used for labeled set selection.')\n\nflags.DEFINE_integer('sup_per_batch', 16,\n 'Number of labeled samples per class per batch.')\n\nflags.DEFINE_integer('unsup_batch_size', 64,\n 'Number of unlabeled samples per batch.')\n\nflags.DEFINE_integer('eval_interval', 500,\n 'Number of steps between evaluations.')\n\nflags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')\n\nflags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')\n\nflags.DEFINE_float('decay_steps', 4000,\n 'Learning rate decay interval in steps.')\n\nflags.DEFINE_float('visit_weight', 0.65, 'Weight for visit loss.')\n\nflags.DEFINE_integer('max_steps', 20000, 'Number of training steps.')\n\nflags.DEFINE_string('checkpoint_dir', '/harddisk/hdd_c/camelyon/code1/new-2015-test/IDC-new/result/model-all-3000-all/model', \n 'Save checkpoint path.')\n\nflags.DEFINE_string('logdir', '/harddisk/hdd_c/camelyon/code1/new-2015-test/IDC-new/semisup_bach/semi-all-3000-all', 'Training log path.')\n\nimport dataset as dataset_tools \nimport sys\nNUM_LABELS = dataset_tools.NUM_LABELS\nIMAGE_SHAPE = dataset_tools.IMAGE_SHAPE\n\n\ndef main(_):\n train_images, train_labels, val_images, val_labels, test_images, test_labels = dataset_tools.get_data()\n\n\n # Sample labeled training subset.\n seed = FLAGS.sup_seed if FLAGS.sup_seed != -1 else None\n sup_by_label = sample_by_label(train_images, train_labels,\n FLAGS.sup_per_class, NUM_LABELS, seed)\n\n graph = tf.Graph()\n with graph.as_default():\n model = SemisupModel(architectures.dataset_model, NUM_LABELS, IMAGE_SHAPE)\n \n# unsup_num = 3000\n # Set up inputs.\n# t_unsup_images, _ = create_input(train_images[0:unsup_num], train_labels[0:unsup_num], FLAGS.unsup_batch_size)\n t_unsup_images, _ = create_input(train_images, train_labels, FLAGS.unsup_batch_size)\n \n t_sup_images, t_sup_labels = create_per_class_inputs(sup_by_label, FLAGS.sup_per_batch)\n\n # Compute embeddings and logits.\n t_sup_emb = model.image_to_embedding(t_sup_images)\n t_unsup_emb = model.image_to_embedding(t_unsup_images)\n t_sup_logit = model.embedding_to_logit(t_sup_emb)\n\n # Add losses.\n model.add_semisup_loss(t_sup_emb, t_unsup_emb, t_sup_labels, visit_weight = FLAGS.visit_weight)\n model.add_logit_loss(t_sup_logit, t_sup_labels)\n\n t_learning_rate = tf.train.exponential_decay(\n FLAGS.learning_rate,\n model.step,\n FLAGS.decay_steps,\n FLAGS.decay_factor,\n staircase=True)\n train_op, train_loss = model.create_train_op(t_learning_rate)\n summary_op = tf.summary.merge_all()\n\n summary_writer = tf.summary.FileWriter(FLAGS.logdir, graph)\n\n saver = tf.train.Saver()\n\n with tf.Session(graph=graph) as sess:\n tf.global_variables_initializer().run()\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n for step in xrange(FLAGS.max_steps):\n _, loss ,summaries = sess.run([train_op, train_loss, summary_op])\n\n \n if step % 10 == 0:\n test_loss = model.classify_loss(val_images, val_labels)\n# print(test_loss)\n test_loss_summary = 
tf.Summary(\n value=[tf.Summary.Value(\n tag='Validation Loss', simple_value=test_loss)])\n \n summary_writer.add_summary(summaries, step)\n summary_writer.add_summary(test_loss_summary, step)\n \n val_pred_2 = model.classify(val_images).argmax(-1)\n test_acc = 100 - (np.array(val_labels) != np.array(val_pred_2)).mean() * 100\n \n test_acc_summary = tf.Summary(\n value=[tf.Summary.Value(\n tag='Validation acc', simple_value=test_acc)])\n summary_writer.add_summary(test_acc_summary, step)\n\n \n if (step + 1) % FLAGS.eval_interval == 0 or step == 99:\n print('Step: %d' % step)\n \n # validation\n val_pred = model.classify(val_images).argmax(-1)\n conf_mtx = confusion_matrix(val_labels, val_pred, NUM_LABELS)\n val_err = (val_labels != val_pred).mean() * 100\n print(conf_mtx)\n print('Validation error: %.2f %%' % val_err)\n print()\n\n\n saver.save(sess, FLAGS.checkpoint_dir, model.step)\n\n coord.request_stop()\n coord.join(threads)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"USTC-HIlab/Semi-HIC","sub_path":"IDC-code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"7379809936","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nclass ConcatModel(nn.Module):\n def __init__(self, model, out_channels, num_classes):\n super().__init__()\n self.cnn = model\n\n self.fc1 = nn.Linear(out_channels+2, int((out_channels+2)/2))\n self.fc2 = nn.Linear(int((out_channels+2)/2), num_classes)\n\n def forward(self, image, meta):\n x1 = self.cnn(image)\n x2 = meta\n\n x = torch.cat((x1,x2), dim=1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n\n return x","repo_name":"cch76/skin_classification","sub_path":"models/fc.py","file_name":"fc.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"7449857538","text":"from telegram import Update as _Update\nfrom telegram.ext import CallbackContext as _CallbackContext\n\n\nname = \"registrar\"\ndescription = \"Registra el chat\"\ndef cmd(update: _Update, context: _CallbackContext):\n\n # Crear lista de chats si es que no existe\n if \"chats\" not in context.bot_data:\n context.bot_data[\"chats\"] = set()\n\n chat_id = update.effective_chat.id\n\n context.bot_data[\"chats\"].add(chat_id)\n\n update.effective_message.reply_text(\n text=f\"Agregado chat con id {chat_id}\"\n )\n\n update.effective_message.reply_text(\n text=f\"Lista de ids: {str(context.bot_data['chats'])}\"\n )\n","repo_name":"CleoStoat/plantilla_bot_tg","sub_path":"comandos/registrar_chat.py","file_name":"registrar_chat.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"9999584960","text":"from datetime import datetime\nfrom picamera import PiCamera\nfrom ina219 import INA219\nimport FaBo9Axis_MPU9250\nimport RPi.GPIO as GPIO\nfrom time import sleep\nfrom math import atan2\nGPIO.setmode(GPIO.BCM)\nfrom PIL import Image\nimport numpy as np\nimport serial\nimport base64\nimport pigpio\nimport smbus\nimport time\nimport math\nimport sys\nimport PIL\nimport os\n\nservo_type = 270\nsl = 13\nsr = 12\npi = pigpio.pi()\npi.set_mode(sl, pigpio.OUTPUT)\npi.set_mode(sr, pigpio.OUTPUT)\n\ndef sa(a,b):\n a = servo_type-a\n pi.set_servo_pulsewidth(sl,500+2000*int(a)/servo_type)\n pi.set_servo_pulsewidth(sr,500+2000*int(b)/servo_type)\n\ncamera = PiCamera()\ncamera.resolution = (1280, 720)\ncamera.framerate = 30\nsensor = 6\nbuzz = 26\nled = 4\nu = 0.1\nlaunch = 11\nGPIO_TRIGGER = 18\nGPIO_ECHO = 24\nGPIO.setwarnings(False)\nGPIO.setup(GPIO_TRIGGER, GPIO.OUT)\nGPIO.setup(GPIO_ECHO, GPIO.IN)\nGPIO.setup(buzz, GPIO.OUT)\nGPIO.setup(led, GPIO.OUT)\nGPIO.setup(sensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(launch, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\npd = 0\nedis = 0\npressure = 0\ntemp = 0\ndef beep(t):\n GPIO.output(26,1)\n GPIO.output(4,1)\n sleep(t)\n GPIO.output(26,0)\n GPIO.output(4,0)\n sleep(t)\ndef bmpp():\n global temp,pressure\n bus = smbus.SMBus(1)\n try:\n b1 = bus.read_i2c_block_data(0x76, 0x88, 24)\n dig_T1 = b1[1] * 256 + b1[0]\n dig_T2 = b1[3] * 256 + b1[2]\n if dig_T2 > 32767 :\n dig_T2 -= 65536\n dig_T3 = b1[5] * 256 + b1[4]\n if dig_T3 > 32767 :\n dig_T3 -= 65536\n dig_P1 = b1[7] * 256 + b1[6]\n dig_P2 = b1[9] * 256 + b1[8]\n if dig_P2 > 32767 :\n dig_P2 -= 65536\n dig_P3 = b1[11] * 256 + b1[10]\n if dig_P3 > 32767 :\n dig_P3 -= 65536\n dig_P4 = b1[13] * 256 + b1[12]\n if dig_P4 > 32767 :\n dig_P4 -= 65536\n dig_P5 = b1[15] * 256 + b1[14]\n if dig_P5 > 32767 :\n dig_P5 -= 65536\n dig_P6 = b1[17] * 256 + b1[16]\n if dig_P6 > 32767 :\n dig_P6 -= 65536\n dig_P7 = b1[19] * 256 + b1[18]\n if dig_P7 > 32767 :\n dig_P7 -= 65536\n dig_P8 = b1[21] * 256 + b1[20]\n if dig_P8 > 32767 :\n dig_P8 -= 65536\n dig_P9 = b1[23] * 256 + b1[22]\n if dig_P9 > 32767 :\n dig_P9 -= 65536\n dig_H1 = bus.read_byte_data(0x76, 0xA1)\n b1 = bus.read_i2c_block_data(0x76, 0xE1, 7)\n dig_H2 = b1[1] * 256 + b1[0]\n if dig_H2 > 32767 :\n dig_H2 -= 65536\n dig_H3 = (b1[2] & 0xFF)\n dig_H4 = (b1[3] * 16) + (b1[4] & 0xF)\n if dig_H4 > 32767 :\n dig_H4 -= 65536\n dig_H5 = (b1[4] / 16) + (b1[5] * 16)\n if dig_H5 > 32767 :\n dig_H5 -= 65536\n dig_H6 = b1[6]\n if dig_H6 > 127 :\n dig_H6 -= 256\n bus.write_byte_data(0x76, 0xF2, 0x01)\n bus.write_byte_data(0x76, 0xF4, 0x27)\n bus.write_byte_data(0x76, 0xF5, 0xA0)\n data = bus.read_i2c_block_data(0x76, 0xF7, 8)\n adc_p = ((data[0] * 65536) + (data[1] * 256) + (data[2] & 0xF0)) / 16\n adc_t = ((data[3] * 65536) + (data[4] * 256) + (data[5] & 0xF0)) / 16\n adc_h = data[6] * 256 + data[7]\n var1 = ((adc_t) / 16384.0 - (dig_T1) / 1024.0) * (dig_T2)\n var2 = (((adc_t) / 131072.0 - (dig_T1) / 8192.0) * ((adc_t)/131072.0 - (dig_T1)/8192.0)) * (dig_T3)\n t_fine = (var1 + var2)\n cTemp = (var1 + var2) / 5120.0\n fTemp = cTemp * 1.8 + 32\n var1 = (t_fine / 2.0) - 64000.0\n var2 = var1 * var1 * (dig_P6) / 32768.0\n var2 = var2 + var1 * (dig_P5) * 2.0\n var2 = (var2 / 4.0) + ((dig_P4) * 65536.0)\n var1 = ((dig_P3) * var1 * var1 / 524288.0 + ( dig_P2) * var1) / 524288.0\n var1 = (1.0 + var1 / 32768.0) * (dig_P1)\n p = 1048576.0 - adc_p\n p = (p - (var2 / 4096.0)) * 6250.0 / var1\n var1 = (dig_P9) * p * p / 2147483648.0\n var2 = p * 
(dig_P8) / 32768.0\n pressure = (p + (var1 + var2 + (dig_P7)) / 16.0) / 100\n var_H = ((t_fine) - 76800.0)\n var_H = (adc_h - (dig_H4 * 64.0 + dig_H5 / 16384.0 * var_H)) * (dig_H2 / 65536.0 * (1.0 + dig_H6 / 67108864.0 * var_H * (1.0 + dig_H3 / 67108864.0 * var_H)))\n humidity = var_H * (1.0 - dig_H1 * var_H / 524288.0)\n if humidity > 100.0 :\n humidity = 100.0\n elif humidity < 0.0 :\n humidity = 0.0\n\n temp = \"%.2f\" %cTemp\n pressure = \"%.2f\" %pressure\n except:\n temp = \"\"\n pressure = \"\"\ndef GPS_Info():\n global NMEA_buff\n global lat_in_degrees\n global long_in_degrees\n global time\n nmea_time = []\n nmea_latitude = []\n nmea_longitude = []\n nmea_time = NMEA_buff[0] #extract time from GPGGA string\n nmea_latitude = NMEA_buff[1] #extract latitude from GPGGA string\n nmea_longitude = NMEA_buff[3]\n t =nmea_time #extract longitude from GPGGA string\n \n gpstime = str((int(t[0]+t[1])+7)%24),\":\",t[2],t[3],\":\",t[4],t[5]\n \n lat = float(nmea_latitude) #convert string into float for calculation\n longi = float(nmea_longitude) #convertr string into float for calculation\n \n lat_in_degrees = convert_to_degrees(lat) #get latitude in degree decimal format\n long_in_degrees = convert_to_degrees(longi) #get longitude in degree decimal format\ndef convert_to_degrees(raw_value):\n decimal_value = raw_value/100.00\n degrees = int(decimal_value)\n mm_mmmm = (decimal_value - int(decimal_value))/0.6\n position = degrees + mm_mmmm\n position = \"%.6f\" %(position)\n return position\ndef distance():\n global pd\n GPIO.output(GPIO_TRIGGER, True)\n sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n StartTime = time.time()\n StopTime = time.time()\n tmo = StartTime\n edis = 1\n while GPIO.input(GPIO_ECHO) == 0 and edis:\n StartTime = time.time()\n sleep(0.00001)\n if time.time()-tmo >= 0.06:\n edis = 0\n if edis:\n while GPIO.input(GPIO_ECHO) == 1:\n StopTime = time.time()\n TimeElapsed = StopTime - StartTime\n distance = TimeElapsed * 17150\n distance = \"%.2f\" % (distance/100)\n pd = distance\n return distance\n else:\n return pd\n\nina = INA219(0.1)\nina.configure()\n\ngpgga_info = \"$GNGGA,\"\nser = serial.Serial(\n port='/dev/ttyS0', #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=0.02\n)\nGPGGA_buffer = 0\nNMEA_buff = 0\nlat_in_degrees = \"\"\nlong_in_degrees = \"\"\ngpstime = \"\"\n\nPI = 3.14159265\nmpu9250 = FaBo9Axis_MPU9250.MPU9250()\nti=0\ncounter=0\nGPIO.output(buzz,1)\nGPIO.output(led,1)\nsleep(0.1)\nGPIO.output(buzz,0)\nsleep(0.9)\nGPIO.output(led,0)\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H-%M-%S\")\nfilen = str(current_time)\nfinum = 0\nvdnum = 0\nmilli_sec = int(round(time.time() * 1000))\nsmilli = milli_sec\nlmillis = 0\nnakono = 0\n\nf = open(\"/home/pi/cansat/gycal.txt\", \"r\")\ncalmy = float(f.readline())\ncalmz = float(f.readline())\nnorth = float(f.readline())\ncalax = float(f.readline())\ncalay = float(f.readline())\ncalaz = float(f.readline())\nf.close()\n\ndef pmm():\n milli_sec = int(round(time.time() * 1000))\n print(\"start\",milli_sec%100000)\n\nwhile GPIO.input(launch) == 0:\n ti+=1\n ti = round(ti,2)\n mm = str(ti)\n#GPS\n try:\n received_data = (str)(ser.readline())\n GPGGA_data_available = received_data.find(gpgga_info)\n if (GPGGA_data_available>0):\n print(\"GPS!!\")\n GPGGA_buffer = received_data.split(\"$GNGGA,\",1)[1] #store data coming after \"$GPGGA,\" string \n NMEA_buff = (GPGGA_buffer.split(',')) #store comma 
separated data in buffer\n GPS_Info() #get time, latitude, longitude\n mm+= ','+lat_in_degrees+','+long_in_degrees\n else:\n mm+= ','+lat_in_degrees+','+long_in_degrees\n except:\n mm+= ','+lat_in_degrees+','+long_in_degrees\n#MPU BMP\n try:\n ac = mpu9250.readAccel()\n ma = mpu9250.readMagnet()\n mm+= \",\"+\"%.3f\" % (ac['x']+calax)+\",\"+\"%.3f\" % (ac['y']+calay)+\",\"+\"%.3f\" % (ac['z']+calaz)\n angle = atan2(ma['z']+calmz,ma['y']+calmy) * 180 / PI\n angle += north\n if angle < -180: angle+=360\n if angle > 180: angle-=360\n angle = \"%d\" %angle\n mm+= \",\"+angle\n except:\n mm+= \",,,,\"\n#BMP\n bmpp()\n try:\n alt = 44331.5 - 4946.62 * (float(pressure)*100) ** (0.190263)\n alt = \"%.2f\" %alt\n except:\n alt = \"\"\n try:\n temp = int(str(\"%d\" %float(temp)))\n except:\n temp = ''\n mm+= \",\"+str(temp)+\",\"+str(alt)\n#Ultrasonic\n Dis = distance()\n if Dis > 700: Dis = 700\n Dis = str(Dis)\n mm+= \",\"+Dis\n\n#Sensor\n s1 = GPIO.input(sensor)\n mm+= \",\"+str(s1)\n\n#Battery\n V = ina.voltage()\n I = ina.current()\n percent = \"%d\" %((V-6)/(2.2)*100)\n if int(percent)>100: percent = \"100\"\n if int(percent)<0: percent = \"0\"\n mm+=\",\"+percent\n\n camera.capture('tem.jpg', use_video_port=True)\n picture = Image.open('tem.jpg')\n picture.thumbnail((128,128), Image.ANTIALIAS)\n picture.save(\"s_tem.jpg\",optimize=True,quality=10)\n with open(\"s_tem.jpg\", \"rb\") as img_file:\n simg = \"img,\"+str(base64.b64encode(img_file.read()).decode('utf-8'))+\",,\"\n try:\n ser.write(bytes(mm,'utf-8'))\n ser.write(b\"\\n\")\n ser.write(bytes(simg,'utf-8'))\n ser.write(b\"\\n\")\n except:\n print(\"send error\")\n print(mm)\n\n milli_sec = int(round(time.time() * 1000))\n sleep((1000 - milli_sec % 1000)/1000)\n\n\n\n\n\n\n\n\n\n\n\nbmpp()\nsleep(1)\nbmpp()\nspacey = 0\npercentMin = 100\ntry:\n spacey = (44331.5 - 4946.62 * (float(pressure)*100) ** (0.190263))\nexcept:\n spacey = 0\n\nlaunch = 0\n#camera.start_recording('camera/'+filen+' ('+str(vdnum)+').h264')\n\nwhile True:\n ti+=1\n ti = round(ti,2)\n mm = str(ti)\n mmf = str(ti)\n#GPS\n try:\n received_data = (str)(ser.readline())\n GPGGA_data_available = received_data.find(gpgga_info)\n if (GPGGA_data_available>0):\n print(\"GPS!!\")\n GPGGA_buffer = received_data.split(\"$GNGGA,\",1)[1] #store data coming after \"$GPGGA,\" string \n NMEA_buff = (GPGGA_buffer.split(',')) #store comma separated data in buffer\n GPS_Info() #get time, latitude, longitude\n mm+= ','+lat_in_degrees+','+long_in_degrees\n mmf+= ','+lat_in_degrees+','+long_in_degrees+','+gpstime\n else:\n #mm+= ',n/a,n/a'\n #mmf+= ',n/a,n/a,n/a'\n mm+= ','+lat_in_degrees+','+long_in_degrees\n mmf+= ','+lat_in_degrees+','+long_in_degrees+','+gpstime\n except:\n #mm+= ',n/a,n/a'\n #mmf+= ',n/a,n/a,n/a'\n mm+= ','+lat_in_degrees+','+long_in_degrees\n mmf+= ','+lat_in_degrees+','+long_in_degrees+','+gpstime\n#MPU\n try:\n ac = mpu9250.readAccel()\n gy = mpu9250.readGyro()\n ma = mpu9250.readMagnet()\n mm+= \",\"+\"%.3f\" % (ac['x']+calax)+\",\"+\"%.3f\" % (ac['y']+calay)+\",\"+\"%.3f\" % (ac['z']+calaz)\n mmf+= \",\"+\"%.3f\" % (ac['x']+calax)+\",\"+\"%.3f\" % (ac['y']+calay)+\",\"+\"%.3f\" % (ac['z']+calaz)\n mmf+= \",\"+str(gy['x'])+\",\"+str(gy['y'])+\",\"+str(gy['z'])\n mmf+= \",\"+str(ma['x'])+\",\"+str(ma['y'])+\",\"+str(ma['z'])\n angle = atan2(ma['z']+calmz,ma['y']+calmy) * 180 / PI\n angle += north\n if angle < -180: angle+=360\n if angle > 180: angle-=360\n angle = \"%d\" %angle\n mm+= \",\"+angle\n mmf+= \",\"+angle\n except:\n mm+= \",,,,\"\n mmf+= 
\",,,,,,,,,,\"\n#BMP\n bmpp()\n try:\n alt = (44331.5 - 4946.62 * (float(pressure)*100) ** (0.190263))-spacey\n alt = \"%.2f\" %alt\n except:\n alt = \"\"\n try:\n temp = int(str(\"%d\" %float(temp)))\n except:\n temp = ''\n mm+= \",\"+str(temp)+\",\"+str(alt)\n mmf+= \",\"+str(temp)+\",\"+str(pressure)+\",\"+str(alt)\n#Ultrasonic\n Dis = distance()\n if float(Dis) > 7: Dis = \"7\"\n Dis = str(Dis)\n mm+= \",\"+Dis\n mmf+= \",\"+Dis\n\n#Sensor\n s1 = GPIO.input(sensor)\n mm+= \",\"+str(s1)\n mmf+= \",\"+str(s1)\n\n#Battery\n V = ina.voltage()\n I = ina.current()\n percent = (V-6)/(2.2)*100\n if percent > percentMin: percent = percentMin\n else: percentMin = percent\n percent = \"%d\" %percent\n if int(percent)>100: percent = \"100\"\n if int(percent)<0: percent = \"0\"\n mm+=\",\"+percent\n mmf+= \",\"+percent+\",\"+\"%.2f\" %V+\",\"+\"%.1f\"%I\n\n#Servo\n camera.capture('tem.jpg', use_video_port=True)\n img = Image.open('tem.jpg')\n red = 0\n green = 0\n blue = 0\n for i in range(520,761,10):\n for j in range(0,241,10):\n nino = img.getpixel((i,j))\n red += nino[0]\n green += nino[1]\n blue += nino[2]\n red = int(red/576)\n green = int(green/576)\n blue = int(blue/576)\n\n redl = 0\n greenl = 0\n bluel = 0\n for i in range(0,181,10):\n for j in range(0,181,10):\n nino = img.getpixel((i,j))\n redl += nino[0]\n greenl += nino[1]\n bluel += nino[2]\n redl = int(redl/324)\n greenl = int(greenl/324)\n bluel = int(blue/576)\n\n redr = 0\n greenr = 0\n bluer = 0\n for i in range(1099,1280,10):\n for j in range(0,181,10):\n nino = img.getpixel((i,j))\n redr += nino[0]\n greenr += nino[1]\n bluer += nino[2]\n redr = int(redr/324)\n greenr = int(greenr/324)\n bluer = int(blue/576)\n\n print(redl,greenl,red,green,redr,greenr)\n \n if green > red-30 and green > blue-30 and green > 50:\n lg = greenl > redl and greenl > bluel\n rg = greenr > redr and greenr > bluer\n if lg > rg:\n sa(0,270)\n mm+=\",0,270\"\n mmf+=\",0,270\"\n elif lg < rg:\n sa(270,0)\n mm+=\",270,0\"\n mmf+=\",270,0\"\n else:\n sa(135,135)\n mm+=\",135,135\"\n mmf+=\",135,135\"\n else:\n sa(135,135)\n mm+=\",135,135\"\n mmf+=\",135,135\"\n\n#Launch\n vec = math.sqrt(pow(ac['x']+calax,2)+pow(ac['y']+calay,2)+pow(ac['z']+calaz,2))\n if vec >= 2: \n launch = 1\n lmillis = int(round(time.time() * 1000))\n if int(round(time.time() * 1000)) - lmillis >= 30000 and launch == 1:\n launch = 2\n nakono = int(round(time.time() * 1000))\n if launch == 2:\n beep(u)\n beep(u)\n beep(u)\n sleep(2*u)\n beep(3*u)\n beep(3*u)\n beep(3*u)\n sleep(2*u)\n beep(u)\n beep(u)\n beep(u)\n if int(round(time.time() * 1000)) - nakono >= 20000 and launch == 2: launch = 0\n mm += \",\"+str(launch)\n mmf += \",\"+str(launch)\n\n\n fie = open(str(\"/home/pi/cansat/log/\"+filen+\" (\"+str(finum)+\").csv\"), \"a\")\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n mmf+= \",\"+current_time\n fie.write(mmf+'\\n')\n fie.close()\n #if os.stat(\"/home/pi/cansat/log/\"+filen+\" (\"+str(finum)+\").csv\").st_size >= 4096: finum += 1\n\n picture = Image.open('tem.jpg')\n picture.thumbnail((96,96), Image.ANTIALIAS)\n picture.save(\"s_tem.jpg\",optimize=True,quality=10)\n with open(\"s_tem.jpg\", \"rb\") as img_file:\n simg = \"img,\"+str(base64.b64encode(img_file.read()).decode('utf-8'))+\",,\"\n try:\n ser.write(bytes(mm,'utf-8'))\n ser.write(b\"\\n\")\n ser.write(bytes(simg,'utf-8'))\n ser.write(b\"\\n\")\n except:\n print(\"send error\")\n print(mm)\n\n \n milli_sec = int(round(time.time() * 1000))\n sleep((1000 - milli_sec % 1000)/1000)\n\n # if(milli_sec 
- smilli >= 300000):\n # camera.stop_recording()\n # smilli = milli_sec\n # vdnum += 1\n # camera.start_recording('camera/'+filen+' ('+str(vdnum)+').h264')\n\n","repo_name":"SecretKr/NAV-Cansat-2021","sub_path":"Cansat/cansat.py","file_name":"cansat.py","file_ext":"py","file_size_in_byte":16010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38698420167","text":"import numpy as np\r\nfrom scipy.ndimage import affine_transform\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras import backend as K\r\nfrom keras.utils import Sequence\r\nfrom keras.models import Model, load_model\r\nfrom pandas import read_csv\r\nfrom PIL.ImageDraw import Draw\r\nfrom PIL import Image as pil_image\r\nfrom os.path import isfile\r\nimport pickle\r\nfrom tqdm import tqdm\r\n\r\nimg_shape = (128, 128, 1)\r\nanisotropy = 2.15\r\n\r\n\r\ndef expand_path(p):\r\n if isfile('../data-train/' + p):\r\n return '../data-train/' + p\r\n if isfile('../data-test/' + p):\r\n return '../data-test/' + p\r\n return p\r\n\r\n\r\n# Transform coordinates according to the provided affine transformation\r\ndef coord_transform(list, trans):\r\n result = []\r\n for x, y in list:\r\n y, x, _ = trans.dot([y, x, 1]).astype(np.int)\r\n result.append((x, y))\r\n return result\r\n\r\n\r\ndef read_raw_image(p):\r\n return pil_image.open(expand_path(p))\r\n\r\n\r\ndef read_array(p):\r\n img = read_raw_image(p).convert('L')\r\n return img_to_array(img)\r\n\r\n\r\n# Apply an affine transformation to an image represented as a numpy array.\r\ndef transform_img(x, affine):\r\n matrix = affine[:2, :2]\r\n offset = affine[:2, 2]\r\n x = np.moveaxis(x, -1, 0)\r\n channels = [affine_transform(channel, matrix, offset, output_shape=img_shape[:-1], order=1,\r\n mode='constant', cval=np.average(channel)) for channel in x]\r\n return np.moveaxis(np.stack(channels, axis=0), 0, -1)\r\n\r\n\r\n# Compute the coordinate transformation required to center the pictures, padding as required.\r\ndef center_transform(affine, input_shape):\r\n hi, wi = float(input_shape[0]), float(input_shape[1])\r\n ho, wo = float(img_shape[0]), float(img_shape[1])\r\n top, left, bottom, right = 0, 0, hi, wi\r\n if wi / hi / anisotropy < wo / ho: # input image too narrow, extend width\r\n w = hi * wo / ho * anisotropy\r\n left = (wi - w) / 2\r\n right = left + w\r\n else: # input image too wide, extend height\r\n h = wi * ho / wo / anisotropy\r\n top = (hi - h) / 2\r\n bottom = top + h\r\n center_matrix = np.array([[1, 0, -ho / 2], [0, 1, -wo / 2], [0, 0, 1]])\r\n scale_matrix = np.array([[(bottom - top) / ho, 0, 0], [0, (right - left) / wo, 0], [0, 0, 1]])\r\n decenter_matrix = np.array([[1, 0, hi / 2], [0, 1, wi / 2], [0, 0, 1]])\r\n return np.dot(np.dot(decenter_matrix, scale_matrix), np.dot(affine, center_matrix))\r\n\r\n\r\ndef read_for_validation(p):\r\n x = read_array(p)\r\n t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\r\n t = center_transform(t, x.shape)\r\n x = transform_img(x, t)\r\n x -= np.mean(x, keepdims=True)\r\n x /= np.std(x, keepdims=True) + K.epsilon()\r\n return x, t\r\n\r\n\r\ndef generate_bbox(to_do, model):\r\n print(len(to_do))\r\n ret = {}\r\n for p in tqdm(to_do):\r\n img, trans = read_for_validation(p)\r\n a = np.expand_dims(img, axis=0)\r\n x0, y0, x1, y1 = model.predict(a).squeeze()\r\n (u0, v0), (u1, v1) = coord_transform([(x0, y0), (x1, y1)], trans)\r\n ret[p] = (u0, v0, u1, v1)\r\n return ret\r\n\r\n\r\ndef preview(to_do, dic):\r\n for p in to_do:\r\n img = read_raw_image(p).convert('RGB')\r\n draw = Draw(img)\r\n x0, y0, x1, y1 = dic[p]\r\n draw.line([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)], fill='yellow', width=6)\r\n img.save(p)\r\n\r\n\r\nif __name__ == '__main__':\r\n model = load_model('cropping.model')\r\n model.summary()\r\n to_do = [p for _, p, _ in read_csv('../data-raw/train.csv').to_records()]\r\n to_do += [p for _, p, _ in 
read_csv('../data-raw/sample_submission.csv').to_records()]\r\n dic = generate_bbox(to_do, model)\r\n with open('bbox.pickle', 'wb') as fout:\r\n pickle.dump(dic, fout)\r\n # preview(to_do[:25], dic)\r\n # print(dic)\r\n","repo_name":"maye9999/Humpback-Whale-Identification","sub_path":"maye/bbox.py","file_name":"bbox.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"21589839935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nfrom datetime import datetime\n\nAUTHOR = u\"Project Fondue Team\"\nSITENAME = u\"L'Alpiniste\"\nSITEURL = 'http://blog.projectfondue.com:9901'\nSITESUBTITLE = u\"The blog of the Project Fondue Team\"\n\nDISQUS_SITENAME = \"projectfondue\"\nTIMEZONE = 'Europe/London'\n\nDEFAULT_LANG = 'en'\n\n# Blogroll\nLINKS = (('Stuart Colville', 'http://muffinresearch.co.uk/'),\n ('Cyril Doussin', 'cyril.doussin.name'),\n )\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = 10\nTAG_CLOUD_STEPS = 10\nTAG_CLOUD_MAX_ITEMS = 20\n\nTHEME = 'theme'\nTHEME_STATIC_PATHS = (['static', 'theme/static'])\n\nTWITTER_USERNAME = \"projectfondue\"\nLATEST_POST_LIMIT = 5\n\nYEAR = datetime.now().year\n\nDEFAULT_PAGINATION = 5\nRELATIVE_URLS = False\n\nARTICLE_URL = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}'\nARTICLE_SAVE_AS = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'\nARTICLE_LANG_URL = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}-{lang}'\nARTICLE_LANG_SAVE_AS = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}-{lang}.html'\n\nPAGE_URL = 'pages/{slug}'\nPAGE_SAVE_AS = 'pages/{slug}.html'\nPAGE_LANG_URL = 'pages/{slug}-{lang}'\nPAGE_LANG_SAVE_AS = 'pages/{slug}-{lang}.html'\n\nPAGINATION_URL = '{name}-{page_num}'\nPAGINATION_SAVE_AS = '{name}-{page_num}.html'\n\nAUTHOR_URL = 'author/{name}'\nAUTHOR_SAVE_AS = 'author/{name}.html'\n\nCATEGORY_URL = 'category/{name}'\nCATEGORY_SAVE_AS = False\nTAG_URL = 'tag/{name}'\nTAG_SAVE_AS = 'tag/{name}.html'\n\n# DIRECT TEMPLATES\nPAGINATED_DIRECT_TEMPLATES = ('index', 'archives', 'authors', 'author')\nDIRECT_TEMPLATES = ('index', 'tags', 'archives')\n\nARCHIVES_SAVE_AS = 'archives/index.html'\n","repo_name":"project-fondue/blog","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74769098814","text":"import unittest\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Iterable, Iterator, Sequence\nfrom dataclasses import dataclass, field\nfrom functools import partial\nfrom itertools import chain\nfrom typing import Any, Deque, Generic, Optional, TypeVar, Union\n\nimport casadi as cs\nimport numpy as np\nfrom csnlp import Nlp, Solution\nfrom csnlp.util.math import quad_form\nfrom csnlp.wrappers import Mpc\nfrom gymnasium.wrappers import TimeLimit\nfrom scipy.linalg import cho_solve\n\nfrom mpcrl import (\n ExperienceReplay,\n LearnableParameter,\n LearnableParametersDict,\n LstdQLearningAgent,\n MpcSolverError,\n UpdateError,\n)\nfrom mpcrl import exploration as E\nfrom mpcrl import schedulers as S\nfrom mpcrl.util.math import cholesky_added_multiple_identities\nfrom mpcrl.wrappers.agents import RecordUpdates\n\n# ==================================================================================== #\n# ---------------------------------- START OLD CODE ---------------------------------- #\n# ==================================================================================== #\n\n\n@dataclass\nclass QuadRotorEnvConfig:\n T: float = 0.1\n g: float = 9.81\n thrust_coeff: float = 1.4\n pitch_d: float = 10\n pitch_dd: float = 8\n pitch_gain: float = 10\n roll_d: float = 10\n roll_dd: float = 8\n roll_gain: float = 10\n winds: dict[float, float] = field(default_factory=lambda: {1: 1.0, 2: 0.7, 3: 0.85})\n x0: np.ndarray = field(\n default_factory=lambda: np.array([0, 0, 3.5, 0, 0, 0, 0, 0, 0, 0])\n )\n xf: np.ndarray = field(\n default_factory=lambda: np.array([3, 3, 0.2, 0, 0, 0, 0, 0, 0, 0])\n )\n soft_constraints: bool = True\n x_bounds: np.ndarray = field(\n default_factory=lambda: np.array(\n [\n [-0.5, 3.5],\n [-0.5, 3.5],\n [-0.175, 4],\n [-np.inf, np.inf],\n [-np.inf, np.inf],\n [-np.inf, np.inf],\n [np.deg2rad(-30), np.deg2rad(30)],\n [np.deg2rad(-30), np.deg2rad(30)],\n [-np.inf, np.inf],\n [-np.inf, np.inf],\n ]\n )\n )\n u_bounds: np.ndarray = field(\n default_factory=lambda: np.array(\n [[-np.pi, np.pi], [-np.pi, np.pi], [0, 2 * 9.81]]\n )\n )\n\n\nclass QuadRotorEnv:\n spec: dict = None\n nx: int = 10\n nu: int = 3\n\n def __init__(self, config: Union[dict, QuadRotorEnvConfig] = None) -> None:\n config = init_config(config, QuadRotorEnvConfig)\n self.config = config\n\n # create dynamics matrices\n self._A, self._B, self._C, self._e = self.get_dynamics(\n g=config.g,\n thrust_coeff=config.thrust_coeff,\n pitch_d=config.pitch_d,\n pitch_dd=config.pitch_dd,\n pitch_gain=config.pitch_gain,\n roll_d=config.roll_d,\n roll_dd=config.roll_dd,\n roll_gain=config.roll_gain,\n winds=config.winds,\n )\n # weight for positional, control action usage and violation errors\n self._Wx = np.ones(self.nx)\n self._Wu = np.ones(self.nu)\n self._Wv = np.array([1e2, 1e2, 3e2, 3e2])\n\n @property\n def A(self) -> np.ndarray:\n return self._A.copy()\n\n @property\n def B(self) -> np.ndarray:\n return self._B.copy()\n\n @property\n def C(self) -> np.ndarray:\n return self._C.copy()\n\n @property\n def e(self) -> np.ndarray:\n return self._e.copy()\n\n @property\n def x(self) -> np.ndarray:\n return self._x.copy()\n\n @x.setter\n def x(self, val: np.ndarray) -> None:\n self._x = val.copy()\n\n def position_error(self, x: np.ndarray) -> float:\n return (np.square(x - self.config.xf) * self._Wx).sum(axis=-1)\n\n def control_usage(self, u: np.ndarray) -> float:\n return (np.square(u) * self._Wu).sum(axis=-1)\n\n def constraint_violations(self, x: np.ndarray, u: 
np.ndarray) -> float:\n W = self._Wv\n return (\n W[0] * np.maximum(0, self.config.x_bounds[:, 0] - x).sum(axis=-1)\n + W[1] * np.maximum(0, x - self.config.x_bounds[:, 1]).sum(axis=-1)\n + W[2] * np.maximum(0, self.config.u_bounds[:, 0] - u).sum(axis=-1)\n + W[3] * np.maximum(0, u - self.config.u_bounds[:, 1]).sum(axis=-1)\n )\n\n def phi(self, alt: Union[float, np.ndarray]) -> np.ndarray:\n if isinstance(alt, np.ndarray):\n alt = alt.squeeze()\n assert alt.ndim == 1, \"Altitudes must be a vector\"\n\n return np.vstack([np.exp(-np.square(alt - h)) for h in self.config.winds])\n\n def reset(\n self,\n seed: int = None,\n x0: np.ndarray = None,\n xf: np.ndarray = None,\n options: Optional[dict[str, Any]] = None,\n ) -> tuple[np.ndarray, dict[str, Any]]:\n self.np_random = np.random.default_rng(seed)\n if x0 is None:\n x0 = self.config.x0\n if xf is None:\n xf = self.config.xf\n self.x = x0\n self.config.x0 = x0\n self.config.xf = xf\n self._n_within_termination = 0\n return self.x, {}\n\n def step(self, u: np.ndarray) -> tuple[np.ndarray, float, bool, bool, dict]:\n u = np.asarray(u).squeeze() # in case a row or col was passed\n wind = (\n self._C\n @ self.phi(self.x[2])\n * self.np_random.uniform(\n low=[0, 0, -1, 0, 0, 0, -1, -1, 0, 0],\n high=[1, 1, 0, 0, 0, 0, 1, 1, 0, 0],\n ).reshape(self.nx, 1)\n )\n self.x = (\n self._A @ self.x.reshape((-1, 1))\n + self._B @ u.reshape((-1, 1))\n + self._e\n + wind\n ).flatten()\n error = self.position_error(self.x)\n usage = self.control_usage(u)\n violations = self.constraint_violations(self.x, u)\n cost = float(error + usage + violations)\n return self.x, cost, False, False, {\"error\": error}\n\n def render(self):\n raise NotImplementedError(\"Render method unavailable.\")\n\n def get_dynamics(\n self,\n g: Union[float, cs.SX],\n thrust_coeff: Union[float, cs.SX],\n pitch_d: Union[float, cs.SX],\n pitch_dd: Union[float, cs.SX],\n pitch_gain: Union[float, cs.SX],\n roll_d: Union[float, cs.SX],\n roll_dd: Union[float, cs.SX],\n roll_gain: Union[float, cs.SX],\n winds: dict[float, float] = None,\n ) -> Union[\n tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],\n tuple[cs.SX, cs.SX, cs.SX],\n ]:\n T = self.config.T\n is_casadi = any(\n isinstance(o, (cs.SX, cs.MX, cs.DM))\n for o in [\n g,\n thrust_coeff,\n pitch_d,\n pitch_dd,\n pitch_gain,\n roll_d,\n roll_dd,\n roll_gain,\n ]\n )\n if is_casadi:\n diag = lambda o: cs.diag(cs.vertcat(*o)) # noqa: E731\n block = cs.blockcat\n else:\n diag = np.diag\n block = np.block\n assert winds is not None, \"Winds are required to compute matrix C.\"\n nw = len(winds)\n wind_mag = np.array(list(winds.values()))\n A = T * block(\n [\n [np.zeros((3, 3)), np.eye(3), np.zeros((3, 4))],\n [np.zeros((2, 6)), np.eye(2) * g, np.zeros((2, 2))],\n [np.zeros((1, 10))],\n [np.zeros((2, 6)), -diag((pitch_d, roll_d)), np.eye(2)],\n [np.zeros((2, 6)), -diag((pitch_dd, roll_dd)), np.zeros((2, 2))],\n ]\n ) + np.eye(10)\n B = T * block(\n [\n [np.zeros((5, 3))],\n [0, 0, thrust_coeff],\n [np.zeros((2, 3))],\n [pitch_gain, 0, 0],\n [0, roll_gain, 0],\n ]\n )\n if not is_casadi:\n C = T * block(\n [\n [wind_mag],\n [wind_mag],\n [wind_mag],\n [np.zeros((3, nw))],\n [wind_mag],\n [wind_mag],\n [np.zeros((2, nw))],\n ]\n )\n e = block([[np.zeros((5, 1))], [-T * g], [np.zeros((4, 1))]])\n return (A, B, e) if is_casadi else (A, B, C, e)\n\n\n@dataclass(frozen=True)\nclass QuadRotorSolution:\n f: float\n vars: dict[str, cs.SX]\n vals: dict[str, np.ndarray]\n stats: dict[str, Any]\n get_value: partial\n\n @property\n def 
status(self) -> str:\n return self.stats[\"return_status\"]\n\n @property\n def success(self) -> bool:\n return self.stats[\"success\"]\n\n def value(self, x: cs.SX) -> np.ndarray:\n return self.get_value(x)\n\n\nclass GenericMPC:\n def __init__(self, name: str = None) -> None:\n self.name = f\"MPC{np.random.random()}\" if name is None else name\n self.f: cs.SX = None # objective\n self.vars: dict[str, cs.SX] = {}\n self.pars: dict[str, cs.SX] = {}\n self.cons: dict[str, cs.SX] = {}\n self.p = cs.SX()\n self.x, self.lbx, self.ubx = cs.SX(), np.array([]), np.array([])\n self.lam_lbx, self.lam_ubx = cs.SX(), cs.SX()\n self.g, self.lbg, self.ubg = cs.SX(), np.array([]), np.array([])\n self.lam_g = cs.SX()\n self.h, self.lbh, self.ubh = cs.SX(), np.array([]), np.array([])\n self.lam_h = cs.SX()\n self.solver: cs.Function = None\n self.opts: dict = None\n\n @property\n def ng(self) -> int:\n return self.g.shape[0]\n\n def add_par(self, name: str, *dims: int) -> cs.SX:\n assert name not in self.pars, f\"Parameter {name} already exists.\"\n par = cs.SX.sym(name, *dims)\n self.pars[name] = par\n self.p = cs.vertcat(self.p, cs.vec(par))\n return par\n\n def add_var(\n self,\n name: str,\n *dims: int,\n lb: np.ndarray = -np.inf,\n ub: np.ndarray = np.inf,\n ) -> tuple[cs.SX, cs.SX, cs.SX]:\n assert name not in self.vars, f\"Variable {name} already exists.\"\n lb, ub = np.broadcast_to(lb, dims), np.broadcast_to(ub, dims)\n assert np.all(lb < ub), \"Improper variable bounds.\"\n\n var = cs.SX.sym(name, *dims)\n self.vars[name] = var\n self.x = cs.vertcat(self.x, cs.vec(var))\n self.lbx = np.concatenate((self.lbx, cs.vec(lb).full().flatten()))\n self.ubx = np.concatenate((self.ubx, cs.vec(ub).full().flatten()))\n\n # create also the multiplier associated to the variable\n lam_lb = cs.SX.sym(f\"lam_lb_{name}\", *dims)\n self.lam_lbx = cs.vertcat(self.lam_lbx, cs.vec(lam_lb))\n lam_ub = cs.SX.sym(f\"lam_ub_{name}\", *dims)\n self.lam_ubx = cs.vertcat(self.lam_ubx, cs.vec(lam_ub))\n return var, lam_lb, lam_ub\n\n def add_con(\n self, name: str, expr1: cs.SX, op: str, expr2: cs.SX\n ) -> tuple[cs.SX, cs.SX]:\n assert name not in self.cons, f\"Constraint {name} already exists.\"\n expr = expr1 - expr2\n dims = expr.shape\n if op in {\"=\", \"==\"}:\n is_eq = True\n lb, ub = np.zeros(dims), np.zeros(dims)\n elif op in {\"<\", \"<=\"}:\n is_eq = False\n lb, ub = np.full(dims, -np.inf), np.zeros(dims)\n elif op in {\">\", \">=\"}:\n is_eq = False\n expr = -expr\n lb, ub = np.full(dims, -np.inf), np.zeros(dims)\n else:\n raise ValueError(f\"Unrecognized operator {op}.\")\n expr = cs.simplify(expr)\n lb, ub = cs.vec(lb).full().flatten(), cs.vec(ub).full().flatten()\n self.cons[name] = expr\n group = \"g\" if is_eq else \"h\"\n setattr(self, group, cs.vertcat(getattr(self, group), cs.vec(expr)))\n setattr(self, f\"lb{group}\", np.concatenate((getattr(self, f\"lb{group}\"), lb)))\n setattr(self, f\"ub{group}\", np.concatenate((getattr(self, f\"ub{group}\"), ub)))\n lam = cs.SX.sym(f\"lam_{group}_{name}\", *dims)\n setattr(\n self, f\"lam_{group}\", cs.vertcat(getattr(self, f\"lam_{group}\"), cs.vec(lam))\n )\n return expr, lam\n\n def minimize(self, objective: cs.SX) -> None:\n self.f = objective\n\n def init_solver(self, opts: dict) -> None:\n g = cs.vertcat(self.g, self.h)\n nlp = {\"x\": self.x, \"p\": self.p, \"g\": g, \"f\": self.f}\n self.solver = cs.nlpsol(f\"nlpsol_{self.name}\", \"ipopt\", nlp, opts)\n self.opts = opts\n\n def solve(\n self, pars: dict[str, np.ndarray], vals0: dict[str, np.ndarray] = 
None\n ) -> QuadRotorSolution:\n assert self.solver is not None, \"Solver uninitialized.\"\n assert len(self.pars.keys() - pars.keys()) == 0, (\n \"Trying to solve the MPC with unspecified parameters: \"\n + \", \".join(self.pars.keys() - pars.keys())\n + \".\"\n )\n p = subsevalf(self.p, self.pars, pars)\n kwargs = {\n \"p\": p,\n \"lbx\": self.lbx,\n \"ubx\": self.ubx,\n \"lbg\": np.concatenate((self.lbg, self.lbh)),\n \"ubg\": np.concatenate((self.ubg, self.ubh)),\n }\n if vals0 is not None:\n kwargs[\"x0\"] = np.clip(\n subsevalf(self.x, self.vars, vals0), self.lbx, self.ubx\n )\n sol: dict[str, cs.DM] = self.solver(**kwargs)\n lam_lbx = -np.minimum(sol[\"lam_x\"], 0)\n lam_ubx = np.maximum(sol[\"lam_x\"], 0)\n lam_g = sol[\"lam_g\"][: self.ng, :]\n lam_h = sol[\"lam_g\"][self.ng :, :]\n S = cs.vertcat(\n self.p, self.x, self.lam_g, self.lam_h, self.lam_lbx, self.lam_ubx\n )\n D = cs.vertcat(p, sol[\"x\"], lam_g, lam_h, lam_lbx, lam_ubx)\n get_value = partial(subsevalf, old=S, new=D)\n vals = {name: get_value(var) for name, var in self.vars.items()}\n return QuadRotorSolution(\n f=float(sol[\"f\"]),\n vars=self.vars.copy(),\n vals=vals,\n get_value=get_value,\n stats=self.solver.stats().copy(),\n )\n\n def __str__(self) -> str:\n msg = \"not initialized\" if self.solver is None else \"initialized\"\n C = len(self.cons)\n return (\n f\"{type(self).__name__} {{\\n\"\n f\" name: {self.name}\\n\"\n f\" #variables: {len(self.vars)} (nx={self.nx})\\n\"\n f\" #parameters: {len(self.pars)} (np={self.np})\\n\"\n f\" #constraints: {C} (ng={self.ng}, nh={self.nh})\\n\"\n f\" CasADi solver {msg}.\\n}}\"\n )\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}: {self.name}\"\n\n\ndef subsevalf(\n expr: cs.SX,\n old: Union[cs.SX, dict[str, cs.SX], list[cs.SX], tuple[cs.SX]],\n new: Union[cs.SX, dict[str, cs.SX], list[cs.SX], tuple[cs.SX]],\n eval: bool = True,\n) -> Union[cs.SX, np.ndarray]:\n if isinstance(old, dict):\n for name, o in old.items():\n expr = cs.substitute(expr, o, new[name])\n elif isinstance(old, (tuple, list)):\n for o, n in zip(old, new):\n expr = cs.substitute(expr, o, n)\n else:\n expr = cs.substitute(expr, old, new)\n\n if eval:\n expr = cs.evalf(expr).full().squeeze()\n return expr\n\n\nConfigType = TypeVar(\"ConfigType\")\n\n\ndef init_config(\n config: Optional[Union[ConfigType, dict]], cls: type[ConfigType]\n) -> ConfigType:\n if config is None:\n return cls()\n if isinstance(config, cls):\n return config\n if isinstance(config, dict):\n if not hasattr(cls, \"__dataclass_fields__\"):\n raise ValueError(\"Configiration class must be a dataclass.\")\n keys = cls.__dataclass_fields__.keys()\n return cls(**{k: config[k] for k in keys if k in config})\n raise ValueError(\n \"Invalid configuration type; expected None, dict or \"\n f\"a dataclass, got {cls} instead.\"\n )\n\n\n@dataclass\nclass QuadRotorMPCConfig:\n N: int = 10\n solver_opts: dict = field(\n default_factory=lambda: {\n \"expand\": True,\n \"print_time\": False,\n \"ipopt\": {\n \"max_iter\": 500,\n \"tol\": 1e-6,\n \"barrier_tol_factor\": 1,\n \"sb\": \"yes\",\n # for debugging\n \"print_level\": 0,\n \"print_user_options\": \"no\",\n \"print_options_documentation\": \"no\",\n },\n }\n )\n\n\nclass QuadRotorMPC(GenericMPC):\n def __init__(\n self,\n env: QuadRotorEnv,\n config: Union[dict, QuadRotorMPCConfig] = None,\n mpctype: str = \"V\",\n ) -> None:\n assert mpctype in {\n \"V\",\n \"Q\",\n }, \"MPC must be either V (state value func) or Q (action value func)\"\n super().__init__(name=mpctype)\n 
self.config = init_config(config, QuadRotorMPCConfig)\n N = self.config.N\n\n # ======================= #\n # Variable and Parameters #\n # ======================= #\n lbx, ubx = env.config.x_bounds[:, 0], env.config.x_bounds[:, 1]\n not_red = ~(np.isneginf(lbx) & np.isposinf(ubx))\n not_red_idx = np.where(not_red)[0]\n lbx, ubx = lbx[not_red].reshape(-1, 1), ubx[not_red].reshape(-1, 1)\n nx, nu = env.nx, env.nu\n x, _, _ = self.add_var(\"x\", nx, N)\n u, _, _ = self.add_var(\"u\", nu, N)\n ns = not_red_idx.size + nu\n s, _, _ = self.add_var(\"slack\", ns * N - not_red_idx.size, 1, lb=0)\n sx: cs.SX = s[: not_red_idx.size * (N - 1)].reshape((-1, N - 1))\n su: cs.SX = s[-nu * N :].reshape((-1, N))\n\n # 2) create model parameters\n for name in (\n \"g\",\n \"thrust_coeff\",\n \"pitch_d\",\n \"pitch_dd\",\n \"pitch_gain\",\n \"roll_d\",\n \"roll_dd\",\n \"roll_gain\",\n ):\n self.add_par(name, 1, 1)\n\n # =========== #\n # Constraints #\n # =========== #\n\n # 1) constraint on initial conditions\n x0 = self.add_par(\"x0\", env.nx, 1)\n x_ = cs.horzcat(x0, x)\n\n # 2) constraints on dynamics\n A, B, e = env.get_dynamics(\n g=self.pars[\"g\"],\n thrust_coeff=self.pars[\"thrust_coeff\"],\n pitch_d=self.pars[\"pitch_d\"],\n pitch_dd=self.pars[\"pitch_dd\"],\n pitch_gain=self.pars[\"pitch_gain\"],\n roll_d=self.pars[\"roll_d\"],\n roll_dd=self.pars[\"roll_dd\"],\n roll_gain=self.pars[\"roll_gain\"],\n )\n self.add_con(\"dyn\", x_[:, 1:], \"==\", A @ x_[:, :-1] + B @ u + e)\n\n # 3) constraint on state (soft, backed off, without infinity in g, and\n # removing redundant entries, no constraint on first state)\n # constraint backoff parameter and bounds\n bo = self.add_par(\"backoff\", 1, 1)\n\n # set the state constraints as\n # - soft-backedoff minimum constraint: (1+back)*lb - slack <= x\n # - soft-backedoff maximum constraint: x <= (1-back)*ub + slack\n # NOTE: there is a mistake here in the old code, since we are excluding the\n # first state from constraints which is actually the second.\n self.add_con(\"x_min\", (1 + bo) * lbx - sx, \"<=\", x[not_red_idx, 1:])\n self.add_con(\"x_max\", x[not_red_idx, 1:], \"<=\", (1 - bo) * ubx + sx)\n self.add_con(\"u_min\", env.config.u_bounds[:, 0] - su, \"<=\", u)\n self.add_con(\"u_max\", u, \"<=\", env.config.u_bounds[:, 1] + su)\n\n # ========= #\n # Objective #\n # ========= #\n J = 0 # (no initial state cost not required since it is not economic)\n s = cs.blockcat([[cs.SX.zeros(sx.size1(), 1), sx], [su]])\n xf = self.add_par(\"xf\", nx, 1)\n uf = cs.vertcat(0, 0, self.pars[\"g\"])\n w_x = self.add_par(\"w_x\", nx, 1) # weights for stage/final state\n w_u = self.add_par(\"w_u\", nu, 1) # weights for stage/final control\n w_s = self.add_par(\"w_s\", ns, 1) # weights for stage/final slack\n J += sum(\n (\n quad_form(w_x, x[:, k] - xf)\n + quad_form(w_u, u[:, k] - uf)\n + cs.dot(w_s, s[:, k])\n )\n for k in range(N - 1)\n )\n J += (\n quad_form(w_x, x[:, -1] - xf)\n + quad_form(w_u, u[:, -1] - uf)\n + cs.dot(w_s, s[:, -1])\n )\n self.minimize(J)\n\n # ====== #\n # Others #\n # ====== #\n if mpctype == \"Q\":\n u0 = self.add_par(\"u0\", nu, 1)\n self.add_con(\"init_action\", u[:, 0], \"==\", u0)\n else:\n perturbation = self.add_par(\"perturbation\", nu, 1)\n self.f += cs.dot(perturbation, u[:, 0])\n self.init_solver(self.config.solver_opts)\n\n\nMPCType = TypeVar(\"MPCType\", bound=GenericMPC)\n\n\nclass DifferentiableMPC(Generic[MPCType]):\n def __init__(self, mpc: MPCType) -> None:\n self._mpc = mpc\n\n @property\n def mpc(self) -> MPCType:\n return 
self._mpc\n\n @property\n def _non_redundant_x_bound_indices(self) -> tuple[np.ndarray, np.ndarray]:\n return (\n np.where(self._mpc.lbx != -np.inf)[0],\n np.where(self._mpc.ubx != np.inf)[0],\n )\n\n @property\n def lagrangian(self) -> cs.SX:\n idx_lbx, idx_ubx = self._non_redundant_x_bound_indices\n h_lbx = self._mpc.lbx[idx_lbx, None] - self._mpc.x[idx_lbx]\n h_ubx = self._mpc.x[idx_ubx] - self._mpc.ubx[idx_ubx, None]\n return (\n self._mpc.f\n + cs.dot(self._mpc.lam_g, self._mpc.g)\n + cs.dot(self._mpc.lam_h, self._mpc.h)\n + cs.dot(self._mpc.lam_lbx[idx_lbx], h_lbx)\n + cs.dot(self._mpc.lam_ubx[idx_ubx], h_ubx)\n )\n\n def __getattr__(self, name) -> Any:\n return getattr(self._mpc, name)\n\n\nT = TypeVar(\"T\")\n\n\nclass ReplayMemory(Deque[T]):\n def __init__(\n self, iterable: Iterable[T] = (), maxlen: int = None, seed: int = None\n ) -> None:\n super().__init__(iterable, maxlen=maxlen)\n self.np_random = np.random.default_rng(seed)\n\n def sample(\n self, n: Union[int, float], include_last_n: Union[int, float]\n ) -> Iterable[T]:\n length = len(self)\n if isinstance(n, float):\n n = int(self.maxlen * n)\n n = np.clip(n, min(1, length), length)\n if isinstance(include_last_n, float):\n include_last_n = int(n * include_last_n)\n include_last_n = np.clip(include_last_n, 0, n)\n last_n = range(length - include_last_n, length)\n sampled = self.np_random.choice(\n range(length - include_last_n), n - include_last_n, replace=False\n )\n yield from (self[i] for i in chain(last_n, sampled))\n\n\n@dataclass\nclass RLParameter:\n name: str\n value: np.ndarray\n bounds: np.ndarray\n symV: cs.SX\n symQ: cs.SX\n\n @property\n def size(self) -> int:\n return self.symV.shape[0] # since rl pars are all column vectors\n\n def __post_init__(self) -> None:\n shape = self.symV.shape\n assert shape == self.symQ.shape, (\n f\"Parameter {self.name} has different shapes in \"\n f\"Q ({self.symQ.shape}) and V ({self.symV.shape}).\"\n )\n assert self.symV.is_column(), f\"Parameter {self.name} must be a column vector.\"\n self.bounds = np.broadcast_to(self.bounds, (shape[0], 2))\n self.update_value(self.value)\n\n def update_value(self, new_val: np.ndarray) -> None:\n \"\"\"Updates the parameter's current value to the new one.\"\"\"\n new_val = np.broadcast_to(new_val, self.bounds.shape[0])\n assert (\n (self.bounds[:, 0] <= new_val) | np.isclose(new_val, self.bounds[:, 0])\n ).all() and (\n (new_val <= self.bounds[:, 1]) | np.isclose(new_val, self.bounds[:, 1])\n ).all(), \"Parameter value outside bounds.\"\n self.value = np.clip(new_val, self.bounds[:, 0], self.bounds[:, 1])\n\n\nclass RLParameterCollection(Sequence[RLParameter]):\n \"\"\"Collection of learnable RL parameters, which can be accessed by string as a\n dictionary or by index as a list.\"\"\"\n\n def __init__(self, *parameters: RLParameter) -> None:\n \"\"\"Instantiate the collection from another iterable, if provided.\"\"\"\n self._list: list[RLParameter] = []\n self._dict: dict[str, RLParameter] = {}\n for parameter in parameters:\n self._list.append(parameter)\n self._dict[parameter.name] = parameter\n\n @property\n def n_theta(self) -> int:\n return sum(self.sizes())\n\n @property\n def as_dict(self) -> dict[str, RLParameter]:\n return self._dict\n\n def values(self, as_dict: bool = False) -> Union[np.ndarray, dict[str, np.ndarray]]:\n if as_dict:\n return {name: p.value for name, p in self.items()}\n return np.concatenate([p.value for p in self._list])\n\n def bounds(self, as_dict: bool = False) -> Union[np.ndarray, dict[str, np.ndarray]]:\n 
if as_dict:\n return {name: p.bounds for name, p in self.items()}\n return np.row_stack([p.bounds for p in self._list])\n\n def symQ(self, as_dict: bool = False) -> Union[cs.SX, dict[str, cs.SX]]:\n if as_dict:\n return {name: p.symQ for name, p in self.items()}\n return cs.vertcat(*(p.symQ for p in self._list))\n\n def sizes(self, as_dict: bool = False) -> Union[list[int], dict[str, int]]:\n if as_dict:\n return {p.name: p.size for p in self._list}\n return [p.size for p in self._list]\n\n def update_values(\n self, new_vals: Union[np.ndarray, list[np.ndarray], dict[str, np.ndarray]]\n ) -> None:\n if isinstance(new_vals, np.ndarray):\n new_vals = np.split(new_vals, np.cumsum(self.sizes())[:-1])\n for p, val in zip(self._list, new_vals):\n p.update_value(val)\n elif isinstance(new_vals, list):\n for p, val in zip(self._list, new_vals):\n p.update_value(val)\n elif isinstance(new_vals, dict):\n for n in self._dict.keys():\n self._dict[n].update_value(new_vals[n])\n\n def items(self) -> Iterable[tuple[str, RLParameter]]:\n return self._dict.items()\n\n def __getitem__(\n self, index: Union[str, Iterable[str], int, slice, Iterable[int]]\n ) -> Union[RLParameter, list[RLParameter]]:\n if isinstance(index, str):\n return self._dict[index]\n if isinstance(index, (int, slice)):\n return self._list[index]\n if isinstance(index, Iterable):\n return [self._list[i] for i in index]\n\n def __iter__(self) -> Iterator[RLParameter]:\n return iter(self._list)\n\n def __next__(self) -> RLParameter:\n return next(self._list)\n\n def __len__(self) -> int:\n return len(self._list)\n\n\nclass QuadRotorBaseAgent(ABC):\n def __init__(\n self,\n env: QuadRotorEnv,\n agentname: str = None,\n agent_config: Union[dict[str, Any], Any] = None,\n fixed_pars: dict[str, np.ndarray] = None,\n mpc_config: Union[dict, QuadRotorMPCConfig] = None,\n seed: int = None,\n ) -> None:\n super().__init__()\n self.name = \"Agent\" if agentname is None else agentname\n self.env = env\n self.config = (\n init_config(agent_config, self.config_cls)\n if hasattr(self, \"config_cls\")\n else None\n )\n self.fixed_pars = {} if fixed_pars is None else fixed_pars\n self.seed = seed\n self.np_random = np.random.default_rng(seed)\n self.perturbation_chance = 0.0\n self.perturbation_strength = 0.0\n self.last_solution: Solution = None\n self.Q = QuadRotorMPC(env, config=mpc_config, mpctype=\"Q\")\n self.V = QuadRotorMPC(env, config=mpc_config, mpctype=\"V\")\n\n @property\n def unwrapped(self) -> \"QuadRotorBaseAgent\":\n return self\n\n def reset(self) -> None:\n self.last_solution = None\n self.Q.failures = 0\n self.V.failures = 0\n\n def solve_mpc(\n self,\n type: str,\n state: np.ndarray = None,\n sol0: dict[str, np.ndarray] = None,\n ) -> Solution:\n mpc: QuadRotorMPC = getattr(self, type)\n if state is None:\n state = self.env.x\n pars = self.fixed_pars.copy()\n pars[\"x0\"] = state\n pars.update(self._merge_mpc_pars_callback())\n if sol0 is None:\n if self.last_solution is None:\n g = float(pars.get(\"g\", 0))\n sol0 = {\n \"x\": np.tile(state, (mpc.vars[\"x\"].shape[1], 1)).T,\n \"u\": np.tile([0, 0, g], (mpc.vars[\"u\"].shape[1], 1)).T,\n \"slack\": 0,\n }\n else:\n sol0 = self.last_solution.vals\n self.last_solution = mpc.solve(pars, sol0)\n return self.last_solution\n\n def predict(\n self,\n state: np.ndarray = None,\n deterministic: bool = False,\n perturb_gradient: bool = True,\n **solve_mpc_kwargs,\n ) -> tuple[np.ndarray, np.ndarray, Solution]:\n perturbation_in_dict = \"perturbation\" in self.fixed_pars\n if 
perturbation_in_dict:\n self.fixed_pars[\"perturbation\"] = 0\n if deterministic or self.np_random.random() > self.perturbation_chance:\n sol = self.solve_mpc(type=\"V\", state=state, **solve_mpc_kwargs)\n u_opt = sol.vals[\"u\"][:, 0]\n else:\n u_bnd = self.env.config.u_bounds\n rng = self.np_random.normal(\n scale=self.perturbation_strength * np.diff(u_bnd).flatten(),\n size=self.V.vars[\"u\"].shape[0],\n )\n if perturb_gradient:\n assert (\n perturbation_in_dict\n ), \"No parameter 'perturbation' found to perturb gradient.\"\n self.fixed_pars[\"perturbation\"] = rng\n sol = self.solve_mpc(type=\"V\", state=state, **solve_mpc_kwargs)\n u_opt = sol.vals[\"u\"][:, 0]\n if not perturb_gradient:\n u_opt = np.clip(u_opt + rng, u_bnd[:, 0], u_bnd[:, 1])\n x_next = sol.vals[\"x\"][:, 0]\n return u_opt, x_next, sol\n\n def _merge_mpc_pars_callback(self) -> dict[str, np.ndarray]:\n return {}\n\n @staticmethod\n def _make_seed_list(seed: Optional[Union[int, list[int]]], n: int) -> list[int]:\n if seed is None:\n return [None] * n\n if isinstance(seed, int):\n return [seed + i for i in range(n)]\n assert len(seed) == n, \"Seed sequence with invalid length.\"\n return seed\n\n\nclass QuadRotorBaseLearningAgent(QuadRotorBaseAgent, ABC):\n def __init__(\n self,\n *args,\n init_learnable_pars: dict[str, tuple[np.ndarray, np.ndarray]],\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n self.V = DifferentiableMPC[QuadRotorMPC](self.V)\n self.Q = DifferentiableMPC[QuadRotorMPC](self.Q)\n self._init_learnable_pars(init_learnable_pars)\n self._init_learning_rate()\n self._epoch_n = None # keeps track of epoch number just for logging\n\n @abstractmethod\n def update(self) -> np.ndarray:\n pass\n\n @abstractmethod\n def learn_one_epoch(\n self,\n n_episodes: int,\n perturbation_decay: float = 0.75,\n seed: Union[int, list[int]] = None,\n return_info: bool = True,\n ) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray, dict[str, np.ndarray]]]:\n pass\n\n def learn(\n self,\n n_epochs: int,\n n_episodes: int,\n perturbation_decay: float = 0.75,\n seed: Union[int, list[int]] = None,\n throw_on_exception: bool = False,\n return_info: bool = True,\n ) -> Union[\n tuple[bool, np.ndarray],\n tuple[bool, np.ndarray, list[np.ndarray], list[dict[str, np.ndarray]]],\n ]:\n ok = True\n results = []\n seeds = iter(map(int, np.random.SeedSequence(seed).generate_state(n_epochs)))\n for e in range(n_epochs):\n self._epoch_n = e # just for logging\n try:\n results.append(\n self.learn_one_epoch(\n n_episodes=n_episodes,\n perturbation_decay=perturbation_decay,\n seed=next(seeds),\n return_info=return_info,\n )\n )\n except (MpcSolverError, UpdateError) as ex:\n if throw_on_exception:\n raise ex\n ok = False\n break\n if not results:\n return (ok, np.nan, [], []) if return_info else (ok, np.nan)\n if not return_info:\n return ok, np.stack(results, axis=0)\n returns, grads, weightss = list(zip(*results))\n return ok, np.stack(returns, axis=0), grads, weightss\n\n def _init_learnable_pars(\n self, init_pars: dict[str, tuple[np.ndarray, np.ndarray]]\n ) -> None:\n \"\"\"Initializes the learnable parameters of the MPC.\"\"\"\n required_pars = sorted(\n set(self.Q.pars)\n .intersection(self.V.pars)\n .difference({\"x0\", \"xf\"})\n .difference(self.fixed_pars)\n )\n self.weights = RLParameterCollection(\n *(\n RLParameter(\n name, *init_pars[name], self.V.pars[name], self.Q.pars[name]\n )\n for name in required_pars\n )\n )\n\n def _init_learning_rate(self) -> None:\n cfg = self.config\n if cfg is None or not 
hasattr(cfg, \"lr\"):\n return\n n_pars, n_theta = len(self.weights), self.weights.n_theta\n lr = np.asarray(cfg.lr).squeeze()\n if lr.ndim == 0:\n lr = np.full((n_theta,), lr)\n elif lr.size == n_pars and lr.size != n_theta:\n lr = np.concatenate([np.full(p.size, r) for p, r in zip(self.weights, lr)])\n assert lr.shape == (\n n_theta,\n ), \"Learning rate must have the same size as the learnable parameter vector.\"\n cfg.lr = lr\n\n def _merge_mpc_pars_callback(self) -> dict[str, np.ndarray]:\n return self.weights.values(as_dict=True)\n\n @staticmethod\n def _get_percentage_bounds(\n theta: np.ndarray,\n theta_bounds: np.ndarray,\n max_perc_update: float,\n ) -> tuple[np.ndarray, np.ndarray]:\n max_delta = np.maximum(np.abs(max_perc_update * theta), 0.1)\n lb = np.maximum(theta_bounds[:, 0], theta - max_delta)\n ub = np.minimum(theta_bounds[:, 1], theta + max_delta)\n return lb, ub\n\n\n@dataclass\nclass QuadRotorLSTDQAgentConfig:\n init_pars: dict[str, tuple[float, tuple[float, float]]] = field(\n default_factory=lambda: {\n \"g\": (9.81, (1, 40)),\n \"thrust_coeff\": (0.3, (0.1, 4)),\n \"backoff\": (0.1, (1e-3, 0.5)),\n }\n )\n fixed_pars: dict[str, float] = field(\n default_factory=lambda: {\n \"pitch_d\": 12,\n \"pitch_dd\": 5,\n \"pitch_gain\": 12,\n \"roll_d\": 13,\n \"roll_dd\": 6,\n \"roll_gain\": 8,\n \"w_x\": 1e1,\n \"w_u\": 1e0,\n \"w_s\": 1e2,\n }\n )\n replay_maxlen: float = 20\n replay_sample_size: float = 10\n replay_include_last: float = 5\n gamma: float = 1.0\n lr: float = 1e-1\n max_perc_update: float = np.inf\n\n\nclass QuadRotorLSTDQAgent(QuadRotorBaseLearningAgent):\n config_cls: type = QuadRotorLSTDQAgentConfig\n\n def __init__(\n self,\n env: QuadRotorEnv,\n agentname: str = None,\n agent_config: Union[dict, QuadRotorLSTDQAgentConfig] = None,\n mpc_config: Union[dict, QuadRotorMPCConfig] = None,\n seed: int = None,\n ) -> None:\n # create base agent\n agent_config = init_config(agent_config, self.config_cls)\n fixed_pars, init_pars = agent_config.fixed_pars, agent_config.init_pars\n fixed_pars.update({\"xf\": env.config.xf, \"perturbation\": np.nan})\n super().__init__(\n env,\n agentname=agentname,\n agent_config=agent_config,\n fixed_pars=fixed_pars,\n init_learnable_pars=init_pars,\n mpc_config=mpc_config,\n seed=seed,\n )\n self.perturbation_chance = 0.0\n self.perturbation_strength = 0.0\n self.replay_memory = ReplayMemory[list[tuple[np.ndarray, ...]]](\n maxlen=self.config.replay_maxlen, seed=seed\n )\n self._episode_buffer: list[tuple[np.ndarray, ...]] = []\n self._init_derivative_symbols()\n self._init_qp_solver()\n\n def save_transition(self, cost: float, solQ: Solution, solV: Solution) -> None:\n target = cost + self.config.gamma * solV.f\n td_err = target - solQ.f\n dQ = solQ.value(self.dQdtheta).reshape(-1, 1)\n d2Q = solQ.value(self.d2Qdtheta)\n g = -td_err * dQ\n H = dQ @ dQ.T - td_err * d2Q\n self._episode_buffer.append((g, H))\n\n def consolidate_episode_experience(self) -> None:\n if len(self._episode_buffer) == 0:\n return\n self.replay_memory.append(self._episode_buffer.copy())\n self._episode_buffer.clear()\n\n def update(self) -> np.ndarray:\n # sample the memory\n cfg: QuadRotorLSTDQAgentConfig = self.config\n sample = self.replay_memory.sample(\n cfg.replay_sample_size, cfg.replay_include_last\n )\n g, H = (np.mean(o, axis=0) for o in zip(*chain.from_iterable(sample)))\n R = cholesky_added_multiple_identities(H)\n p = cho_solve((R, True), g).flatten()\n theta = self.weights.values()\n lb, ub = self._get_percentage_bounds(\n theta, 
self.weights.bounds(), cfg.max_perc_update\n )\n sol = self._solver(p=np.concatenate((p, cfg.lr)), lbx=lb, ubx=ub)\n if not self._solver.stats()[\"success\"]:\n raise UpdateError(f\"RL update failed in epoch {self._epoch_n}.\")\n self.weights.update_values(theta + sol[\"x\"].full().flatten())\n return p\n\n def learn_one_epoch(\n self,\n n_episodes: int,\n perturbation_decay: float = 0.75,\n seed: Union[int, list[int]] = None,\n return_info: bool = False,\n ) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray, dict[str, np.ndarray]]]:\n env, name, epoch_n = self.env, self.name, self._epoch_n\n returns = np.zeros(n_episodes)\n seeds = self._make_seed_list(seed, n_episodes)\n\n for e in range(n_episodes):\n state, _ = env.reset(seed=seeds[e])\n self.reset()\n truncated, terminated, t = False, False, 0\n action = self.predict(state, deterministic=False)[0]\n while not (truncated or terminated):\n # compute Q(s, a)\n self.fixed_pars.update({\"u0\": action})\n solQ = self.solve_mpc(\"Q\", state)\n # step the system\n state, r, truncated, terminated, _ = env.step(action)\n returns[e] += r\n # compute V(s+)\n action, _, solV = self.predict(state, deterministic=False)\n if solQ.success and solV.success:\n self.save_transition(r, solQ, solV)\n else:\n raise MpcSolverError(f\"{name}|{epoch_n}|{e}|{t}: mpc failed.\")\n t += 1\n self.consolidate_episode_experience()\n\n update_grad = self.update()\n self.perturbation_strength *= perturbation_decay\n self.perturbation_chance *= perturbation_decay\n return (\n (returns, update_grad, self.weights.values(as_dict=True))\n if return_info\n else returns\n )\n\n def _init_derivative_symbols(self) -> None:\n theta = self.weights.symQ()\n lagr = self.Q.lagrangian\n d2Qdtheta, dQdtheta = cs.hessian(lagr, theta)\n self.dQdtheta = cs.simplify(dQdtheta)\n self.d2Qdtheta = cs.simplify(d2Qdtheta)\n\n def _init_qp_solver(self) -> None:\n n_theta = self.weights.n_theta\n dtheta: cs.SX = cs.SX.sym(\"dtheta\", n_theta, 1)\n p: cs.SX = cs.SX.sym(\"p\", n_theta, 1)\n lr: cs.SX = cs.SX.sym(\"lr\", n_theta, 1)\n qp = {\n \"x\": dtheta,\n \"f\": 0.5 * dtheta.T @ dtheta + (lr * p).T @ dtheta,\n \"p\": cs.vertcat(p, lr),\n }\n opts = {\"print_iter\": False, \"print_header\": False}\n self._solver = cs.qpsol(f\"qpsol_{self.name}\", \"qrqp\", qp, opts)\n\n\nAgentType = TypeVar(\"AgentType\", bound=QuadRotorBaseLearningAgent)\n\n\nclass RecordLearningData(Generic[AgentType]):\n def __init__(self, agent: AgentType) -> None:\n self.agent = agent\n\n # initialize storages\n self.weights_history: dict[str, list[np.ndarray]] = {\n n: [p.value] for n, p in agent.weights.as_dict.items()\n }\n self.update_gradient: list[np.ndarray] = []\n\n @property\n def unwrapped(self) -> AgentType:\n return self.agent\n\n def learn_one_epoch(self, *args, **kwargs) -> tuple[np.ndarray, np.ndarray]:\n returns, grad, weights = self.agent.learn_one_epoch(*args, **kwargs)\n self._save(grad, weights)\n return returns, grad\n\n def learn(\n self, *args, **kwargs\n ) -> tuple[bool, np.ndarray, list[np.ndarray], list[dict[str, np.ndarray]]]:\n ok, returns, grads, weightss = self.agent.learn(*args, **kwargs)\n for grad, weights in zip(grads, weightss):\n self._save(grad, weights)\n return ok, returns, grads, weightss\n\n def _save(self, grad: np.ndarray, weights: dict[str, np.ndarray]) -> None:\n self.update_gradient.append(grad)\n for n, w in self.weights_history.items():\n w.append(weights[n])\n\n def __getattr__(self, name: str) -> Any:\n if name.startswith(\"_\"):\n raise AttributeError(f\"accessing private 
attribute '{name}' is prohibited.\")\n return getattr(self.agent, name)\n\n\n# ==================================================================================== #\n# ----------------------------------- END OLD CODE ----------------------------------- #\n# ==================================================================================== #\n\n\nclass QuadRotorMpcActual(Mpc):\n def __init__(self, env: QuadRotorEnv) -> None:\n N = QuadRotorMPCConfig.N\n super().__init__(Nlp(sym_type=\"SX\"), prediction_horizon=N, shooting=\"multi\")\n\n # ======================= #\n # Variable and Parameters #\n # ======================= #\n lbx, ubx = env.config.x_bounds[:, 0], env.config.x_bounds[:, 1]\n not_red = ~(np.isneginf(lbx) & np.isposinf(ubx))\n not_red_idx = np.where(not_red)[0]\n lbx, ubx = lbx[not_red].reshape(-1, 1), ubx[not_red].reshape(-1, 1)\n nx, nu = env.nx, env.nu\n x, _ = self.state(\"x\", nx)\n u, _ = self.action(\"u\", nu)\n ns = not_red_idx.size + nu\n s, _, _ = self.variable(\"slack\", (ns * N - not_red_idx.size, 1), lb=0)\n sx: cs.SX = s[: not_red_idx.size * (N - 1)].reshape((-1, N - 1))\n su: cs.SX = s[-nu * N :].reshape((-1, N))\n\n # 2) create model parameters\n for name in (\n \"g\",\n \"thrust_coeff\",\n \"pitch_d\",\n \"pitch_dd\",\n \"pitch_gain\",\n \"roll_d\",\n \"roll_dd\",\n \"roll_gain\",\n ):\n self.parameter(name, (1, 1))\n\n # =========== #\n # Constraints #\n # =========== #\n A, B, e = env.get_dynamics(\n g=self.parameters[\"g\"],\n thrust_coeff=self.parameters[\"thrust_coeff\"],\n pitch_d=self.parameters[\"pitch_d\"],\n pitch_dd=self.parameters[\"pitch_dd\"],\n pitch_gain=self.parameters[\"pitch_gain\"],\n roll_d=self.parameters[\"roll_d\"],\n roll_dd=self.parameters[\"roll_dd\"],\n roll_gain=self.parameters[\"roll_gain\"],\n )\n self.set_dynamics(lambda x, u: A @ x + B @ u + e, n_in=2, n_out=1)\n\n # 3) constraint on state\n bo = self.parameter(\"backoff\", (1, 1))\n self.constraint(\"x_min\", (1 + bo) * lbx - sx, \"<=\", x[not_red_idx, 2:])\n self.constraint(\"x_max\", x[not_red_idx, 2:], \"<=\", (1 - bo) * ubx + sx)\n self.constraint(\"u_min\", env.config.u_bounds[:, 0] - su, \"<=\", u)\n self.constraint(\"u_max\", u, \"<=\", env.config.u_bounds[:, 1] + su)\n\n # ========= #\n # Objective #\n # ========= #\n J = 0 # (no initial state cost not required since it is not economic)\n s = cs.blockcat([[cs.SX.zeros(sx.size1(), 1), sx], [su]])\n xf = self.parameter(\"xf\", (nx, 1))\n uf = cs.vertcat(0, 0, self.parameters[\"g\"])\n w_x = self.parameter(\"w_x\", (nx, 1)) # weights for stage/final state\n w_u = self.parameter(\"w_u\", (nu, 1)) # weights for stage/final control\n w_s = self.parameter(\"w_s\", (ns, 1)) # weights for stage/final slack\n J += sum(\n (\n quad_form(w_x, x[:, k + 1] - xf)\n + quad_form(w_u, u[:, k] - uf)\n + cs.dot(w_s, s[:, k])\n )\n for k in range(N - 1)\n )\n J += (\n quad_form(w_x, x[:, -1] - xf)\n + quad_form(w_u, u[:, -1] - uf)\n + cs.dot(w_s, s[:, -1])\n )\n self.minimize(J)\n self.init_solver(\n QuadRotorMPCConfig.__dataclass_fields__[\"solver_opts\"].default_factory()\n )\n\n\nclass TestQuadRotorQlearning(unittest.TestCase):\n def test(self):\n # for comparison\n # - replay maxlen must be 1, i.e., use only the latest episode for updates\n # - no exploration since np_randoms are placed differently\n seed = 42\n Tlimit = 20\n env = TimeLimit(QuadRotorEnv(), Tlimit)\n agent_config = {\n \"gamma\": 0.9792,\n \"lr\": [0.498],\n \"max_perc_update\": np.inf,\n \"replay_maxlen\": 1,\n \"replay_sample_size\": 1.0,\n \"replay_include_last\": 
1,\n \"perturbation_decay\": 0.885,\n }\n agent_expected = RecordLearningData(\n QuadRotorLSTDQAgent(\n env=env, agentname=\"LSTDQ_0\", agent_config=agent_config, seed=seed\n )\n )\n results_expected = agent_expected.learn(\n n_epochs=2,\n n_episodes=1,\n perturbation_decay=agent_config[\"perturbation_decay\"],\n seed=seed + 1,\n throw_on_exception=True,\n )\n self.assertTrue(results_expected[0])\n\n mpc = QuadRotorMpcActual(env)\n fp_field = QuadRotorLSTDQAgentConfig.__dataclass_fields__[\"fixed_pars\"]\n fixed_pars = fp_field.default_factory()\n fixed_pars[\"xf\"] = env.config.xf\n lp_field = QuadRotorLSTDQAgentConfig.__dataclass_fields__[\"init_pars\"]\n learnable_pars = LearnableParametersDict[cs.SX](\n (\n LearnableParameter(\n name=name,\n shape=1,\n value=init,\n lb=lb,\n ub=ub,\n sym=cs.vec(mpc.parameters[name]),\n )\n for name, (init, (lb, ub)) in lp_field.default_factory().items()\n )\n )\n agent_actual = RecordUpdates(\n LstdQLearningAgent(\n mpc=mpc,\n discount_factor=agent_config[\"gamma\"],\n learning_rate=agent_config[\"lr\"][0],\n learnable_parameters=learnable_pars,\n fixed_parameters=fixed_pars,\n exploration=E.EpsilonGreedyExploration(\n S.ExponentialScheduler(0.0, agent_config[\"perturbation_decay\"]),\n S.ExponentialScheduler(0.0, agent_config[\"perturbation_decay\"]),\n seed=seed,\n ),\n experience=ExperienceReplay(maxlen=Tlimit, sample_size=1.0),\n update_strategy=Tlimit,\n cho_before_update=True,\n )\n )\n results_actual = LstdQLearningAgent.train(\n agent_actual,\n env=env,\n episodes=2,\n seed=seed + 1,\n )\n\n np.testing.assert_allclose(results_actual, results_expected[1].flatten())\n for n, weights in agent_actual.updates_history.items():\n np.testing.assert_allclose(weights, agent_expected.weights_history[n])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"FilippoAiraldi/mpc-reinforcement-learning","sub_path":"tests/test_quadrotor_q_learning.py","file_name":"test_quadrotor_q_learning.py","file_ext":"py","file_size_in_byte":48471,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"}
+{"seq_id":"29130687420","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport math\n\n\ndef gelu(x):\n \"\"\" gelu激活函数\n 在GPT架构中,使用的是gelu函数的近似版本,公式如下:\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n 这里是直接求的解析解,就是原始论文给出的公式\n 论文 https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nactivations = {\"gelu\": gelu, \"relu\": F.relu, \"swish\": swish}\n\n\nclass LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12, conditional=False):\n \"\"\"layernorm 层,这里自行实现,目的是为了兼容 conditianal layernorm,使得可以做条件文本生成、条件分类等任务\n 条件layernorm来自于苏剑林的想法,详情:https://spaces.ac.cn/archives/7124\n \"\"\"\n super(LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.eps = eps\n self.conditional = conditional\n if conditional:\n # 条件layernorm, 用于条件文本生成,\n # 这里采用全零初始化, 目的是在初始状态不干扰原来的预训练权重\n self.dense1 = nn.Linear(2 * hidden_size, hidden_size, bias=False)\n self.dense1.weight.data.uniform_(0, 0)\n self.dense2 = nn.Linear(2 * hidden_size, hidden_size, bias=False)\n self.dense2.weight.data.uniform_(0, 0)\n\n def forward(self, x):\n if self.conditional:\n inputs = x[0]\n cond = x[1]\n for _ in range(len(inputs.shape) - len(cond.shape)):\n cond = cond.unsqueeze(dim=1)\n u = inputs.mean(-1, keepdim=True)\n s = (inputs - u).pow(2).mean(-1, keepdim=True)\n x = (inputs - u) / torch.sqrt(s + self.eps)\n return (self.weight + self.dense1(cond)) * x + (self.bias + self.dense2(cond))\n else:\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n return self.weight * x + self.bias\n\n\nclass MultiHeadAttentionLayer(nn.Module):\n def __init__(self, hidden_size, num_attention_heads, dropout_rate, attention_scale=True,\n return_attention_scores=False):\n super(MultiHeadAttentionLayer, self).__init__()\n\n assert hidden_size % num_attention_heads == 0\n\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.attention_head_size = int(hidden_size / num_attention_heads)\n self.attention_scale = attention_scale\n self.return_attention_scores = return_attention_scores\n\n self.q = nn.Linear(hidden_size, hidden_size)\n self.k = nn.Linear(hidden_size, hidden_size)\n self.v = nn.Linear(hidden_size, hidden_size)\n\n self.o = nn.Linear(hidden_size, hidden_size)\n\n self.dropout = nn.Dropout(dropout_rate)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, query, key, value, attention_mask=None):\n\n # query shape: [batch_size, query_len, hidden_size]\n # key shape: [batch_size, key_len, hidden_size]\n # value shape: [batch_size, value_len, hidden_size]\n # 一般情况下,query_len、key_len、value_len三者相等\n\n mixed_query_layer = self.q(query)\n mixed_key_layer = self.k(key)\n mixed_value_layer = self.v(value)\n\n # mixed_query_layer shape: [batch_size, query_len, hidden_size]\n # mixed_query_layer shape: [batch_size, key_len, hidden_size]\n # mixed_query_layer shape: [batch_size, value_len, hidden_size]\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # query_layer shape: [batch_size, num_attention_heads, 
query_len, attention_head_size]\n # key_layer shape: [batch_size, num_attention_heads, key_len, attention_head_size]\n # value_layer shape: [batch_size, num_attention_heads, value_len, attention_head_size]\n\n # 交换k的最后两个维度,然后q和k执行点积, 获得attention score\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n # attention_scores shape: [batch_size, num_attention_heads, query_len, key_len]\n\n # 是否进行attention scale\n if self.attention_scale:\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # 执行attention mask,对于mask为0部分的attention mask,\n # 值为-1e10,经过softmax后,attention_probs几乎为0,所以不会attention到mask为0的部分\n if attention_mask is not None:\n # attention_scores = attention_scores.masked_fill(attention_mask == 0, -1e10)\n attention_mask = (1.0 - attention_mask) * -10000.0\n attention_scores = attention_scores + attention_mask\n\n # 将attention score 归一化到0-1\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n # context_layer shape: [batch_size, num_attention_heads, query_len, attention_head_size]\n\n # transpose、permute等维度变换操作后,tensor在内存中不再是连续存储的,而view操作要求tensor的内存连续存储,\n # 所以在调用view之前,需要contiguous来返回一个contiguous copy;\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # context_layer shape: [batch_size, query_len, num_attention_heads, attention_head_size]\n\n new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n # 是否返回attention scores\n if self.return_attention_scores:\n # 这里返回的attention_scores没有经过softmax, 可在外部进行归一化操作\n return self.o(context_layer), attention_scores\n else:\n return self.o(context_layer)\n\n\nclass PositionWiseFeedForward(nn.Module):\n def __init__(self, hidden_size, intermediate_size, dropout_rate=0.5, hidden_act='gelu', is_dropout=True):\n # 原生的tf版本的bert在激活函数后,没有添加dropout层,但是在google AI的bert-pytorch开源项目中,多了一层dropout;\n # 并且在pytorch官方的TransformerEncoderLayer的实现中,也有一层dropout层,就像这样:self.linear2(self.dropout(self.activation(self.linear1(src))));\n # 这样不统一做法的原因不得而知,不过有没有这一层,差别可能不会很大;\n\n # 为了适配是否dropout,用is_dropout,dropout_rate两个参数控制;如果是实现原始的transformer,直接使用默认参数即可;如果是实现bert,则is_dropout为False,此时的dropout_rate参数并不会使用.\n super(PositionWiseFeedForward, self).__init__()\n\n self.is_dropout = is_dropout\n self.intermediate_act_fn = activations[hidden_act]\n self.intermediateDense = nn.Linear(hidden_size, intermediate_size)\n self.outputDense = nn.Linear(intermediate_size, hidden_size)\n if self.is_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, x):\n # x shape: (batch size, seq len, hidden_size)\n if self.is_dropout:\n x = self.dropout(self.intermediate_act_fn(self.intermediateDense(x)))\n else:\n x = self.intermediate_act_fn(self.intermediateDense(x))\n\n # x shape: (batch size, seq len, intermediate_size)\n x = self.outputDense(x)\n\n # x shape: (batch size, seq len, hidden_size)\n return x\n","repo_name":"MuQiuJun-AI/bert4pytorch","sub_path":"bert4pytorch/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":8184,"program_lang":"python","lang":"en","doc_type":"code","stars":373,"dataset":"github-code","pt":"79"}
+{"seq_id":"8834712574","text":"# import data into a numpy array\r\n\r\nimport numpy as np\r\n\r\ndata_array = np.genfromtxt(\"python_language_1_data.csv\", delimiter=\",\", names=True,\r\n dtype=[int, int, float])\r\n\r\nrainfall = \"rainfall_mmday\"\r\n\r\n# store end years\r\nfirst_year = data_array[0][0]\r\nlast_year = data_array[-1][0]\r\n\r\n# create a year key for dictionary\r\nyear_tuple = tuple(range(first_year, last_year + 1))\r\n\r\n# create dictionary, with year keys and empty list values\r\ndata_dic = {}\r\n\r\nfor year in year_tuple:\r\n data_dic[year] = [] \r\n\r\n# iterate through rows, adding rainfall data to appropriate year list of dictionary\r\n'''\r\n# alternative version\r\nfor day in range(data_array.size):\r\n data_dic[data_array[day][0]].append(data_array[day][2])\r\n'''\r\nfor day in data_array:\r\n data_dic[day[0]].append(day[2])\r\n\r\n\r\n#export dictionary to json\r\n\r\nimport json\r\n\r\nwith open('python_language_1_data.json', 'w') as json_file:\r\n json.dump(data_dic, json_file, indent=4)\r\n\r\n# function to create a plot in png format of rainfall across a year,\r\n# takes a json file, year, and optional colour\r\n \r\nfrom matplotlib import pyplot as plt\r\n\r\ndef plot_from_json(filename, year, colour='green'):\r\n \r\n with open(filename, 'r') as f:\r\n temp_string = f.read()\r\n \r\n plot_data_dic = json.loads(temp_string)\r\n plot_data = plot_data_dic[year]\r\n \r\n year_graph, year_graph_axes = plt.subplots()\r\n \r\n # attempt to plot graph, raise error if colour input is invalid\r\n try:\r\n year_graph_axes.plot(plot_data, color = colour)\r\n except ValueError:\r\n pass\r\n \r\n year_graph_axes.set_title(\"Average daily rainfall for 1988\")\r\n year_graph_axes.set_ylabel(\"rainfall / mmday\")\r\n year_graph_axes.set_xlabel(\"day\")\r\n \r\n # save as .png\r\n year_graph.savefig('year_rainfall_graph.png')\r\n\r\n#plot a chart for 1998, and export plot as png file, with magenta line \r\nplot_from_json('python_language_1_data.json', '1998', 'magenta')\r\n\r\n\r\n#write a function to plot a graph of yearly mean rainfall for a custom period\r\ndef mean_from_list(num_list):\r\n return (sum(num_list) / len(num_list))\r\n\r\ndef yearly_mean_plot(filename, start_year, end_year):\r\n \r\n with open(filename, 'r') as f:\r\n temp_string = f.read()\r\n\r\n plot_data_dic = json.loads(temp_string)\r\n \r\n cust_year_list = list(range(int(start_year), int(end_year) + 1)) \r\n year_mean_list = []\r\n for year in cust_year_list:\r\n year_mean = mean_from_list(plot_data_dic[str(year)])\r\n year_mean_list.append(year_mean)\r\n \r\n custom_graph, custom_graph_axes = plt.subplots()\r\n \r\n custom_graph_axes.plot(cust_year_list, year_mean_list)\r\n \r\n custom_graph_axes.set_title(\"Yearly rainfall averages from {} to {}\".format(start_year, end_year))\r\n custom_graph_axes.set_ylabel(\"average rainfall / mm per day\")\r\n custom_graph_axes.set_xlabel(\"year\")\r\n \r\n # save as .png custom_graph.savefig('custom_rainfall_graph.png')\r\n custom_graph.savefig('mean_rainfall_graph.png')\r\n\r\n#prod a plot 1988-2000 inclusive\r\nyearly_mean_plot('python_language_1_data.json', '1988', '2000')\r\n\r\n\r\n\r\n# function to apply correction code: (rainfall_value * 1.2 ^ root(2))\r\ndef rain_corrector(rain_value):\r\n root_two = 2**(1/2)\r\n correct_rain_value = rain_value * (1.2 ** root_two) \r\n return correct_rain_value\r\n\r\n# function to correct all of the data for a given year (v1 - using a for loop)\r\ndef year_corrector_loop(filename, year):\r\n # import 
dictionary\r\n with open(filename, 'r') as f:\r\n temp_string = f.read()\r\n bad_data_dic = json.loads(temp_string)\r\n \r\n for v_index in range(len(bad_data_dic[year])):\r\n bad_entry = bad_data_dic[year][v_index]\r\n fixed_entry = rain_corrector(bad_entry)\r\n bad_data_dic[year][v_index] = fixed_entry\r\n \r\n# for rain_value in bad_data_dic[str(year)]:\r\n# bad_data_dic[str(year)][v_index] = rain_corrector(day)\r\n \r\n \r\n with open('fixed_rain_data_loop.json', 'w') as fixed_json_file:\r\n json.dump(bad_data_dic, fixed_json_file, indent=4)\r\n \r\n# test corrector loop version\r\nyear_corrector_loop('python_language_1_data.json', '2000')\r\n\r\n# function to correct all of the data for a given year (v2 - using a list comp)\r\ndef year_corrector_comp(filename, year):\r\n # import dictionary\r\n with open(filename, 'r') as f:\r\n temp_string = f.read()\r\n bad_data_dic = json.loads(temp_string)\r\n \r\n fixed_year = [rain_corrector(entry) for entry in bad_data_dic[year]]\r\n \r\n bad_data_dic[year] = fixed_year\r\n \r\n with open('fixed_rain_data_comp.json', 'w') as fixed_json_file:\r\n json.dump(bad_data_dic, fixed_json_file, indent=4)\r\n\r\n# test corrector comp version\r\nyear_corrector_comp('python_language_1_data.json', '1942')\r\n'''\r\nThe loop version benefits from spreading out the operations,\r\nwhich can make them easier to follow.\r\n\r\nThe list comprehension version benefits from conciseness,\r\nand general readibility\r\n'''\r\n\r\n\r\n'''\r\nspare code:\r\n #clear figure\r\n year_graph.clf()\r\n\r\n'''","repo_name":"cji1/Exam-Prep","sub_path":"lang/rainfall.py","file_name":"rainfall.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74009725374","text":"from typing import OrderedDict\n\nfrom mindspore_federated._mindspore_federated import VFLContext\n\nfrom ..common import check_type\n\n\nclass ServerConfig:\n \"\"\"\n Define the vertical server configuration.\n\n Args:\n server_name (str): Name of server, such as \"leader_server\", user defined.\n server_address (str): Address of server, such as 127.0.0.1:1086, user defined.\n \"\"\"\n def __init__(self, server_name, server_address):\n check_type.check_str(\"server_name\", server_name)\n check_type.check_str(\"server_address\", server_address)\n self.server_name = server_name\n self.server_address = server_address\n\n\ndef init_server_config(http_server_config, remote_server_config):\n \"\"\"\n Initialize local server configuration and remote server configuration.\n\n Args:\n http_server_config (ServerConfig): Configuration of local http server.\n remote_server_config (ServerConfig): Configuration of remote http server.\n \"\"\"\n ctx = VFLContext.get_instance()\n check_type.check_str(\"http_server_config.server_name\", http_server_config.server_name)\n check_type.check_str(\"http_server_config.server_address\", http_server_config.server_address)\n ctx.set_http_server_name(http_server_config.server_name)\n ctx.set_http_server_address(http_server_config.server_address)\n\n remote_server_dict = OrderedDict()\n if isinstance(remote_server_config, ServerConfig):\n check_type.check_str(\"remote_server_config.server_name\", remote_server_config.server_name)\n check_type.check_str(\"remote_server_config.server_address\", remote_server_config.server_address)\n remote_server_dict[remote_server_config.server_name] = remote_server_config.server_address\n\n elif isinstance(remote_server_config, list):\n for item in remote_server_config:\n check_type.check_str(\"remote_server_config.server_name\", item.server_name)\n check_type.check_str(\"remote_server_config.server_address\", item.server_address)\n remote_server_dict[item.server_name] = item.server_address\n ctx.set_remote_server_address(remote_server_dict)\n","repo_name":"gaoyang-zhang/mindspore-federated","sub_path":"mindspore_federated/fl_arch/python/mindspore_federated/startup/server_config.py","file_name":"server_config.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2979631773","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file deals with AOI pattern recognition for an interpretation purpose.\n\"\"\"\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n IMPORTS\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport Constants as const\nimport pandas as pd\nimport numpy as np\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n FUNCTIONS\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\ndef clean_AOI(full_pd, seuil):\n \"\"\"\n This function will compress all the data to keep only the first row for each AOI, allowing to detect patterns after\n\n Parameters\n ----------\n full_pd : TYPE\n DESCRIPTION.\n seuil : TYPE\n DESCRIPTION.\n\n Returns\n -------\n clean : TYPE\n DESCRIPTION.\n\n \"\"\"\n full_pd=full_pd.copy(deep=True)\n clean = full_pd.loc[(full_pd.loc[:,\"AOI\"].shift() != full_pd.loc[:,\"AOI\"])].copy(deep=True)\n clean.loc[:,\"delta\"]=(-clean[\"FD_TIME_S\"]+clean[\"FD_TIME_S\"].shift(-1)).fillna(0)\n clean=clean.loc[(clean[\"delta\"]>seuil)]\n\n clean.reset_index(drop=True,inplace=True)\n return clean\n\n\n\n\ndef count_transitions(AOI_pd):\n \"\"\"\n\n\n Parameters\n ----------\n AOI_pd : TYPE\n DESCRIPTION.\n\n Returns\n -------\n pivot : TYPE\n DESCRIPTION.\n transition : TYPE\n DESCRIPTION.\n\n \"\"\"\n AOI_pd[\"next_AOI\"]=AOI_pd.loc[:,\"AOI\"].shift(-1,fill_value=\"0\")\n AOI_pd[\"prev_AOI\"]=AOI_pd[\"AOI\"].shift(1,fill_value=\"0\")\n AOI_pd[\"transition\"]=AOI_pd[\"AOI\"]+\"=>\"+AOI_pd[\"next_AOI\"]\n AOI_pd[\"prev_transition\"]=AOI_pd[\"prev_AOI\"]+\"=>\"+AOI_pd[\"AOI\"]\n\n AOI=AOI_pd.drop_duplicates(subset=\"AOI\").sort_values(\"AOI\").set_index(\"AOI\")\n\n transition=AOI_pd.drop_duplicates(subset=\"transition\").sort_values(\"transition\").set_index(\"transition\")\n transition.loc[:,\"count\"]=0\n transition.loc[:,\"average_time_bef\"]=0\n transition.loc[:,\"average_time_aft\"]=0\n transition.loc[:,\"%from\"]=0 # Depuis l'AOI de départ, % de fois ou on arrive à AOI arrivé\n transition.loc[:,\"%to\"]=0 # D'ou vient on depuis cet AOI d'arrivé\n transition.loc[:,\"%count\"]=0\n\n for a in transition.index:\n AOI1=transition.loc[a,\"AOI\"]\n AOI2=transition.loc[a,\"next_AOI\"]\n transition.loc[a:,\"count\"]=AOI_pd.loc[a==AOI_pd[\"transition\"]].count()[\"transition\"]\n transition.loc[a:,\"average_time_bef\"]=AOI_pd.loc[a==AOI_pd[\"transition\"],\"delta\"].mean()\n transition.loc[a:,\"average_time_aft\"]=AOI_pd.loc[a==AOI_pd[\"prev_transition\"],\"delta\"].mean()\n transition.loc[a,\"%from\"]=int((100*transition.loc[a,\"count\"]/AOI_pd.loc[AOI_pd[\"AOI\"]==AOI1].count()[\"AOI\"]))\n transition.loc[a,\"%to\"]=int((100*transition.loc[a,\"count\"]/AOI_pd.loc[AOI_pd[\"next_AOI\"]==AOI2].count()[\"next_AOI\"]))\n for b in transition.index:\n 
transition.loc[b,\"%count\"]=int((100*transition.loc[b,\"count\"]/transition[\"count\"].sum()))\n\n ind=[a for a in AOI.index]\n col=ind.copy()\n col.append(\"0\")\n pivot=pd.DataFrame(index=col,columns=ind)\n pivot.fillna(0,inplace=True)\n for i in ind:\n for j in col:\n a=i+\"=>\"+j\n\n if a in transition.index:\n pivot.loc[j,i]=transition.loc[a,\"%from\"]\n pivot=pivot.astype(int)\n transition.drop(columns=[\"AOI\",\"FD_TIME_S\",\"next_AOI\",\"average_time_aft\",\"prev_AOI\",\"prev_transition\"],inplace=True)\n return pivot,transition\n\n\n\n\ndef tete_fixe_tunnel(aois,t1,t2):\n \"\"\"\n\n\n Parameters\n ----------\n aois : TYPE\n DESCRIPTION.\n t1 : TYPE\n DESCRIPTION.\n t2 : TYPE\n DESCRIPTION.\n\n Returns\n -------\n fixe : TYPE\n DESCRIPTION.\n\n \"\"\"\n ref=aois.loc[t1,\"AOI\"]\n fixe=(aois.loc[aois.loc[:, \"FD_TIME_S\"]t1,\"AOI\"]==ref).all()\n return fixe\n\n\ndef tete_fixe(data,t1,t2,seuil=const.SEUIL_TETE_FIXE):\n \"\"\"\n\n\n Parameters\n ----------\n data : TYPE\n DESCRIPTION.\n t1 : TYPE\n DESCRIPTION.\n t2 : TYPE\n DESCRIPTION.\n seuil : TYPE, optional\n DESCRIPTION. The default is const.SEUIL_TETE_FIXE.\n\n Returns\n -------\n fixe : TYPE\n DESCRIPTION.\n\n \"\"\"\n local=data.loc[data[\"FD_TIME_S\"]t1,[\"FD_PILOT_HEAD_HEADING\",\"FD_PILOT_HEAD_PITCH\"]]\n mean=local.mean()\n fixe=((abs(local-mean)>seuil).all()).all()\n return fixe\n\ndef count_AOI(AOI_pd,full_pd):\n \"\"\"\n\n\n Parameters\n ----------\n AOI_pd : TYPE\n DESCRIPTION.\n full_pd : TYPE\n DESCRIPTION.\n\n Returns\n -------\n AOI : TYPE\n DESCRIPTION.\n\n \"\"\"\n AOI=AOI_pd.drop_duplicates(subset=\"AOI\").sort_values(\"AOI\").set_index(\"AOI\")\n AOI[\"count\"]=0\n AOI[\"average_time\"]=0\n AOI[\"total_time\"]=0\n AOI[\"%_time\"]=0\n total_time=full_pd[\"FD_TIME_S\"].max()-full_pd[\"FD_TIME_S\"].min()\n\n for a in AOI.index:\n AOI.loc[a,\"count\"]=AOI_pd.loc[a==AOI_pd[\"AOI\"]].count()[\"AOI\"]\n AOI.loc[a,\"average_time\"]=AOI_pd.loc[a==AOI_pd[\"AOI\"],\"delta\"].mean()\n AOI.loc[a,\"total_time\"]=AOI_pd.loc[a==AOI_pd[\"AOI\"],\"delta\"].sum()\n AOI.loc[:,\"%_time\"]=(100*AOI[\"total_time\"]/total_time).astype(int)\n return AOI\n\n\n\n\ndef chain_AOI(pivot,liste_aoi):\n \"\"\"\n\n\n Parameters\n ----------\n pivot : TYPE\n DESCRIPTION.\n liste_aoi : TYPE\n DESCRIPTION.\n\n Returns\n -------\n aoi_chain : TYPE\n DESCRIPTION.\n\n \"\"\"\n aois=pivot.index.copy().to_numpy()\n liste_aois=\"\".join(liste_aoi)\n aoi_chain=pd.DataFrame(columns=[\"count\"])\n for i in aois:\n for j in np.delete(aois,np.where(aois==i)):\n if liste_aois.count(i+j)>0:\n for k in np.delete(aois,np.where(aois==j)):\n if liste_aois.count(i+j+k)>0:\n temp=liste_aois.count(i+j+k)\n if temp>0 :\n aoi_chain.loc[i+j+k,\"count\"]=temp\n\n aoi_chain[\"pourcent\"]=100*aoi_chain.loc[:,\"count\"]/aoi_chain[\"count\"].sum()\n aoi_chain=aoi_chain.loc[aoi_chain[\"pourcent\"]>1]\n return aoi_chain","repo_name":"NatanVachon/PIE-018","sub_path":"DataAnalysis/Features/Pattern_From_AOI.py","file_name":"Pattern_From_AOI.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"35439081580","text":"def solution(answers):\n n = len(answers)\n s1 = [1, 2, 3, 4, 5]\n s2 = [2, 1, 2, 3, 2, 4, 2, 5]\n s3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n answer = []\n\n score = [0, 0, 0]\n max_score = 0\n\n for i in range(n):\n if answers[i] == s1[i%5]:\n score[0] += 1\n if answers[i] == s2[i%8]:\n score[1] += 1\n if answers[i] == s3[i%10]:\n score[2] += 1\n\n for idx, j in enumerate(score):\n if j > max_score:\n answer = [idx+1]\n max_score = j\n elif j == max_score:\n answer.append(idx+1)\n\n return answer\n\n\n\nanswers2 = [1,2,3,4,5]\nanswers = [1,3,2,4,2]\nprint(solution(answers))","repo_name":"Mingdoo/coding_test_boom","sub_path":"210912/smile/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"32274320880","text":"def is_same_tree(inorder, preorder, postorder):\n if len(inorder) != len(preorder) or len(inorder) != len(postorder):\n return False\n\n if len(inorder) == 0:\n return True\n\n if len(inorder) == 1:\n return inorder[0] == preorder[0] == postorder[0]\n\n if set(inorder) != set(preorder) or set(inorder) != set(postorder):\n return False\n\n root = preorder[0]\n root_index = inorder.index(root)\n\n left_inorder = inorder[:root_index]\n right_inorder = inorder[root_index + 1:]\n\n left_preorder = preorder[1:root_index + 1]\n right_preorder = preorder[root_index + 1:]\n\n left_postorder = postorder[:root_index]\n right_postorder = postorder[root_index:-1]\n\n return is_same_tree(left_inorder, left_preorder, left_postorder) and \\\n is_same_tree(right_inorder, right_preorder, right_postorder)\n\n\n# Test Case 1\ninorder1 = [4, 2, 5, 1, 3]\npreorder1 = [1, 2, 4, 5, 3]\npostorder1 = [4, 5, 2, 3, 1]\nprint(is_same_tree(inorder1, preorder1, postorder1)) # Output: True\n\n# Test Case 2\ninorder2 = [4, 2, 5, 1, 3]\npreorder2 = [1, 5, 4, 2, 3]\npostorder2 = [4, 1, 2, 3, 5]\nprint(is_same_tree(inorder2, preorder2, postorder2)) # Output: False\n","repo_name":"Krutheesh/Placement_Assignment_Krutheesh","sub_path":"DSA/assignment22/four.py","file_name":"four.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"25975508251","text":"#!/usr/bin/env python3\n#coding: utf-8\n### 1st line allows to execute this script by typing only its name in terminal, with no need to precede it with the python command\n### 2nd line declaring source code charset should be not necessary but for exemple pydoc request it\n\n\n\n__doc__ = \"This module concern volumes.\"#information describing the purpose of this module\n__status__ = \"Development\"#should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'\n__version__ = \"2.0.0\"# version number,date or about last modification made compared to the previous version\n__license__ = \"public domain\"# ref to an official existing License\n#__copyright__ = \"Copyright 2000, The X Project\"\n__date__ = \"2016\"#started creation date / year month day\n__author__ = \"N-zo syslog@laposte.net\"#the creator origin of this prog,\n__maintainer__ = \"Nzo\"#person who curently makes improvements, replacing the author\n__credits__ = []#passed mainteners and any other helpers\n__contact__ = \"syslog@laposte.net\"# current contact adress for more info about this file\n\n\n\n### import the required modules\n#import antiprism_python # a collection of geometry \n\nfrom math import sqrt\nfrom numpy import array\n\n\n\n### ICOSAHEDRON\nPHI = (sqrt(5) + 1) / 2\nRAD = sqrt(PHI+2)\nA = 1/RAD\nB = PHI/RAD\nICO_VERTEX=[ (-A,0,B),(A,0,B),(-A,0,-B),(A,0,-B),(0,B,A),(0,B,-A),\n(0,-B,A),(0,-B,-A),(B,A,0),(-B,A,0),(B,-A,0),(-B,-A,0) ]\nICO_FACES=[ (1,4,0),(4,9,0),(4,5,9),(8,5,4),(1,8,4),\n(1,10,8),(10,3,8),(8,3,5),(3,2,5),(3,7,2),\n(3,10,7),(10,6,7),(6,11,7),(6,0,11),(6,1,0),\n(10,1,6),(11,0,9),(2,11,9),(5,2,9),(11,2,7) ]\n\n### TETRAHEDRON\nC= 1 / sqrt(3)\nTETRA_VERTEX=[(-C,C,-C),(-C,-C,C),(C,C,C),(C,-C,-C)]\nTETRA_FACES=[(0,2,1),(3,0,1),(3,1,2),(0,2,3)]\n\n\n\ndef normaliz(vector):\n\tlong= sqrt( sum(vector**2) )\n\treturn vector/long\n\n\ndef iterator(qantum,vertex_list,face_list):\n\twhile qantum:\n\t\tnew_face_list=[]\n\t\tfor face in face_list :\n\n\t\t\ta=array(vertex_list[face[0]])\n\t\t\tb=array(vertex_list[face[1]])\n\t\t\tc=array(vertex_list[face[2]])\n\t\t\t\n\t\t\tna= tuple(normaliz( (a+b)/2. ))\n\t\t\tnb= tuple(normaliz( (b+c)/2. ))\n\t\t\tnc= tuple(normaliz( (c+a)/2. ))\n\t\t\t\n\t\t\tindex=[]\n\t\t\tfor v in [na,nb,nc] :\n\t\t\t\tif v in vertex_list :\n\t\t\t\t\ti=vertex_list.index(v)\n\t\t\t\t\t#print(\"in list\")\n\t\t\t\telse :\n\t\t\t\t\ti=len(vertex_list)\n\t\t\t\t\tvertex_list.append(v)\n\t\t\t\tindex.append(i)\n\t\t\t\n\t\t\tfa=(face[0],index[0],index[2])\n\t\t\tfb=(face[1],index[1],index[0])\n\t\t\tfc=(face[2],index[2],index[1])\n\t\t\tfd=(index[0],index[1],index[2])\n\t\t\t\n\t\t\tnew_face_list.extend([fa,fb,fc,fd])\n\t\tface_list=new_face_list\n\t\tqantum-=1\n\treturn vertex_list,tuple(face_list)\n","repo_name":"N-z0/commonz","sub_path":"src/geometry/polyhedra.py","file_name":"polyhedra.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"36663042334","text":"#! /usr/bin/env python3\n\nimport difflib\nimport requests\n\nwith open(\"expected_binding.py\") as f:\n # remove \\n from end of each line\n expected_binding = [line.rstrip() for line in f]\n\n\nsource = \"\"\"\n#include \"ffig/attributes.h\"\n\nstruct FFIG_EXPORT Asset\n{\n virtual FFIG_EXPORT_NAME(value) double PV() const = 0;\n virtual FFIG_PROPERTY_NAME(name) const char* id() const = 0;\n};\n virtual ~Asset() = default;\n\nstruct FFIG_NAME(CDO) CollateralisedDebtObligation : Asset\n{\n CollateralisedDebtObligation() {}\n\n double PV() const override { return 99.99; }\n const char* id() const override { return \"CDO\"; }\n};\n\"\"\"\n\npayload = {'module_name': \"test\", 'inp_file': source,\n \"bindings_to_generate\": [\"py3\"]}\n\nr = requests.post(\n \"http://127.0.0.1:5000/api/gen_bindings_from_tu\", data=payload)\n\nassert r.status_code == requests.codes.ok\n \njson_resp = r.json()\ndiffer = difflib.Differ()\nbinding_from_api = json_resp['res'].splitlines()\nres = list(differ.compare(binding_from_api, expected_binding))\nfor line in res:\n # Each line of a Differ delta begins with a two-letter code.\n # ' ' represents a line common to both sequences.\n assert line[0:2] == ' '\n","repo_name":"FFIG/rest-api","sub_path":"requests_at_explorer.py","file_name":"requests_at_explorer.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29946212028","text":"from pprint import pprint\nfrom .base import Base\nfrom utils.io import load_BFM\nimport numpy as np\nimport tensorflow as tf\n\n\nclass SCRFDTDMMPostModel(tf.keras.Model):\n\n def __init__(self, tdmm_cfg, pred_model, n_objs, top_k_n, kp_thres,\n nms_iou_thres, resize_shape, *args, **kwargs):\n super(SCRFDTDMMPostModel, self).__init__(*args, **kwargs)\n self.n_R = tdmm_cfg['n_R']\n self.n_shp, self.n_exp = tdmm_cfg['n_shp'], tdmm_cfg['n_exp']\n pms = tf.cast(np.load(tdmm_cfg['pms_path']), tf.float32)\n pms_R = pms[:, :self.n_R]\n pms_shp, pms_exp = pms[:, self.n_R:self.n_R + self.n_shp], pms[:,\n 208:-3]\n pms = tf.concat([pms_R, pms_shp, pms_exp], axis=-1)\n self.pms = pms[:2, :]\n head_model = load_BFM(tdmm_cfg['model_path'])\n kpt_ind = head_model['kpt_ind']\n X_ind_all = np.stack([kpt_ind * 3, kpt_ind * 3 + 1, kpt_ind * 3 + 2])\n X_ind_all = tf.concat([\n X_ind_all[:, :17], X_ind_all[:, 17:27], X_ind_all[:, 36:48],\n X_ind_all[:, 27:36], X_ind_all[:, 48:68]\n ],\n axis=-1)\n valid_ind = tf.reshape(tf.transpose(X_ind_all), (-1))\n self.valid_ind = tf.cast(valid_ind, tf.int32)\n self.u_base = tf.cast(head_model['shapeMU'], tf.float32)\n self.u_base = tf.gather(self.u_base, self.valid_ind)\n self.u_base = tf.reshape(self.u_base,\n (tf.shape(self.u_base)[0] // 3, 3))\n self.u_base = tf.reshape(self.u_base, (tf.shape(self.u_base)[0] * 3, 1))\n self.shp_base = tf.cast(head_model['shapePC'],\n tf.float32)[:, :self.n_shp]\n self.shp_base = tf.gather(self.shp_base, self.valid_ind)\n self.exp_base = tf.cast(head_model['expPC'], tf.float32)\n self.exp_base = tf.gather(self.exp_base, self.valid_ind)\n self.pred_model = pred_model\n self.n_objs = n_objs\n self.top_k_n = top_k_n\n self.kp_thres = kp_thres\n self.nms_iou_thres = nms_iou_thres\n self.resize_shape = tf.cast(resize_shape, tf.float32)\n self.cls_out_channels = 2\n self._feat_stride_fpn = [8, 16, 32]\n self.num_levels = len(self._feat_stride_fpn)\n self.num_level_anchors = [3200, 800, 200]\n self._num_anchors = 2\n\n def call(self, x, training=False):\n imgs, origin_shapes = x\n batch_size = tf.shape(imgs)[0]\n self.resize_ratio = tf.cast(origin_shapes / self.resize_shape,\n tf.dtypes.float32)\n preds = self.pred_model(imgs, training=False)\n box_results, lnms_results = self._anchor_assign(batch_size,\n preds[\"multi_lv_feats\"])\n return box_results, lnms_results\n\n # @tf.function\n def _anchor_assign(self, batch_size, multi_lv_feats):\n b_bbox_outputs = -tf.ones(shape=(batch_size, self.n_objs,\n self.cls_out_channels, 5))\n b_lnmk_outputs = -tf.ones(shape=(batch_size, self.n_objs,\n self.cls_out_channels, 68, 2))\n obj_start_idx = 0\n bbox_list, lnmk_list = [], []\n idxs_list = []\n for i, (lv_feats,\n stride) in enumerate(zip(multi_lv_feats,\n self._feat_stride_fpn)):\n if i == 0:\n continue\n b_cls_preds, b_bbox_preds, b_param_preds = lv_feats\n b_cls_preds = tf.math.sigmoid(b_cls_preds)\n\n b_bbox_preds = tf.reshape(b_bbox_preds, [-1, 4])\n b_param_preds = tf.reshape(b_param_preds,\n [-1, self.n_R + self.n_shp + self.n_exp])\n b_mask = b_cls_preds > self.kp_thres\n btach_idxs = tf.cast(tf.where(b_mask == True), tf.int32)[:, :1]\n b_cls_preds = tf.reshape(b_cls_preds, [-1, self.cls_out_channels])\n mask = b_cls_preds > self.kp_thres\n idxs = tf.where(mask == True)\n channel_idxs = tf.cast(idxs, tf.int32)[:, -1:]\n b_cls_preds = tf.expand_dims(b_cls_preds[mask], axis=-1)\n b_bboxes = self.decode_bbox(batch_size, stride, idxs, b_bbox_preds)\n pred_R, pred_shp, pred_exp = self.decod_params(idxs, 
b_param_preds)\n n_lnmks = self.reconstruct_lnmks(batch_size, b_bboxes, pred_R,\n pred_shp, pred_exp)\n num_detected_objs = tf.math.reduce_sum(tf.cast(mask, tf.float32))\n obj_idxs = tf.range(num_detected_objs, dtype=tf.int32)[:, None]\n obj_idxs += obj_start_idx\n b_bboxes = tf.einsum('n c d, b d -> n c d', b_bboxes[..., ::-1],\n self.resize_ratio)\n b_bboxes = tf.reshape(b_bboxes, (-1, 4))\n b_bboxes = tf.concat([b_bboxes, b_cls_preds], axis=-1)\n idxs = tf.concat([btach_idxs, obj_idxs, channel_idxs], axis=-1)\n n_lnmks = tf.einsum('n c d, b d -> n c d', n_lnmks[..., ::-1],\n self.resize_ratio)\n bbox_list.append(b_bboxes[:, :-1])\n lnmk_list.append(n_lnmks)\n idxs_list.append(idxs)\n b_bbox_outputs = tf.tensor_scatter_nd_update(\n b_bbox_outputs, idxs, b_bboxes)\n bbox_tensor = tf.concat(bbox_list, axis=0)\n lnmk_tensor = tf.concat(lnmk_list, axis=0)\n idxs_tensor = tf.concat(idxs_list, axis=0)\n b_scores = b_bbox_outputs[..., -1]\n b_bbox_outputs = b_bbox_outputs[..., :-1]\n # [B, N, Cate, 4]\n nms_reuslt = tf.image.combined_non_max_suppression(\n b_bbox_outputs,\n b_scores,\n self.n_objs,\n self.n_objs,\n iou_threshold=self.nms_iou_thres,\n clip_boxes=False)\n box_results = tf.where(nms_reuslt[0] == -1., np.inf, nms_reuslt[0])\n\n search_tensors = tf.reshape(\n box_results, [-1, 4])[:, None, :] - bbox_tensor[None, :, :]\n search_mask = tf.math.reduce_all(search_tensors == 0.0, axis=-1)\n idxs = tf.where(search_mask == True)[:, -1:]\n lnmk_tensor = tf.gather_nd(lnmk_tensor, idxs)\n idxs_tensor = tf.gather_nd(idxs_tensor, idxs)\n\n box_results = tf.where((box_results - 1.) == -1., np.inf, box_results)\n b_bboxes = tf.concat(\n [box_results, nms_reuslt[1][..., None], nms_reuslt[2][..., None]],\n axis=-1)\n b_bboxes = tf.where(b_bboxes == -1., np.inf, b_bboxes)\n b_bboxes = tf.reshape(b_bboxes, [-1, self.n_objs, 6])\n\n b_lnmk_outputs = tf.tensor_scatter_nd_update(b_lnmk_outputs,\n idxs_tensor, lnmk_tensor)\n b_lnmk_outputs = tf.where(b_lnmk_outputs == -1., np.inf, b_lnmk_outputs)\n return b_bboxes, b_lnmk_outputs\n\n def decod_params(self, idxs, b_param_preds):\n b_param_preds = tf.gather_nd(b_param_preds, idxs[:, 0][:, None])\n b_param_preds = b_param_preds * self.pms[1][None, :] + self.pms[0][\n None, :]\n R = b_param_preds[:, :self.n_R]\n shp = b_param_preds[:, self.n_R:self.n_R + self.n_shp]\n exp = b_param_preds[:, self.n_R + self.n_shp:]\n return R, shp, exp\n\n def decode_bbox(self, batch_size, stride, idxs, b_bbox_preds):\n b_bbox_preds = b_bbox_preds * stride\n height = self.resize_shape[0] // stride\n width = self.resize_shape[1] // stride\n X, Y = tf.meshgrid(tf.range(0, width), tf.range(0, height))\n anchor_centers = tf.stack([X, Y], axis=-1)\n anchor_centers = tf.reshape((anchor_centers * stride), (-1, 2))\n\n if self._num_anchors > 1:\n anchor_centers = tf.reshape(\n tf.stack([anchor_centers] * self._num_anchors, axis=1), (-1, 2))\n\n anchor_centers = tf.cast(anchor_centers, tf.float32)\n anchor_centers = tf.tile(anchor_centers[None, ...], (batch_size, 1, 1))\n anchor_centers = tf.reshape(anchor_centers, (-1, 2))\n b_bboxes = self.distance2bbox(anchor_centers, b_bbox_preds)\n b_bboxes = tf.gather_nd(b_bboxes, idxs[:, :1])\n b_bboxes = tf.reshape(b_bboxes, (-1, 2, 2))\n return b_bboxes\n\n def reconstruct_lnmks(self, batch_size, b_bboxes, R, shp, exp):\n n_lnmks = self.u_base + tf.linalg.matmul(\n self.shp_base, shp[..., None]) + tf.linalg.matmul(\n self.exp_base, exp[..., None])\n n_lnmks = tf.reshape(n_lnmks, (-1, tf.shape(n_lnmks)[-2] // 3, 3))\n R = tf.reshape(R, [-1, 3, 
3])\n n_lnmks = tf.linalg.matmul(n_lnmks, R, transpose_b=(0, 2, 1))\n n_lnmks = n_lnmks[..., :2]\n n_lnmk_tls = tf.math.reduce_min(n_lnmks, axis=-2, keepdims=True)\n n_lnmk_brs = tf.math.reduce_max(n_lnmks, axis=-2, keepdims=True)\n n_bbox_tls = b_bboxes[:, :1, :]\n n_bbox_brs = b_bboxes[:, 1:, :]\n n_lnmks_wh = n_lnmk_brs - n_lnmk_tls\n n_bbox_wh = n_bbox_brs - n_bbox_tls\n n_scales = n_bbox_wh / n_lnmks_wh\n n_lnmks = tf.math.abs(n_scales) * n_lnmks\n return n_lnmks[..., :2]\n\n def distance2bbox(self, points, distance, max_shape=None):\n \"\"\"Decode distance prediction to bounding box.\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n distance (Tensor): Distance from the given point to 4\n boundaries (left, top, right, bottom).\n max_shape (tuple): Shape of the image.\n\n Returns:\n Tensor: Decoded bboxes.\n \"\"\"\n x1 = points[..., 0] - distance[..., 0]\n y1 = points[..., 1] - distance[..., 1]\n x2 = points[..., 0] + distance[..., 2]\n y2 = points[..., 1] + distance[..., 3]\n if max_shape is not None:\n x1 = tf.clip_by_value(x1,\n clip_value_min=0,\n clip_value_max=max_shape[1])\n y1 = tf.clip_by_value(y1,\n clip_value_min=0,\n clip_value_max=max_shape[0])\n x2 = tf.clip_by_value(x2,\n clip_value_min=0,\n clip_value_max=max_shape[1])\n y2 = tf.clip_by_value(y2,\n clip_value_min=0,\n clip_value_max=max_shape[0])\n return tf.stack([x1, y1, x2, y2], axis=-1)\n","repo_name":"a0910257137/behavior_predictor","sub_path":"core/scrfdtdmm_model.py","file_name":"scrfdtdmm_model.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2201331454","text":"from Bio import SeqIO, AlignIO, Seq, SeqRecord\nfrom Bio.Align.Applications import ClustalwCommandline\nfrom Bio.Application import ApplicationError\nfrom Bio.Align.Applications import MuscleCommandline\nimport argparse\nfrom enum import Enum\nimport subprocess\nfrom shutil import which\n\nclass MSA(Enum):\n CLUSTALW = 1\n MUSCLE = 2\n\ndef isMethodInstalled(method):\n if method == MSA.CLUSTALW:\n return which(\"clustalw2\") is not None\n elif method == MSA.MUSCLE:\n return which(\"muscle\") is not None\n else:\n return False\n\ndef msa(in_file, out_file, method):\n \n if method == MSA.CLUSTALW:\n cline = ClustalwCommandline(\"clustalw2\",infile=in_file, outfile=out_file)\n\n elif method == MSA.MUSCLE:\n command = [\"muscle\", \"-align\", in_file, \"-output\", out_file]\n cline = lambda : subprocess.run(command, check=True) \n \n else: \n print(\"Error: Invalid MSA method\")\n exit(1)\n\n try:\n if not isMethodInstalled(method):\n print(f\"Error: Unable to run {method.name}. Make sure is installed\")\n exit(1)\n cline()\n except ApplicationError:\n print(f\"Error: Unable to run {method.name}. Make sure is installed\")\n exit(1)\n except OSError as e:\n print(f\"Error: Unable to open {in_file}: {e}\")\n exit(1)\n\nif \"__main__\" == __name__:\n\n parser = argparse.ArgumentParser(prog=\"ej3.py\", description=\"Execute Multiple Sequence Alignment with Clustawl or Muscle\")\n parser.add_argument(\"--method\", help=\"MSA method (clustalw or muscle)\", type=str, required=True, choices=[\"clustalw\", \"muscle\"])\n parser.add_argument(\"--input\", help=\"Input file (.fas)\", type=str, required=True)\n parser.add_argument(\"--output\", help=\"Output file\", type=str, required=True)\n\n args = parser.parse_args()\n in_file = args.input\n out_file = args.output\n\n extension = args.input.split(\".\")[-1]\n if extension != \"fas\" and extension != \"fasta\": \n print(\"Error: Please enter .fas or .fasta file\") \n exit(1) \n\n method = MSA.CLUSTALW if args.method == \"clustalw\" else MSA.MUSCLE\n msa(in_file, out_file, method)","repo_name":"eugepineiro/bioinformatica","sub_path":"ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38884856074","text":"from typing import Optional, Callable\n\nfrom urwid_utils.palette import *\nimport urwid\n\nfrom .logger import get_logger\nfrom .user_input import MouseButton, MouseState, MouseEvent\n\nlogger = get_logger()\n\n\n__all__ = [\"ScrollingListBox\"]\n\n\nclass ListBoxScrollBar(urwid.WidgetWrap):\n def __init__(self, parent):\n self.parent = parent\n self.pile = urwid.Pile([])\n super(ListBoxScrollBar, self).__init__(self.pile)\n\n def update(self, size):\n width, height = size\n scroll_marker_height = 1\n del self.pile.contents[:]\n\n if (len(self.parent.body) and self.parent.row_count and\n self.parent.focus is not None and self.parent.row_count > height):\n scroll_position = int(self.parent.focus_position / self.parent.row_count * height)\n scroll_marker_height = max(height * (height / self.parent.row_count), 1)\n else:\n scroll_position = -1\n\n pos_marker = urwid.AttrMap(urwid.Text(\" \"), {None: \"scroll_pos\"})\n down_marker = urwid.AttrMap(urwid.Text(u\"\\N{DOWNWARDS ARROW}\"), {None: \"scroll_marker\"})\n begin_marker = urwid.AttrMap(urwid.Text(u\"\\N{CIRCLED MINUS}\"), {None: \"scroll_marker\"})\n end_marker = urwid.AttrMap(urwid.Text(u\"\\N{CIRCLED PLUS}\"), {None: \"scroll_marker\"})\n view_marker = urwid.AttrMap(urwid.Text(\" \"), {None: \"scroll_view\"})\n bg_marker = urwid.AttrMap(urwid.Text(\" \"), {None: \"scroll_bg\"})\n\n for i in range(height):\n if abs(i - scroll_position) <= scroll_marker_height // 2:\n if i == 0 and self.parent.focus_position == 0:\n marker = begin_marker\n elif i + 1 == height and self.parent.row_count == self.parent.focus_position+1:\n marker = end_marker\n elif len(self.parent.body) == self.parent.focus_position + 1 \\\n and i == scroll_position + scroll_marker_height // 2:\n marker = down_marker\n else:\n marker = pos_marker\n else:\n if i < scroll_position:\n marker = view_marker\n elif self.parent.row_count and i / height < (len(self.parent.body) / self.parent.row_count):\n marker = view_marker\n else:\n marker = bg_marker\n\n self.pile.contents.append((urwid.Filler(marker), self.pile.options(\"weight\", 1)))\n\n self._invalidate()\n\n def selectable(self):\n # FIXME: mouse click/drag\n return False\n\n\nclass ScrollingListBox(urwid.WidgetWrap):\n signals = [\"select\", \"drag_start\", \"drag_continue\", \"drag_stop\", \"load_more\"]\n SCROLL_WHEEL_HEIGHT_RATIO = 0.5\n\n def __init__(self, body: urwid.Widget,\n infinite: bool = False,\n with_scrollbar: bool = False,\n row_count_fn: Optional[Callable] = None):\n self.infinite = infinite\n self.with_scrollbar = with_scrollbar\n self.row_count_fn = row_count_fn\n\n self.mouse_state: MouseState = MouseState.released\n self.drag_from = None\n self.drag_last = None\n self.drag_to = None\n self.load_more = False\n self.width: int = 0\n self.height: int = 0\n self.page: int = 0\n\n self.queued_keypress = None\n\n self.listbox = urwid.ListBox(body)\n self.columns = urwid.Columns([('weight', 1, self.listbox)])\n\n if self.with_scrollbar:\n self.scroll_bar = ListBoxScrollBar(self)\n self.columns.contents.append((self.scroll_bar, self.columns.options(\"given\", 1)))\n\n super(ScrollingListBox, self).__init__(self.columns)\n\n def mouse_event(self, size, event: str, button: int, col: int, row: int, focus: bool):\n if row < 0 or row >= self.height:\n return\n\n if event == MouseEvent.press:\n if button == MouseButton.left_button:\n self.mouse_state = MouseState.pressed\n self.drag_from = self.drag_last = (col, row)\n\n elif button == MouseButton.scroll_wheel_up:\n pos = 
self.listbox.focus_position - int(self.height * self.SCROLL_WHEEL_HEIGHT_RATIO)\n if pos < 0:\n pos = 0\n self.listbox.focus_position = pos\n self.listbox.make_cursor_visible(size)\n self._invalidate()\n\n elif button == MouseButton.scroll_wheel_down:\n pos = self.listbox.focus_position + int(self.height * self.SCROLL_WHEEL_HEIGHT_RATIO)\n if pos > len(self.listbox.body) - 1:\n if self.infinite:\n self.load_more = True\n pos = len(self.listbox.body) - 1\n self.listbox.focus_position = pos\n self.listbox.make_cursor_visible(size)\n self._invalidate()\n\n elif event == MouseEvent.drag:\n if self.drag_from is None:\n return\n\n if button == MouseButton.left_button:\n self.drag_to = (col, row)\n if self.mouse_state == MouseState.pressed:\n self.mouse_state = MouseState.dragging\n urwid.signals.emit_signal(self, \"drag_start\", self, self.drag_from)\n else:\n urwid.signals.emit_signal(self, \"drag_continue\", self, self.drag_last, self.drag_to)\n\n self.drag_last = (col, row)\n\n elif event == MouseEvent.release:\n if self.mouse_state == MouseState.dragging:\n self.drag_to = (col, row)\n urwid.signals.emit_signal(self, \"drag_stop\", self, self.drag_from, self.drag_to)\n self.mouse_state = MouseState.released\n\n return super(ScrollingListBox, self).mouse_event(size, event, button, col, row, focus)\n\n def keypress(self, size, key: str):\n command = self._command_map[key]\n if not command:\n return super(ScrollingListBox, self).keypress(size, key)\n\n # down, page down at end trigger load of more data\n if (\n command in [\"cursor down\", \"cursor page down\"]\n and self.infinite\n and (\n not len(self.body)\n or self.focus_position == len(self.body) - 1)\n ):\n self.load_more = True\n self.queued_keypress = key\n self._invalidate()\n\n elif command == \"activate\":\n urwid.signals.emit_signal(self, \"select\", self, self.selection)\n\n return super(ScrollingListBox, self).keypress(size, key)\n\n @property\n def selection(self):\n if len(self.body):\n return self.body[self.focus_position]\n\n def render(self, size, focus: bool = False):\n max_column: int = size[0]\n max_row: Optional[int] = size[1] if len(size) > 1 else None\n\n self.width = max_column\n if max_row:\n self.height = max_row\n\n if self.load_more and len(self.body) == 0 or \"bottom\" in self.ends_visible((max_column, max_row)):\n self.load_more = False\n self.page += 1\n try:\n focus = self.focus_position\n except IndexError:\n focus = None\n\n urwid.signals.emit_signal(self, \"load_more\", focus)\n\n if self.queued_keypress and focus and focus < len(self.body):\n self.keypress(size, self.queued_keypress)\n self.queued_keypress = None\n\n if self.with_scrollbar and len(self.body):\n self.scroll_bar.update(size)\n\n return super(ScrollingListBox, self).render(size, focus)\n\n def disable(self):\n self._selectable = False\n\n def enable(self):\n self._selectable = True\n\n @property\n def contents(self):\n return self.columns.contents\n\n @property\n def focus(self):\n return self.listbox.focus\n\n @property\n def focus_position(self):\n if not len(self.listbox.body):\n raise IndexError\n if len(self.listbox.body):\n return self.listbox.focus_position\n return None\n\n @focus_position.setter\n def focus_position(self, value):\n if not len(self.body):\n return\n self.listbox.focus_position = value\n self.listbox._invalidate()\n\n @property\n def row_count(self):\n if self.row_count_fn:\n return self.row_count_fn()\n return len(self.body)\n\n def __getattr__(self, attr):\n if attr in [\"ends_visible\", \"focus_position\", 
\"set_focus\", \"set_focus_valign\", \"body\", \"focus\"]:\n return getattr(self.listbox, attr)\n raise AttributeError(attr)\n\n @classmethod\n def get_palette_entries(cls):\n return {\n \"scroll_pos\": PaletteEntry(\n mono=\"white\",\n foreground=\"black\",\n background=\"white\",\n foreground_high=\"black\",\n background_high=\"white\"\n ),\n \"scroll_marker\": PaletteEntry(\n mono=\"white,bold\",\n foreground=\"black,bold\",\n background=\"white\",\n foreground_high=\"black,bold\",\n background_high=\"white\"\n ),\n \"scroll_view\": PaletteEntry(\n mono=\"black\",\n foreground=\"black\",\n background=\"light gray\",\n foreground_high=\"black\",\n background_high=\"g50\"\n ),\n \"scroll_bg\": PaletteEntry(\n mono=\"black\",\n foreground=\"light gray\",\n background=\"dark gray\",\n foreground_high=\"light gray\",\n background_high=\"g23\"\n ),\n\n }\n","repo_name":"emreay-/bank-statement-wizard","sub_path":"src/bank_statement_wizard/thirdparty/panwid/listbox.py","file_name":"listbox.py","file_ext":"py","file_size_in_byte":9745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"23208345593","text":"def to_ternary(n):\n result = []\n while n > 0:\n result.append(str(n % 3))\n n //= 3\n return \"\".join(result[::-1])\n\ndef to_decimal(n: str):\n decimal = 0\n for i in range(len(n)):\n decimal += int(n[i]) * (3 ** (len(n) - 1 - i))\n return decimal\n\ndef solution(n):\n ternary = to_ternary(n)\n flipped_ternary = ternary[::-1]\n answer = to_decimal(flipped_ternary)\n return answer\n\nn = 45\nprint(solution(n))\n","repo_name":"AppleYoujatea/OriginalApplePie","sub_path":"2nd_quarter/week05/pepe/3진법만들기.py","file_name":"3진법만들기.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"40466495298","text":"import random\r\nimport plotly.express as px\r\nimport plotly.figure_factory as ff\r\nimport statistics\r\n\r\ndice_result=[]\r\ncount=[]\r\nfor i in range(0,1000):\r\n dice1=random.randint(1,6)\r\n dice2=random.randint(1,6)\r\n dice_result.append(dice1+dice2)\r\n count.append(i)\r\nmean=sum(dice_result)/len(dice_result)\r\nstd_deviation=statistics.stdev(dice_result)\r\nmedian=statistics.median(dice_result)\r\nmode=statistics.mode(dice_result)\r\nprint(mean)\r\nprint(std_deviation)\r\nprint(median)\r\nprint(mode)\r\nfirst_std_dev_start, first_std_dev_end = mean-std_deviation, mean+std_deviation\r\nsec_std_dev_start, sec_std_dev_end = mean-(2*std_deviation), mean+(2*std_deviation)\r\nthi_std_dev_start, thi_std_dev_end = mean-(3*std_deviation), mean+(3*std_deviation)\r\nlist_of_data_within_1_std_deviation=[result for result in dice_result if result > first_std_dev_start and result < first_std_dev_end]\r\nlist_of_data_within_2_std_deviation=[result for result in dice_result if result > sec_std_dev_start and result < sec_std_dev_end]\r\nlist_of_data_within_3_std_deviation=[result for result in dice_result if result > thi_std_dev_start and result < thi_std_dev_end]\r\nprint(\"{}% of data lies within 1 standard deviation\".format(len(list_of_data_within_1_std_deviation)*100.0/len(dice_result)))\r\nprint(\"{}% of data lies within 2 standard deviation\".format(len(list_of_data_within_2_std_deviation)*100.0/len(dice_result)))\r\nprint(\"{}% of data lies within 3 standard deviation\".format(len(list_of_data_within_3_std_deviation)*100.0/len(dice_result)))","repo_name":"TanviLodhavia/Class_109","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"20686976480","text":"#!/usr/bin/python3\ndef add_tuple(tuple_a=(), tuple_b=()):\n lent_a = len(tuple_a)\n lent_b = len(tuple_b)\n if lent_a == 0:\n a1 = 0\n b1 = 0\n elif lent_a < 2 and lent_a != 0:\n a1 = tuple_a[0]\n b1 = 0\n else:\n a1 = tuple_a[0]\n b1 = tuple_a[1]\n if lent_b == 0:\n a2 = 0\n b2 = 0\n elif lent_b < 2 and lent_b != 0:\n a2 = tuple_b[0]\n b2 = 0\n else:\n a2 = tuple_b[0]\n b2 = tuple_b[1]\n new_tuple = (a1 + a2, b1 + b2)\n return new_tuple\n","repo_name":"XimeonLeo/alx-higher_level_programming","sub_path":"0x03-python-data_structures/7-add_tuple.py","file_name":"7-add_tuple.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3305823767","text":"import sys\nsys.path.append(\"..\")\nfrom client import SAVNConnectionAssistant\nimport json\nimport unittest\nimport asyncio\nfrom unittest.mock import Mock\n\nclass AsyncMock(Mock):\n def __call__(self, *args, **kwargs):\n parent = super(AsyncMock, self)\n async def coro():\n return parent.__call__(*args, **kwargs)\n return coro()\n\n def __await__(self):\n return self().__await__()\n\nclass TestFrameworkClientMethods(unittest.TestCase):\n def setUp(self):\n self.connection = SAVNConnectionAssistant(42)\n self.connection.alive = True\n self.connection.ws = Mock()\n self.loop = asyncio.get_event_loop()\n\n def test_updateState(self):\n state = {\"car\": 1}\n timestamp = 0\n packet = {'type': 'simulation-state-update',\n 'content':\n {'simulationID': self.connection.simulationID,\n 'timestamp': timestamp,\n 'objects': state,\n 'frameworkID': 0}}\n self.connection.updateState(timestamp, state, sleepTime=0)\n message = self.loop.run_until_complete(self.connection.fetchMessage())\n self.assertEqual(packet, message)\n\n def test_message_reception(self):\n self.loop.run_in_executor = Mock()\n msg = {'content': 'fish'}\n async def op():\n return json.dumps(msg)\n self.connection.ws.recv = op\n self.loop.run_until_complete(self.connection.handler())\n self.loop.run_in_executor.assert_called_with(None,\n self.connection.onMessage,{'content': 'fish'})\n\n def test_messageQueue_drainage(self):\n self.loop.run_in_executor = Mock()\n packet = {'content': 'fish'}\n async def op():\n await asyncio.sleep(100)\n self.connection.ws.recv = op\n self.connection.send_packet = AsyncMock()\n msg = json.dumps(packet)\n self.connection.messageQueue.put_nowait(msg)\n self.loop.run_until_complete(self.connection.handler())\n self.connection.send_packet.assert_called_with(msg)\n\n def test_simulationRun(self):\n self.connection.handleSimulationRun = Mock()\n packet = {'type': 'simulation-start-parameters', 'content': {'frameworkID': 0}}\n self.connection.onMessage(packet)\n self.connection.handleSimulationRun.assert_called_with(packet['content'])\n\n def test_simulationStop(self):\n self.connection.handleSimulationStop = Mock()\n packet = {'type': 'framework-disconnect', 'content': {}}\n self.connection.onMessage(packet)\n self.connection.handleSimulationStop.assert_called_with(packet['content'])\n\n def test_simulationDataUpdate(self):\n self.connection.handleSimulationDataUpdate = Mock()\n packet = {'type': 'simulation-update', 'content': {}}\n self.connection.onMessage(packet)\n self.connection.handleSimulationDataUpdate.assert_called_with(packet['content'])\n","repo_name":"franklinsch/driverlesscarsimulations","sub_path":"framework/test/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"21821346625","text":"from pickle import load\n\n# load doc into memory\ndef load_doc(filename):\n\t# open the file as read only\n\tfile = open(filename, 'r')\n\t# read all text\n\ttext = file.read()\n\t# close the file\n\tfile.close()\n\treturn text\n\n# load a pre-defined list of photo identifiers\ndef load_set(filename):\n\tdoc = load_doc(filename)\n\tdataset = list()\n\t# process line by line\n\tfor line in doc.split('\\n'):\n\t\t# skip empty lines\n\t\tif len(line) < 1:\n\t\t\tcontinue\n\t\t# get the image identifier\n\t\tidentifier = line.split('.')[0]\n\t\tdataset.append(identifier)\n\treturn set(dataset)\n\n# load clean descriptions into memory\ndef load_clean_descriptions(filename, dataset):\n\t# load document\n\tdoc = load_doc(filename)\n\tdescriptions = dict()\n\tfor line in doc.split('\\n'):\n\t\t# split line by white space\n\t\ttokens = line.split()\n\t\t# split id from description\n\t\timage_id, image_desc = tokens[0], tokens[1:]\n\t\t# skip images not in the set\n\t\tif image_id in dataset:\n\t\t\t# create list\n\t\t\tif image_id not in descriptions:\n\t\t\t\tdescriptions[image_id] = list()\n\t\t\t# wrap description in tokens\n\t\t\tdesc = 'startseq ' + ' '.join(image_desc) + ' endseq'\n\t\t\t# store\n\t\t\tdescriptions[image_id].append(desc)\n\treturn descriptions\n\n# load photo features\ndef load_photo_features(filename, dataset):\n\t# load all features\n\tall_features = load(open(filename, 'rb'))\n\t# filter features\n\tfeatures = {k: all_features[k] for k in dataset}\n\treturn features\n\n# load training dataset (6K)\nfilename = 'Flickr8k_text/Flickr_8k.trainImages.txt'\ntrain = load_set(filename)\nprint('Dataset: %d' % len(train))\n# descriptions\ntrain_descriptions = load_clean_descriptions('descriptions.txt', train)\nprint('Descriptions: train=%d' % len(train_descriptions))\n# photo features\ntrain_features = load_photo_features('features.pkl', train)\nprint('Photos: train=%d' % len(train_features))","repo_name":"enuguru/aiandml","sub_path":"nlp/code/chapter_26/3_load_prepared_data.py","file_name":"3_load_prepared_data.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"}
+{"seq_id":"29691122025","text":"from django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n url(r'^photos/$', views.photos, name = 'photos'),\n url(r'^videos/$', views.seasons, name = 'seasons'),\n url(r'^videos/(?P\\d+)/$', views.season_videos, name = 'season_videos'),\n url(r'^artists/$', views.artists, name='artists'),\n url(r'^about/$', views.about, name='about'),\n\n]","repo_name":"RoikYurii/brooklyn99","sub_path":"content/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"70598726335","text":"from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom mptt.admin import MPTTModelAdmin\nfrom .models import Taxon\nfrom .models import Upload_dwca\nfrom .models import DwcaTaxon\nfrom .models import DwcaDistribution\nfrom .models import DwcaResourceRelationship\nfrom .models import DwcaVernacular\nfrom .models import RawName\nfrom .models import RawNameAdmin\nfrom .models import NameFinderResult\nfrom .models import NameFinderJSON\n#from .models import NameFinderResultAdmin\n\nfrom django.contrib.admin import AdminSite\nfrom django.http import HttpResponse\nimport logging\n\n'''\nThe following code extends the admin change form for Publication\n(/publications/publication).\n\nIt adds a new action, \"find_names\".\n\nReference: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/actions/\n'''\nfrom publications.admin import PublicationAdmin\nfrom publications.models import Publication\nfrom publications.models import CustomFile\nimport requests\nimport json\nimport time\nfrom django.core.files.base import ContentFile\nimport json\nfrom taxonomy.functions import json_to_db\nfrom taxonomy.functions import find_names\nfrom taxonomy.functions import json_to_name_finder_results\n\n#Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\nclass CustomPublicationAdmin(PublicationAdmin):\n actions = ['add_extracted_taxon_names_file']\n\n def add_extracted_taxon_names_file(self, request, queryset):\n for pub in queryset:\n print('title: {} url: {}'.format(pub.title, pub.url))\n file_list = CustomFile.objects.filter(\n publication_id=pub.id).filter(\n description='extracted taxon names')\n if not file_list:\n taxa = find_names(pub.url)\n json_string = json.dumps(taxa)\n django_file = ContentFile(json_string)\n newfile = CustomFile()\n newfile.publication_id = pub.id\n newfile.description = 'extracted taxon names'\n newfile.file.save('extracted_taxon_names.json', django_file, save=True)\n print('new file attached.')\n print('adding data to NameFinderResults model ...')\n json_to_name_finder_results(pub, taxa)\n json_to_db(pub, taxa)\n print('FINIS')\n else:\n print('A file with description \"extracted_taxon_names\" already exists')\n\n add_extracted_taxon_names_file.short_description = \"Extract scientific names from selected publications\"\n\nadmin.site.unregister(Publication)\nadmin.site.register(Publication, CustomPublicationAdmin)\n'''\nEnd of code section.\n'''\n\n# Add mark_as_verified action to NameFinderResultAdmin change page\n\ndef mark_as_verified(self, request, queryset):\n queryset.update(verified=True)\nmark_as_verified.short_description = 'Mark selected results as verified'\n\n# http://www.gbif.org/species/1406619\n\n\nclass NameFinderResultAdmin(admin.ModelAdmin):\n list_filter = ('pub', 'verified',)\n list_display = ('verified', 'classification_path', 'GBIF')\n list_display_links = ('classification_path',)\n readonly_fields = (\n 'GBIF',\n 'pub',\n 'is_known_name',\n 'supplied_name_string',\n 'classification_path_ranks',\n 'classification_path',\n 'current_name_string',\n 'imported_at',\n 'canonical_form',\n 'data_source_id',\n 'match_value',\n 'data_source_title',\n 'gni_uuid',\n 'edit_distance',\n 'match_type',\n 'name_string',\n 'current_taxon_id',\n 'taxon_id',\n 'prescore',\n 'classification_path_ids',\n 'score',)\n actions = [mark_as_verified]\n\n def GBIF(self, obj):\n return '{}'.format(obj.taxon_id, obj.taxon_id)\n GBIF.allow_tags = True\n\n\n\nadmin.site.register(NameFinderResult, 
NameFinderResultAdmin)\n\n\n\nadmin.site.register(Taxon, MPTTModelAdmin)\nadmin.site.register(Upload_dwca)\nadmin.site.register(DwcaTaxon)\nadmin.site.register(DwcaDistribution)\nadmin.site.register(DwcaResourceRelationship)\nadmin.site.register(DwcaVernacular)\nadmin.site.register(RawName, RawNameAdmin)\nadmin.site.register(NameFinderJSON)\n\n\n\n\n# Ref for subclassing AdminSite:\n# http://stackoverflow.com/questions/35875454/django-admin-extending-admin-with-custom-views\nclass MyAdminSite(AdminSite):\n\n def custom_view(self, request):\n return HttpResponse(\"Test\")\n\n def get_urls(self):\n from django.conf.urls import url\n urls = super(MyAdminSite, self).get_urls()\n urls += [\n url(r'^custom_view/$', self.admin_view(self.custom_view))\n ]\n return urls\n\nadmin_site = MyAdminSite()\n\n\n# @admin.register(DwcaTaxon, site=admin_site)\n# class SomeModelAdmin(admin.ModelAdmin):\n# pass\n","repo_name":"aubreymoore/GuamInvasiveSpeciesList","sub_path":"taxonomy/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"70151990015","text":"from flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef primario():\n return \"\"\"Site teste de Gabriel Bugmann
\n 301 - Info\n Link\"\"\"\n\n@app.route(\"/lista_pessoas\")\ndef lista_pessoas():\n lista = [\"João da Silva\",\"Maria Oliveira\"]\n for i in lista:\n return f'{i}
'\n\napp.run(debug=True, host=\"0.0.0.0\")","repo_name":"Bugmenn/prog","sub_path":"server_web.py","file_name":"server_web.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"43699046855","text":"#!/usr/bin/python3\n\n# The purpose of this software is to create PDF-file from\n# Finnish national archive scanned document available as JPG-files\n# Software needs as an input required document or file with list of documents\n# and optional maximum size for single PDF-file\n\nfrom urllib.request import Request, urlopen\nfrom urllib.error import URLError\nimport re\nimport os\nimport sys\nimport argparse\nfrom PIL import Image as PILImage\nfrom PIL import ImageDraw, ImageFont\nimport numpy as np\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.platypus.flowables import Image as RepImage\nimport textwrap\n\n# function to get list of pages or exit if the required document doesn't exist\n# NEEDS TO BE FIXED TO SUPPORT MULTIPLE DOCUMENT DOWNLOAD\n\ndef getPageList(IndexText):\n PageList = re.findall('view.ka\\?kuid=(\\d*)',IndexText)\n if not len(PageList):\n print('Ei löytynyt sivuja, tarkista arkistoyksikkönumero')\n sys.exit(1)\n return PageList \n\n# function to create an error page if a page from archives fails to download\n# page has required text to inform reader\n\ndef makeErrorPage(text, pagenumber):\n errorpage=PILImage.new('RGB',(595,842),(255,255,255))\n drawing=ImageDraw.Draw(errorpage)\n drawing.text((10,10),text,(0,0,0))\n errorpage=errorpage.resize((5950,8420))\n errorpage.save('%s.jpg'%pagenumber)\n return \n\n# function to download pages as jpg-files from narc-service and call error page\n# creation function for failed pages\n# NEEDS OUTPUT FOR SUCCESS/FAILURE\n\ndef downloadPages(ListOfPages):\n for page in ListOfPages:\n try:\n image=urlopen('http://digi.narc.fi/digi/fetch_hqjpg.ka?kuid=%s' % page)\n \n except URLError as e:\n if hasattr(e, 'reason'):\n reason='Palvelimeen ei saatu yhteyttä.\\nIlmoitettu syy: '+e.reason\n elif hasattr(e, 'code'):\n reason='Palvelin ei voinut täyttää hakua.\\nVirhekoodi: '+e.code\n makeErrorPage(reason,page)\n else:\n image=urlopen('http://digi.narc.fi/digi/fetch_hqjpg.ka?kuid=%s' % page)\n typeinfo=image.info().get_content_type()\n if typeinfo=='image/jpeg':\n file=open('%s.jpg' % page,'wb')\n file.write(image.read())\n file.close()\n else:\n makeErrorPage(image.read(),page)\n\n return\n\n# function to create name for PDF-file from the title of the narc document\n\ndef createFilename(title,part):\n Filename=re.subn('(\\\\|\\/|:|\\*|\\\"|\\||;|,|/)',\"\",title)\n Filename=re.subn('(\\.|\\s)','_',Filename[0])\n fname=Filename[0]\n fname+='_osa_'+str(part)+'.pdf'\n return fname\n\n# function to calculate scaling to a4\n\ndef calcScale(imagesize,a4size):\n SizeOfX=imagesize[0]/a4size[0]\n SizeOfY=imagesize[1]/a4size[1]\n for X in np.arange(0,11,0.25):\n difference = abs(SizeOfX-X)\n if difference<0.25:\n break\n\n for Y in np.arange(0,11,0.25):\n difference = abs(SizeOfY-Y)\n if difference<0.25:\n break\n return (X,Y)\n\n# function to delete downloaded jpg-files\n\ndef cleanUp(ListOfPages):\n for page in ListOfPages:\n os.remove('%s.jpg'%page)\n return\n \n# function to create the pdf-file by\n# 1) getting list of document pages from narc\n# 2) find title from the narc\n# 3) download pages\n# 4) create pdf\n# 5) save downloaded jpg to pdf\n# 5b) close and create new pdf if size limit is exceeded\n# 6) clean downloaded jpg-files\n\ndef doPDFFile(IndexText,MaxSize):\n ListOfPages=getPageList(IndexText)\n TitleMatch = re.search(r\"dosearch\\.ka\\?sartun=\\d*\\.\\w*\\\">(.*?)<\\/b>\",IndexText)\n Title=TitleMatch.group(1)\n downloadPages(ListOfPages)\n Canvas = 
canvas.Canvas(createFilename(Title,1))\n Canvas.setTitle(Title)\n First = True\n counter=1\n for page in ListOfPages:\n filename='%s.jpg'%page\n if First:\n size=os.stat(filename).st_size\n else:\n size+=os.stat(filename).st_size\n\n if MaxSize and not First and size>(MaxSize*1024*1024):\n Canvas.save()\n counter+=1\n Canvas=canvas.Canvas(createFilename(Title,counter))\n size=os.stat(filename).st_size\n \n SavedImage = PILImage.open(filename)\n if First:\n SizeOfA4=SavedImage.size\n First=False\n\n #scale pages so that first image is A4-sized \n scale=calcScale(SavedImage.size,SizeOfA4)\n Canvas.setPageSize((A4[0]*scale[0],A4[1]*scale[1]))\n Canvas.drawImage(filename,0,0,A4[0]*scale[0],A4[1]*scale[1],preserveAspectRatio=True)\n Canvas.showPage()\n SavedImage.close()\n\n cleanUp(ListOfPages)\n\n Canvas.save()\n\n return 0\n\n# Check validity of input (either pure number or link to narc page\n\ndef checkInputString(inputstring):\n if re.fullmatch('\\d*',inputstring):\n output='http://digi.narc.fi/digi/slistaus.ka?ay='+inputstring\n elif re.fullmatch('http://digi\\.narc\\.fi/digi/slistaus\\.ka\\?ay=\\d*',inputstring):\n output=inputstring\n else:\n return\n return output\n\n# Get list of documents from input file\n# single number or fullurl = directly single url\n# rangeset = generate range with the numpy.arange from [start,end(not included),step]\n# rangeset2 = generate range with the numpy.arange from start-end(included), with 1 as a step\n\ndef getList(urlfile):\n lines = [line.strip() for line in open(urlfile)]\n urls = []\n for line in lines:\n singlenumber=re.fullmatch('\\d*',line)\n fullurl=re.fullmatch('http://digi\\.narc\\.fi/digi/slistaus\\.ka\\?ay=\\d*',line)\n rangeset=re.fullmatch('\\[(\\d*),(\\d*),(\\d)\\]',line)\n rangeset2=re.fullmatch('(\\d*)-(\\d*)',line)\n if singlenumber:\n urls.append(line)\n elif fullurl:\n urls.append(line)\n elif rangeset:\n for value in np.arange(int(rangeset.group(1)),int(rangeset.group(2)),int(rangeset.group(3))):\n urls.append(str(value))\n elif rangeset2:\n for value in np.arange(int(rangeset2.group(1)),int(rangeset2.group(2))+1,1):\n urls.append(str(value))\n \n return urls\n \n# Main function\n# if single document requested run it directly\n# if multiple create list from input file and run them consecutively\n# MAKE EXIT ONLY AFTER ALL FILES HAVE BEEN RUN\n\ndef main(url,size,file):\n ExitValue=0\n if not (file):\n ExitValue=run(url,size)\n \n else:\n ListOfUrls=getList(url)\n for url_value in ListOfUrls:\n ExitValue=run(url_value,size)\n if(ExitValue):\n sys.exit(ExitValue) \n\n sys.exit(ExitValue)\n\n# Run single document unit to download it by\n# 1) check validity of the input url\n# 2) request the document from narc\n# 3) send narc html-page to pdf-creating subprogram\n# return values different from 0 indicate error\n\ndef run(url,size): \n SourceUrl=url\n Url=checkInputString(SourceUrl)\n MaxSize=size #maximum size for pdf (may be exceeded a bit because of pdf format)\n if(Url):\n Req=Request(Url)\n try:\n Response=urlopen(Req)\n except URLError as e:\n if hasattr(e, 'reason'):\n print('Palvelimeen ei saatu yhteyttä.')\n print('Ilmoitettu syy: ',e.reason)\n sys.exit(1)\n elif hasattr(e, 'code'):\n print('Palvelin ei voinut täyttää hakua.')\n print('Virhekoodi: ',e.code)\n sys.exit(1)\n else:\n IndexText=Response.read().decode('latin-1')\n doPDFFile(IndexText,MaxSize)\n return 0\n \n else:\n print(\"Väärä osoite, osoitteen tulee olla joko http://digi.narc.fi/digi/slistaus.ka?ay=X -muotoa tai pelkkä X,\"+\n \" joka on halutun arkistoyksikön 
numero digi narcissa.\")\n return 2\n\n sys.stderr.write(\"Tuntematon virhe \\n\")\n return 1\n\n\n# argument parser\n# CREATE BETTER HELP AND FILE PARSING INPUT\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='Lataa digi.narc.fi palvelusta arkistoyksiköitä pdf-muodossa.',\n epilog=textwrap.dedent('''\\\n numerolistan muoto\n rivillä joko\n yksittäinen arkistointiyksikkö numero tai url\n tai listan generointi seuraavilla tavoin\n [aloitusnumero,lopetusnumero,askel] \n tämä generoi listan numeroita aloituksesta lopetukseen (ei mukana)\n \t\t\t aloitusnumero-lopetusnumero \n tämä generoi listan numeroita aloituksesta lopetukseen (mukana) 1 välein\n '''))\n parser.add_argument('url', metavar='URL', help='arkistointiyksikön numero tai url muodossa http://digi.narc.fi/digi/slistaus.ka?ay=numero')\n parser.add_argument('-m','--maxsize',default=0,type=int, help='Maksimikoko pdf-tieodostolle, oletus 0 = ei rajoitusta')\n parser.add_argument('-f','--file',action='store_true', help='Lataa useampi yksikkö kerralla, numerolista tiedostossa ja tiedoston nimi URL:n sijaan')\n args=parser.parse_args()\n main(args.url,args.maxsize,args.file)\n\n \n","repo_name":"teakfi/kansallisarkisto_downloader","sub_path":"narchaku.py","file_name":"narchaku.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"37566672515","text":"# Poly - Many\n# morphism - Form\n# ============================\n# Duck Typing\n# Operator overloading\n# Method overloading\n# Method Overriding\n# ============================\n\"\"\"\nDuck Typing\n\"\"\"\n# x = 5\n# print(type(x), id(x))\n# x = 'Gopi'\n# print(type(x), id(x))\nclass PyCharm:\n def execute(self):\n print('Compiling', \"\\nRunning\")\n\n\nclass MyEditor:\n def execute(self):\n print('Spell check')\n print('Convention check')\n print('Compiling', \"\\nRunning\")\n\n\nclass Laptop:\n def code(self, ide):\n ide.execute()\n\n\nide = PyCharm()\nide1 = MyEditor()\n\nlap1 = Laptop()\nlap1.code(ide)\nlap1.code(ide1)\n","repo_name":"Gopi25071993/TeluskoAllFiles","sub_path":"Polymorphism.py","file_name":"Polymorphism.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"12469481095","text":"import os\nimport sys\nimport pandas\nimport random\nimport pytz\nimport pandas as pd\nimport uuid\nimport django\nimport uuid\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.config.local')\ndjango.setup()\n\nfrom swift_parsing_app.models import SourceFile, MessageType, SwiftMessage, SwiftField, SwiftFieldValueDetail, \\\n SwiftFieldValue\n\n# Columns: MT;Status;Tag;Field_Name;Content_Options;KeyMTTag\nswift_fields_df = pandas.read_csv('../../mock/SM.csv', sep=';')\nswift_messages_df = pandas.read_pickle('../../mock/output_dataframe.pkl')\n\n\ndef populate_swift_msg_types():\n list_of_msg_types = swift_fields_df['MT'].unique()\n list_of_msg_types = sorted(list_of_msg_types)\n\n for msg_type in list_of_msg_types:\n new_object = MessageType.objects.get_or_create(type_name=msg_type)\n print('Message Types were created')\n\n\ndef populate_swift_fields():\n for row_index, row in swift_fields_df.iterrows():\n mandatory = 1 if row['Status'] == 'M' else 2\n msg_type = MessageType.objects.filter(type_name=row['MT']).first()\n swift_field = SwiftField.objects.get_or_create(key_mt_tag=row['KeyMTTag'], field_name=row['Tag'],\n field_tag=row['Field_Name'], status=mandatory,\n content_options=row['Content_Options'],\n message_type=msg_type)\n\n print('Swift Fields were created')\n\n\ndef populate_source_file():\n source_file = SourceFile.objects.get_or_create(file_name='test_file_001.csv', status=2)\n print('Source Files were created')\n pass\n\n\ndef populate_swift_message():\n list_of_msgs = swift_messages_df['transaction_id'].unique()\n list_of_msgs = sorted(list_of_msgs)\n\n source_file = SourceFile.objects.first()\n\n for transaction_id in list_of_msgs:\n direction = swift_messages_df[\n (swift_messages_df['transaction_id'] == transaction_id) & (swift_messages_df['field_name'] == 'Direction')][\n 'field_value'].item()\n direction_value = 1 if direction == 'I' else 2\n\n application_header = swift_messages_df[\n (swift_messages_df['transaction_id'] == transaction_id) & (swift_messages_df['field_name'] == '2')][\n 'field_value'].item()\n\n msg_type = swift_messages_df[\n (swift_messages_df['transaction_id'] == transaction_id) & (swift_messages_df['field_name'] == 'MT')][\n 'field_value'].item()\n msg_type_object = MessageType.objects.filter(type_name=msg_type).first()\n # transaction_id = transaction_id.replace('-','')\n new_object = SwiftMessage.objects.get_or_create(transaction_id=transaction_id, source_file=source_file,\n direction=direction_value, message_type=msg_type_object,\n application_header=application_header)\n\n print('Swift Messages were created')\n pass\n\n\ndef populate_swift_field_values():\n list_of_msgs = swift_messages_df['transaction_id'].unique()\n list_of_msgs = sorted(list_of_msgs)\n\n for transaction_id in list_of_msgs:\n transaction_object = SwiftMessage.objects.get(transaction_id=transaction_id)\n\n list_of_fields = swift_messages_df[swift_messages_df['transaction_id'] == transaction_id]\n\n for index, row in list_of_fields.iterrows():\n\n swift_fields_not_in_dictionary = ['Direction', 'MT', 'Rest of 2', '2', '3']\n swift_field_name = row['field_name']\n if swift_field_name not in swift_fields_not_in_dictionary:\n related_swift_field = SwiftField.objects.get(field_name=swift_field_name,\n message_type=transaction_object.message_type)\n swift_field_value = row['field_value']\n new_object = 
SwiftFieldValue.objects.get_or_create(swift_message=transaction_object,\n swift_field=related_swift_field,\n field_value=swift_field_value)\n\n print('Swift Fields Values were created')\n pass\n\n\ndef format_db():\n # SourceFile, MessageType, SwiftMessage, SwiftField, SwiftFieldValueDetail\n SwiftFieldValueDetail.objects.all().delete()\n SwiftFieldValue.objects.all().delete()\n SwiftMessage.objects.all().delete()\n SourceFile.objects.all().delete()\n SwiftField.objects.all().delete()\n\n\nif __name__ == '__main__':\n print(\"Formating the Database\")\n format_db()\n\n populate_swift_msg_types()\n populate_swift_fields()\n populate_source_file()\n populate_swift_message()\n populate_swift_field_values()\n\n print('Populating Complete')\n","repo_name":"NightingaleV/sweeper-swift-parsing-web-app","sub_path":"swift_parsing_app/models/populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14478367642","text":"import RPi.GPIO as GPIO\nimport usb.core\nimport usb.util\nimport os \nimport sys\nfrom time import gmtime, strftime\nimport time\nimport copy\nimport serial\n#control_motorディレクトリへのパを追加\nsys.path.append(os.path.join(os.path.dirname(__file__), '../control_motor'))\nimport blv_lib\nimport az_lib_direct\n\n#GPIO_init###########################################\npin_list = [12,16,18] #move,rclu,arm\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(pin_list[0],GPIO.OUT)\nGPIO.setup(pin_list[1],GPIO.OUT)\nGPIO.setup(pin_list[2],GPIO.OUT)\n#####################################################\n\n#定数################################################\nDED_ZONE = 150\nZ_DED_ZONE = 250\nDIFF_SIZE = 1\nZ_DIFF_SIZE = 10\n#####################################################\n\n\n#状態変数############################################\nMode = 0 #0:クローラ, 1:リモートセンタ機構&リフトアップ, 2:ロボットアーム\nRC_mode = 1 #0:階段降り, 1:真ん中, 2:椅子座り, 3:階段上り\nLU_mode = 1 #0:収納, 1:テンション維持モード 2:リフトアップ\n#####################################################\n\n#LED#################################################\ndef LED_setting(pin_data_list):\n global pin_list\n for i in range(len(pin_list)):\n GPIO.output(pin_list[i],pin_data_list[i])\n#####################################################\n\n#サーフティーの状態\nSafety = 0\n\nwhile True:\n #セーフティの読み込み\n Safety = 1 #ここで確定1だが実際はボタンの値を読み込む\n if Safety == 0:\n continue\n\n #コントローラ変数(セーフティ解除時に初期化される)#######\n Z_push = 0 #Z軸方向の変位\n old_Z_push = 0 #前回のZ軸方向の変位\n R_list = [0,0,0] #軸に対する回転の変位\n old_R_list = 0 #前回の軸に対する回転の変位\n Button_number = 0 #左右のボタンの値\n ########################################################\n\n #RC変数#################################################\n RC_flag = 1 #クリックの判定(1の時は次への移動をしない)\n ########################################################\n #LED_setting############################################\n LED_setting([1,0,0]) \n ########################################################\n\n dev = usb.core.find(idVendor=0x46d, idProduct=0xc626)\n if dev is None:\n raise ValueError('SpaceNavigator not found');\n else:\n print(dev)\n cfg = dev.get_active_configuration()\n print('cfg is ', cfg)\n intf = cfg[(0,0)]\n print('intf is ', intf)\n ep = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN)\n print('ep is ', ep)\n reattach = False\n if dev.is_kernel_driver_active(0):\n reattach = True\n dev.detach_kernel_driver(0)\n\n ep_in = dev[0][(0,0)][0]\n ep_out = dev[0][(0,0)][1]\n print('')\n print('Exit by pressing any button on the SpaceNavigator')\n print('')\n\n\n #自分の端末ごとに適切に設定する\n client = serial.Serial(\"/dev/ttyXRUSB0\",115200,timeout=0.1,parity=serial.PARITY_EVEN,stopbits=serial.STOPBITS_ONE)\n #モータのインスタンス化##############################\n motor1 = blv_lib.blv_motor(client,1) #右クローラ\n motor2 = blv_lib.blv_motor(client,2) #左のクローラ\n motor3 = az_lib_direct.az_motor_direct(client,3) #リフトアップ右\n motor4 = az_lib_direct.az_motor_direct(client,4) #リフトアップ左\n motor5 = az_lib_direct.az_motor_direct(client,5,[0,58436,90000,116750]) #リモートセンタ\n #####################################################\n\n #初期移動ステッピングモータ関連######################\n #リモートセンターの移動\n motor5.go_list(RC_mode)\n #リフトアップの移動\n if LU_mode == 0:\n motor3.go(0)\n motor4.go(0)\n elif LU_mode == 1:\n motor3.go_torque(300)#15%\n motor4.go_torque(300)#15%\n elif LU_mode == 2:\n motor3.go(13200)#位置移動\n motor4.go(13200)#位置移動\n #####################################################\n\n #初期設定ブラシレスモータ関連########################\n motor1.set_acc_dec_time(2)\n 
motor2.set_acc_dec_time(2)\n #####################################################\n\n\n\n while True:\n try:\n data = dev.read(ep_in.bEndpointAddress, ep_in.bLength, 0)\n\n #Z軸のプッシュ判定#############################################################\n if data[0] == 1:\n old_Z_push = copy.deepcopy(Z_push)\n Z_push = data[5] + (data[6]*256)\n\n if data[6] > 127:\n Z_push -= 65536\n\n #デッドゾーンの処理\n if Z_push <= Z_DED_ZONE and Z_push >= -Z_DED_ZONE:\n Z_push = 0\n\n #感度の処理\n diff = abs(Z_push - old_Z_push)\n if diff > Z_DIFF_SIZE and sum(R_list) == 0:\n print(\"Push: \",Z_push)\n\n #Mode:0 クローラモード\n if Mode == 0:\n pass\n #Mode:1 リモート&リフトアップ \n elif Mode == 1:\n if Z_push > 300:\n LU_mode = 2\n motor5.go_list(3)\n time.sleep(5)\n motor3.go(point=13200,speed=200,rate=1)\n motor4.go(point=13200,speed=200,rate=1)\n motor5.go_list(RC_mode)\n \n elif Z_push < -250:\n LU_mode = 0\n motor3.go(point=0)\n motor4.go(point=0)\n\n #Mode2 : アームモード\n elif Mode == 2:\n pass\n ##############################################################################\n\n #Rの移動判定##################################################################\n if data[0] == 2:\n old_R_list = copy.deepcopy(R_list)\n R_list[0] = data[1] + (data[2]*256)\n R_list[1] = data[3] + (data[4]*256)\n R_list[2] = data[5] + (data[6]*256)\n\n if data[2] > 127:\n R_list[0] -= 65536\n if data[4] > 127:\n R_list[1] -= 65536\n if data[6] > 127:\n R_list[2] -= 65536\n\n #デッドゾーンの処理\n for i in range(3):\n if R_list[i] <= DED_ZONE and R_list[i] >= -DED_ZONE :\n R_list[i] = 0\n\n #感度の処理\n diff = abs(sum(R_list) - sum(old_R_list))\n if diff > DIFF_SIZE and abs(Z_push) < Z_DED_ZONE:\n print(\"R: \", R_list[0], R_list[1], R_list[2])\n\n #Mode:0 クローラモード\n if Mode == 0:\n if R_list[0] == 0 and R_list[1] == 0 and R_list[2]==0: #停止\n #motor1.set_speed(0)\n #motor2.set_speed(0)\n motor1.go(1,1)\n motor2.go(1,1)\n elif R_list[0] > 0: #前進移動\n if R_list[1] >= 0:#左をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)) + int(R_list[2]*0.04))\n elif R_list[0] < 0:#右をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)) + int(R_list[2]*0.04))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)))\n #motor1.go(1,0)\n #motor2.go(0,1)\n motor1.go(0,1)\n motor2.go(1,0)\n elif R_list[0] < 0: #後進移動\n if R_list[1] >= 0:#左をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)) + int(abs(R_list[2]*0.04)))\n elif R_list[1] < 0:#右をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)) + int(abs(R_list[2]*0.04)))\n #motor1.go(0,1)\n #motor2.go(1,0)\n motor1.go(1,0)\n motor2.go(0,1)\n elif R_list[2] > 0: #右は前,左は後ろ\n motor1.set_speed(int(abs(80*R_list[2]*0.01)))\n motor2.set_speed(int(abs(80*R_list[2]*0.01)))\n motor1.go(1,0)\n motor2.go(1,0)\n \n elif R_list[2] < 0: #左は前,右は後ろ\n motor1.set_speed(int(abs(80*R_list[2]*0.01)))\n motor2.set_speed(int(abs(80*R_list[2]*0.01)))\n motor1.go(0,1)\n motor2.go(0,1)\n\n #Mode:1 リモート&リフトアップ\n elif Mode == 1:\n #リモートセンターの判定##########################################\n if R_list[0] == 0 and RC_flag==1:\n RC_flag = 0\n elif R_list[0] > 300 and RC_flag==0:#前への移動\n if RC_mode == 3:\n pass\n else:#移動処理\n RC_mode+=1\n motor5.go_list(RC_mode)\n RC_flag = 1\n elif R_list[0] < -170 and RC_flag==0:#後ろへの移動\n if RC_mode == 0:\n pass\n else:#移動処理\n RC_mode -=1\n motor5.go_list(RC_mode)\n RC_flag = 1\n ##################################################################\n\n #リフトアップの判定###############################################\n if abs(R_list[2]) > 
340:\n LU_mode = 1\n #motor3.go_torque(150)\n #motor4.go_torque(150)\n motor3.set_position_deviation(30000)\n motor4.set_position_deviation(30000)\n motor3.go_torque_pos(point=9000,op_current=150)\n motor4.go_torque_pos(point=9000,op_current=150)\n ##################################################################\n\n #Mode:2 アームモード\n elif Mode == 1:\n pass\n ##############################################################################\n\n #ボタンの判定(左が2,右が1)####################################################\n if data[0] == 3:\n if data[1]== 0:\n print(\"push button : \", Button_number)\n if Button_number == 1:\n if Mode == 2:\n Mode = 0\n else:\n Mode += 1\n if Mode == 1:\n RC_flag = 0\n elif Button_number == 2:\n if Mode == 0:\n Mode = 2\n else:\n Mode -= 1\n if Mode == 1:\n RC_flag = 0\n print(\"Now Mode:\",Mode)\n if Mode == 0:\n LED_setting([1,0,0])\n elif Mode == 1:\n LED_setting([0,1,0])\n elif Mode == 2:\n LED_setting([0,0,1])\n\n Button_number = 0\n\n else:\n Button_number = data[1]\n ##############################################################################\n\n except KeyboardInterrupt:\n print(\"end\")\n Safety = 0\n break\n\n except usb.core.USBError:\n print(\"USB error\")\n Safety = 0\n break\n except:\n print(\"Error\")\n Safety = 0\n break\n\n\n # end while\n usb.util.dispose_resources(dev)\n\n if reattach:\n dev.attach_kernel_driver(0)\n","repo_name":"KobayashiRui/CYBATHLON","sub_path":"complete_version/Controler_bac_no_arm.py","file_name":"Controler_bac_no_arm.py","file_ext":"py","file_size_in_byte":13104,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"6534291830","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 9 18:17:04 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nimport boto3\r\nimport pprint\r\nimport pandas as pd\r\nimport time\r\n\r\n# with open('./config/config.json', 'r') as file:\r\n# config = json.loads(file.read())\r\n\r\ndynamodb = boto3.resource(\r\n 'dynamodb',\r\n region_name='ap-northeast-2',\r\n # aws_access_key_id=config['ID'],\r\n # aws_secret_access_key=config['KEY']\r\n)\r\n\r\n\r\n# 1. Table 제거\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n response = table.delete()\r\n printer = pprint.PrettyPrinter(indent=2)\r\n printer.pprint(response)\r\n\r\n\r\n# 2. DynamoDB 내 Table 생성하기\r\n# 키 정리할 때, 기준 키만 설정하면 된다. 핵 좋아!\r\nif __name__ == '__main__':\r\n table = dynamodb.create_table(\r\n TableName='relatedTags',\r\n KeySchema=[\r\n {\r\n 'AttributeName': 'idx',\r\n 'KeyType': 'HASH'\r\n }\r\n ],\r\n AttributeDefinitions=[\r\n {\r\n 'AttributeName': 'idx',\r\n 'AttributeType': 'N'\r\n }\r\n ],\r\n ProvisionedThroughput={\r\n 'ReadCapacityUnits': 50,\r\n 'WriteCapacityUnits': 50\r\n }\r\n )\r\n\r\n# 3. DynamoDB 내 생성된 특정 Table 정보 확인 및 아이템 가져오기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n print(1, table.creation_date_time)\r\n\r\n response = table.get_item(\r\n Key={\r\n 'idx': 1\r\n }\r\n )\r\n item = response['Item']\r\n print(2, item)\r\n\r\n# 4. 아이템 업데이트 하기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n table.update_item(\r\n Key={\r\n 'idx': 2\r\n },\r\n UpdateExpression='SET createdTime = :val1',\r\n ExpressionAttributeValues={\r\n ':val1': \"2018-08-08T05:07:13.515Z\"\r\n }\r\n )\r\n\r\n# 5. 아이템 삭제��기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n table.delete_item(\r\n Key={\r\n 'idx': 1\r\n }\r\n )\r\n\r\n# 6. 항목 생성하기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n table.put_item(\r\n Item={\r\n 'idx': 1,\r\n 'cretedTime': \"2018-08-09T05:07:13.515Z\",\r\n 'relatedTag': {\r\n \"1\": [\r\n 177,\r\n 60,\r\n 1231,\r\n 1298423,\r\n 8831092,\r\n 20931\r\n ],\r\n \"2\": [\r\n 54,\r\n 782,\r\n 229,\r\n 7821,\r\n 49632,\r\n 85214\r\n ],\r\n \"3\": [\r\n 285,\r\n 2,\r\n 987,\r\n 128,\r\n 6356,\r\n 6684\r\n ]\r\n },\r\n }\r\n )\r\n \r\n# 7. Json 파일을 이용해 항목 데이터로 생성하기\r\nif __name__ == '__main__':\r\n def load_json_to_dict(load_path):\r\n import json\r\n with open(load_path, 'r', encoding=\"utf-8\") as data_file:\r\n data = data_file.read()\r\n data_file.close()\r\n d = json.loads(data)\r\n return d\r\n \r\n table = dynamodb.Table('relatedTags')\r\n load_path = \"D:\\\\vora_recommendation\\\\data_add_time_dynamo1.json\"\r\n data = load_json_to_dict(load_path)\r\n table = dynamodb.Table('relatedTags')\r\n \r\n dataIdx = data['idx']\r\n dataCreatedTime = data['createdTime']\r\n dataRelatedTags = data['relatedTags']\r\n \r\n response = table.put_item(\r\n Item={\r\n 'idx': dataIdx,\r\n 'createdTime': dataCreatedTime,\r\n 'relatedTags': dataRelatedTags\r\n })\r\n printer = pprint.PrettyPrinter(indent=2)\r\n printer.pprint(response)\r\n time.sleep(.500)\r\n","repo_name":"boohk/Python","sub_path":"AWS/DynamoDBConnector.py","file_name":"DynamoDBConnector.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"40246396807","text":"from flask_mqtt import Mqtt\nfrom sqlalchemy import exc\nfrom FASToryLine.configurations import BASE_TOPIC\nfrom FASToryLine.dbModels import AuthResult,Emotion\nfrom FASToryLine import app,db\nimport json,datetime\nfrom pprint import pprint as P\nmqtt = Mqtt(app)\n#####MQTT callbacks################\n\n@mqtt.on_connect()\ndef handle_connect(client, userdata, flags, rc):\n if rc==0:\n mqtt.unsubscribe_all()\n mqtt.subscribe(f'{BASE_TOPIC}authentication')\n print(f'[X-Routes] Subscribed to topic: {BASE_TOPIC}authentication')\n mqtt.subscribe(f'{BASE_TOPIC}emotion')\n print(f'[X-Routes] Subscribed to topic: {BASE_TOPIC}emotion') \n else:\n print(\"[X-Routes] Bad connection Returned code=\",rc)\n\n@mqtt.on_subscribe()\ndef handle_subscribe(client, userdata, mid, granted_qos):\n print('[X-Routes] Subscription id {} granted with qos {}.'\n .format(mid, granted_qos)) \n\n@mqtt.on_disconnect()\ndef handle_disconnect():\n mqtt.unsubscribe_all()\n print(\"[X-Routes] CLIENT DISCONNECTED\")\n\n@mqtt.on_message()\ndef handle_mqtt_message(client, userdata, message):\n try:\n message_in=json.loads(message.payload)\n #print(f\"[X-Routes] {type(message_in)},'??',{message_in}\")\n if message.retain ==1:\n print(f'[X] Retained message from zRefApp......')\n return \n \n if message.topic == f'{BASE_TOPIC}authentication':\n \n authResults = message_in\n result = AuthResult( \n Authenticated = authResults.get(\"authenticated\"), \n Description = authResults.get(\"description\"),\n DetectedFaces = authResults.get(\"detectedFaces\"), \n DistanceScore = authResults.get(\"distanceScore\")\n )\n db.session.add(result)\n db.session.commit()\n P(message_in)\n print(f'[X]: Auth result added to DB @ {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n elif message.topic == f'{BASE_TOPIC}emotion':\n #{\"detail\":\"Not a valid file was uploaded\"}\n #print(message.topic)\n if message_in.get(\"Response\"):\n result = Emotion( \n StressLevel = message_in.get(\"Response\").get('stress_level')\n )\n db.session.add(result)\n db.session.commit()\n P(message_in)\n print(f'[X]: Emotion response result added to DB @ {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n else:\n result = Emotion( \n Description = message_in.get(\"detail\")\n )\n db.session.add(result)\n db.session.commit()\n P(message_in)\n print(f'[X]: Emotion Not valid profile result added to DB @ {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n except exc.SQLAlchemyError as e:\n print(f'[XE] {e}')\n except ValueError:\n print('[X-Routes] Decoding JSON has failed')\n\n# @app.route('/welcomes', methods = ['GET'])\n# def welcomes():\n# return ''\n","repo_name":"mahboobelahi/ZDMPStuff","sub_path":"Quadible-CALM_Old/FASToryLine/messageBus.py","file_name":"messageBus.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"22583805606","text":"import time\nimport unittest\nfrom selenium import webdriver\nfrom Pages.MainPage import MainPageClass\nfrom Pages.MyAccountHomePage import MyAccountHomePageClass\nfrom Pages.AmazonCardSection import AmazonCardSectionClass\nfrom Pages.AmazonItemSearchField import AmazonItemSearchFieldClass\nfrom Pages.SearchResultPage import SearchResultPageClass\nfrom Pages.FoundItemPage import FoundItemPageClass\n\n\n\n\nclass AmazonSimpleTestClass(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.delete_all_cookies()\n self.driver.maximize_window()\n self.mainPage = MainPageClass(self.driver)\n self.MyAccountHomePage = MyAccountHomePageClass(self.driver)\n self.AmazonCardSection = AmazonCardSectionClass(self.driver)\n self.AmazonItemSearchField = AmazonItemSearchFieldClass(self.driver)\n self.SearchResultPage = SearchResultPageClass(self.driver)\n self.FoundItemPage = FoundItemPageClass(self.driver)\n\n\n\n def test_simpleTC(self):\n self.driver.get(\"https://www.amazon.com/\")\n self.mainPage.press_amazon_SignIn_account_Button()\n self.mainPage.fill_signin_field(\"kimkrugeractress@gmail.com\")\n\n time.sleep(4)\n self.mainPage.press_amazon_continue_Button()\n\n time.sleep(3)\n self.mainPage.fill_password_field(\"kim2002++\")\n\n time.sleep(5)\n self.mainPage.press_amazon_checkbox_field()\n\n time.sleep(5)\n self.mainPage.press_amazon_SignIn_Button()\n\n time.sleep(5)\n self.MyAccountHomePage.press_amazon_bucket_Button()\n\n time.sleep(3)\n self.AmazonCardSection.delete_one_product()\n\n time.sleep(3)\n self.AmazonItemSearchField.fill_item_search_field(\"jbl bluetooth headphones\")\n\n time.sleep(3)\n self.AmazonItemSearchField.press_item_search_button()\n\n time.sleep(2)\n self.SearchResultPage.scroll(\"window.scrollto(0, 0)\")\n\n time.sleep(3)\n self.SearchResultPage.finde_certain_item_button()\n\n time.sleep(3)\n self.FoundItemPage.change_location_button()\n\n time.sleep(3)\n self.FoundItemPage.fill_zip_code_filde(\"19701\")\n\n time.sleep(3)\n self.FoundItemPage.press_zip_code_apply_button()\n\n time.sleep(3)\n self.FoundItemPage.press_add_to_card_button()\n\n\n\n\n\n def tearDown(self):\n time.sleep(4)\n self.driver.close()","repo_name":"petrosyankn/pythonProjectSelenium","sub_path":"TestCases/AmazonTest.py","file_name":"AmazonTest.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"42378162633","text":"import sys\nimport re\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport os\nimport pdb\nimport numpy as np\nfrom sklearn import metrics\nimport json\n\ndef readGOPToDF(df, gop_file, method):\n temp_list = []\n with open(gop_file, 'r') as in_file:\n for line in in_file:\n line = line.strip()\n fields = line.split(' ')\n if len(fields) != 5:\n sys.exit(\"wrong line in the input GOP files\")\n temp_list.append([fields[1], round(float(fields[2]),3), fields[3], method])\n return df.append(pd.DataFrame(temp_list, columns=('phoneme','score','label', 'method')))\n \ndef plot(df, json_dict, outFile):\n methods = df['method'].unique()\n all_phonemes = df['phoneme'].unique()\n fig, axs = plt.subplots(len(all_phonemes), len(methods), figsize=(20, 4*len(all_phonemes)))\n df[\"label\"] = np.where(df['label']=='C', 0, 1 )\n for row,phoneme in enumerate(all_phonemes):\n for col,mtd in enumerate(methods):\n data_true = df.loc[(df[\"phoneme\"] == phoneme) & (df[\"method\"] == mtd) & (df[\"label\"] == 1), ['score', 'label']].to_numpy()\n data_false = df.loc[(df[\"phoneme\"] == phoneme) & (df[\"method\"] == mtd) & (df[\"label\"] == 0), ['score','label']].to_numpy()\n ax = axs[row][col]\n plot_labels = []\n add_label(ax.violinplot(data_true[:,0],vert=False, quantiles=[0.25,0.5,0.75], points=500, positions=[0]), \"Sub or Del({})\".format(data_true[:,0].shape[0]), plot_labels)\n add_label(ax.violinplot(data_false[:,0],vert=False, quantiles=[0.25,0.5,0.75], points=100, positions=[1]), \"Correct({})\".format(data_false[:,0].shape[0]), plot_labels)\n ax.set_xlim([-70, 5])\n ax.set_xlim([-70, 5])\n ax.set_title(mtd + ', Gop for phoneme: ' + phoneme)\n ax.get_yaxis().set_visible(False)\n auc_value = auc_cal(np.concatenate((data_true, data_false), axis=0))\n auc_artist, = plt.plot([], [])\n auc_label = (auc_artist, \"AUC = {}\".format(auc_value))\n if phoneme in json_dict[mtd][\"phonemes\"].keys(): \n #p:(closest_phoneme, mean_diff, auc_value, entropy, count_of_real, count_of_error)\n entropy = json_dict[mtd][\"phonemes\"][phoneme][3]\n auc_teacher = json_dict[mtd][\"phonemes\"][phoneme][2]\n L = round(entropy*auc_teacher, 3)\n json_artist, = plt.plot([], [])\n json_label = (json_artist, \"E={}, A={}, L={}\".format(entropy, auc_teacher, L))\n ax.legend(*zip(*(plot_labels+[auc_label, json_label])), loc=2)\n else:\n ax.legend(*zip(*(plot_labels+[auc_label])), loc=2)\n os.makedirs(os.path.dirname(outFile), exist_ok=True)\n plt.savefig(outFile)\n\ndef auc_cal(array): #input is a nX2 array, with the columns \"score\", \"label\"\n labels = [ 0 if i == 0 else 1 for i in array[:, 1]]\n if len(set(labels)) <= 1:\n return \"NoDef\"\n else:\n #negative because GOP is negatively correlated to the probablity of making an error\n return round(metrics.roc_auc_score(labels, -array[:, 0]),3)\n \n\ndef add_label(violin, method, labels):\n color = violin[\"bodies\"][0].get_facecolor().flatten()\n labels.append((mpatches.Patch(color=color), method))\n\ndef read_json(path):\n with open(path,\"r\") as injson:\n return json.load(injson)\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1 :\n sys.exit(\"this script takes ... as arguments. 
It plots the GOP distributions for each phoneme\")\n\n df = pd.DataFrame(columns=('phoneme','score','label', 'method'))\n #methods = ['GMM-mono', 'GMM-mono-frame', 'DNN-mono', 'DNN-tri']\n methods = ['GMM-mono', 'DNN-tri']\n json_dict = { mtd:None for mtd in methods}\n assert(len(methods) == (len(sys.argv) - 2)/2)\n for i,mtd in enumerate(methods):\n df = readGOPToDF(df, sys.argv[2*i+1], mtd)\n json_dict[mtd] = read_json(sys.argv[2*i+2])\n print(\"read one GOP\")\n\n plot(df, json_dict, sys.argv[-1])\n","repo_name":"frank613/tools-ntnu","sub_path":"cmu_miss_pron/exp-new/plot_gop_entropy.py","file_name":"plot_gop_entropy.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71494160254","text":"import re\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\ndelta = 1e-12\nIs = 1\n\n\ndef f(x):\n y = 2 / 3 * x - 5 / 3 + math.exp(40 * x)\n return y\n\n\ndef df(x):\n d = (f(x) - f(x - delta)) / delta\n return d\n\n\ndef line2zero(x0):\n y = f(x0)\n k = df(x0)\n x1 = x0 - y / k\n return x1\n\n\ndef cmpr(x):\n if f(x) <= delta:\n flag = True\n else:\n flag = False\n return flag\n\n\nx = 10\n\nwhile True:\n if cmpr(x):\n print(x)\n break\n else:\n x = line2zero(x)\n\n\ndef i_diode(vd):\n i = Is * (math.exp(40 * vd) - 1)\n return i\n\n\ndef v_diode(id):\n v = math.log((id / Is + 1), math.e) / 40\n return v\n\n\ndef plot_i_v(start, stop, point_num):\n x = np.linspace(start, stop, point_num, endpoint=True)\n i = []\n for index in range(len(x)):\n i += [i_diode(x[index])]\n\n ymax = max(i, key=lambda v : v)\n ymin = min(i, key=lambda v : v)\n m = ymax * 1.2\n n = ymin * 1.2\n\n plt.plot(x, i, color=\"blue\", linewidth=1.0, linestyle=\"-\")\n plt.xlim(start, stop)\n plt.xticks(np.linspace(start, stop, 9, endpoint=True))\n plt.ylim(n, m)\n plt.yticks(np.linspace(n, m, 5, endpoint=True))\n plt.xlabel('voltage $V_D$/V')\n plt.ylabel('current $i_D$/A')\n plt.title('I-V for diode\\n', fontsize=12)\n\n ax = plt.gca()\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.spines['bottom'].set_position(('data', 0))\n ax.yaxis.set_ticks_position('left')\n ax.spines['left'].set_position(('data', 0))\n\n plt.savefig(\"I-V_D.png\", dpi=288)\n plt.show()\n\n return\n\n\nplot_i_v(-0.1, 0.1, 1000)\n","repo_name":"yangbyangb/EDA_python","sub_path":"pyEDA/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14712838799","text":"from string import Template\nfrom datetime import datetime\n\n\ndef welcome(login_user, name_user):\n with open ('src/template/template_welcome.html', 'r') as file:\n template = Template(file.read())\n date_now = datetime.now().strftime('%d/%m/%y')\n body_message = template.substitute(login=login_user , name=name_user, date=date_now)\n print(body_message)\n return body_message\n\n\nwelcome('Superman', 'Clark')","repo_name":"wagnerberna/cursos-python","sub_path":"Flask/07_RESTX_Mongo_Token_email_users_v4/src/view/view_v1.py","file_name":"view_v1.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"10355985591","text":"class MyCircularQueue:\n def __init__(self, k: int):\n self.Length = k\n self.Queue = [None for i in range(k)]\n self.FrontIDX = 0\n self.RearIDX = 0\n self.Full = False\n def enQueue(self, value: int) -> bool:\n if not self.Full:\n self.Queue[self.RearIDX] = value\n self.RearIDX += 1\n if self.RearIDX == self.Length:\n self.RearIDX = 0\n if self.RearIDX == self.FrontIDX:\n self.Full = True\n return True\n return False\n def deQueue(self) -> bool:\n if self.isEmpty():\n return False\n self.Full = False\n self.FrontIDX += 1\n if self.FrontIDX == self.Length:\n self.FrontIDX = 0\n return True\n def Front(self) -> int:\n if self.isEmpty():\n return -1\n return self.Queue[self.FrontIDX]\n def Rear(self) -> int:\n if self.isEmpty():\n return -1\n return self.Queue[self.RearIDX-1]\n def isEmpty(self) -> bool:\n return self.Full == False and self.RearIDX == self.FrontIDX\n def isFull(self) -> bool:\n return self.Full\n\n\n# Your MyCircularQueue object will be instantiated and called as such:\n# obj = MyCircularQueue(k)\n# param_1 = obj.enQueue(value)\n# param_2 = obj.deQueue()\n# param_3 = obj.Front()\n# param_4 = obj.Rear()\n# param_5 = obj.isEmpty()\n# param_6 = obj.isFull()","repo_name":"hyuneie/LeetCode","sub_path":"622-design-circular-queue/622-design-circular-queue.py","file_name":"622-design-circular-queue.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"1650763699","text":"import sys\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential,Model\nfrom gensim.models import Word2Vec\nfrom keras.layers import Input,LSTM,Bidirectional,Flatten, GRU, Dropout, Dense,TimeDistributed, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint,EarlyStopping\nfrom keras import optimizers\nimport _pickle as pk\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom keras import regularizers\nimport gensim\n\ndef loaddata(file_label,file_nolab):\n\tlabel = []\n\tword_train = []\n\tlab_data = []\n\tla_data = open(file_label,\"r\",encoding='utf-8')\n\tno_la = open(file_nolab,\"r\",encoding='utf-8')\n\tfor l in la_data:\n\t\ttmp = l.strip().split(\" +++$+++ \")\n\t\tlabel.append(int(tmp[0]))\n\t\tword_train.append(tmp[1])\n\t\tlab_data.append(tmp[1])\n\tfor n in no_la:\n\t\ttmp1 = n.strip()\n\t\tword_train.append(tmp1)\n\tlabel = np.array(label)\n\tword_train = np.array(word_train)\n\tlab_data = np.array(lab_data)\n\t#print(\"yes/no label :\",len(label),\"tr_data : \",len(word_train))\n\treturn label,word_train,lab_data\n\ndef random(Xtrain,Ytrain):\n r_list = np.array(range(0,len(Xtrain)))\n np.random.shuffle(r_list)\n Xtrain = Xtrain[r_list]\n Ytrain = Ytrain[r_list]\n return Xtrain,Ytrain\ndef split_data(X,Y, ratio):\n\tdata_size = len(X)\n\tval_size = int(data_size * ratio)\n\treturn X[val_size:],Y[val_size:],X[:val_size],Y[:val_size]\n\nfile_label = sys.argv[1]\nfile_nolab = sys.argv[2]\ntok_path = sys.argv[3]\nWord_path = sys.argv[4]\nmodel_path = sys.argv[5]\n(tr_lab,word_data,tr_data) = loaddata(file_label,file_nolab)\n#print(\"num 0 \",tr_lab[0],tr_data[0])\nprint(\"label :\",tr_lab.shape,\"tr_data : \",tr_data.shape)\nMax_len = 40\n\nstem = gensim.parsing.porter.PorterStemmer()\ntr_data = [e for e in stem.stem_documents(tr_data)]\nword_data = [k for k in stem.stem_documents(word_data)]\n\ntokenizer = Tokenizer(num_words=None, filters='\\t\\n')\ntokenizer.fit_on_texts(word_data)\n\npk.dump(tokenizer,open(tok_path,'wb'))\ntokenizer = pk.load(open(tok_path,'rb'))\n(tr_data_f,tr_lab_f,va_data,va_lab) = split_data(tr_data,tr_lab,0.1)\n\nsequences = tokenizer.texts_to_sequences(tr_data_f)\ndata = np.array(pad_sequences(sequences, maxlen=Max_len))\nval_sequences = tokenizer.texts_to_sequences(va_data)\nvalid_data = np.array(pad_sequences(val_sequences, maxlen=Max_len))\n\n\n#labels = np.array(to_categorical(tr_lab))\n#labels = tr_lab\n\n\n#print(\"tr_data_f,tr_lab_f,va_data,va_lab : \",tr_data_f.shape,tr_lab_f.shape,va_data.shape,va_lab.shape)\n\nword2vec_data = [w.split(\" \") for w in word_data]\nprint(\"=============Word2Vec=============\")\nWVmodel = Word2Vec(word2vec_data, size=100, window=5, min_count=0, workers=4)\nWVmodel.save(Word_path)\nR_WVmodel = Word2Vec.load(Word_path)\n\n\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\nprint(\"Sequence 0 :\",sequences[0])\nprint(\"tr_data 0 : \",tr_data[0])\nlen_tr = len(word2vec_data)\n\n#translate\nembeded = np.zeros((len(word_index),100))\ncou = 0\nfor w ,i in word_index.items():\n\ttry:\n\t\ttmp = R_WVmodel.wv[w]\n\t\tembeded[i] = tmp\n\texcept:\n\t\tcou+=1\n#train\ninputs = Input(shape=(Max_len,))\n\n# Embedding layer\nembedding_inputs = Embedding(len(word_index),100,weights=[embeded],trainable=False)(inputs)\n# RNN \nRNN_cell_f = Bidirectional(LSTM(128,activation=\"tanh\",dropout=0.3,return_sequences = True))(embedding_inputs)\nRNN_cell = Bidirectional(LSTM(50,activation=\"tanh\",dropout=0.2,return_sequences = False))(RNN_cell_f)\n\n#RNN_cell= LSTM(128,dropout=0.3,return_sequences = False)\n#RNN_output = RNN_cell(embedding_inputs)\n# DNN layer\noutputs = Dense(50,activation='relu',kernel_regularizer=regularizers.l2(0.1))(RNN_cell)\noutputs = Dropout(0.3)(outputs)\noutputs = Dense(1, activation='sigmoid')(outputs)\n \nmodel = Model(inputs=inputs,outputs=outputs)\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\",metrics=[\"accuracy\"])\n\nModel_Check_Point = []\nModel_Check_Point.append(ModelCheckpoint('model-{epoch:05d}-{val_acc:.5f}-{val_loss:.5f}.hdf5', monitor='val_acc', save_best_only=True,mode='auto', period=1))\n#for i in rangwe(3):\nmodel.summary()\nmodel.fit(data,tr_lab_f,validation_data=(valid_data,va_lab) ,batch_size=64, epochs=10,callbacks = Model_Check_Point)\nmodel.save(model_path)\n","repo_name":"yuju30/NTUML18","sub_path":"hw5/HW5_sentiment.py","file_name":"HW5_sentiment.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"34928167191","text":"# -*- coding: utf-8 -*-\n\n'''bluetoothd mock template\n\nThis creates the expected methods and properties of the object manager\norg.bluez object (/), the manager object (/org/bluez), but no adapters or\ndevices.\n\nThis supports BlueZ 5 only.\n'''\n\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation; either version 3 of the License, or (at your option) any\n# later version. See http://www.gnu.org/copyleft/lgpl.html for the full text\n# of the license.\n\n__author__ = 'Philip Withnall'\n__copyright__ = '''\n(c) 2013 Collabora Ltd.\n(c) 2017 - 2022 Martin Pitt \n'''\n\nfrom pathlib import Path\n\nimport dbus\n\nfrom dbusmock import OBJECT_MANAGER_IFACE, mockobject\n\nBUS_NAME = 'org.bluez'\nMAIN_OBJ = '/'\nSYSTEM_BUS = True\nIS_OBJECT_MANAGER = True\n\nBLUEZ_MOCK_IFACE = 'org.bluez.Mock'\nAGENT_MANAGER_IFACE = 'org.bluez.AgentManager1'\nPROFILE_MANAGER_IFACE = 'org.bluez.ProfileManager1'\nADAPTER_IFACE = 'org.bluez.Adapter1'\nMEDIA_IFACE = 'org.bluez.Media1'\nNETWORK_SERVER_IFACE = 'org.bluez.Network1'\nDEVICE_IFACE = 'org.bluez.Device1'\n\n# The device class of some arbitrary Android phone.\nMOCK_PHONE_CLASS = 5898764\n\n\n@dbus.service.method(AGENT_MANAGER_IFACE,\n in_signature='os', out_signature='')\ndef RegisterAgent(manager, agent_path, capability):\n all_caps = ['DisplayOnly', 'DisplayYesNo', 'KeyboardOnly',\n 'NoInputNoOutput', 'KeyboardDisplay']\n\n if agent_path in manager.agent_paths:\n raise dbus.exceptions.DBusException(\n 'Another agent is already registered ' + manager.agent_path,\n name='org.bluez.Error.AlreadyExists')\n\n if capability not in all_caps:\n raise dbus.exceptions.DBusException(\n 'Unsupported capability ' + capability,\n name='org.bluez.Error.InvalidArguments')\n\n if not manager.default_agent:\n manager.default_agent = agent_path\n manager.agent_paths += [agent_path]\n manager.capabilities[str(agent_path)] = capability\n\n\n@dbus.service.method(AGENT_MANAGER_IFACE,\n in_signature='o', out_signature='')\ndef UnregisterAgent(manager, agent_path):\n if agent_path not in manager.agent_paths:\n raise dbus.exceptions.DBusException(\n 'Agent not registered ' + agent_path,\n name='org.bluez.Error.DoesNotExist')\n\n manager.agent_paths.remove(agent_path)\n del manager.capabilities[agent_path]\n if manager.default_agent == agent_path:\n if len(manager.agent_paths) > 0:\n manager.default_agent = manager.agent_paths[-1]\n else:\n manager.default_agent = None\n\n\n@dbus.service.method(AGENT_MANAGER_IFACE,\n in_signature='o', out_signature='')\ndef RequestDefaultAgent(manager, agent_path):\n if agent_path not in manager.agent_paths:\n raise dbus.exceptions.DBusException(\n 'Agent not registered ' + agent_path,\n name='org.bluez.Error.DoesNotExist')\n manager.default_agent = agent_path\n\n\ndef load(mock, _parameters):\n mock.AddObject('/org/bluez', AGENT_MANAGER_IFACE, {}, [\n ('RegisterAgent', 'os', '', RegisterAgent),\n ('RequestDefaultAgent', 'o', '', RequestDefaultAgent),\n ('UnregisterAgent', 'o', '', UnregisterAgent),\n ])\n\n bluez = mockobject.objects['/org/bluez']\n bluez.AddMethods(PROFILE_MANAGER_IFACE, [\n ('RegisterProfile', 'osa{sv}', '', ''),\n ('UnregisterProfile', 'o', '', ''),\n ])\n bluez.agent_paths = []\n bluez.capabilities = {}\n bluez.default_agent = None\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='o', out_signature='')\ndef RemoveDevice(adapter, path):\n 
adapter.RemoveObject(path)\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(path),\n [DEVICE_IFACE],\n ])\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='', out_signature='')\ndef StartDiscovery(adapter):\n adapter.props[ADAPTER_IFACE]['Discovering'] = True\n # NOTE: discovery filter support is minimal to mock\n # the Discoverable discovery filter\n if adapter.props[ADAPTER_IFACE]['DiscoveryFilter'] is not None:\n adapter.props[ADAPTER_IFACE]['Discoverable'] = True\n adapter.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n ADAPTER_IFACE,\n {\n 'Discoverable': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discoverable'], variant_level=1),\n 'Discovering': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discovering'], variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='', out_signature='')\ndef StopDiscovery(adapter):\n adapter.props[ADAPTER_IFACE]['Discovering'] = False\n # NOTE: discovery filter support is minimal to mock\n # the Discoverable discovery filter\n if adapter.props[ADAPTER_IFACE]['DiscoveryFilter'] is not None:\n adapter.props[ADAPTER_IFACE]['Discoverable'] = False\n adapter.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n ADAPTER_IFACE,\n {\n 'Discoverable': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discoverable'], variant_level=1),\n 'Discovering': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discovering'], variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='a{sv}', out_signature='')\ndef SetDiscoveryFilter(adapter, discovery_filter):\n adapter.props[ADAPTER_IFACE]['DiscoveryFilter'] = discovery_filter\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='s')\ndef AddAdapter(self, device_name, system_name):\n '''Convenience method to add a Bluetooth adapter\n\n You have to specify a device name which must be a valid part of an object\n path, e. g. 
\"hci0\", and an arbitrary system name (pretty hostname).\n\n Returns the new object path.\n '''\n path = '/org/bluez/' + device_name\n address_start = int(device_name[-1])\n address = (f\"{address_start:02d}:{address_start+1:02d}:{address_start+2:02d}:\"\n f\"{address_start+3:02d}:{address_start+4:02d}:{address_start+5:02d}\")\n adapter_properties = {\n 'UUIDs': dbus.Array([\n # Reference:\n # http://git.kernel.org/cgit/bluetooth/bluez.git/tree/lib/uuid.h\n # PNP\n '00001200-0000-1000-8000-00805f9b34fb',\n # Generic Access Profile\n '00001800-0000-1000-8000-00805f9b34fb',\n # Generic Attribute Profile\n '00001801-0000-1000-8000-00805f9b34fb',\n # Audio/Video Remote Control Profile (remote)\n '0000110e-0000-1000-8000-00805f9b34fb',\n # Audio/Video Remote Control Profile (target)\n '0000110c-0000-1000-8000-00805f9b34fb',\n ], variant_level=1),\n 'Discoverable': dbus.Boolean(False, variant_level=1),\n 'Discovering': dbus.Boolean(False, variant_level=1),\n 'Pairable': dbus.Boolean(True, variant_level=1),\n 'Powered': dbus.Boolean(True, variant_level=1),\n 'Address': dbus.String(address, variant_level=1),\n 'AddressType': dbus.String('public', variant_level=1),\n 'Alias': dbus.String(system_name, variant_level=1),\n 'Modalias': dbus.String('usb:v1D6Bp0245d050A', variant_level=1),\n 'Name': dbus.String(system_name, variant_level=1),\n # Reference:\n # http://bluetooth-pentest.narod.ru/software/\n # bluetooth_class_of_device-service_generator.html\n 'Class': dbus.UInt32(268, variant_level=1), # Computer, Laptop\n 'DiscoverableTimeout': dbus.UInt32(180, variant_level=1),\n 'PairableTimeout': dbus.UInt32(0, variant_level=1),\n }\n\n self.AddObject(path,\n ADAPTER_IFACE,\n # Properties\n adapter_properties,\n # Methods\n [\n ('RemoveDevice', 'o', '', RemoveDevice),\n ('StartDiscovery', '', '', StartDiscovery),\n ('StopDiscovery', '', '', StopDiscovery),\n ('SetDiscoveryFilter', 'a{sv}', '', SetDiscoveryFilter),\n ])\n\n adapter = mockobject.objects[path]\n adapter.AddMethods(MEDIA_IFACE, [\n ('RegisterEndpoint', 'oa{sv}', '', ''),\n ('UnregisterEndpoint', 'o', '', ''),\n ])\n adapter.AddMethods(NETWORK_SERVER_IFACE, [\n ('Register', 'ss', '', ''),\n ('Unregister', 's', '', ''),\n ])\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',\n 'oa{sa{sv}}', [\n dbus.ObjectPath(path),\n {ADAPTER_IFACE: adapter_properties},\n ])\n\n return path\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='s')\ndef RemoveAdapter(self, device_name):\n '''Convenience method to remove a Bluetooth adapter\n '''\n path = '/org/bluez/' + device_name\n # We could remove the devices related to the adapters here, but\n # when bluez crashes, the InterfacesRemoved aren't necessarily sent\n # devices first, so in effect, our laziness is testing an edge case\n # in the clients\n self.RemoveObject(path)\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(path),\n [ADAPTER_IFACE],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='s')\ndef RemoveAdapterWithDevices(self, device_name):\n '''Convenience method to remove a Bluetooth adapter and all\n the devices associated to it\n '''\n adapter_path = '/org/bluez/' + device_name\n adapter = mockobject.objects[adapter_path]\n manager = mockobject.objects['/']\n\n to_remove = []\n for path in mockobject.objects:\n if path.startswith(adapter_path + '/'):\n to_remove.append(path)\n\n for path in to_remove:\n adapter.RemoveObject(path)\n 
manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(path),\n [DEVICE_IFACE],\n ])\n\n self.RemoveObject(adapter_path)\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(adapter_path),\n [ADAPTER_IFACE],\n ])\n\n\n@dbus.service.method(DEVICE_IFACE,\n in_signature='', out_signature='')\ndef Pair(device):\n if device.paired:\n raise dbus.exceptions.DBusException(\n 'Device already paired',\n name='org.bluez.Error.AlreadyExists')\n device_address = device.props[DEVICE_IFACE]['Address']\n adapter_device_name = Path(device.props[DEVICE_IFACE]['Adapter']).name\n device.PairDevice(adapter_device_name, device_address, MOCK_PHONE_CLASS)\n\n\n@dbus.service.method(DEVICE_IFACE,\n in_signature='', out_signature='')\ndef Connect(device):\n if device.connected:\n raise dbus.exceptions.DBusException(\n 'Already Connected',\n name='org.bluez.Error.AlreadyConnected')\n device.connected = True\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Connected': dbus.Boolean(device.connected, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(DEVICE_IFACE,\n in_signature='', out_signature='')\ndef Disconnect(device):\n if not device.connected:\n raise dbus.exceptions.DBusException(\n 'Not Connected',\n name='org.bluez.Error.NotConnected')\n device.connected = False\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Connected': dbus.Boolean(device.connected, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='sss', out_signature='s')\ndef AddDevice(self, adapter_device_name, device_address, alias):\n '''Convenience method to add a Bluetooth device\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). The alias is the human-readable name\n for the device (e.g. 
as set on the device itself), and the adapter device\n name is the device_name passed to AddAdapter.\n\n This will create a new, unpaired and unconnected device.\n\n Returns the new object path.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n\n properties = {\n 'Address': dbus.String(device_address, variant_level=1),\n 'AddressType': dbus.String('public', variant_level=1),\n 'Name': dbus.String(alias, variant_level=1),\n 'Icon': dbus.String('', variant_level=1),\n 'Class': dbus.UInt32(0, variant_level=1),\n 'Appearance': dbus.UInt16(0, variant_level=1),\n 'UUIDs': dbus.Array([], signature='s', variant_level=1),\n 'Paired': dbus.Boolean(False, variant_level=1),\n 'Connected': dbus.Boolean(False, variant_level=1),\n 'Trusted': dbus.Boolean(False, variant_level=1),\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'WakeAllowed': dbus.Boolean(False, variant_level=1),\n 'Alias': dbus.String(alias, variant_level=1),\n 'Adapter': dbus.ObjectPath(adapter_path, variant_level=1),\n 'LegacyPairing': dbus.Boolean(False, variant_level=1),\n 'Modalias': dbus.String('', variant_level=1),\n 'RSSI': dbus.Int16(-79, variant_level=1), # arbitrary\n 'TxPower': dbus.Int16(0, variant_level=1),\n 'ManufacturerData': dbus.Array([], signature='a{qv}', variant_level=1),\n 'ServiceData': dbus.Array([], signature='a{sv}', variant_level=1),\n 'ServicesResolved': dbus.Boolean(False, variant_level=1),\n 'AdvertisingFlags': dbus.Array([], signature='ay', variant_level=1),\n 'AdvertisingData': dbus.Array([], signature='a{yv}', variant_level=1),\n }\n\n self.AddObject(path,\n DEVICE_IFACE,\n # Properties\n properties,\n # Methods\n [\n ('CancelPairing', '', '', ''),\n ('Connect', '', '', Connect),\n ('ConnectProfile', 's', '', ''),\n ('Disconnect', '', '', Disconnect),\n ('DisconnectProfile', 's', '', ''),\n ('Pair', '', '', Pair),\n ])\n device = mockobject.objects[path]\n device.paired = False\n device.connected = False\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',\n 'oa{sa{sv}}', [\n dbus.ObjectPath(path),\n {DEVICE_IFACE: properties},\n ])\n\n return path\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ssi', out_signature='')\ndef PairDevice(_self, adapter_device_name, device_address, class_):\n '''Convenience method to mark an existing device as paired.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). 
The adapter device name is the\n device_name passed to AddAdapter.\n\n This unblocks the device if it was blocked.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(f'Device {device_name} does not exist.', name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n device.paired = True\n\n # Based off pairing with an Android phone.\n uuids = [\n '00001105-0000-1000-8000-00805f9b34fb',\n '0000110a-0000-1000-8000-00805f9b34fb',\n '0000110c-0000-1000-8000-00805f9b34fb',\n '00001112-0000-1000-8000-00805f9b34fb',\n '00001115-0000-1000-8000-00805f9b34fb',\n '00001116-0000-1000-8000-00805f9b34fb',\n '0000111f-0000-1000-8000-00805f9b34fb',\n '0000112f-0000-1000-8000-00805f9b34fb',\n '00001200-0000-1000-8000-00805f9b34fb',\n ]\n\n device.props[DEVICE_IFACE]['UUIDs'] = dbus.Array(uuids, variant_level=1)\n device.props[DEVICE_IFACE]['Paired'] = dbus.Boolean(True, variant_level=1)\n device.props[DEVICE_IFACE]['LegacyPairing'] = dbus.Boolean(True,\n variant_level=1)\n device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(False,\n variant_level=1)\n\n try:\n device.props[DEVICE_IFACE]['Modalias']\n except KeyError:\n device.AddProperties(DEVICE_IFACE, {\n 'Modalias': dbus.String('bluetooth:v000Fp1200d1436',\n variant_level=1),\n 'Class': dbus.UInt32(class_, variant_level=1),\n 'Icon': dbus.String('phone', variant_level=1),\n })\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'UUIDs': dbus.Array(uuids, variant_level=1),\n 'Paired': dbus.Boolean(True, variant_level=1),\n 'LegacyPairing': dbus.Boolean(True, variant_level=1),\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'Modalias': dbus.String('bluetooth:v000Fp1200d1436',\n variant_level=1),\n 'Class': dbus.UInt32(class_, variant_level=1),\n 'Icon': dbus.String('phone', variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='')\ndef BlockDevice(_self, adapter_device_name, device_address):\n '''Convenience method to mark an existing device as blocked.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). 
The adapter device name is the\n device_name passed to AddAdapter.\n\n This disconnects the device if it was connected.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(f'Device {device_name} does not exist.', name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n\n device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(True, variant_level=1)\n device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(False,\n variant_level=1)\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Blocked': dbus.Boolean(True, variant_level=1),\n 'Connected': dbus.Boolean(False, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='')\ndef ConnectDevice(_self, adapter_device_name, device_address):\n '''Convenience method to mark an existing device as connected.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). The adapter device name is the\n device_name passed to AddAdapter.\n\n This unblocks the device if it was blocked.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Device {device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n\n device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(False,\n variant_level=1)\n device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(True,\n variant_level=1)\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'Connected': dbus.Boolean(True, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='')\ndef DisconnectDevice(_self, adapter_device_name, device_address):\n '''Convenience method to mark an existing device as disconnected.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). 
The adapter device name is the\n device_name passed to AddAdapter.\n\n This does not change the device's blocked status.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Device {device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n\n device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(False,\n variant_level=1)\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Connected': dbus.Boolean(False, variant_level=1),\n },\n [],\n ])\n","repo_name":"martinpitt/python-dbusmock","sub_path":"dbusmock/templates/bluez5.py","file_name":"bluez5.py","file_ext":"py","file_size_in_byte":23968,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"}
+{"seq_id":"1263551559","text":"import requests\nimport os\n\n\nclass TmdbApi(object):\n \"\"\"\n This is a base class that all api endpoints will inherit from\n \"\"\"\n\n def __init__(self):\n\n # Grab the api key from the os environment an verify we actually have it\n api_key = os.getenv(\"TMDB_KEY\")\n if api_key is None:\n raise Exception(\"The api_key is missing.\")\n\n self.base_url = \"https://api.themoviedb.org/3\"\n self.api_key = \"?api_key={}\".format(api_key)\n\n\n def _get_appended_data(self, data_to_append):\n return \"&append_to_response={}\".format(\",\".join(data_to_append) if isinstance(data_to_append, list) else data_to_append)\n\n def _check_status_code(self, status_code):\n if status_code != 200:\n raise AssertionError(\"The api call failed. The response's status code was {}\".format(status_code))\n\n\nclass TmdbMoviesApi(TmdbApi):\n \"\"\"\n This is a class specific to testing the movies endpoint.\n \"\"\"\n\n def __init__(self):\n super(TmdbMoviesApi, self).__init__()\n self.movie_url = \"{}/movie\".format(self.base_url)\n\n def get_movie_details(self, media_id, detail_type=None, append_detail=None, check_response_code=True):\n # Verify detail_type and append_detail aren't being used at the same time\n if detail_type is not None and append_detail is not None:\n raise Exception(\"You don't need to set data_type if you are using append_detail.\")\n\n # Create the url\n detail_type = \"/{}\".format(detail_type) if detail_type is not None else \"\"\n append = \"{}\".format(self._get_appended_data(append_detail)) if append_detail is not None else \"\"\n url = \"{}/{}{}{}{}\".format(self.movie_url, str(media_id), detail_type, self.api_key, append)\n\n response = requests.get(url)\n\n if check_response_code:\n self._check_status_code(response.status_code)\n\n return response\n","repo_name":"gontib/roger_api_test","sub_path":"lib/tmdb_api.py","file_name":"tmdb_api.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"17622754441","text":"import locale\nimport os\nimport sys\nimport yaml\nfrom collections import OrderedDict\nimport projectconfig_yamllib as pcy\n\ndef main():\n locale.setlocale(locale.LC_COLLATE, 'C')\n\n yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n pcy.construct_yaml_map)\n\n yaml.add_representer(OrderedDict, pcy.project_representer,\n Dumper=pcy.IndentedDumper)\n\n chandata = yaml.load(open('gerritbot/channels.yaml'))\n for k,v in chandata.items():\n v['projects'] = sorted(v['projects'])\n\n sys.stdout.write('# This file is sorted alphabetically by channel name.\\n')\n first = True\n for k in sorted(chandata.keys()):\n if not first:\n sys.stdout.write('\\n')\n first = False\n sys.stdout.write(yaml.dump({k: chandata[k]}, default_flow_style=False,\n Dumper=pcy.IndentedDumper, width=80, indent=2))\n\nif __name__ == '__main__':\n main()\n","repo_name":"nibalizer/openstack-infra-combined","sub_path":"project-config/tools/normalize_channels_yaml.py","file_name":"normalize_channels_yaml.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"22597624142","text":"import re\nfrom typing import List, Optional, Union\n\nfrom ..core import Config, Field, Schema\nfrom .virtual_field import VirtualField\n\n\nclass StringField(Field):\n \"\"\"\n A string field.\n \"\"\"\n\n storage_type = str\n\n def __init__(\n self,\n *,\n min_len: Optional[int] = None,\n max_len: Optional[int] = None,\n regex: Optional[str] = None,\n choices: Optional[List[str]] = None,\n transform_case: Optional[str] = None,\n transform_strip: Optional[Union[bool, str]] = None,\n **kwargs\n ):\n \"\"\"\n The string field can perform transformations on the value prior to validating it if either\n *transform_case* or *transform_strip* are specified.\n\n :param min_len: minimum allowed length\n :param max_len: maximum allowed length\n :param regex: regex pattern that the value must match\n :param choices: list of valid choices\n :param transform_case: transform the value's case to either ``upper`` or ``lower`` case\n :param transform_strip: strip the value by calling :meth:`str.strip`.\n Setting this to ``True`` will call :meth:`str.strip` without any arguments (ie.\n striping all whitespace characters) and if this is a ``str``, then :meth:`str.strip`\n will be called with ``transform_strip``.\n \"\"\"\n super().__init__(**kwargs)\n self.min_len = min_len\n self.max_len = max_len\n self.regex = re.compile(regex) if regex else None\n self.choices = choices\n self.transform_case = transform_case.lower() if transform_case else None\n self.transform_strip = transform_strip\n\n if self.transform_case and self.transform_case not in (\"lower\", \"upper\"):\n raise TypeError('transform_case must be \"lower\" or \"upper\"')\n\n def _validate(self, cfg: Config, value: str) -> str:\n \"\"\"\n Validate a value.\n\n :param cfg: current Config\n :param value: value to validate\n \"\"\"\n if not isinstance(value, str):\n raise ValueError(\"value must be a string, not a %s\" % type(value).__name__)\n\n if self.transform_strip:\n if isinstance(self.transform_strip, str):\n value = value.strip(self.transform_strip)\n else:\n value = value.strip()\n\n if self.required and not value:\n raise ValueError(\"value is required\")\n\n if self.transform_case:\n value = value.lower() if self.transform_case == \"lower\" else value.upper()\n\n if self.min_len is not None and len(value) < self.min_len:\n raise ValueError(\"value must be at least %d characters\" % self.min_len)\n\n if self.max_len is not None and len(value) > self.max_len:\n raise ValueError(\"value must not be more than %d characters\" % self.max_len)\n\n if self.regex and not self.regex.match(value):\n raise ValueError(\"value does not match pattern %s\" % self.regex.pattern)\n\n if self.choices and value not in self.choices:\n if len(self.choices) < 6:\n postfix = \": must be one of: \" + \", \".join(self.choices)\n else:\n postfix = \"\"\n raise ValueError(\"value is not a valid choice\" + postfix)\n\n return value\n\n\nclass LogLevelField(StringField):\n \"\"\"\n A field representing the Python log level.\n \"\"\"\n\n storage_type = str\n\n def __init__(self, levels: Optional[List[str]] = None, **kwargs):\n \"\"\"\n :param levels: list of log levels. 
If not specified, the default Python log levels will be\n used: ``debug``, ``info``, ``warning``, ``error``, and ``critical``.\n \"\"\"\n if not levels:\n levels = [\"debug\", \"info\", \"warning\", \"error\", \"critical\"]\n\n self.levels = levels\n kwargs.setdefault(\"transform_case\", \"lower\")\n kwargs.setdefault(\"transform_strip\", True)\n kwargs[\"choices\"] = levels\n super().__init__(**kwargs)\n\n\nclass ApplicationModeField(StringField):\n \"\"\"\n A field representing the application operating mode.\n \"\"\"\n\n storage_type = str\n HELPER_MODE_PATTERN = re.compile(\"^[a-zA-Z0-9_]+$\")\n\n def __init__(\n self, modes: Optional[List[str]] = None, create_helpers: bool = True, **kwargs\n ):\n \"\"\"\n The *create_helpers* parameter will create a boolean :class:`VirtualField` for each\n ``mode`` named ``is__mode``, that returns ``True`` when the mode is active. When\n *create_helpers=True* then each mode name must be a valid Python variable name.\n\n :param modes: application modes, if not specified the default modes will be used:\n ``production`` and ``development``\n :param create_helpers: create helper a bool ``VirtualField`` for each mode\n \"\"\"\n if not modes:\n modes = [\"development\", \"production\"]\n\n self.modes = modes\n self.create_helpers = create_helpers\n\n if create_helpers:\n for mode in modes:\n if not self.HELPER_MODE_PATTERN.match(mode):\n raise TypeError(\"invalid mode name: %s\" % mode)\n\n kwargs.setdefault(\"transform_case\", \"lower\")\n kwargs.setdefault(\"transform_strip\", True)\n kwargs[\"choices\"] = modes\n super().__init__(**kwargs)\n\n def _create_helper(self, mode: str) -> \"VirtualField\":\n \"\"\"\n Create helper VirtualField.\n \"\"\"\n return VirtualField(lambda cfg: self.__getval__(cfg) == mode)\n\n def __setkey__(self, schema: Schema, key: str) -> None:\n \"\"\"\n Set the key and optionally add ``VirtualField`` helpers to the schema if\n *create_helpers=True*.\n \"\"\"\n super().__setkey__(schema, key)\n if self.create_helpers:\n for mode in self.modes:\n schema._add_field(\"is_%s_mode\" % mode, self._create_helper(mode))\n","repo_name":"ameily/cincoconfig","sub_path":"cincoconfig/fields/string_field.py","file_name":"string_field.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"}
+{"seq_id":"32929846982","text":"from dateutil.relativedelta import relativedelta\nimport datetime\nimport logging\nimport time\nimport os\nfrom openerp.osv import osv, fields\nimport openerp.tools\nfrom openerp.tools.translate import _\nfrom config import file_location\n\nfrom openerp.addons.decimal_precision import decimal_precision as dp\n\n_logger = logging.getLogger(__name__)\n\n\nclass custom_contract(osv.osv):\n _inherit = 'account.analytic.account'\n _columns = {\n 'include_cdr_amount': fields.boolean('Calculate amount from CDR files', store=True),\n }\n\n def cron_save_cdr_logs(self, cr, uid, context=None):\n cdr_log = self.pool.get('cdr.logs')\n logs = self.read_cdr_files(cr,uid)\n for log in logs:\n if len(log) == 16:\n hash_key = log[14].replace('\"', '')\n cr.execute(\"select id,name from res_partner where hash_key='\" + hash_key.strip() + \"'\")\n partner = cr.dictfetchall()\n if len(partner) > 0:\n res = {\n 'customer_id': partner[0]['id'],\n 'customer_name': partner[0]['name'],\n 'hash_key': hash_key.strip(),\n 'region': log[9].replace('\"', '').strip(),\n 'incoming_call_receiver': log[2].replace('\"', '').strip(),\n 'dialer': log[3].replace('\"', '').strip(),\n 'time_stamp': log[5].replace('\"', '').strip() + \" \" + log[6].replace('\"', '').strip(),\n 'total_call_time_from_dialing': log[7].replace('\"', '').strip(),\n 'calling_talk_time': log[8].replace('\"', '').strip(),\n 'charging_rate': log[11].replace('\"', '').strip(),\n 'call_type': log[10].replace('\"', '').strip(),\n 'type': 'normal'\n }\n cdr_log.create(cr, uid, res, context=context)\n elif len(log) == 18:\n hash_key = log[16].replace('\"','')\n cr.execute(\"select id,name from res_partner where hash_key='\"+hash_key.strip()+\"'\")\n partner = cr.dictfetchall()\n if len(partner)>0:\n res = {\n 'customer_id': partner[0]['id'],\n 'customer_name': partner[0]['name'] ,\n 'hash_key': hash_key.strip() ,\n 'region': log[11].replace('\"','').strip(),\n 'incoming_call_receiver':log[2].replace('\"','').strip() ,\n 'dialer': log[3].replace('\"','').strip() ,\n 'time_stamp': log[7].replace('\"','').strip() + \" \" + log[8].replace('\"','').strip(),\n 'total_call_time_from_dialing': log[9].replace('\"','').strip(),\n 'calling_talk_time': log[10].replace('\"','').strip(),\n 'charging_rate': log[13].replace('\"','').strip(),\n 'type': 'tf'\n }\n cdr_log.create(cr, uid, res, context=context)\n return True\n\n # Get Wizard Record\n def read_cdr_files(self, cr, uid, context=None):\n end_lst = []\n for loc in file_location:\n path = os.path.expanduser(loc)\n try:\n #make sure using r'filepath' to mean its a string literal\n fl = open(path,'r')\n fl_all = fl.read()\n lst_rec = fl_all.split('\\n')\n for rec in lst_rec:\n rec_lst = rec.split(',')\n if len(rec_lst) > 1:\n end_lst.append(rec_lst)\n except:\n print(\"File is not present in current directory\")\n return end_lst\n\n\n def cal_invoice_amount(self, cr, uid, partner_id, context=None):\n total = 0.0\n cr.execute(\"Select * from call_rates where partner_id='\"+str(partner_id.id)+\"'\")\n call_rates = cr.dictfetchall()\n free_mintues = call_rates[0]['free_mins']\n counter = 0.0\n cr.execute(\"SELECT * FROM public.cdr_logs where charging_rate>0 and customer_id='\" + str(partner_id.id) + \"'\"+\"order by charging_rate asc\")\n call_history = cr.dictfetchall()\n for log in call_history:\n if counter > free_mintues:\n talk_time = log['calling_talk_time']/60\n if log['charging_rate']== 0.02 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_one']\n elif 
log['charging_rate']== 0.04 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_two']\n elif log['charging_rate']== 0.12 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_three']\n elif log['charging_rate']== 0.16 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_four']\n elif log['charging_rate']== 0.25 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_five']\n elif log['call_type']=='National' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['national_rates']\n elif log['call_type']=='Mobile' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['mobile_rates']\n elif log['call_type']=='Local' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['local_rates']\n elif log['call_type']=='Special' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['local_rates']\n else:\n counter = counter + (log['calling_talk_time']/60)\n return total\n\n # This is the function which is reponsible to create invoice lines from cron job we must modified these lines\n def _prepare_invoice_line(self, cr, uid, line,contract, fiscal_position, context=None):\n amount = self.cal_invoice_amount(cr, uid, contract.partner_id, context=context)\n fpos_obj = self.pool.get('account.fiscal.position')\n res = line.product_id\n account_id = res.property_account_income.id\n if not account_id:\n account_id = res.categ_id.property_account_income_categ.id\n account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)\n\n taxes = res.taxes_id or False\n tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes, context=context)\n if contract.include_cdr_amount:\n values = {\n 'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': line.analytic_account_id.id,\n 'price_unit': amount or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }\n else:\n values = {\n 'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': line.analytic_account_id.id,\n 'price_unit': line.price_unit or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }\n return values\n\n def _prepare_invoice_lines(self, cr, uid, contract,fiscal_position_id, context=None):\n fpos_obj = self.pool.get('account.fiscal.position')\n fiscal_position = None\n if fiscal_position_id:\n fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)\n invoice_lines = []\n for line in contract.recurring_invoice_line_ids:\n values = self._prepare_invoice_line(cr, uid, line,contract,fiscal_position, context=context)\n invoice_lines.append((0, 0, values))\n return invoice_lines\n\n def _prepare_invoice(self, cr, uid, contract, context=None):\n invoice = self._prepare_invoice_data(cr, uid, contract, context=context)\n invoice['invoice_line'] = self._prepare_invoice_lines(cr, uid, contract,invoice['fiscal_position'], context=context)\n return invoice\n\n def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):\n context = context or {}\n invoice_ids = []\n current_date = time.strftime('%Y-%m-%d')\n if ids:\n contract_ids = ids\n else:\n contract_ids = self.search(cr, uid, [('recurring_next_date', '<=', current_date), ('state', '=', 'open'),\n ('recurring_invoices', '=', True), ('type', '=', 'contract')])\n if 
contract_ids:\n cr.execute(\n 'SELECT company_id, array_agg(id) as ids FROM account_analytic_account WHERE id IN %s GROUP BY company_id',\n (tuple(contract_ids),))\n for company_id, ids in cr.fetchall():\n context_contract = dict(context, company_id=company_id, force_company=company_id)\n for contract in self.browse(cr, uid, ids, context=context_contract):\n try:\n if contract.include_cdr_amount:\n invoice_values = self._prepare_invoice(cr, uid, contract,context=context_contract)\n invoice_values['invoice_type'] = 'CDR'\n else:\n invoice_values = self._prepare_invoice(cr, uid, contract, context=context_contract)\n invoice_ids.append(\n self.pool['account.invoice'].create(cr, uid, invoice_values, context=context))\n next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, \"%Y-%m-%d\")\n interval = contract.recurring_interval\n if contract.recurring_rule_type == 'daily':\n new_date = next_date + relativedelta(days=+interval)\n elif contract.recurring_rule_type == 'weekly':\n new_date = next_date + relativedelta(weeks=+interval)\n elif contract.recurring_rule_type == 'monthly':\n new_date = next_date + relativedelta(months=+interval)\n else:\n new_date = next_date + relativedelta(years=+interval)\n self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')},\n context=context)\n if automatic:\n cr.commit()\n except Exception:\n if automatic:\n cr.rollback()\n _logger.exception('Fail to create recurring invoice for contract %s', contract.code)\n else:\n raise\n return invoice_ids\n","repo_name":"Parkash067/ERP","sub_path":"custom_contracts/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":11217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"24065909619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nInfo\n----\nThis file contains the basic functionalities of the ThermalEnergyStorage class.\n\n\"\"\"\n\nimport pandas as pd\nfrom .component import Component\n\n\nclass ThermalEnergyStorage(Component):\n def __init__(\n self,\n target_temperature,\n min_temperature,\n hysteresis,\n mass,\n cp,\n thermal_energy_loss_per_day,\n unit,\n identifier=None,\n environment=None,\n user_profile=None,\n cost=None,\n ):\n\n \"\"\"\n Info\n ----\n ...\n \n Parameters\n ----------\n \n The parameter timebase determines the resolution of the given data. \n Furthermore the parameter environment (Environment) is given to provide weather data and further external influences.\n To account for different people using a component, a use case (VPPUseCase) can be passed in to improve the simulation.\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n # Call to super class\n super(ThermalEnergyStorage, self).__init__(\n unit, environment, user_profile, cost\n )\n\n # Configure attributes\n self.identifier = identifier\n self.target_temperature = target_temperature\n self.current_temperature = target_temperature - hysteresis\n self.min_temperature = min_temperature\n self.timeseries = pd.DataFrame(\n columns=[\"temperature\"],\n index=pd.date_range(\n start=self.environment.start,\n end=self.environment.end,\n freq=self.environment.time_freq,\n name=\"time\",\n ),\n )\n self.hysteresis = hysteresis\n self.mass = mass\n self.cp = cp\n self.state_of_charge = mass * cp * (self.current_temperature + 273.15)\n # Aus Datenblättern ergibt sich, dass ein Wärmespeicher je Tag rund 10%\n # Bereitschaftsverluste hat (ohne Rohrleitungen!!)\n self.thermal_energy_loss_per_day = thermal_energy_loss_per_day\n self.efficiency_per_timestep = 1 - (\n thermal_energy_loss_per_day\n / (24 * (60 / self.environment.timebase))\n )\n self.needs_loading = None\n\n def operate_storage(self, timestamp, thermal_energy_generator):\n\n if self.get_needs_loading():\n thermal_energy_generator.ramp_up(timestamp)\n else:\n thermal_energy_generator.ramp_down(timestamp)\n\n thermal_energy_demand = self.user_profile.thermal_energy_demand.thermal_energy_demand.loc[\n timestamp\n ]\n observation = thermal_energy_generator.observations_for_timestamp(\n timestamp\n )\n thermal_production = observation[\"thermal_energy_output\"]\n\n # Formula: E = m * cp * T\n # <=> T = E / (m * cp)\n self.state_of_charge -= (\n (thermal_energy_demand - thermal_production)\n * 1000 # kWh to Wh ?? 
Why?\n / (60 / self.environment.timebase)\n )\n self.state_of_charge *= self.efficiency_per_timestep\n self.current_temperature = (\n self.state_of_charge\n# * 3600 # kWh to KJ\n / (self.mass * self.cp)\n ) - 273.15\n\n if thermal_energy_generator.is_running:\n el_load = observation[\"el_demand\"]\n else:\n el_load = 0\n\n self.timeseries.temperature[timestamp] = self.current_temperature\n\n # log timeseries of thermal_energy_generator_class:\n thermal_energy_generator.log_observation(observation, timestamp)\n\n return self.current_temperature, el_load\n\n def get_needs_loading(self):\n\n if self.current_temperature <= (\n self.target_temperature - self.hysteresis\n ):\n self.needs_loading = True\n\n if self.current_temperature >= (\n self.target_temperature + self.hysteresis\n ):\n self.needs_loading = False\n\n if self.current_temperature < self.min_temperature:\n raise ValueError(\n \"Thermal energy production to low to maintain \"\n + \"heat storage temperature!\"\n )\n\n return self.needs_loading\n\n def value_for_timestamp(self, timestamp):\n\n \"\"\"\n Info\n ----\n This function takes a timestamp as the parameter and returns the \n corresponding value for that timestamp. \n A positiv result represents a load. \n A negative result represents a generation. \n \n This abstract function needs to be implemented by child classes.\n Raises an error since this function needs to be implemented by child classes.\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n raise NotImplementedError(\n \"value_for_timestamp needs to be implemented by child classes!\"\n )\n\n def observations_for_timestamp(self, timestamp):\n\n \"\"\"\n Info\n ----\n This function takes a timestamp as the parameter and returns a \n dictionary with key (String) value (Any) pairs. \n Depending on the type of component, different status parameters of the \n respective component can be queried. \n \n For example, a power store can report its \"State of Charge\".\n Returns an empty dictionary since this function needs to be \n implemented by child classes.\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n return {}\n\n def prepare_time_series(self):\n\n \"\"\"\n Info\n ----\n This function is called to prepare the time series.\n Currently equals reset_time_series. 
Adjust if needed in later versions.\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n self.timeseries = pd.DataFrame(\n columns=[\"temperature\"],\n index=pd.date_range(\n start=self.environment.start,\n end=self.environment.end,\n freq=self.environment.time_freq,\n name=\"time\",\n ),\n )\n return self.timeseries\n\n def reset_time_series(self):\n\n \"\"\"\n Info\n ----\n This function is called to reset the time series\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n self.timeseries = pd.DataFrame(\n columns=[\"temperature\"],\n index=pd.date_range(\n start=self.environment.start,\n end=self.environment.end,\n freq=self.environment.time_freq,\n name=\"time\",\n ),\n )\n\n return self.timeseries\n","repo_name":"Pyosch/vpplib","sub_path":"vpplib/thermal_energy_storage.py","file_name":"thermal_energy_storage.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"79"}
+{"seq_id":"37117246239","text":"'''\nhttps://keras.io/activations/\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\n\n(X_train0, y_train0), (X_test0, y_test0) = mnist.load_data()\nX_train = X_train0.reshape(60000, 784).astype('float32')/255.0\nX_test = X_test0.reshape(10000, 784).astype('float32')/255.0\nY_train = np_utils.to_categorical(y_train0, 10)\nY_test = np_utils.to_categorical(y_test0, 10)\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\n\nnp.random.seed(0)\nmodel0 = Sequential()\nmodel0.add(Dense(15, input_dim=784, activation=\"sigmoid\"))\n#model0.add(Dense(15, input_dim=784, activation=\"tanh\"))\nmodel0.add(Dense(10, activation=\"sigmoid\"))\nmodel0.compile(optimizer=SGD(lr=0.2), loss='mean_squared_error', metrics=[\"accuracy\"])\n\n#%%time\nhist0 = model0.fit(X_train, Y_train, epochs=30, batch_size=100, validation_data=(X_test, Y_test), verbose=0)\n\nnp.random.seed(0)\nmodel1 = Sequential()\nmodel1.add(Dense(15, input_dim=784, activation=\"sigmoid\"))\nmodel1.add(Dense(10, activation=\"sigmoid\"))\n#model1.add(Dense(15, input_dim=784, activation=\"relu\"))\n#model1.add(Dense(10, activation=\"softmax\"))\nmodel1.compile(optimizer=SGD(lr=0.2), loss='categorical_crossentropy', metrics=[\"accuracy\"])\n#model1.compile(optimizer=SGD(lr=0.2), loss='binary_crossentropy', metrics=[\"accuracy\"])\n\n#%%time\nhist1 = model1.fit(X_train, Y_train, epochs=30, batch_size=100, validation_data=(X_test, Y_test), verbose=0)\n\nplt.plot(hist0.history['val_acc'], ls=\":\", label=\"mean squared error\")\nplt.plot(hist1.history['val_acc'], label=\"cross entropy\")\nplt.legend()\nplt.show()\n","repo_name":"cjsong21/Machine-learning","sub_path":"딥러닝/01.딥러닝모델예/ModelDL.py","file_name":"ModelDL.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"25704972253","text":"import logging\n\nfrom PySide2 import QtWidgets, QtGui, QtCore\nfrom pynput import mouse\n\nfrom auto_assistant.model import actions\n\nlogger = logging.getLogger(__name__)\n_SECONDS_IN_A_DAY = 86400\n\n\nclass AddActionDialog(QtWidgets.QDialog):\n def __init__(self):\n super().__init__()\n self.resize(300, 200)\n self.__result = None\n self.__my_layout = QtWidgets.QGridLayout()\n\n button_grid = QtWidgets.QGridLayout()\n self.__ok_button = QtWidgets.QPushButton('Ok')\n self.__cancel_button = QtWidgets.QPushButton('Cancel')\n button_grid.addWidget(self.__cancel_button, 0, 0)\n button_grid.addWidget(self.__ok_button, 0, 1)\n self.__ok_button.clicked.connect(self.accept)\n self.__cancel_button.clicked.connect(self.reject)\n\n pick_action_combo_box = QtWidgets.QComboBox()\n pick_action_combo_box.addItems([i.value for i in actions.ActionType])\n pick_action_combo_box.currentTextChanged.connect(self.__handle_action_type_change)\n\n # The default type of grid will be one for a ClickAction\n self.__input_grid = self.__generate_grid_layout_for_click_action()\n\n self.__my_layout.addWidget(pick_action_combo_box, 0, 0)\n self.__my_layout.addLayout(self.__input_grid, 1, 0)\n self.__my_layout.addLayout(button_grid, 2, 0)\n self.setLayout(self.__my_layout)\n\n def __clear_items_in(self, layout: QtWidgets.QLayout):\n while layout.count() > 0:\n item = layout.takeAt(0)\n if isinstance(item, QtWidgets.QLayout):\n self.__clear_items_in(item)\n else:\n logger.debug(f'Removing {type(item.widget())}')\n item.widget().deleteLater()\n logger.debug(f'Removing {type(layout)}')\n layout.deleteLater()\n\n def __handle_action_type_change(self, selected_action_type: str):\n logger.info(f'Generating UI for {selected_action_type}')\n if self.__input_grid is not None:\n self.__my_layout.removeItem(self.__input_grid)\n self.__clear_items_in(self.__input_grid)\n self.__input_grid = None\n logger.info('\\tOld UI removed')\n try:\n self.__input_grid = self.__generate_grid_layout_for(actions.ActionType(selected_action_type))\n self.__my_layout.addLayout(self.__input_grid, 1, 0)\n except RuntimeError:\n logger.error('Unable to generate the input grid', exc_info=True)\n\n def __generate_grid_layout_for(self, action_type: actions.ActionType) -> QtWidgets.QGridLayout:\n if actions.ActionType.CLICK_ACTION == action_type:\n return self.__generate_grid_layout_for_click_action()\n elif actions.ActionType.SLEEP_ACTION == action_type:\n return self.__generate_grid_layout_for_sleep_action()\n else:\n raise RuntimeError(f'Unsupported action type: {action_type}')\n\n def __generate_grid_layout_for_sleep_action(self) -> QtWidgets.QGridLayout:\n self.__ok_button.setEnabled(True)\n return_value = QtWidgets.QGridLayout()\n\n # create the label\n duration_label = QtWidgets.QLabel('Sleep for (secs): ')\n duration_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n return_value.addWidget(duration_label, 0, 0)\n\n # create the input for the time\n self.__sleep_text_line = QtWidgets.QLineEdit()\n self.__sleep_text_line.setPlaceholderText('Enter time in seconds')\n self.__sleep_text_line.setValidator(QtGui.QIntValidator(0, _SECONDS_IN_A_DAY))\n self.__sleep_text_line.editingFinished.connect(self.__handle_sleep_time_input)\n return_value.addWidget(self.__sleep_text_line, 0, 1)\n\n return return_value\n\n def __handle_sleep_time_input(self):\n self.__result = actions.SleepAction(int(self.__sleep_text_line.text()))\n\n def __generate_grid_layout_for_click_action(self) -> QtWidgets.QGridLayout:\n 
self.__ok_button.setEnabled(False)\n return_value = QtWidgets.QGridLayout()\n x_label = QtWidgets.QLabel('x: ')\n x_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n self.__x_value = QtWidgets.QLabel('1')\n y_label = QtWidgets.QLabel('y: ')\n y_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n self.__y_value = QtWidgets.QLabel('1')\n return_value.addWidget(x_label, 0, 0)\n return_value.addWidget(self.__x_value, 0, 1)\n return_value.addWidget(y_label, 0, 2)\n return_value.addWidget(self.__y_value, 0, 3)\n self.__get_click_button = QtWidgets.QPushButton('Get click')\n self.__get_click_button.clicked.connect(self.__get_click)\n self.__mouse_listener = mouse.Listener(on_click=self.__on_click)\n return_value.addWidget(self.__get_click_button, 1, 0, 1, -1)\n return return_value\n\n def __toggle_buttons_to(self, enabled: bool):\n self.__ok_button.setEnabled(enabled)\n self.__cancel_button.setEnabled(enabled)\n self.__get_click_button.setEnabled(enabled)\n\n def __get_click(self):\n self.__mouse_listener.start()\n self.__toggle_buttons_to(False)\n\n def __on_click(self, x: int, y: int, button: mouse.Button, pressed: bool) -> bool:\n logger.debug(f'Clicked at ({x}, {y}) with button {button} and pressed={pressed}')\n if pressed and button == mouse.Button.left:\n self.__x_value.setText(str(x))\n self.__y_value.setText(str(y))\n self.__toggle_buttons_to(True)\n\n # reset the mouse listener for next time\n self.__mouse_listener = mouse.Listener(on_click=self.__on_click)\n self.__result = actions.ClickAction(int(self.__x_value.text()), int(self.__y_value.text()))\n return False\n\n def get_result(self) -> actions.Action:\n return self.__result\n\n def accept(self):\n super().accept()\n\n def reject(self):\n super().reject()\n self.__result = None\n","repo_name":"NateJSchmidt/autoassistant","sub_path":"src/auto_assistant/view/add_action_dialog.py","file_name":"add_action_dialog.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38327902164","text":"from collections import deque\nfrom pprint import pprint\nn = int(input())\narr = [list(map(int, input().split())) for _ in range(n)]\nG = dict()\nfor i in range(n):\n G[i] = []\n for j in range(n):\n if arr[i][j] == 1:\n value = G[i]\n value.append(j)\n G[i] = value\n\nq = deque()\nfor i in range(len(G)):\n q.append(i)\n visit = [0] * len(G)\n while q:\n start = q.popleft()\n for w in G[start]:\n if visit[w]:\n continue\n visit[w] = 1\n arr[i][w] = 1\n q.append(w)\n\nfor i in range(n):\n print(*arr[i])\n\n# for m in range(len(G)): # 경유지 기준으로\n# for st in range(len(G)): # 시작점 다 돌려보고\n# for end in range(len(G)): # 도착점 다 돌려봤을 때\n# if arr[st][end] == 0: # 만약 시작점에서 도착점으로 가는 곳이 현재까지는 없는 경우\n# arr[st][end] = arr[st][m] & arr[m][end] # 가능하다면 갈 수 있다고 판단하여 배열 바꿔줌\n# print(arr)","repo_name":"swanious/Algorithm","sub_path":"BOJ/11403_경로찾기.py","file_name":"11403_경로찾기.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"71836524734","text":"from datetime import datetime\n\nfrom flask import request\nfrom flask_restful import Resource\n\nfrom config import db\nfrom models.user import User\nfrom schemas.user import UserSchema\nfrom utils import get_args_parser\n\nuser_schema = UserSchema(many=False)\nparser = get_args_parser([\n {'name': \"num_mark_tasks\", 'type': int, 'required': True,\n 'help': 'number of mark tasks the user has'},\n {'name': \"password\", 'type': str, 'required': False,\n 'help': 'unprocessed password of the user'},\n {'name': \"name\", 'type': str, 'required': True,\n 'help': 'name of the user'}\n])\n\n\nclass UserResource(Resource):\n \"\"\"Resource to handle CRUD operations for users table\"\"\"\n\n @staticmethod\n def get(user_id: int):\n \"\"\"\n Returns single user\n \"\"\"\n\n if not user_id:\n return {'status': 'failed', 'message': \"Empty ID field\"}, 404\n\n user = User.query.filter(User.user_id == user_id).one_or_none()\n if not user:\n return {'status': 'failed', 'message': \"User not found\"}, 404\n\n return {'data': user_schema.dump(user)}, 200\n\n @staticmethod\n def post(user_id: int):\n \"\"\"\n Creates new user\n \"\"\"\n\n request.get_json(force=True)\n data = parser.parse_args(strict=True)\n if not data:\n return {'status': 'failed', 'message': 'No input data provided'}, 204\n\n user = User.query.filter_by(user_id=user_id).one_or_none()\n if user:\n return {'status': 'failed', 'message': 'User already exists'}, 400\n\n data.update({'user_id': user_id, 'last_activity_ds': datetime.now(),\n 'registration_date': datetime.now()})\n user = User(**data)\n db.session.add(user)\n db.session.commit()\n\n result = user_schema.dump(user)\n\n return {\"status\": 'success', 'data': result}, 201\n\n @staticmethod\n def put(user_id: int):\n \"\"\"\n Updates the user\n Possible fields for update:\n 'num_mark_tasks': int,\n 'password': str,\n 'name': str,\n 'last_name': str\n \"\"\"\n\n request.get_json(force=True)\n data = parser.parse_args(strict=True)\n if not data:\n return {'status': 'failed', 'message': 'No input data provided'}, 204\n\n user = User.query.filter_by(user_id=user_id).first()\n if not user:\n return {'status': 'failed', 'message': 'User does not exist'}, 204\n\n for k, v in data.items():\n user.__setattr__(k, v)\n db.session.commit()\n\n result = user_schema.dump(user)\n return {\"status\": 'success', 'data': result}, 202\n\n @staticmethod\n def delete(user_id):\n \"\"\"\n Deletes single user\n \"\"\"\n\n user = User.query.filter_by(user_id=user_id).one_or_none()\n if not user:\n return {'status': 'failed',\n 'message': 'User does not exist'}, 204\n User.query.filter_by(user_id=user_id).delete()\n db.session.commit()\n\n result = user_schema.dump(user)\n return {\"status\": 'success', 'data': result}, 202\n","repo_name":"kirilllzaitsev/datamark-backend","sub_path":"backend/flaskr/res/UserRes.py","file_name":"UserRes.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27995752137","text":"#!/usr/bin/env python3\n\nimport buffer\n\nbuf = bytearray(6)\nbuffer.snprintf(buf, \"Hello world!\")\nprint(buf)\n\nsize = 256\npybuf = bytearray(size)\nfor i in range(size):\n pybuf[i] = i\n\nbuf = buffer.Buffer()\nbuf.put(2*size)\nbuffer.write2(pybuf, buf, size)\n\nfor i in range(2*size):\n print(f\"{i} : {hex(buf[i])}\")\n\n","repo_name":"savagesmc/swig_play","sub_path":"pybuffer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"43870270734","text":"from PySide2.QtWidgets import QListWidget,QPushButton\r\nimport PySide2.QtCore\r\nfrom src import event_key, event_dispatcher\r\nimport copy\r\n\r\nclass CategoryApplyWindow:\r\n def __init__(self, window: QListWidget,clear_button: QPushButton):\r\n self.list_window = window\r\n self.current_filter = []\r\n self.clear_button = clear_button\r\n self.list_window.itemDoubleClicked.connect(self.delete)\r\n self.clear_button.clicked.connect(self.clear_button_pushed)\r\n\r\n def add(self, category: str):\r\n self.current_filter.append(category)\r\n self.list_window.addItem(category)\r\n\r\n def delete(self, category_item):\r\n category_text = category_item.text()\r\n self.current_filter.remove(category_text)\r\n remove_list = self.list_window.findItems(category_text, PySide2.QtCore.Qt.MatchFixedString)\r\n for item in remove_list:\r\n row = self.list_window.row(item)\r\n self.list_window.takeItem(row)\r\n # dispatch\r\n dispatch_data = copy.deepcopy(self.current_filter)\r\n event_dispatcher.emit_event(event_key.SEND_CATEGORY_FILTER, dispatch_data)\r\n event_dispatcher.emit_event(event_key.LOG_FILTERING, None)\r\n\r\n def clear(self):\r\n self.current_filter.clear()\r\n self.list_window.clear()\r\n\r\n def is_contain(self, category: str) -> bool:\r\n return True if category in self.current_filter else False\r\n\r\n # @Event\r\n def receive_add_filter_event(self, category):\r\n if not self.is_contain(category):\r\n self.add(category)\r\n # dispatch\r\n dispatch_data = copy.deepcopy(self.current_filter)\r\n event_dispatcher.emit_event(event_key.SEND_CATEGORY_FILTER, dispatch_data)\r\n event_dispatcher.emit_event(event_key.LOG_FILTERING, None)\r\n\r\n # @Slot\r\n def clear_button_pushed(self):\r\n self.clear()\r\n event_dispatcher.emit_event(event_key.SEND_CATEGORY_FILTER, [])\r\n event_dispatcher.emit_event(event_key.LOG_FILTERING, None)\r\n","repo_name":"TERABYTE0130/logViewer","sub_path":"src/category_apply_window.py","file_name":"category_apply_window.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"33548619678","text":"from network import LoRa\r\nimport socket\r\nimport time\r\nimport ubinascii\r\nfrom lora_help import connect_lora_socket\r\n\r\nimport pycom # \"pycom\" will be an error in your\r\n# IDE because it's not on your computer, but on\r\n# the device\r\nimport time\r\nimport machine\r\n\r\nfrom machine import ADC\r\nfrom machine import Pin\r\nfrom network import WLAN\r\nimport socket\r\n\r\n#LoRa\r\n#from network import LoRa\r\n#import binascii\r\n#print(binascii.hexlify(LoRa().mac()).upper())\r\n\r\npycom.heartbeat(False)\r\npycom.rgbled(0x0000FF) # blue\r\n#time.sleep(2) #sleep for 1 second\r\n\r\n##====== LoRa ======\r\n\r\n## Initialise LoRa in LORAWAN mode.\r\n## Please pick the region that matches where you are using the device:\r\n## Asia = LoRa.AS923\r\n## Australia = LoRa.AU915\r\n## Europe = LoRa.EU868\r\n## United States = LoRa.US915\r\nlora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868)\r\n\r\n# create an OTAA authentication parameters, change them to the provided credentials\r\napp_eui = ubinascii.unhexlify('6081F9FF68E87979')\r\napp_key = ubinascii.unhexlify('B8078474D99CC4CCAEFE3B563AECB8E7')\r\n#uncomment to use LoRaWAN application provided dev_eui\r\ndev_eui = ubinascii.unhexlify('70B3D549957622C1')\r\n\r\n## Uncomment for US915 / AU915 & Pygate\r\n## for i in range(0,8):\r\n## lora.remove_channel(i)\r\n## for i in range(16,65):\r\n## lora.remove_channel(i)\r\n## for i in range(66,72):\r\n## lora.remove_channel(i)\r\n\r\n## join a network using OTAA (Over the Air Activation)\r\n##uncomment below to use LoRaWAN application provided dev_eui\r\n##lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)\r\n#lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\r\n\r\n#pycom.rgbled(0xFF0000) # Red\r\n\r\n## wait until the module has joined the network\r\n#while not lora.has_joined():\r\n# time.sleep(2.5)\r\n# print('Not yet joined...')\r\n\r\n#print('Joined')\r\n#pycom.rgbled(0x00FF00) # Green\r\n\r\n## create a LoRa socket\r\n#s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\r\n\r\n## set the LoRaWAN data rate\r\n#s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)\r\n\r\n##====== End LoRa ======\r\n\r\n#====== WiFi ======\r\n\r\nwlan = WLAN(mode=WLAN.STA)\r\n\r\nwlan.connect(ssid='Stargate_IoT', auth=(WLAN.WPA2, 'TieFighter'))\r\n#wlan.connect(ssid='Martins iPhone', auth=(WLAN.WPA2, 'j1aqdr2q2heb9'))\r\n#while not wlan.isconnected():\r\n# print(\"WiFi not connected\")\r\n# time.sleep(2) #sleep for 2 seconds\r\n# machine.idle()\r\n\r\ntime.sleep(5) #sleep for 5 seconds\r\n\r\n#====== End WiFi ======\r\n\r\ndata = ''\r\nadc = ADC()\r\ntempsensor = adc.channel(pin='P15') # create an analog pin on P15\r\nbat_voltage = adc.channel(attn=ADC.ATTN_11DB, pin='P16')\r\n\r\nwhile True: #Forever loop\r\n\r\n vbat = bat_voltage.voltage()*2\r\n # note that the expansionboard 3 has a voltage divider of 1M / 1M to account for\r\n # 1M / 1M, ratio = 1:2\r\n\r\n millivolts = tempsensor.voltage() # Analog temperature measured in millivolts\r\n degC = (millivolts - 500.0) / 10.0 # Convert millivolts to celsius\r\n degF = ((degC * 9.0) / 5.0) + 32.0 # Convert celsius to fahrenheit\r\n\r\n print('battery voltage:', vbat, 'mV')\r\n print('temperature:', degC, ' C')\r\n\r\n if vbat >= 4420:\r\n pycom.rgbled(0x00FF00) # Green\r\n else:\r\n pycom.rgbled(0xFF0000) # Red\r\n\r\n if wlan.isconnected():\r\n\r\n print(\"WiFi connected\")\r\n time.sleep(5) #sleep for 5 seconds\r\n print(wlan.ifconfig())\r\n\r\n # setup socket for connection\r\n 
wifi_socket = socket.socket()\r\n #s = ssl.wrap_socket(s)\r\n host = 'dev.electra.se'\r\n addr = socket.getaddrinfo(host,80)[0][-1]\r\n wifi_socket.connect(addr)\r\n print('socket connected')\r\n\r\n data = '2,' + str(vbat) + ',' + str(degC) + ',' + '4'\r\n httpreq = 'POST /MessageHandler.ashx HTTP/1.1 \\r\\nHOST: '+ host + '\\r\\nContent-Length: ' + str(len(data)) + '\\r\\nConnection: keep-alive \\r\\n\\r\\n' + data\r\n print('http request: \\n', httpreq)\r\n wifi_socket.send(httpreq)\r\n rec_bytes = wifi_socket.recv(10000)\r\n print(rec_bytes)\r\n else:\r\n print(\"WiFi not connected\")\r\n\r\n # Try to join LoRa\r\n if not lora.has_joined():\r\n # join a network using OTAA (Over the Air Activation)\r\n #uncomment below to use LoRaWAN application provided dev_eui\r\n #lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)\r\n lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\r\n\r\n # wait until the module has joined the network\r\n while not lora.has_joined():\r\n time.sleep(2.5)\r\n print('LoRa not yet joined...')\r\n\r\n # create a LoRa socket\r\n lora_socket = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\r\n\r\n #else:\r\n #lora_socket = connect_lora_socket()\r\n\r\n print('LoRa joined')\r\n\r\n ## send some data\r\n data = '2,' + str(vbat) + ',' + str(degC) + ',' + '3'\r\n lora_socket.send(data)\r\n\r\n #time.sleep(600) #sleep for 10 minutes\r\n time.sleep(10) #sleep for 10 seconds\r\n","repo_name":"martinkvarmo/my_summerhouse_IOT_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3181610332","text":"#!/usr/bin/env python3\n\n#_____________________________________________________________________________\n#\n# filter for changing the gender of pronouns in a plaintext\n#\n# Author: Samdney \n# D4A7 35E8 D47F 801F 2CF6 2BA7 927A FD3C DE47 E13B \n# License: See LICENSE for licensing information\n#_____________________________________________________________________________\n\"\"\"\n***\nGENDER SWITCHING\n***\nSwitching pronouns in a plaintext message of one gender, to the pronouns of the \nother gender.\n\n---\nThe problems of gender switching\n---\nAssumptions:\n1. The message is in English => English pronouns\n\tThis are: he, she, him, her, his, hers (6 single pronouns)\n2. We can have two possible cases:\n\t=> A random pattern (or something written by a person with terrible writing \n\tskills ;)\n\t=> A natural language text with established English grammar\n\n3. We have the following possible pronoun pairs:\n\the \t<=> she\t\tPersonal pronoun - subject\n\thim <=> her\t\tPersonal pronoun - object\n\this <=> her\t\tPossessive determine\n\this <=> hers\tPossessive pronoun\n\t(4 pairs)\n\n\tProblem:\tThis map is NOT injective!\n\tBecause:\ther -> him or his\n\t\t\t\this -> her or hers\n\t=> We have to find an additional helpful quality!\n\t\n\tFirst idea:\tWe look for the position within a sentence.\n\tProblem: An pronoun can be direct or indirect\n\t=> Idea not helpful\n\t\n\tSecond idea: Looking for a natural language parser which can\n\ttell me which kind of word it is (SUB=subject, OBJ=object, etc., ...)\n\t\n\t=> Solution is only so good like the natural language parser!\n\t=> I found this wrapper parser: \n\thttps://github.com/EducationalTestingService/python-zpar\t\t\t\t\n\"\"\"\n\nimport string\nimport sys\n\nclass gender_filter():\n\n\tdef __init__(self):\n\t\tself.msg_new\t= \" \"\n\n\tdef change_msg(self,filter_switch,msg):\n\t\tswitch = gender_filter()\n\t\tif filter_switch == 0:\n\t\t\t_msg_new = msg\n\t\telif filter_switch == 1:\n\t\t\t_msg_new = switch.simple_switch(msg)\n\t\telse:\n\t\t\t_msg_new = switch.lingu_switch(msg)\n\n\t\tself.msg_new = _msg_new\n\n\t# Switch one pronoun pair \n\tdef switch_one_pronoun_pair(self,pn1,pn2,msg):\n\t\t# Placeholder should be no \"real\" word\n\t\t# Something with lim -> 0 probability to appear in msg\n\t\ttmp = \"6m7Q6q16\"\n\n\t\tmsg1 \t= msg.replace(pn1,tmp)\n\t\tmsg2 \t= msg1.replace(pn2,pn1)\n\t\tmsg3\t= msg2.replace(tmp,pn2)\n\n\t\tmsg_switched = msg3\n\t\treturn msg_switched\n\n\t# Switch for all possible positions within a sentence and msg\n\tdef switch_one_pronoun_pair_allpos(self,pn1,pn2,msg):\n\t\tmyfilter = gender_filter()\n\t\t_msg_new = msg\n\n\t\t# Beginning and Middle\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \" \", \" \" + str(pn2) + \" \",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \", \", \" \" + str(pn2) + \", \",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \"'s \", \" \" + str(pn2) + \"'s \",_msg_new)\n\t\t\n\t\t# End\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \".\", \" \" + str(pn2) + \".\",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \"!\", \" \" + str(pn2) + \"!\",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \"?\", \" \" + str(pn2) + \"?\",_msg_new)\n\t\treturn _msg_new\n\t\t\n\n\t\"\"\"\n\t# SIMPLE_SWITCH\n\t\"\"\"\t\n\t# Idea: Simple find and replace.\n\t#\tStep1:\tSwitch he <=> 
she\n\t#\tStep2:\tSwitch him <=> her\n\t#\tStep3:\tSwitch his <=> her\n\t#\tStep4:\tSwitch his <=> hers\n\t# Comment: The pronoun parsing only works correct for an msg which follows \n\t# the established rules of English grammar. Absolutely not, for a random \n\t# pattern text\n\t# Comment: The input msg variable should contain the full message, at one. \n\t# If we do parsing for each single data of buffer_size, parsing will not \n\t# work if a pronoun is splited between two buffer packages. \n\t# E.g.: package1|package2 = msg = \"He and sh\"|\"e are good friends.\"\n\t# => Has to be fixed.\n\t# TODO: Result would be better, if we do switching not chronologically \n\t# (step1, step2, step3, step4). Instead we should have an additional look at\n\t# probability tables for the probability of the appereance of a single \n\t# pronoun in an English text. Then do the switching of the not-injective \n\t# pronoun pairs under consideration of this probabilities.\n\tdef simple_switch(self,msg):\n\t\t\n\t\t_msg_new = \" \"\n\t\tmyfilter = gender_filter()\n\t\n\t\t# Add an additional space character at the beginng of msg\n\t\t# Reason: Then you can clearly identify pronouns at the beginning of msg\n\t\t_msg_new = \" \" + str(msg)\n\t\n\t\t# Find and replace for different pronoun pairs\n\t\t# Find and replace for different notations: he, He, HE ...\n\t\t# Find and replace for different positions within a sentence\n\t\t\n\t\t#pronoun_pairs = {\"he\" : \"she\", \"him\":\"her\", \"his\":\"her\", \"his\":\"hers\"}\n\t\t\n\t\t# Switching of Step3 with Step4\n\t\tpronoun_pairs = {\"he\" : \"she\", \"him\":\"her\", \"his\":\"hers\", \"his\":\"her\"}\n\t\t\n\t\t# Example of the different results\n\t\t# Old: He, and SHE likes me so much. HELP him! \n\t\t# \tHis dog likes tea and eats with him cake. That's hers.\n\t\t# New: She, and HE likes me so much. HELP her! \n\t\t# \tHers dog likes tea and eats with her cake. That's his.\n\n\t\t# Old: He, and SHE likes me so much. HELP him! \n\t\t# \tHis dog likes tea and eats with him cake. That's hers.\n\t\t# New: She, and HE likes me so much. HELP his! \n\t\t# \tHer dog likes tea and eats with his cake. That's hers.\n\n # TODO If we have very long messages, we should add 'if cases' within \n\t\t# the loop, to not always run all 'find and replace' functions for each\n # pronoun pair. E.g. 'he' or 'she' aren't at the end of a senctence, if\n # the sentence follows english grammar rules, or? 
-> Saving of\n # computation time\n\t\tfor male in pronoun_pairs:\n\t\t\tpn1 = male\n\t\t\tpn2 = pronoun_pairs[male]\n\t\n\t\t\t# Lower\n\t\t\tpn1_lower = pn1.lower()\n\t\t\tpn2_lower = pn2.lower()\n\t\t\t_msg_new = myfilter.switch_one_pronoun_pair_allpos(pn1_lower,pn2_lower,_msg_new)\n\n\t\t\t# Upper\n\t\t\tpn1_upper = pn1.upper()\n\t\t\tpn2_upper = pn2.upper()\n\t\t\t_msg_new = myfilter.switch_one_pronoun_pair_allpos(pn1_upper,pn2_upper,_msg_new)\n\t\t\n\t\t\t# Titled\n\t\t\tpn1_titled = pn1.title()\n\t\t\tpn2_titled = pn2.title()\n\t\t\t_msg_new = myfilter.switch_one_pronoun_pair_allpos(pn1_titled,pn2_titled,_msg_new)\n\t\t\t\t\n\t\t# Remove the additional space character from the beginng of msg\n\t\tlen_msg = len(_msg_new)\n\t\t_msg_new = _msg_new[1:len_msg]\n\n\t\tself.msg_new = _msg_new\n\t\treturn _msg_new\n\n\t\"\"\"\n\t# LINGU_SWITCH\n\t\"\"\"\t\n\t# TODO: Not implemented until now\n\t# Idea: \n\t# - Send msg to natural language parser to determine the kind of word \n\t# \t(subject, object, ...).\n\t# - Search for all her and his and their result of the natural language \n\t# \tparsing\n\t# - Use this information to decide if we have: her => him or her => his, \n\t#\this => her or his => hers\n\t# - Change pronouns under consideration of this additional information\n\tdef lingu_switch(self,msg):\n\t\t_msg_new = \" \"\n\t\tself.msg_new = _msg_new\n\t\treturn _msg_new\n\n\"\"\"\n# TEST\n\"\"\"\n\ndef test():\n\t# Test messages\n\t#msg = \"She, you and me. Sheer is funny! He is it, too.\"\n\tmsg = \"He, and SHE likes me so much. HELP him! His dog likes tea and eats with him cake. That's hers. He's great.\"\n\tprint(\"Old: \" + msg)\n\t\n\tmyfilter = gender_filter()\n\tmyfilter.change_msg(1,msg)\n\tmsg_new = myfilter.msg_new\n\tprint(\"New: \" + msg_new)\n\nif __name__=='__main__':\n\ttest()\n","repo_name":"Samdney/pysocks5sys","sub_path":"myproxyfilter.py","file_name":"myproxyfilter.py","file_ext":"py","file_size_in_byte":7204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"23176883372","text":"l = int(input())\nheight = list(map(int, input().split()))\nm = int(input())\ncnt = 0\ninfo = {}\nindex = []\n\nfor i in range(1, l+1):\n info[i] = height[i-1]\n# print(info)\n\n\ndef find_index():\n global info\n global index\n info = dict(sorted(info.items(), key = lambda x : x[1], reverse=True))\n # print(info)\n index = list(info.keys())\n return index\n\n\nwhile cnt < m:\n cnt += 1\n find_index()\n high_index = index[0]\n low_index = index[-1]\n info[high_index] -= 1\n info[low_index] += 1\n # print(cnt, info)\nresult = list(info.values())\nprint(max(result) - min(result))","repo_name":"Seoyun0626/CodingTest","sub_path":"인프런/창고정리.py","file_name":"창고정리.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30270343324","text":"from decimal import Decimal\nfrom itertools import chain\nfrom numbers import Number\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import FieldDoesNotExist\nfrom django.conf import settings\nimport copy\nimport datetime\nimport inspect\n\n\ndef javascript_date_format(python_date_format):\n format = python_date_format.replace(r'Y', 'yyyy')\n format = format.replace(r'm', 'mm')\n format = format.replace(r'd', 'dd')\n if not format:\n format = 'yyyy-mm-dd'\n return format\n\n\ndef duplicate(obj, changes=None):\n \"\"\" Duplicates any object including m2m fields\n changes: any changes that should occur, example\n changes = (('fullname','name (copy)'), ('do not copy me', ''))\"\"\"\n if not obj.pk:\n raise ValueError('Instance must be saved before it can be cloned.')\n duplicate = copy.copy(obj)\n duplicate.pk = None\n for change in changes:\n duplicate.__setattr__(change[0], change[1])\n duplicate.save()\n # trick to copy ManyToMany relations.\n for field in obj._meta.many_to_many:\n source = getattr(obj, field.attname)\n destination = getattr(duplicate, field.attname)\n for item in source.all():\n try: # m2m, through fields will fail.\n destination.add(item)\n except:\n pass\n return duplicate\n\n\nDATE = 1\nNUMBER = 2\n\n\ndef sort_helper(x, sort_key, sort_type):\n \"\"\" Sadly python 3 makes it very hard to sort mixed types\n We can work around this by forcing the types\n \"\"\"\n result = x[sort_key]\n if result is None:\n if sort_type == DATE:\n result = datetime.date(datetime.MINYEAR, 1, 1)\n elif sort_type == NUMBER:\n result = 0\n else: # Last try - make it a string\n result = ''\n return result\n\n\ndef sort_data(data_list, display_field):\n \"\"\" Sort data based on display_field settings\n data_list - 2d array of data\n display_field - report_builder.DisplayField object\n returns sorted data_list\n \"\"\"\n position = display_field.position\n is_reverse = display_field.sort_reverse\n # Try to inspect sample data to determine type\n sample_data = data_list[0][position]\n if sample_data is None:\n sample_data = data_list[-1][position]\n sort_type = None\n if isinstance(sample_data, (datetime.date, datetime.datetime)):\n sort_type = DATE\n elif isinstance(sample_data, (int, float, complex)):\n sort_type = NUMBER\n return sorted(\n data_list,\n key=lambda x: sort_helper(x, position, sort_type),\n reverse=is_reverse\n )\n\n\ndef increment_total(display_field, data_row):\n val = data_row[display_field.position]\n if isinstance(val, bool):\n # True: 1, False: 0\n display_field.total_count += Decimal(val)\n elif isinstance(val, Number):\n display_field.total_count += Decimal(str(val))\n elif val:\n display_field.total_count += Decimal(1)\n\n\ndef formatter(value, style):\n \"\"\" Convert value to Decimal to apply numeric formats.\n value - The value we wish to format.\n style - report_builder.Format object\n \"\"\"\n try:\n value = Decimal(value)\n except Exception:\n pass\n\n try:\n return style.string.format(value)\n except ValueError:\n return value\n\n\n# Model Utils\n\n\ndef isprop(v):\n return isinstance(v, property)\n\n\ndef get_properties_from_model(model_class):\n \"\"\" Show properties from a model \"\"\"\n properties = []\n attr_names = [name for (name, value) in inspect.getmembers(model_class, isprop)]\n for attr_name in attr_names:\n if attr_name.endswith('pk'):\n attr_names.remove(attr_name)\n else:\n properties.append(dict(label=attr_name, name=attr_name.strip('_').replace('_', ' ')))\n return 
sorted(properties, key=lambda k: k['label'])\n\n\ndef get_relation_fields_from_model(model_class):\n \"\"\" get related fields (m2m, fk, and reverse fk) \"\"\"\n relation_fields = []\n all_fields_names = get_all_field_names(model_class)\n for field_name in all_fields_names:\n field = copy.deepcopy(model_class._meta.get_field(field_name))\n direct = field.concrete\n m2m = field.many_to_many\n # get_all_field_names will return the same field\n # both with and without _id. ignore the duplicate.\n if field_name[-3:] == '_id' and field_name[:-3] in all_fields_names:\n continue\n if m2m or not direct or field.is_relation:\n field.field_name = field_name\n relation_fields += [field]\n return relation_fields\n\n\ndef get_all_field_names(model_class):\n \"\"\" Restores a function from django<1.10 \"\"\"\n return list(set(chain.from_iterable(\n (field.name, field.attname) if hasattr(field, 'attname') else (field.name,)\n for field in model_class._meta.get_fields()\n # For complete backwards compatibility, you may want to exclude\n # GenericForeignKey from the results.\n if not (field.many_to_one and field.related_model is None)\n )))\n\n\ndef get_direct_fields_from_model(model_class):\n \"\"\" Direct, not m2m, not FK \"\"\"\n direct_fields = []\n all_fields_names = get_all_field_names(model_class)\n for field_name in all_fields_names:\n field = model_class._meta.get_field(field_name)\n direct = field.concrete\n m2m = field.many_to_many\n if direct and not m2m and not field.is_relation:\n direct_fields += [field]\n return direct_fields\n\n\ndef get_custom_fields_from_model(model_class):\n \"\"\" django-custom-fields support \"\"\"\n if 'custom_field' in settings.INSTALLED_APPS:\n from custom_field.models import CustomField\n try:\n content_type = ContentType.objects.get(\n model=model_class._meta.model_name,\n app_label=model_class._meta.app_label)\n except ContentType.DoesNotExist:\n content_type = None\n custom_fields = CustomField.objects.filter(content_type=content_type)\n return custom_fields\n\n\ndef get_model_from_path_string(root_model, path):\n \"\"\" Return a model class for a related model\n root_model is the class of the initial model\n path is like foo__bar where bar is related to foo\n \"\"\"\n for path_section in path.split('__'):\n if path_section:\n try:\n field = root_model._meta.get_field(path_section)\n direct = field.concrete\n except FieldDoesNotExist:\n return root_model\n if direct:\n if hasattr(field, 'related'):\n try:\n root_model = field.related.parent_model()\n except AttributeError:\n root_model = field.related.model\n\n elif hasattr(field, 'related_model') and field.related_model:\n root_model = field.related_model\n\n else:\n if hasattr(field, 'related_model'):\n root_model = field.related_model\n else:\n root_model = field.model\n return root_model\n","repo_name":"burke-software/django-report-builder","sub_path":"report_builder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":753,"dataset":"github-code","pt":"77"}
+{"seq_id":"72561631930","text":"import numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\nprint(\"This script is used to print the accesses of a TPG to a CU according to the out_best_stats.md file. Result is plot using matplotlib and corresponds to a sizeXsize image (CU) with a color bar legend.\")\nprint(\"This script is often used with launch_all_TPGs-Accesses_print.py which calls it many times.\")\n\nif (len(sys.argv) != 2):\n print(\"Illegal number of parameters\")\n print(\"Usage: python3.6 print_TPGAcesses.py FILE_NAME\")\n print(\"Example: python3.6 /home/cleonard/dev/stage/scripts/python/printData/print_TPGAcesses.py /home/cleonard/dev/stage/results/scripts_results/Binary/Actions_bal_dataset1/NP/out_best_stats_ent0_bNP_63,63.md\")\n\n# Global variable\nsize = 32\n\n# Get file name\nprint(\"Script name : \", str(sys.argv[0]))\nprint(\"File name : \", str(sys.argv[1]))\ninputFile = str(sys.argv[1])\nsplitName = inputFile.split(\"/\")[-2]\nprint(splitName)\n\n# Open file, get last line and print it\nfile = open(inputFile, \"r\")\ndata = file.readlines()[-1]\nprint(data)\nfile.close()\n\n# Remove first ('{') and last ('}' + any space if there is) char from data\ndata = data[1:]\nwhile data[-1] != \"}\":\n data = data[:-1]\ndata = data[:-1]\n\n# Split data in a tab containing every pair\npixels = data.split(\"} {\")\n\n# Init the access array\naccess = np.zeros((size, size))\n\n# Store data in access\nfor p in pixels:\n # Split every pair with the pixel index (var[0]) and the number of accesses (var[1])\n var = p.split(\",\")\n # Compute 2D indexes\n row = int(var[0]) // size\n col = int(var[0]) % size\n # Store number of accesses in the corresponding pixel\n access[row][col] = int(var[1])\n\n# *** Own colormap (pretty but not really efficient) ***\n# # Create the colors (normalized)\n# topo_colors = [(255/255, 255/255, 255/255), # Blanc\n# (243/255, 232/255, 77/255), # Jaune\n# (255/255, 146/255, 3/255), # Orange\n# (255/255, 0/255, 0/255), # Rouge\n# (197/255, 3/255, 255/255), # Violet\n# (3/255, 205/255, 255/255), # Bleu\n# (75/255, 255/255, 9/255) # Vert\n# ]\n# # Create the colormap from my personnalized colors\n# my_cmap = LinearSegmentedColormap.from_list('topo_basic', topo_colors)\n\n# *** JET colormap (internet) ***\ncmap = plt.cm.jet # define the colormap\n# Extract all colors from the .jet map\ncmaplist = [cmap(i) for i in range(cmap.N)]\n# Force the first color entry to be white\ncmaplist[0] = (1, 1, 1, 1.0)\n# Create the new map\ncmap = LinearSegmentedColormap.from_list(\n 'Custom cmap', cmaplist, cmap.N)\n\n# Show image\nfig = plt.figure(splitName)\nplt.imshow(access, cmap=cmap)\nplt.colorbar(extend = 'both')\nplt.show()\n","repo_name":"CedricLeon/scripts","sub_path":"python/printData/print_TPGAcesses.py","file_name":"print_TPGAcesses.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"15593867412","text":"#!flask/bin/python\nfrom flask import Flask, jsonify, request\nfrom random import uniform\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return jsonify({'ok': True}), 200\n\n\n@app.route('/dimensions', methods=['POST'])\ndef calculate_dimensions():\n\n json = request.get_json(silent=True)\n errors = check_params(json)\n\n if len(errors) > 0:\n response = jsonify(errors)\n response.status_code = 400\n return response\n\n dimensions = random_dimensions()\n\n return jsonify(dimensions), 200\n\n\n@app.errorhandler(404)\ndef not_found(e):\n return jsonify({'error': 'Not found'}), 404\n\n\n@app.errorhandler(405)\ndef method_not_allowed(e):\n return jsonify({'error': 'Method not allowed'}), 405\n\n\ndef check_params(json):\n errors = []\n\n if not json:\n errors.append(error('Incorrect JSON body'))\n else:\n if 'image' not in json:\n errors.append(error('Missing parameter', 'image'))\n elif not decode_image(json['image']):\n errors.append(error('Invalid base64 representation', 'image'))\n\n return errors\n\n\ndef decode_image(img):\n try:\n img = img.replace('data:image/png;base64,', '')\n img.decode('base64')\n return True\n except:\n return False\n\n\ndef random_dimensions():\n return {\n 'height': round(uniform(0, 10), 2),\n 'length': round(uniform(0, 20), 2),\n 'weight': round(uniform(0, 15), 2)\n }\n\n\ndef error(message, field=None):\n msg = {\n 'message': message\n }\n if field:\n msg['field'] = field\n return msg\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"mathifonseca/sizer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"10562023353","text":"import sys\nimport pytest\nimport unittest\nimport boto\nfrom boto.ec2.autoscale.launchconfig import LaunchConfiguration\nfrom boto.ec2.autoscale.group import AutoScalingGroup\nfrom boto.ec2.cloudwatch.alarm import MetricAlarm\nfrom moto import mock_autoscaling_deprecated\nfrom moto import mock_ec2_deprecated\nfrom moto import mock_elb_deprecated\nfrom moto.cloudwatch import mock_cloudwatch_deprecated\n\nfrom License2Deploy.rolling_deploy import RollingDeploy\nfrom License2Deploy.AWSConn import AWSConn\n\n\nclass RollingDeployTest(unittest.TestCase):\n\n autoscaling_group_name = 'autoscaling_group_name'\n launch_configuration_name = 'launch_configuration_name'\n load_balancer_name = 'load_balancer_name'\n\n GMS_LAUNCH_CONFIGURATION_STG = 'server-backend-stg-servergmsextenderLCstg-46TIE5ZFQTLB'\n GMS_LAUNCH_CONFIGURATION_PRD = 'server-backend-prd-servergmsextenderLCprd-46TIE5ZFQTLB'\n GMS_AUTOSCALING_GROUP_STG = 'server-backend-stg-servergmsextenderASGstg-3ELOD1FOTESTING'\n GMS_AUTOSCALING_GROUP_PRD = 'server-backend-prd-servergmsextenderASGprd-3ELOD1FOTESTING'\n\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n @mock_ec2_deprecated\n def setUp(self):\n self.setUpELB()\n self.rolling_deploy = RollingDeploy('stg', 'server-gms-extender', '0', 'ami-abcd1234', None, './regions.yml', force_redeploy=True)\n\n def get_autoscaling_configurations(self, launch_configuration_name, autoscaling_group_name):\n return {\n self.autoscaling_group_name: autoscaling_group_name,\n self.launch_configuration_name: launch_configuration_name\n }\n\n @mock_autoscaling_deprecated\n def setUpAutoScaleGroup(self, configurations, env=\"stg\"):\n conn = boto.connect_autoscale()\n for configuration in configurations:\n config = LaunchConfiguration(\n name=configuration[self.launch_configuration_name],\n image_id='ami-abcd1234',\n instance_type='m1.medium',\n )\n load_balancer_name = self.load_balancer_name\n group = AutoScalingGroup(\n name=configuration[self.autoscaling_group_name],\n availability_zones=['us-east-1a'],\n default_cooldown=300,\n desired_capacity=2,\n health_check_period='0',\n health_check_type=\"EC2\",\n max_size=10,\n min_size=2,\n launch_config=config,\n load_balancers=[load_balancer_name],\n vpc_zone_identifier='subnet-1234abcd',\n termination_policies=[\"Default\"],\n )\n conn.create_launch_configuration(config)\n conn.create_auto_scaling_group(group)\n\n @mock_elb_deprecated\n def setUpELB(self, env='stg'):\n conn_elb = boto.connect_elb()\n zones = ['us-east-1a']\n ports = [(80, 8080, 'http')]\n load_balancer_name = self.load_balancer_name\n conn_elb.create_load_balancer(load_balancer_name, zones, ports)\n balancers = conn_elb.get_all_load_balancers(load_balancer_names=[load_balancer_name])\n self.assertEqual(balancers[0].name, load_balancer_name)\n\n @mock_ec2_deprecated\n @mock_elb_deprecated\n def setUpEC2(self, tag=True):\n self.setUpELB()\n conn_elb = boto.connect_elb()\n conn = boto.connect_ec2()\n instance_id_list = []\n reservation = conn.run_instances('ami-1234abcd', min_count=2, private_ip_address=\"10.10.10.10\")\n instance_ids = reservation.instances\n for instance in instance_ids:\n if tag:\n instance.add_tag('BUILD', 0)\n instance_id_list.append(instance.id)\n elb = conn_elb.get_all_load_balancers(load_balancer_names=[self.load_balancer_name])[0]\n elb.register_instances(instance_id_list)\n elb_ids = [instance.id for instance in elb.instances]\n self.assertEqual(instance_id_list.sort(), elb_ids.sort())\n\n return [conn, instance_id_list]\n\n 
@mock_cloudwatch_deprecated\n def setUpCloudWatch(self, instance_ids, env=\"stg\"):\n alarm = MetricAlarm(\n name = \"servergmsextender_CloudWatchAlarm\" + env,\n namespace = \"AWS/EC2\",\n metric = \"CPUUtilization\",\n comparison = \">=\",\n threshold = \"90\",\n evaluation_periods = 1,\n statistic = \"Average\",\n period = 300,\n dimensions = {'InstanceId': instance_ids},\n alarm_actions=['arn:alarm'],\n ok_actions=['arn:ok']\n )\n watch_conn = boto.connect_cloudwatch()\n watch_conn.put_metric_alarm(alarm)\n\n @mock_cloudwatch_deprecated\n def setUpCloudWatchWithWrongConfig(self, instance_ids, env=\"stg\"):\n alarm = MetricAlarm(\n name = \"servergmsextender_CloudWatchAlarm\" + env,\n namespace = \"AWS/EC2\",\n metric = \"CPUUtilization\",\n comparison = \"GreaterThanThreshold\", # wrong configuration that would generate error.\n threshold = \"90\",\n evaluation_periods = 1,\n statistic = \"Average\",\n period = 300,\n dimensions = {'InstanceId': instance_ids},\n alarm_actions=['arn:alarm'],\n ok_actions=['arn:ok']\n )\n watch_conn = boto.connect_cloudwatch()\n watch_conn.put_metric_alarm(alarm)\n\n @mock_cloudwatch_deprecated\n def test_retrieve_project_cloudwatch_alarms(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n cloud_watch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms()\n print(cloud_watch_alarms)\n self.assertEqual(1, len(cloud_watch_alarms))\n\n @mock_cloudwatch_deprecated\n def test_retrieve_project_cloudwatch_alarms_with_no_valid_alarms(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n self.rolling_deploy.env = \"wrong_env_prd\" # set a wrong environment\n cloud_watch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms()\n self.assertEqual(0, len(cloud_watch_alarms))\n\n @mock_cloudwatch_deprecated\n def test_retrieve_project_cloudwatch_alarms_with_wrong_config(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatchWithWrongConfig(instance_ids)\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.retrieve_project_cloudwatch_alarms())\n\n @mock_cloudwatch_deprecated\n def test_enable_project_cloudwatch_alarms_Error(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.enable_project_cloudwatch_alarms())\n\n @mock_cloudwatch_deprecated\n def test_disable_project_cloudwatch_alarms_Error(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.disable_project_cloudwatch_alarms())\n\n @mock_ec2_deprecated\n def test_tag_ami(self):\n conn = self.setUpEC2()[0]\n reservation = conn.run_instances('ami-1234xyz1', min_count=1)\n instance_ids = reservation.instances\n conn.create_image(instance_ids[0].id, \"test-ami\", \"this is a test ami\")\n _ami_ids = conn.get_all_images()\n _ami_id = _ami_ids[0].id\n self.rolling_deploy = RollingDeploy('stg', 'server-gms-extender', '0', _ami_id, None, './regions.yml')\n self.rolling_deploy.tag_ami(str(_ami_id), 'stg')\n self.rolling_deploy.tag_ami(str(_ami_id), 'qa')\n self.rolling_deploy.tag_ami(str(_ami_id), 'qa')\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.tag_ami('blargness', 'qa'))\n\n @mock_ec2_deprecated\n def test_load_config(self):\n self.assertEqual(AWSConn.load_config('regions.yml').get('qa'), 'us-west-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('stg'), 'us-east-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('prd'), 
'us-east-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('default'), 'us-west-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('zero'), None)\n\n @mock_ec2_deprecated\n def test_load_config(self):\n self.assertEqual(AWSConn.determine_region('get-shwifty'), 'us-west-1')\n\n @mock_ec2_deprecated\n def test_wait_ami_availability(self):\n conn = self.setUpEC2()[0]\n inst_ids = self.setUpEC2()[1]\n conn.create_image(inst_ids[0], \"test-ami\", \"this is a test ami\")\n ami_ids = conn.get_all_images()\n ami_id = ami_ids[0]\n self.assertEqual(str(ami_id), str(self.rolling_deploy.get_ami_id_state(ami_id.id)))\n self.assertTrue(self.rolling_deploy.wait_ami_availability(ami_id.id))\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.wait_ami_availability('bad-id')) #Will raise exception because ami can't be found\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.wait_ami_availability(ami_id.id, -100)) #Will raise exception as time limit is over\n\n @mock_ec2_deprecated\n @mock_elb_deprecated\n def test_confirm_lb_has_only_new_instances(self):\n instance_ids = self.setUpEC2()[1]\n self.rolling_deploy.load_balancer = self.load_balancer_name\n self.assertEqual(len(instance_ids), len(self.rolling_deploy.confirm_lb_has_only_new_instances())) #Return All LB's with the proper build number\n\n @mock_ec2_deprecated\n @mock_elb_deprecated\n def test_lb_healthcheck(self):\n instance_ids = self.setUpEC2()[1]\n self.rolling_deploy.load_balancer = self.load_balancer_name\n self.assertTrue(self.rolling_deploy.lb_healthcheck(instance_ids)) #Return InService for all instances in ELB\n # Below doesn't work as I am unable to change the instance state. Need to modify elb_healthcheck method and also modify instance_health template.\n ## https://github.com/spulec/moto/blob/master/moto/elb/responses.py#L511 ##\n ## https://github.com/spulec/moto/blob/master/moto/elb/responses.py#L219 ##\n #self.assertRaises(SystemExit, lambda: self.rolling_deploy.lb_healthcheck(instance_ids, 1, 1)) #Return OutOfService for the first instance in the ELB which will raise an exit call\n\n @mock_autoscaling_deprecated\n def test_get_group_info(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n group = self.rolling_deploy.get_group_info([self.GMS_AUTOSCALING_GROUP_STG])[0]\n self.assertEqual(group.name, self.GMS_AUTOSCALING_GROUP_STG)\n\n @mock_autoscaling_deprecated\n def test_failure_get_group_info(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.get_group_info('cool'))\n\n @mock_autoscaling_deprecated\n def test_get_autoscale_group_name_stg(self):\n autoscaling_configurations = list()\n autoscaling_configurations.append(self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG))\n autoscaling_configurations.append(self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_PRD, self.GMS_AUTOSCALING_GROUP_PRD))\n self.setUpAutoScaleGroup(autoscaling_configurations)\n group = self.rolling_deploy.get_autoscale_group_name()\n self.assertEqual(group, self.GMS_AUTOSCALING_GROUP_STG)\n self.assertNotEqual(group, self.GMS_AUTOSCALING_GROUP_PRD)\n\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_get_autoscale_group_name_prd(self):\n self.setUpELB(env='prd')\n self.rolling_deploy = RollingDeploy('prd', 
'server-gms-extender', '0', 'ami-test212', None, './regions.yml')\n autoscaling_configurations = list()\n autoscaling_configurations.append(self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_PRD, self.GMS_AUTOSCALING_GROUP_PRD))\n self.setUpAutoScaleGroup(autoscaling_configurations, env='prd')\n group = self.rolling_deploy.get_autoscale_group_name()\n self.assertEqual(group, self.GMS_AUTOSCALING_GROUP_PRD)\n self.assertNotEqual(group, self.GMS_AUTOSCALING_GROUP_STG)\n\n @mock_autoscaling_deprecated\n def test_calculate_autoscale_desired_instance_count(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n increase = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'increase')\n decrease = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'decrease')\n self.assertEqual(increase, 4)\n self.assertEqual(decrease, 1)\n\n @mock_autoscaling_deprecated\n def test_calculate_autoscale_desired_instance_count_failure(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'nothing'))\n\n @mock_ec2_deprecated\n def test_get_instance_ip_addrs(self):\n self.setUpEC2()\n self.rolling_deploy.get_instance_ip_addrs(self.setUpEC2()[1])\n self.rolling_deploy.log_instances_ips(self.setUpEC2()[1], 'group')\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.get_instance_ip_addrs(['blah', 'blarg']))\n\n @mock_ec2_deprecated\n def test_is_redeploy(self):\n self.setUpEC2()\n self.assertTrue(self.rolling_deploy.is_redeploy())\n\n @mock_ec2_deprecated\n def test_is_redeploy_fails(self):\n self.setUpEC2(tag=False)\n with pytest.raises(SystemExit):\n self.rolling_deploy.is_redeploy()\n\n def test_stop_deploy(self):\n with pytest.raises(SystemExit):\n self.rolling_deploy.stop_deploy('error!')\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_get_all_instance_ids(self):\n self.setUpELB()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n reservation = conn.run_instances('ami-1234abcd', min_count=2, private_ip_address=\"10.10.10.10\")\n instance_ids = reservation.instances\n rslt = self.rolling_deploy.get_all_instance_ids(self.GMS_AUTOSCALING_GROUP_STG)\n self.assertEqual(len(instance_ids), len(rslt))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_validate_instance_list(self):\n self.setUpELB()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n reservation = conn.run_instances('ami-1234abcd', min_count=2, private_ip_address=\"10.10.10.10\")\n instances = reservation.instances\n self.assertTrue(self.rolling_deploy.validate_instance_list(instances))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_failure_validate_instance_list(self):\n instances = []\n self.assertRaises(Exception, lambda: self.rolling_deploy.validate_instance_list(instances))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n def test_get_instance_ids_by_requested_build_tag(self):\n 
self.setUpEC2()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n new_inst = []\n res_ids = conn.get_all_instances()\n for i_id in res_ids:\n for name in i_id.instances:\n if [y for y in name.tags if y == 'BUILD' and name.tags['BUILD'] == '0']:\n new_inst.append(name.id)\n self.rolling_deploy.new_desired_capacity = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'increase')\n\n self.assertEqual(len(self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 0)), 2)\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 1))\n\n self.rolling_deploy.original_instance_ids = list(new_inst)\n self.rolling_deploy.force_redeploy = False\n self.assertEqual(len(self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 0)), 2)\n self.rolling_deploy.force_redeploy = True\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 0))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n def test_get_instance_ids_by_requested_build_tag_race_condition(self):\n self.setUpEC2()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n new_inst = []\n res_ids = conn.get_all_instances()\n for i_id in res_ids:\n for name in i_id.instances:\n if [y for y in name.tags if y == 'BUILD' and name.tags['BUILD'] == '0']:\n new_inst.append(name.id)\n break\n self.rolling_deploy.force_redeploy = True\n self.rolling_deploy.new_desired_capacity = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'increase')\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 1))\n\n\n @mock_ec2_deprecated\n def test_get_instance_ids_by_requested_build_tag_failure(self):\n self.setUpEC2()\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag([], 0))\n\n @mock_autoscaling_deprecated\n def test_set_autoscale_instance_desired_count(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n self.assertTrue(self.rolling_deploy.set_autoscale_instance_desired_count(4, self.GMS_AUTOSCALING_GROUP_STG))\n\n @mock_ec2_deprecated\n def test_wait_for_new_instances(self):\n instance_ids = self.setUpEC2()[1]\n self.assertEqual(self.rolling_deploy.wait_for_new_instances(instance_ids, 9), None)\n\n @mock_ec2_deprecated\n def test_wait_for_new_instances_failure(self):\n conn = self.setUpEC2()[0]\n instance_ids = self.setUpEC2()[1]\n reservations = conn.get_all_instances()\n reservations[0].instances[0].stop()\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.wait_for_new_instances(instance_ids, 3, 1))\n\n def test_set_autoscale_instance_desired_count_failure(self):\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.set_autoscale_instance_desired_count(4, self.GMS_AUTOSCALING_GROUP_STG))\n\n def test_double_autoscale_instance_count(self):\n self.assertEqual(self.rolling_deploy.double_autoscale_instance_count(2), 4)\n\n def test_decrease_autoscale_instance_count(self):\n self.assertEqual(self.rolling_deploy.decrease_autoscale_instance_count(4), 
2)\n","repo_name":"dandb/License2Deploy","sub_path":"tests/rolling_deploy_test.py","file_name":"rolling_deploy_test.py","file_ext":"py","file_size_in_byte":18154,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"}
+{"seq_id":"42755615175","text":"from p5 import *\n\nclass Matrix:\n ## first constructor\n def __init__(self,r,c):\n self.rows = r\n self.cols = c\n self.matrix = [[0.] * self.cols for i in range(self.rows)]\n\n def returnMatrix(self):\n return self.matrix\n \n ## second constructor\n @classmethod\n def float(cls,m):\n rows = len(m)\n cols = len(m[0])\n return cls(rows,cols)\n \n @classmethod\n def initializedVector(cls,vector):\n vlen = len(vector)\n m = cls(vlen,1)\n for i in range(0,vlen):\n m.matrix[i][0] = vector[i]\n return m\n \n def dot(self,n):\n result = []\n result = Matrix(self.rows,n.cols)\n \n if self.cols == n.rows :\n for i in range(self.rows) :\n for j in range(n.cols):\n sum = 0 \n for k in range(self.cols):\n sum += self.matrix[i][k]*n.matrix[k][j]\n result.matrix[i][j] = sum\n return result\n \n def randomize(self):\n for i in range(self.rows):\n for j in range(self.cols):\n self.matrix[i][j] = random_uniform(-1,1)\n \n def matrixToVector(self):\n arr = []\n for i in range(self.rows):\n for j in range(self.cols):\n arr.append(self.matrix[i][j])\n return arr\n \n def addBias(self):\n n = Matrix(self.rows+1,1)\n for i in range(self.rows):\n n.matrix[i][0] = self.matrix[i][0]\n n.matrix[self.rows][0] = 1.\n return n\n \n def activate(self):\n n = Matrix(self.rows,self.cols)\n for i in range(self.rows):\n for j in range(self.cols):\n n.matrix[i][j] = self.relu(self.matrix[i][j])\n return n\n \n @staticmethod\n def relu(x):\n return max(0,x)\n \n def mutate(self,mutationRate):\n for i in range(self.rows) :\n for j in range(self.cols) :\n rand = random_uniform(1)\n if rand < mutationRate :\n self.matrix[i][j] += random_gaussian()/5\n \n if self.matrix[i][j] > 1 :\n self.matrix[i][j] = 1\n if self.matrix[i][j] < -1 :\n self.matrix[i][j] = -1\n \n def crossover(self,partner):\n child = Matrix(self.rows,self.cols)\n \n randR = floor(random_uniform(self.rows))\n randC = floor(random_uniform(self.cols))\n \n for i in range(self.rows):\n for j in range(self.cols):\n if i < randR or (i == randR and j <= randC) :\n child.matrix[i][j] = self.matrix[i][j]\n else:\n child.matrix[i][j] = partner.matrix[i][j];\n return child\n \n def clone(self):\n clone = Matrix(self.rows,self.cols)\n for i in range(self.rows):\n for j in range(self.cols):\n clone.matrix[i][j] = self.matrix[i][j]\n return clone\n \n\n \n\n\n\n \n \n \n \n \n \n \n \n \n\n \n","repo_name":"ElirazO/IronDomeAI","sub_path":"Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"75285391289","text":"from django.urls import path\nfrom . import views\n\napp_name = 'front'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path(r'login/', views.login, name='login'),\n path('work/', views.work, name='work'),\n path('refreshlog/', views.refresh_log, name='refresh_log'),\n path('logout/', views.logout, name='logout'),\n path('connect/admin/', views.connect_admin, name='connect_admin'),\n]","repo_name":"bopopescu/refreshHuaweiCdn","sub_path":"front/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"3758257464","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport random\n\ndef upper_confidence_bound(file):\n dataset = pd.read_csv(file)\n d = 10\n N = 10000\n ads_selected = []\n numbers_of_selections = [0] * d\n sums_of_rewards = [0] * d\n total_reward = 0\n for n in range(0, N):\n ad = 0\n max_upper_bound = 0\n for i in range(0, d):\n if (numbers_of_selections[i] > 0):\n upper_bound = (sums_of_rewards[i] / numbers_of_selections[i]) + \\\n (math.sqrt(3/2 * math.log(n + 1) / numbers_of_selections[i]))\n else:\n upper_bound = 1e400\n if upper_bound > max_upper_bound:\n max_upper_bound = upper_bound\n ad = i\n ads_selected.append(ad)\n numbers_of_selections[ad] += 1\n reward = dataset.values[n, ad]\n sums_of_rewards[ad] += reward\n total_reward += reward\n\n # Visualizing results\n plt.hist(ads_selected)\n plt.title('Histogram of Ad Selections')\n plt.xlabel('Ads')\n plt.ylabel('Number of times each ad selected')\n plt.savefig('Images/UCB.png')\n plt.show()\n\ndef thompson_sampling(file):\n dataset = pd.read_csv(file)\n\n # Implementing Thompson Sampling\n d = 10\n N = 10000\n ads_selected = []\n number_of_rewards1 = [0] * d\n number_of_rewards0 = [0] * d\n total_rewards = 0\n for n in range(0, N):\n ad = 0\n max_random = 0\n for i in range(0, d):\n random_beta = random.betavariate(number_of_rewards1[i] + 1, number_of_rewards0[i] + 1)\n if random_beta > max_random:\n max_random = random_beta\n ad = i\n ads_selected.append(ad)\n reward = dataset.values[n, ad]\n if reward == 1:\n number_of_rewards1[ad] += 1\n else:\n number_of_rewards0[ad] += 1\n total_rewards += reward\n\n # Visualize Histogram of results\n plt.hist(ads_selected)\n plt.title('Histogram of Ad Selections')\n plt.xlabel('Ads')\n plt.ylabel('Number of times each ad selected')\n plt.savefig('Images/Thompson_Sampling.png')\n plt.show()","repo_name":"jmgccp4eva/machinelearningaipython","sub_path":"Reinforcement_Learning.py","file_name":"Reinforcement_Learning.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"42163718402","text":"\"\"\"\nThis example illustrates how to manually use a temporary array manager (if you must).\n\"\"\"\n\nimport numpy\nfrom reikna.cluda import dtypes, any_api\nfrom reikna.cluda.tempalloc import ZeroOffsetManager\n\n\napi = any_api()\nthr = api.Thread.create()\n\n\ndef demo_array_dependencies():\n\n # ZeroOffsetManager attempts to pack temporary allocations\n # in a collection of real allocations with minimal total size.\n # All the virtual allocations start at the beginning of the real allocations.\n\n # Create a manager that will try to minimize the total size of real allocations\n # every time a temporary allocation occurs, or a temporary array is freed.\n # Note that this may involve re-pointing a temporary array to a different part of memory,\n # so all of the data in it is lost.\n temp_manager = ZeroOffsetManager(thr, pack_on_alloc=True, pack_on_free=True)\n\n # Alternatively one can pass `False` to these keywords and call `.pack()` manually.\n # This can be useful if a lot of allocations are happening in a specific place at once.\n\n # Create two arrays that do not depend on each other.\n # This means the manager will allocate a single (200, int32) real array,\n # and point both `a1` and `a2` to its beginning.\n a1 = temp_manager.array(100, numpy.int32)\n a2 = temp_manager.array(200, numpy.int32)\n\n # You can see that the total size of virtual arrays is 1200,\n # but the total size of real arrays is only 800 (the size of the larger array).\n print(\"Allocated a1 = (100, int32) and a2 = (200, int32)\")\n print(temp_manager._statistics())\n\n # Now we allocate a dependent array.\n # This means that the real memory `a3` points to cannot intersect with that of `a1`.\n # If we could point temporary arrays at any address within real allocations,\n # we could fit it into the second half of the existing real allocation.\n # But `ZeroOffsetManager` cannot do that, so it has to create another allocation.\n a3 = temp_manager.array(100, numpy.int32, dependencies=[a1])\n\n print(\"Allocated a3 = (100, int32) depending on a1\")\n print(temp_manager._statistics())\n\n # Now that we deallocated `a1`, `a3` can now fit in the same real allocation as `a2`,\n # so one of the real allocations will be removed.\n del a1\n\n print(\"Freed a1\")\n print(temp_manager._statistics())\n\n\nclass MyComputation:\n\n def __init__(self, temp_manager):\n self.temp_array = temp_manager.array(100, numpy.int32)\n\n # The magic property containing temporary arrays used\n self.__tempalloc__ = [self.temp_array]\n\n def __call__(self, array1, array2):\n # a sequence of kernel calls using `self.temp_array` to store some intermediate results\n pass\n\n\ndef demo_object_dependencies():\n\n temp_manager = ZeroOffsetManager(thr, pack_on_alloc=True, pack_on_free=True)\n\n # A `MyComputation` instance creates a temporary array for internal usage\n comp = MyComputation(temp_manager)\n\n print(\"MyComputation created\")\n print(temp_manager._statistics())\n\n # Create another temporary array whose usage does not intersect with `MyComputation` usage.\n # This means that if `comp` is called, the contents of `a1` may be rewritten.\n a1 = temp_manager.array(100, numpy.int32)\n\n # It is put in the same real allocation as the temporary array of `comp`.\n print(\"Allocated a1 = (100, int32)\")\n print(temp_manager._statistics())\n\n # Now let's say we want to put the result of `comp` call somewhere.\n # This means we want to make sure it does not occupy the same memory\n # as any of the temporary arrays 
in `comp`, so we are passing `comp` as a dependency.\n # It will pick up whatever `comp` declared in its `__tempalloc__` attribute.\n result = temp_manager.array(100, numpy.int32, dependencies=[comp])\n\n # You can see that a new real allocation was created to host the result.\n print(\"Allocated result = (100, int32)\")\n print(temp_manager._statistics())\n\n\nif __name__ == '__main__':\n demo_array_dependencies()\n demo_object_dependencies()\n","repo_name":"fjarri/reikna","sub_path":"examples/demo_tempalloc.py","file_name":"demo_tempalloc.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"77"}
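The demo record above notes that packing can be deferred and triggered manually instead of running on every allocation. A short sketch of that variant, reusing thr and numpy from the demo's setup; the array sizes are arbitrary.

# Deferred packing: handy when many temporaries are allocated in a burst.
temp_manager = ZeroOffsetManager(thr, pack_on_alloc=False, pack_on_free=False)
bufs = [temp_manager.array(100 * (i + 1), numpy.int32) for i in range(5)]
temp_manager.pack()               # consolidate real allocations once, at the end
print(temp_manager._statistics())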
+{"seq_id":"69995453688","text":"from django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\n\n# Create your models here.\n\n\nclass User(models.Model):\n FAMILY_ROLE = [('엄마', '엄마'), ('아빠', '아빠'),]\n username = models.CharField(max_length=10, unique=True)\n role = models.CharField(max_length=5, choices=FAMILY_ROLE)\n \n def __str__(self):\n return self.username\n\n\nclass SetLocation(models.Model):\n user_id = models.OneToOneField('User', on_delete=models.CASCADE)\n homeX = models.FloatField()\n homeY = models.FloatField()\n companyX = models.FloatField()\n companyY = models.FloatField()\n\n\nclass Location(models.Model):\n user_id = models.ForeignKey('User', on_delete=models.CASCADE)\n geoX = models.FloatField()\n geoY = models.FloatField()\n timeStamp = models.DateTimeField(auto_now_add=True)\n onHomeRoad = models.IntegerField(default=0)\n onCompanyRoad = models.IntegerField(default=0)\n\n\nclass Alert(models.Model):\n user_id = models.ForeignKey('User', on_delete=models.CASCADE)\n alertType = models.IntegerField()\n timeStamp = models.DateTimeField(auto_now_add=True)\n","repo_name":"sseonnn/FAFA","sub_path":"Back-End/FAFA/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22212128210","text":"import time\nimport numpy as np\nimport torch\nfrom torch.nn.functional import binary_cross_entropy_with_logits\nfrom torch.utils.data import DataLoader, RandomSampler\nfrom rmp_nav.common.utils import save_model, load_model, module_grad_stats\nimport tabulate\nimport os\n\n\ndef _load_weights(model_file, nets, net_opts):\n state = load_model(os.path.dirname(model_file),\n os.path.basename(model_file), load_to_cpu=True)\n epoch = int(state['epoch'])\n\n for name, net in nets.items():\n net.load_state_dict(state['nets'][name])\n\n for name, opt in net_opts.items():\n opt.load_state_dict(state['optims'][name])\n # Move the parameters stored in the optimizer into gpu\n for opt_state in opt.state.values():\n for k, v in opt_state.items():\n if torch.is_tensor(v):\n opt_state[k] = v.to(device='cuda')\n return epoch\n\n\ndef _save_model(nets, net_opts, epoch, global_args, model_file):\n state = {\n 'epoch': epoch,\n 'global_args': global_args,\n 'optims': {\n name: opt.state_dict() for name, opt in net_opts.items()\n },\n 'nets': {\n name: net.state_dict() for name, net in nets.items()\n }\n }\n save_model(state, epoch, '', model_file)\n\n\ndef train_multiframedst(nets, net_opts, dataset, vis, global_args):\n (\n model_file,\n max_epochs,\n batch_size,\n n_worker,\n log_interval,\n vis_interval,\n save_interval,\n train_device,\n resume,\n weight_loss,\n weight_loss_min_clip,\n model_variant,\n proximity_label,\n heading_diff_label\n ) = [global_args[_] for _ in ['model_file',\n 'max_epochs',\n 'batch_size',\n 'n_dataset_worker',\n 'log_interval',\n 'vis_interval',\n 'save_interval',\n 'train_device',\n 'resume',\n 'weight_loss',\n 'weight_loss_min_clip',\n 'model_variant',\n 'proximity_label',\n 'heading_diff_label']]\n\n epoch = 0\n if resume:\n epoch = _load_weights(model_file, nets, net_opts)\n torch.manual_seed(231239 + epoch)\n print('loaded saved state. 
epoch: %d' % epoch)\n\n # FIXME: hack to mitigate the bug in torch 1.1.0's schedulers\n if epoch <= 1:\n last_epoch = epoch - 1\n else:\n last_epoch = epoch - 2\n\n net_scheds = {\n name: torch.optim.lr_scheduler.StepLR(\n opt,\n step_size=global_args['lr_decay_epoch'],\n gamma=global_args['lr_decay_rate'],\n last_epoch=last_epoch)\n for name, opt in net_opts.items()\n }\n\n n_samples = global_args['samples_per_epoch']\n\n while True:\n print('===== epoch %d =====' % epoch)\n\n sampler = RandomSampler(dataset, True, n_samples)\n\n loader = DataLoader(dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=n_worker,\n pin_memory=True,\n drop_last=True)\n\n last_log_time = time.time()\n\n for idx, (batch_src_imgs, batch_dst_imgs, batch_waypoints, batch_extras) in enumerate(loader):\n for _, opt in net_opts.items():\n opt.zero_grad()\n\n if idx % vis_interval == 0:\n imgs = []\n for i in range(3):\n src_img = batch_src_imgs[i].data.numpy()\n dst_imgs = batch_dst_imgs[i].data.numpy()\n imgs.append(src_img[None])\n imgs.append(dst_imgs)\n imgs = np.concatenate(imgs, axis=0)\n vis.images(imgs, nrow=(dst_imgs.shape[0] + 1),\n win='batch_imgs', opts={'title': 'src-dst'})\n\n batch_src_imgs = batch_src_imgs.to(device=train_device, non_blocking=True)\n batch_dst_imgs = batch_dst_imgs.to(device=train_device, non_blocking=True)\n batch_waypoints = batch_waypoints.to(device=train_device, non_blocking=True)\n\n for k, v in batch_extras.items():\n batch_extras[k] = v.to(device=train_device, non_blocking=True)\n\n batch_size, win_size, c, h, w = batch_dst_imgs.size()\n\n if model_variant == 'attention':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(-1, c, h, w)).view(\n batch_size, win_size, -1) # batch_size x win_size x dim\n\n # FIXME: disabled attention temporarily\n # dst_terminal_features = dst_features[:, -1, :]\n # attention = nets['attention_encoder'](torch.cat([src_features,\n # dst_terminal_features], dim=1))\n dst_temporal_features = nets['seq_encoder'](dst_features)\n final_features = torch.cat([src_features, dst_temporal_features], dim=1)\n pred_waypoints = nets['wp_regressor'](final_features)\n\n elif model_variant == 'concat_early':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(-1, c, h, w)).view(\n batch_size, win_size, -1) # batch_size x win_size x dim\n\n src_dst_features = torch.cat([src_features.unsqueeze(1).expand_as(dst_features),\n dst_features], dim=-1)\n temporal_features = nets['seq_encoder'](src_dst_features)\n pred_waypoints = nets['wp_regressor'](temporal_features)\n\n elif model_variant == 'future':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(-1, c, h, w)).view(\n batch_size, win_size, -1) # batch_size x win_size x dim\n\n win_size = dst_features.size(1) // 2\n\n past_features = dst_features[:, :win_size + 1]\n future_features = dst_features[:, win_size:]\n\n past_temporal_features = nets['seq_encoder'](past_features)\n future_temporal_features = nets['seq_encoder'](future_features)\n\n final_features = torch.cat([src_features,\n past_temporal_features,\n future_temporal_features], dim=1)\n pred_waypoints = nets['wp_regressor'](final_features)\n\n elif model_variant == 'future_stack':\n img_stack = torch.cat([batch_src_imgs.unsqueeze(1), batch_dst_imgs], dim=1)\n features = nets['stack_encoder'](img_stack)\n pred_waypoints = nets['wp_regressor'](features)\n\n elif 
model_variant == 'future_stack_v2':\n # Only stack dst images.\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['stack_encoder'](batch_dst_imgs)\n features = torch.cat([src_features, dst_features], dim=-1)\n pred_waypoints = nets['wp_regressor'](features)\n\n elif model_variant == 'future_pair':\n batch_src_imgs2 = batch_src_imgs.unsqueeze(1).expand_as(batch_dst_imgs).contiguous()\n pair_features = nets['img_pair_encoder'](\n batch_src_imgs2.view(batch_size * win_size, c, h, w),\n batch_dst_imgs.view(batch_size * win_size, c, h, w)).view(batch_size, -1)\n pred_waypoints = nets['wp_regressor'](pair_features)\n if proximity_label:\n pred_proximity = nets['proximity_regressor'](pair_features)\n if heading_diff_label:\n pred_heading_diff = nets['heading_diff_regressor'](pair_features)\n\n elif model_variant == 'future_pair_conv':\n batch_src_imgs2 = batch_src_imgs.unsqueeze(1).expand_as(batch_dst_imgs).contiguous()\n pair_features = nets['img_pair_encoder'](\n batch_src_imgs2.view(batch_size * win_size, c, h, w),\n batch_dst_imgs.view(batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n conv_feature = nets['conv_encoder'](pair_features.transpose(1, 2))\n pred_waypoints = nets['wp_regressor'](conv_feature)\n if proximity_label:\n pred_proximity = nets['proximity_regressor'](conv_feature)\n if heading_diff_label:\n pred_heading_diff = nets['heading_diff_regressor'](conv_feature)\n\n elif model_variant == 'future_pair_featurized':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(\n batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n src_features = src_features.unsqueeze(1).expand_as(dst_features).contiguous()\n pair_features = nets['feature_pair_encoder'](\n src_features.view(batch_size * win_size, -1),\n dst_features.view(batch_size * win_size, -1)).view(batch_size, -1)\n pred_waypoints = nets['wp_regressor'](pair_features)\n\n elif model_variant == 'future_pair_featurized_v2':\n src_features = nets['src_img_encoder'](batch_src_imgs)\n dst_features = nets['dst_img_encoder'](batch_dst_imgs.view(\n batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n src_features = src_features.unsqueeze(1).expand_as(dst_features).contiguous()\n pair_features = nets['feature_pair_encoder'](\n src_features.view(batch_size * win_size, -1),\n dst_features.view(batch_size * win_size, -1)).view(batch_size, -1)\n pred_waypoints = nets['wp_regressor'](pair_features)\n\n elif model_variant == 'raw_control':\n batch_src_imgs2 = batch_src_imgs.unsqueeze(1).expand_as(batch_dst_imgs).contiguous()\n pair_features = nets['img_pair_encoder'](\n batch_src_imgs2.view(batch_size * win_size, c, h, w),\n batch_dst_imgs.view(batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n conv_feature = nets['conv_encoder'](pair_features.transpose(1, 2))\n\n velocity = batch_extras['velocity'].to(device=train_device, non_blocking=True)\n angular_vel = batch_extras['angular_vel'].to(device=train_device, non_blocking=True)\n\n all_features = torch.cat([conv_feature, velocity, angular_vel], dim=-1)\n\n # Note that pred_waypoints here are actually raw controls.\n pred_waypoints = nets['wp_regressor'](all_features)\n\n if proximity_label:\n pred_proximity = nets['proximity_regressor'](conv_feature)\n\n if heading_diff_label:\n pred_heading_diff = nets['heading_diff_regressor'](conv_feature)\n\n else:\n raise RuntimeError('Unknown model variant %s' % model_variant)\n\n l2_loss = torch.sum(torch.pow(pred_waypoints - 
batch_waypoints, 2), dim=1)\n if weight_loss:\n l2_loss *= 1.0 / torch.max(batch_waypoints.norm(p=2, dim=1),\n batch_waypoints.new_tensor(weight_loss_min_clip))\n loss = torch.mean(l2_loss)\n if proximity_label:\n assert pred_proximity.size() == batch_extras['proximity'].size()\n proximity_loss = binary_cross_entropy_with_logits(pred_proximity,\n batch_extras['proximity'])\n loss += proximity_loss\n\n if heading_diff_label:\n assert pred_heading_diff.size() == batch_extras['heading_diff'].size()\n heading_diff_loss = torch.mean(torch.sum(torch.pow(\n pred_heading_diff - batch_extras['heading_diff'], 2), dim=1))\n loss += heading_diff_loss\n\n loss.backward()\n\n for _, opt in net_opts.items():\n opt.step()\n\n if idx % log_interval == 0:\n print('epoch %d batch time %.2f sec loss: %6.2f' % (\n epoch, (time.time() - last_log_time) / log_interval, loss.item()))\n print('learning rate:\\n%s' % tabulate.tabulate([\n (name, opt.param_groups[0]['lr']) for name, opt in net_opts.items()]))\n for name, net in nets.items():\n print('%s grad:\\n%s' % (name, module_grad_stats(net)))\n\n vis.line(X=np.array([epoch * n_samples + idx * batch_size]),\n Y=np.array([loss.item()]),\n win='loss', update='append', opts={'title': 'loss'})\n\n if proximity_label:\n def format(l):\n return '(' + ','.join(['%.2f' % _ for _ in l]) + ')'\n print('proximity:\\n%s' % tabulate.tabulate([\n ['pred'] + [format(_) for _ in torch.sigmoid(pred_proximity[:10]).tolist()],\n ['gt'] + [format(_) for _ in batch_extras['proximity'][:10].tolist()]\n ]))\n vis.line(X=np.array([epoch * n_samples + idx * batch_size]),\n Y=np.array([proximity_loss.item()]),\n win='proximity loss', update='append',\n opts={'title': 'proximity loss'})\n\n if heading_diff_label:\n def format(l):\n return '(' + ','.join(['%.2f' % _ for _ in l]) + ')'\n print('heading_diff:\\n%s' % tabulate.tabulate([\n ['pred'] + [format(_) for _ in pred_heading_diff[:10].tolist()],\n ['gt'] + [format(_) for _ in batch_extras['heading_diff'][:10].tolist()]\n ]))\n vis.line(X=np.array([epoch * n_samples + idx * batch_size]),\n Y=np.array([heading_diff_loss.item()]),\n win='heading_diff loss', update='append',\n opts={'title': 'heading diff loss'})\n\n last_log_time = time.time()\n vis.save([vis.env])\n\n for _, sched in net_scheds.items():\n sched.step()\n\n epoch += 1\n if epoch > max_epochs:\n break\n\n if epoch % save_interval == 0:\n print('saving model...')\n _save_model(nets, net_opts, epoch, global_args, model_file)\n","repo_name":"xymeng/rmp_nav","sub_path":"topological_nav/controller/train_fixture.py","file_name":"train_fixture.py","file_ext":"py","file_size_in_byte":15154,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"77"}
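The training-loop record above optionally re-weights the squared waypoint error by the inverse norm of the ground-truth waypoint, clipped from below. A standalone sketch of that loss; the clip value of 0.5 is an assumption, since in the record it comes from global_args['weight_loss_min_clip'].

import torch

def weighted_waypoint_loss(pred, target, min_clip=0.5):
    # Per-sample squared L2 error, up-weighted for nearby targets and clipped
    # so the weight cannot explode when the ground-truth waypoint is very close.
    l2 = torch.sum((pred - target) ** 2, dim=1)
    weight = 1.0 / torch.clamp(target.norm(p=2, dim=1), min=min_clip)
    return torch.mean(l2 * weight)

pred = torch.tensor([[1.0, 0.0], [0.2, 0.1]])
target = torch.tensor([[1.5, 0.0], [0.1, 0.1]])
print(weighted_waypoint_loss(pred, target))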
+{"seq_id":"15257555545","text":"from flask import Blueprint, render_template, render_template_string, request, flash, jsonify\nfrom flask_login import login_required, current_user\nfrom .models import Note, User\nfrom . import db\nfrom datetime import datetime\nviews = Blueprint('views', __name__)\nimport json\n\n@views.route('/', methods=['POST','GET'])\n@login_required\ndef home():\n if request.method == 'POST':\n note = request.form.get('note')\n\n if len(note) < 1:\n flash('Note is too short!', category='error')\n else:\n now = datetime.now()\n new_note = Note(data=note, date=now, user_id=current_user.id)\n db.session.add(new_note)\n db.session.commit()\n flash('Note added!', category='success')\n\n return render_template('home.html', user=current_user)\n\n@views.route('/admin', methods=['GET', 'POST'])\n@login_required\ndef admin():\n if request.method == 'GET':\n users = User.query.all()\n notes = Note.query.all()\n\n return render_template('admin.html', user=current_user, users=users, notes=notes)\n\n@views.route('/delete-note', methods=['POST'])\ndef delete_note():\n note = json.loads(request.data)\n noteId = note['noteId']\n note = Note.query.get(noteId)\n if note:\n if note.user_id == current_user.id: #POSSIAMO CANCELLARE LE NOTE DA QUEST'IF\n db.session.delete(note)\n db.session.commit()\n \n return jsonify({}) \n\n@views.route('/delete-user', methods=['POST'])\ndef delete_user():\n user = json.loads(request.data)\n userId = user['userId']\n user = User.query.get(userId)\n if user:\n db.session.delete(user)\n db.session.commit()\n return jsonify({})\n\n@views.route('/delete-note-admin', methods=['POST'])\ndef delete_note_admin():\n note = json.loads(request.data)\n noteId = note['noteId']\n note = Note.query.get(noteId)\n if note: \n db.session.delete(note)\n db.session.commit()\n \n return jsonify({}) \n\n@views.route('/user', methods=['GET'])\ndef user():\n username = request.args.get('username', default = current_user.first_name)\n\n template = ''' \n {% extends \"base.html\" %} {% block title %}User panel{% endblock %}\n \n {%block content%}\n
\n User panel
\n My name is: ''' + username + '''
\n My email address is: {{user.email}}
\n